diff --git a/bin/__pycache__/pwiz.cpython-37.pyc b/bin/__pycache__/pwiz.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ae21a1b5604d0fd66fecf28c03b5f8a3e0120a3
Binary files /dev/null and b/bin/__pycache__/pwiz.cpython-37.pyc differ
diff --git a/bin/activate b/bin/activate
new file mode 100644
index 0000000000000000000000000000000000000000..ecb3cda2c3bd90f0a07b0cbeb1ff1e26cfd30a96
--- /dev/null
+++ b/bin/activate
@@ -0,0 +1,78 @@
+# This file must be used with "source bin/activate" *from bash*.
+# You cannot run it directly.
+
+deactivate () {
+    unset -f pydoc >/dev/null 2>&1
+
+    # reset old environment variables
+    # ! [ -z ${VAR+_} ] returns true if VAR is declared at all
+    if ! [ -z "${_OLD_VIRTUAL_PATH+_}" ] ; then
+        PATH="$_OLD_VIRTUAL_PATH"
+        export PATH
+        unset _OLD_VIRTUAL_PATH
+    fi
+    if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then
+        PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME"
+        export PYTHONHOME
+        unset _OLD_VIRTUAL_PYTHONHOME
+    fi
+
+    # This should detect bash and zsh, which have a hash command that must
+    # be called to get it to forget past commands.  Without forgetting
+    # past commands the $PATH changes we made may not be respected
+    if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then
+        hash -r 2>/dev/null
+    fi
+
+    if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then
+        PS1="$_OLD_VIRTUAL_PS1"
+        export PS1
+        unset _OLD_VIRTUAL_PS1
+    fi
+
+    unset VIRTUAL_ENV
+    if [ ! "${1-}" = "nondestructive" ] ; then
+        # Self destruct!
+        unset -f deactivate
+    fi
+}
+
+# unset irrelevant variables
+deactivate nondestructive
+
+VIRTUAL_ENV="/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend"
+export VIRTUAL_ENV
+
+_OLD_VIRTUAL_PATH="$PATH"
+PATH="$VIRTUAL_ENV/bin:$PATH"
+export PATH
+
+# unset PYTHONHOME if set
+if ! [ -z "${PYTHONHOME+_}" ] ; then
+    _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"
+    unset PYTHONHOME
+fi
+
+if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then
+    _OLD_VIRTUAL_PS1="$PS1"
+    if [ "x" != x ] ; then
+        PS1="$PS1"
+    else
+        PS1="(`basename \"$VIRTUAL_ENV\"`) $PS1"
+    fi
+    export PS1
+fi
+
+# Make sure to unalias pydoc if it's already there
+alias pydoc 2>/dev/null >/dev/null && unalias pydoc
+
+pydoc () {
+    python -m pydoc "$@"
+}
+
+# This should detect bash and zsh, which have a hash command that must
+# be called to get it to forget past commands.  Without forgetting
+# past commands the $PATH changes we made may not be respected
+if [ -n "${BASH-}" ] || [ -n "${ZSH_VERSION-}" ] ; then
+    hash -r 2>/dev/null
+fi
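
A minimal sketch, assuming `source bin/activate` above has been run in the parent shell: a Python process can confirm the environment the script exports using only the standard library.

    import os
    import sys

    # activate exports VIRTUAL_ENV and prepends $VIRTUAL_ENV/bin to PATH,
    # so sys.executable should resolve inside the environment.
    venv = os.environ.get("VIRTUAL_ENV")
    if venv:
        print("active virtualenv:", venv)
        print("interpreter:", sys.executable)
    else:
        print("no virtualenv active")
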
diff --git a/bin/activate.csh b/bin/activate.csh
new file mode 100644
index 0000000000000000000000000000000000000000..9c880bffb0405f99f4e2a237d4e397801bcc8b8c
--- /dev/null
+++ b/bin/activate.csh
@@ -0,0 +1,42 @@
+# This file must be used with "source bin/activate.csh" *from csh*.
+# You cannot run it directly.
+# Created by Davide Di Blasi <davidedb@gmail.com>.
+
+set newline='\
+'
+
+alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH:q" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT:q" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc'
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+setenv VIRTUAL_ENV "/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend"
+
+set _OLD_VIRTUAL_PATH="$PATH:q"
+setenv PATH "$VIRTUAL_ENV:q/bin:$PATH:q"
+
+
+
+if ("" != "") then
+    set env_name = ""
+else
+    set env_name = "$VIRTUAL_ENV:t:q"
+endif
+
+# Could be in a non-interactive environment,
+# in which case, $prompt is undefined and we wouldn't
+# care about the prompt anyway.
+if ( $?prompt ) then
+    set _OLD_VIRTUAL_PROMPT="$prompt:q"
+if ( "$prompt:q" =~ *"$newline:q"* ) then
+    :
+else
+    set prompt = "[$env_name:q] $prompt:q"
+endif
+endif
+
+unset env_name
+
+alias pydoc python -m pydoc
+
+rehash
diff --git a/bin/activate.fish b/bin/activate.fish
new file mode 100644
index 0000000000000000000000000000000000000000..14c2fbbfef2a9347d7307a0ac4aa192db7263ff6
--- /dev/null
+++ b/bin/activate.fish
@@ -0,0 +1,76 @@
+# This file must be used using `source bin/activate.fish` *within a running fish ( http://fishshell.com ) session*.
+# Do not run it directly.
+
+function deactivate -d 'Exit virtualenv mode and return to the normal environment.'
+    # reset old environment variables
+    if test -n "$_OLD_VIRTUAL_PATH"
+        set -gx PATH $_OLD_VIRTUAL_PATH
+        set -e _OLD_VIRTUAL_PATH
+    end
+
+    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
+        set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
+        set -e _OLD_VIRTUAL_PYTHONHOME
+    end
+
+    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
+        # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`.
+        set -l fish_function_path
+
+        # Erase virtualenv's `fish_prompt` and restore the original.
+        functions -e fish_prompt
+        functions -c _old_fish_prompt fish_prompt
+        functions -e _old_fish_prompt
+        set -e _OLD_FISH_PROMPT_OVERRIDE
+    end
+
+    set -e VIRTUAL_ENV
+
+    if test "$argv[1]" != 'nondestructive'
+        # Self-destruct!
+        functions -e pydoc
+        functions -e deactivate
+    end
+end
+
+# Unset irrelevant variables.
+deactivate nondestructive
+
+set -gx VIRTUAL_ENV "/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend"
+
+set -gx _OLD_VIRTUAL_PATH $PATH
+set -gx PATH "$VIRTUAL_ENV/bin" $PATH
+
+# Unset `$PYTHONHOME` if set.
+if set -q PYTHONHOME
+    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
+    set -e PYTHONHOME
+end
+
+function pydoc
+    python -m pydoc $argv
+end
+
+if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
+    # Copy the current `fish_prompt` function as `_old_fish_prompt`.
+    functions -c fish_prompt _old_fish_prompt
+
+    function fish_prompt
+        # Save the current $status, for fish_prompts that display it.
+        set -l old_status $status
+
+        # Prompt override provided?
+        # If not, just prepend the environment name.
+        if test -n ""
+            printf '%s%s' "" (set_color normal)
+        else
+            printf '%s(%s) ' (set_color normal) (basename "$VIRTUAL_ENV")
+        end
+
+        # Restore the original $status
+        echo "exit $old_status" | source
+        _old_fish_prompt
+    end
+
+    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
+end
diff --git a/bin/activate.ps1 b/bin/activate.ps1
new file mode 100644
index 0000000000000000000000000000000000000000..89dbec6ccdc0e9a05e79dbde928707d284712d07
--- /dev/null
+++ b/bin/activate.ps1
@@ -0,0 +1,150 @@
+# This file must be dot sourced from PoSh; you cannot run it
+# directly. Do this: . ./activate.ps1
+
+# FIXME: clean up unused vars.
+$script:THIS_PATH = $myinvocation.mycommand.path
+$script:BASE_DIR = split-path (resolve-path "$THIS_PATH/..") -Parent
+$script:DIR_NAME = split-path $BASE_DIR -Leaf
+
+function global:deactivate ( [switch] $NonDestructive ){
+
+    if ( test-path variable:_OLD_VIRTUAL_PATH ) {
+        $env:PATH = $variable:_OLD_VIRTUAL_PATH
+        remove-variable "_OLD_VIRTUAL_PATH" -scope global
+    }
+
+    if ( test-path function:_old_virtual_prompt ) {
+        $function:prompt = $function:_old_virtual_prompt
+        remove-item function:\_old_virtual_prompt
+    }
+
+    if ($env:VIRTUAL_ENV) {
+        $old_env = split-path $env:VIRTUAL_ENV -leaf
+        remove-item env:VIRTUAL_ENV -erroraction silentlycontinue
+    }
+
+    if ( !$NonDestructive ) {
+        # Self destruct!
+        remove-item function:deactivate
+    }
+}
+
+# unset irrelevant variables
+deactivate -nondestructive
+
+$VIRTUAL_ENV = $BASE_DIR
+$env:VIRTUAL_ENV = $VIRTUAL_ENV
+
+$global:_OLD_VIRTUAL_PATH = $env:PATH
+$env:PATH = "$env:VIRTUAL_ENV/bin:" + $env:PATH
+if (! $env:VIRTUAL_ENV_DISABLE_PROMPT) {
+    function global:_old_virtual_prompt { "" }
+    $function:_old_virtual_prompt = $function:prompt
+    function global:prompt {
+        # Add a prefix to the current prompt, but don't discard it.
+        write-host "($(split-path $env:VIRTUAL_ENV -leaf)) " -nonewline
+        & $function:_old_virtual_prompt
+    }
+}
+
+# SIG # Begin signature block
+# MIISeAYJKoZIhvcNAQcCoIISaTCCEmUCAQExCzAJBgUrDgMCGgUAMGkGCisGAQQB
+# gjcCAQSgWzBZMDQGCisGAQQBgjcCAR4wJgIDAQAABBAfzDtgWUsITrck0sYpfvNR
+# AgEAAgEAAgEAAgEAAgEAMCEwCQYFKw4DAhoFAAQUS5reBwSg3zOUwhXf2jPChZzf
+# yPmggg6tMIIGcDCCBFigAwIBAgIBJDANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQG
+# EwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERp
+# Z2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2Vy
+# dGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDcxMDI0MjIwMTQ2WhcNMTcxMDI0MjIw
+# MTQ2WjCBjDELMAkGA1UEBhMCSUwxFjAUBgNVBAoTDVN0YXJ0Q29tIEx0ZC4xKzAp
+# BgNVBAsTIlNlY3VyZSBEaWdpdGFsIENlcnRpZmljYXRlIFNpZ25pbmcxODA2BgNV
+# BAMTL1N0YXJ0Q29tIENsYXNzIDIgUHJpbWFyeSBJbnRlcm1lZGlhdGUgT2JqZWN0
+# IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyiOLIjUemqAbPJ1J
+# 0D8MlzgWKbr4fYlbRVjvhHDtfhFN6RQxq0PjTQxRgWzwFQNKJCdU5ftKoM5N4YSj
+# Id6ZNavcSa6/McVnhDAQm+8H3HWoD030NVOxbjgD/Ih3HaV3/z9159nnvyxQEckR
+# ZfpJB2Kfk6aHqW3JnSvRe+XVZSufDVCe/vtxGSEwKCaNrsLc9pboUoYIC3oyzWoU
+# TZ65+c0H4paR8c8eK/mC914mBo6N0dQ512/bkSdaeY9YaQpGtW/h/W/FkbQRT3sC
+# pttLVlIjnkuY4r9+zvqhToPjxcfDYEf+XD8VGkAqle8Aa8hQ+M1qGdQjAye8OzbV
+# uUOw7wIDAQABo4IB6TCCAeUwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+# AQYwHQYDVR0OBBYEFNBOD0CZbLhLGW87KLjg44gHNKq3MB8GA1UdIwQYMBaAFE4L
+# 7xqkQFulF2mHMMo0aEPQQa7yMD0GCCsGAQUFBwEBBDEwLzAtBggrBgEFBQcwAoYh
+# aHR0cDovL3d3dy5zdGFydHNzbC5jb20vc2ZzY2EuY3J0MFsGA1UdHwRUMFIwJ6Al
+# oCOGIWh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3Nmc2NhLmNybDAnoCWgI4YhaHR0
+# cDovL2NybC5zdGFydHNzbC5jb20vc2ZzY2EuY3JsMIGABgNVHSAEeTB3MHUGCysG
+# AQQBgbU3AQIBMGYwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29t
+# L3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t
+# L2ludGVybWVkaWF0ZS5wZGYwEQYJYIZIAYb4QgEBBAQDAgABMFAGCWCGSAGG+EIB
+# DQRDFkFTdGFydENvbSBDbGFzcyAyIFByaW1hcnkgSW50ZXJtZWRpYXRlIE9iamVj
+# dCBTaWduaW5nIENlcnRpZmljYXRlczANBgkqhkiG9w0BAQUFAAOCAgEAcnMLA3Va
+# N4OIE9l4QT5OEtZy5PByBit3oHiqQpgVEQo7DHRsjXD5H/IyTivpMikaaeRxIv95
+# baRd4hoUcMwDj4JIjC3WA9FoNFV31SMljEZa66G8RQECdMSSufgfDYu1XQ+cUKxh
+# D3EtLGGcFGjjML7EQv2Iol741rEsycXwIXcryxeiMbU2TPi7X3elbwQMc4JFlJ4B
+# y9FhBzuZB1DV2sN2irGVbC3G/1+S2doPDjL1CaElwRa/T0qkq2vvPxUgryAoCppU
+# FKViw5yoGYC+z1GaesWWiP1eFKAL0wI7IgSvLzU3y1Vp7vsYaxOVBqZtebFTWRHt
+# XjCsFrrQBngt0d33QbQRI5mwgzEp7XJ9xu5d6RVWM4TPRUsd+DDZpBHm9mszvi9g
+# VFb2ZG7qRRXCSqys4+u/NLBPbXi/m/lU00cODQTlC/euwjk9HQtRrXQ/zqsBJS6U
+# J+eLGw1qOfj+HVBl/ZQpfoLk7IoWlRQvRL1s7oirEaqPZUIWY/grXq9r6jDKAp3L
+# ZdKQpPOnnogtqlU4f7/kLjEJhrrc98mrOWmVMK/BuFRAfQ5oDUMnVmCzAzLMjKfG
+# cVW/iMew41yfhgKbwpfzm3LBr1Zv+pEBgcgW6onRLSAn3XHM0eNtz+AkxH6rRf6B
+# 2mYhLEEGLapH8R1AMAo4BbVFOZR5kXcMCwowggg1MIIHHaADAgECAgIEuDANBgkq
+# hkiG9w0BAQUFADCBjDELMAkGA1UEBhMCSUwxFjAUBgNVBAoTDVN0YXJ0Q29tIEx0
+# ZC4xKzApBgNVBAsTIlNlY3VyZSBEaWdpdGFsIENlcnRpZmljYXRlIFNpZ25pbmcx
+# ODA2BgNVBAMTL1N0YXJ0Q29tIENsYXNzIDIgUHJpbWFyeSBJbnRlcm1lZGlhdGUg
+# T2JqZWN0IENBMB4XDTExMTIwMzE1MzQxOVoXDTEzMTIwMzE0NTgwN1owgYwxIDAe
+# BgNVBA0TFzU4MTc5Ni1HaDd4Zkp4a3hRU0lPNEUwMQswCQYDVQQGEwJERTEPMA0G
+# A1UECBMGQmVybGluMQ8wDQYDVQQHEwZCZXJsaW4xFjAUBgNVBAMTDUphbm5pcyBM
+# ZWlkZWwxITAfBgkqhkiG9w0BCQEWEmphbm5pc0BsZWlkZWwuaW5mbzCCAiIwDQYJ
+# KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMcPeABYdN7nPq/AkZ/EkyUBGx/l2Yui
+# Lfm8ZdLG0ulMb/kQL3fRY7sUjYPyn9S6PhqqlFnNoGHJvbbReCdUC9SIQYmOEjEA
+# raHfb7MZU10NjO4U2DdGucj2zuO5tYxKizizOJF0e4yRQZVxpUGdvkW/+GLjCNK5
+# L7mIv3Z1dagxDKHYZT74HXiS4VFUwHF1k36CwfM2vsetdm46bdgSwV+BCMmZICYT
+# IJAS9UQHD7kP4rik3bFWjUx08NtYYFAVOd/HwBnemUmJe4j3IhZHr0k1+eDG8hDH
+# KVvPgLJIoEjC4iMFk5GWsg5z2ngk0LLu3JZMtckHsnnmBPHQK8a3opUNd8hdMNJx
+# gOwKjQt2JZSGUdIEFCKVDqj0FmdnDMPfwy+FNRtpBMl1sz78dUFhSrnM0D8NXrqa
+# 4rG+2FoOXlmm1rb6AFtpjAKksHRpYcPk2DPGWp/1sWB+dUQkS3gOmwFzyqeTuXpT
+# 0juqd3iAxOGx1VRFQ1VHLLf3AzV4wljBau26I+tu7iXxesVucSdsdQu293jwc2kN
+# xK2JyHCoZH+RyytrwS0qw8t7rMOukU9gwP8mn3X6mgWlVUODMcHTULjSiCEtvyZ/
+# aafcwjUbt4ReEcnmuZtWIha86MTCX7U7e+cnpWG4sIHPnvVTaz9rm8RyBkIxtFCB
+# nQ3FnoQgyxeJAgMBAAGjggOdMIIDmTAJBgNVHRMEAjAAMA4GA1UdDwEB/wQEAwIH
+# gDAuBgNVHSUBAf8EJDAiBggrBgEFBQcDAwYKKwYBBAGCNwIBFQYKKwYBBAGCNwoD
+# DTAdBgNVHQ4EFgQUWyCgrIWo8Ifvvm1/YTQIeMU9nc8wHwYDVR0jBBgwFoAU0E4P
+# QJlsuEsZbzsouODjiAc0qrcwggIhBgNVHSAEggIYMIICFDCCAhAGCysGAQQBgbU3
+# AQICMIIB/zAuBggrBgEFBQcCARYiaHR0cDovL3d3dy5zdGFydHNzbC5jb20vcG9s
+# aWN5LnBkZjA0BggrBgEFBQcCARYoaHR0cDovL3d3dy5zdGFydHNzbC5jb20vaW50
+# ZXJtZWRpYXRlLnBkZjCB9wYIKwYBBQUHAgIwgeowJxYgU3RhcnRDb20gQ2VydGlm
+# aWNhdGlvbiBBdXRob3JpdHkwAwIBARqBvlRoaXMgY2VydGlmaWNhdGUgd2FzIGlz
+# c3VlZCBhY2NvcmRpbmcgdG8gdGhlIENsYXNzIDIgVmFsaWRhdGlvbiByZXF1aXJl
+# bWVudHMgb2YgdGhlIFN0YXJ0Q29tIENBIHBvbGljeSwgcmVsaWFuY2Ugb25seSBm
+# b3IgdGhlIGludGVuZGVkIHB1cnBvc2UgaW4gY29tcGxpYW5jZSBvZiB0aGUgcmVs
+# eWluZyBwYXJ0eSBvYmxpZ2F0aW9ucy4wgZwGCCsGAQUFBwICMIGPMCcWIFN0YXJ0
+# Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MAMCAQIaZExpYWJpbGl0eSBhbmQg
+# d2FycmFudGllcyBhcmUgbGltaXRlZCEgU2VlIHNlY3Rpb24gIkxlZ2FsIGFuZCBM
+# aW1pdGF0aW9ucyIgb2YgdGhlIFN0YXJ0Q29tIENBIHBvbGljeS4wNgYDVR0fBC8w
+# LTAroCmgJ4YlaHR0cDovL2NybC5zdGFydHNzbC5jb20vY3J0YzItY3JsLmNybDCB
+# iQYIKwYBBQUHAQEEfTB7MDcGCCsGAQUFBzABhitodHRwOi8vb2NzcC5zdGFydHNz
+# bC5jb20vc3ViL2NsYXNzMi9jb2RlL2NhMEAGCCsGAQUFBzAChjRodHRwOi8vYWlh
+# LnN0YXJ0c3NsLmNvbS9jZXJ0cy9zdWIuY2xhc3MyLmNvZGUuY2EuY3J0MCMGA1Ud
+# EgQcMBqGGGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tLzANBgkqhkiG9w0BAQUFAAOC
+# AQEAhrzEV6zwoEtKjnFRhCsjwiPykVpo5Eiye77Ve801rQDiRKgSCCiW6g3HqedL
+# OtaSs65Sj2pm3Viea4KR0TECLcbCTgsdaHqw2x1yXwWBQWZEaV6EB05lIwfr94P1
+# SFpV43zkuc+bbmA3+CRK45LOcCNH5Tqq7VGTCAK5iM7tvHwFlbQRl+I6VEL2mjpF
+# NsuRjDOVrv/9qw/a22YJ9R7Y1D0vUSs3IqZx2KMUaYDP7H2mSRxJO2nADQZBtriF
+# gTyfD3lYV12MlIi5CQwe3QC6DrrfSMP33i5Wa/OFJiQ27WPxmScYVhiqozpImFT4
+# PU9goiBv9RKXdgTmZE1PN0NQ5jGCAzUwggMxAgEBMIGTMIGMMQswCQYDVQQGEwJJ
+# TDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0
+# YWwgQ2VydGlmaWNhdGUgU2lnbmluZzE4MDYGA1UEAxMvU3RhcnRDb20gQ2xhc3Mg
+# MiBQcmltYXJ5IEludGVybWVkaWF0ZSBPYmplY3QgQ0ECAgS4MAkGBSsOAwIaBQCg
+# eDAYBgorBgEEAYI3AgEMMQowCKACgAChAoAAMBkGCSqGSIb3DQEJAzEMBgorBgEE
+# AYI3AgEEMBwGCisGAQQBgjcCAQsxDjAMBgorBgEEAYI3AgEVMCMGCSqGSIb3DQEJ
+# BDEWBBRVGw0FDSiaIi38dWteRUAg/9Pr6DANBgkqhkiG9w0BAQEFAASCAgCInvOZ
+# FdaNFzbf6trmFDZKMojyx3UjKMCqNjHVBbuKY0qXwFC/ElYDV1ShJ2CBZbdurydO
+# OQ6cIQ0KREOCwmX/xB49IlLHHUxNhEkVv7HGU3EKAFf9IBt9Yr7jikiR9cjIsfHK
+# 4cjkoKJL7g28yEpLLkHt1eo37f1Ga9lDWEa5Zq3U5yX+IwXhrUBm1h8Xr033FhTR
+# VEpuSz6LHtbrL/zgJnCzJ2ahjtJoYevdcWiNXffosJHFaSfYDDbiNsPRDH/1avmb
+# 5j/7BhP8BcBaR6Fp8tFbNGIcWHHGcjqLMnTc4w13b7b4pDhypqElBa4+lCmwdvv9
+# GydYtRgPz8GHeoBoKj30YBlMzRIfFYaIFGIC4Ai3UEXkuH9TxYohVbGm/W0Kl4Lb
+# RJ1FwiVcLcTOJdgNId2vQvKc+jtNrjcg5SP9h2v/C4aTx8tyc6tE3TOPh2f9b8DL
+# S+SbVArJpuJqrPTxDDoO1QNjTgLcdVYeZDE+r/NjaGZ6cMSd8db3EaG3ijD/0bud
+# SItbm/OlNVbQOFRR76D+ZNgPcU5iNZ3bmvQQIg6aSB9MHUpIE/SeCkNl9YeVk1/1
+# GFULgNMRmIYP4KLvu9ylh5Gu3hvD5VNhH6+FlXANwFy07uXks5uF8mfZVxVCnodG
+# xkNCx+6PsrA5Z7WP4pXcmYnMn97npP/Q9EHJWw==
+# SIG # End signature block
diff --git a/bin/activate_this.py b/bin/activate_this.py
new file mode 100644
index 0000000000000000000000000000000000000000..444d3fd35df0c017f09ecde40c1c56d20ca48d2d
--- /dev/null
+++ b/bin/activate_this.py
@@ -0,0 +1,36 @@
+"""By using execfile(this_file, dict(__file__=this_file)) you will
+activate this virtualenv environment.
+
+This can be used when you must use an existing Python interpreter, not
+the virtualenv bin/python.
+"""
+
+try:
+    __file__
+except NameError:
+    raise AssertionError(
+        "You must run this like execfile('path/to/activate_this.py', dict(__file__='path/to/activate_this.py'))"
+    )
+import os
+import site
+import sys
+
+old_os_path = os.environ.get("PATH", "")
+os.environ["PATH"] = os.path.dirname(os.path.abspath(__file__)) + os.pathsep + old_os_path
+base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+if sys.platform == "win32":
+    site_packages = os.path.join(base, "Lib", "site-packages")
+else:
+    site_packages = os.path.join(base, "lib", "python%s" % sys.version[:3], "site-packages")
+prev_sys_path = list(sys.path)
+
+site.addsitedir(site_packages)
+sys.real_prefix = sys.prefix
+sys.prefix = base
+# Move the added items to the front of the path:
+new_sys_path = []
+for item in list(sys.path):
+    if item not in prev_sys_path:
+        new_sys_path.append(item)
+        sys.path.remove(item)
+sys.path[:0] = new_sys_path
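
A minimal usage sketch for activate_this.py: the docstring above shows the Python 2 execfile() form; on Python 3 the same effect comes from exec() over the file contents. The path is this environment's own bin directory.

    # Python 3 equivalent of the documented execfile() call.
    activate_this = "/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend/bin/activate_this.py"
    with open(activate_this) as f:
        exec(f.read(), dict(__file__=activate_this))
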
diff --git a/bin/easy_install b/bin/easy_install
new file mode 100755
index 0000000000000000000000000000000000000000..3af40d4bcc54e7d72ff51ea4f92ae46c96972ad6
--- /dev/null
+++ b/bin/easy_install
@@ -0,0 +1,11 @@
+#!/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend/bin/python
+
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from setuptools.command.easy_install import main
+
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
diff --git a/bin/easy_install-3.7 b/bin/easy_install-3.7
new file mode 100755
index 0000000000000000000000000000000000000000..3af40d4bcc54e7d72ff51ea4f92ae46c96972ad6
--- /dev/null
+++ b/bin/easy_install-3.7
@@ -0,0 +1,11 @@
+#!/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend/bin/python
+
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from setuptools.command.easy_install import main
+
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
diff --git a/bin/flask b/bin/flask
new file mode 100755
index 0000000000000000000000000000000000000000..0fd5854342996b4b620120240eff7c082d64c779
--- /dev/null
+++ b/bin/flask
@@ -0,0 +1,10 @@
+#!/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend/bin/python
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from flask.cli import main
+
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
diff --git a/bin/pip b/bin/pip
new file mode 100755
index 0000000000000000000000000000000000000000..13aa9e55a12ef006fd7b4529538aa4e34cea0e50
--- /dev/null
+++ b/bin/pip
@@ -0,0 +1,11 @@
+#!/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend/bin/python
+
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from pip._internal import main
+
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
diff --git a/bin/pip3 b/bin/pip3
new file mode 100755
index 0000000000000000000000000000000000000000..13aa9e55a12ef006fd7b4529538aa4e34cea0e50
--- /dev/null
+++ b/bin/pip3
@@ -0,0 +1,11 @@
+#!/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend/bin/python
+
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from pip._internal import main
+
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
diff --git a/bin/pip3.7 b/bin/pip3.7
new file mode 100755
index 0000000000000000000000000000000000000000..13aa9e55a12ef006fd7b4529538aa4e34cea0e50
--- /dev/null
+++ b/bin/pip3.7
@@ -0,0 +1,11 @@
+#!/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend/bin/python
+
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from pip._internal import main
+
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
diff --git a/bin/pwiz.py b/bin/pwiz.py
new file mode 100755
index 0000000000000000000000000000000000000000..d14fca302827910a1715119b9868081b26912332
--- /dev/null
+++ b/bin/pwiz.py
@@ -0,0 +1,221 @@
+#!/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend/bin/python
+
+import datetime
+import sys
+from getpass import getpass
+from optparse import OptionParser
+
+from peewee import *
+from peewee import print_
+from peewee import __version__ as peewee_version
+from playhouse.reflection import *
+
+
+HEADER = """from peewee import *%s
+
+database = %s('%s'%s)
+"""
+
+BASE_MODEL = """\
+class BaseModel(Model):
+    class Meta:
+        database = database
+"""
+
+UNKNOWN_FIELD = """\
+class UnknownField(object):
+    def __init__(self, *_, **__): pass
+"""
+
+DATABASE_ALIASES = {
+    MySQLDatabase: ['mysql', 'mysqldb'],
+    PostgresqlDatabase: ['postgres', 'postgresql'],
+    SqliteDatabase: ['sqlite', 'sqlite3'],
+}
+
+DATABASE_MAP = dict((value, key)
+                    for key in DATABASE_ALIASES
+                    for value in DATABASE_ALIASES[key])
+
+def make_introspector(database_type, database_name, **kwargs):
+    if database_type not in DATABASE_MAP:
+        err('Unrecognized database, must be one of: %s' %
+            ', '.join(DATABASE_MAP.keys()))
+        sys.exit(1)
+
+    schema = kwargs.pop('schema', None)
+    DatabaseClass = DATABASE_MAP[database_type]
+    db = DatabaseClass(database_name, **kwargs)
+    return Introspector.from_database(db, schema=schema)
+
+def print_models(introspector, tables=None, preserve_order=False,
+                 include_views=False, ignore_unknown=False, snake_case=True):
+    database = introspector.introspect(table_names=tables,
+                                       include_views=include_views,
+                                       snake_case=snake_case)
+
+    db_kwargs = introspector.get_database_kwargs()
+    header = HEADER % (
+        introspector.get_additional_imports(),
+        introspector.get_database_class().__name__,
+        introspector.get_database_name(),
+        ', **%s' % repr(db_kwargs) if db_kwargs else '')
+    print_(header)
+
+    if not ignore_unknown:
+        print_(UNKNOWN_FIELD)
+
+    print_(BASE_MODEL)
+
+    def _print_table(table, seen, accum=None):
+        accum = accum or []
+        foreign_keys = database.foreign_keys[table]
+        for foreign_key in foreign_keys:
+            dest = foreign_key.dest_table
+
+            # If the destination table has already been pushed for
+            # printing, we have a reference cycle.
+            if dest in accum and table not in accum:
+                print_('# Possible reference cycle: %s' % dest)
+
+            # If this is not a self-referential foreign key, and we have
+            # not already processed the destination table, do so now.
+            if dest not in seen and dest not in accum:
+                seen.add(dest)
+                if dest != table:
+                    _print_table(dest, seen, accum + [table])
+
+        print_('class %s(BaseModel):' % database.model_names[table])
+        columns = database.columns[table].items()
+        if not preserve_order:
+            columns = sorted(columns)
+        primary_keys = database.primary_keys[table]
+        for name, column in columns:
+            skip = all([
+                name in primary_keys,
+                name == 'id',
+                len(primary_keys) == 1,
+                column.field_class in introspector.pk_classes])
+            if skip:
+                continue
+            if column.primary_key and len(primary_keys) > 1:
+                # If we have a CompositeKey, then we do not want to explicitly
+                # mark the columns as being primary keys.
+                column.primary_key = False
+
+            is_unknown = column.field_class is UnknownField
+            if is_unknown and ignore_unknown:
+                disp = '%s - %s' % (column.name, column.raw_column_type or '?')
+                print_('    # %s' % disp)
+            else:
+                print_('    %s' % column.get_field())
+
+        print_('')
+        print_('    class Meta:')
+        print_('        table_name = \'%s\'' % table)
+        multi_column_indexes = database.multi_column_indexes(table)
+        if multi_column_indexes:
+            print_('        indexes = (')
+            for fields, unique in sorted(multi_column_indexes):
+                print_('            ((%s), %s),' % (
+                    ', '.join("'%s'" % field for field in fields),
+                    unique,
+                ))
+            print_('        )')
+
+        if introspector.schema:
+            print_('        schema = \'%s\'' % introspector.schema)
+        if len(primary_keys) > 1:
+            pk_field_names = sorted([
+                field.name for col, field in columns
+                if col in primary_keys])
+            pk_list = ', '.join("'%s'" % pk for pk in pk_field_names)
+            print_('        primary_key = CompositeKey(%s)' % pk_list)
+        elif not primary_keys:
+            print_('        primary_key = False')
+        print_('')
+
+        seen.add(table)
+
+    seen = set()
+    for table in sorted(database.model_names.keys()):
+        if table not in seen:
+            if not tables or table in tables:
+                _print_table(table, seen)
+
+def print_header(cmd_line, introspector):
+    timestamp = datetime.datetime.now()
+    print_('# Code generated by:')
+    print_('# python -m pwiz %s' % cmd_line)
+    print_('# Date: %s' % timestamp.strftime('%B %d, %Y %I:%M%p'))
+    print_('# Database: %s' % introspector.get_database_name())
+    print_('# Peewee version: %s' % peewee_version)
+    print_('')
+
+
+def err(msg):
+    sys.stderr.write('\033[91m%s\033[0m\n' % msg)
+    sys.stderr.flush()
+
+def get_option_parser():
+    parser = OptionParser(usage='usage: %prog [options] database_name')
+    ao = parser.add_option
+    ao('-H', '--host', dest='host')
+    ao('-p', '--port', dest='port', type='int')
+    ao('-u', '--user', dest='user')
+    ao('-P', '--password', dest='password', action='store_true')
+    engines = sorted(DATABASE_MAP)
+    ao('-e', '--engine', dest='engine', default='postgresql', choices=engines,
+       help=('Database type, e.g. sqlite, mysql or postgresql. Default '
+             'is "postgresql".'))
+    ao('-s', '--schema', dest='schema')
+    ao('-t', '--tables', dest='tables',
+       help=('Only generate the specified tables. Multiple table names should '
+             'be separated by commas.'))
+    ao('-v', '--views', dest='views', action='store_true',
+       help='Generate model classes for VIEWs in addition to tables.')
+    ao('-i', '--info', dest='info', action='store_true',
+       help=('Add database information and other metadata to top of the '
+             'generated file.'))
+    ao('-o', '--preserve-order', action='store_true', dest='preserve_order',
+       help='Model definition column ordering matches source table.')
+    ao('-I', '--ignore-unknown', action='store_true', dest='ignore_unknown',
+       help='Ignore fields whose type cannot be determined.')
+    ao('-L', '--legacy-naming', action='store_true', dest='legacy_naming',
+       help='Use legacy table- and column-name generation.')
+    return parser
+
+def get_connect_kwargs(options):
+    ops = ('host', 'port', 'user', 'schema')
+    kwargs = dict((o, getattr(options, o)) for o in ops if getattr(options, o))
+    if options.password:
+        kwargs['password'] = getpass()
+    return kwargs
+
+
+if __name__ == '__main__':
+    raw_argv = sys.argv
+
+    parser = get_option_parser()
+    options, args = parser.parse_args()
+
+    if len(args) < 1:
+        err('Missing required parameter "database"')
+        parser.print_help()
+        sys.exit(1)
+
+    connect = get_connect_kwargs(options)
+    database = args[-1]
+
+    tables = None
+    if options.tables:
+        tables = [table.strip() for table in options.tables.split(',')
+                  if table.strip()]
+
+    introspector = make_introspector(options.engine, database, **connect)
+    if options.info:
+        cmd_line = ' '.join(raw_argv[1:])
+        print_header(cmd_line, introspector)
+
+    print_models(introspector, tables, options.preserve_order, options.views,
+                 options.ignore_unknown, not options.legacy_naming)
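
A sketch of driving the helpers defined in pwiz.py above from Python, assuming it runs where make_introspector and print_models are in scope and that a SQLite file named app.db exists (the file name is illustrative). The equivalent command line is `python pwiz.py -e sqlite app.db > models.py`.

    # Reflect an existing database and emit peewee model classes to stdout.
    introspector = make_introspector('sqlite', 'app.db')
    print_models(introspector)
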
diff --git a/bin/python b/bin/python
new file mode 100755
index 0000000000000000000000000000000000000000..7ab007825217cbd41a7585b32e45c766699fecac
Binary files /dev/null and b/bin/python differ
diff --git a/bin/python-config b/bin/python-config
new file mode 100755
index 0000000000000000000000000000000000000000..e56b2747c58f2100db4123705792e00a6cdd3a34
--- /dev/null
+++ b/bin/python-config
@@ -0,0 +1,78 @@
+#!/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend/bin/python
+
+import sys
+import getopt
+import sysconfig
+
+valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
+              'ldflags', 'help']
+
+if sys.version_info >= (3, 2):
+    valid_opts.insert(-1, 'extension-suffix')
+    valid_opts.append('abiflags')
+if sys.version_info >= (3, 3):
+    valid_opts.append('configdir')
+
+
+def exit_with_usage(code=1):
+    sys.stderr.write("Usage: {0} [{1}]\n".format(
+        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
+    sys.exit(code)
+
+try:
+    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
+except getopt.error:
+    exit_with_usage()
+
+if not opts:
+    exit_with_usage()
+
+pyver = sysconfig.get_config_var('VERSION')
+getvar = sysconfig.get_config_var
+
+opt_flags = [flag for (flag, val) in opts]
+
+if '--help' in opt_flags:
+    exit_with_usage(code=0)
+
+for opt in opt_flags:
+    if opt == '--prefix':
+        print(sysconfig.get_config_var('prefix'))
+
+    elif opt == '--exec-prefix':
+        print(sysconfig.get_config_var('exec_prefix'))
+
+    elif opt in ('--includes', '--cflags'):
+        flags = ['-I' + sysconfig.get_path('include'),
+                 '-I' + sysconfig.get_path('platinclude')]
+        if opt == '--cflags':
+            flags.extend(getvar('CFLAGS').split())
+        print(' '.join(flags))
+
+    elif opt in ('--libs', '--ldflags'):
+        abiflags = getattr(sys, 'abiflags', '')
+        libs = ['-lpython' + pyver + abiflags]
+        libs += getvar('LIBS').split()
+        libs += getvar('SYSLIBS').split()
+        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
+        # shared library in prefix/lib/.
+        if opt == '--ldflags':
+            if not getvar('Py_ENABLE_SHARED'):
+                libs.insert(0, '-L' + getvar('LIBPL'))
+            if not getvar('PYTHONFRAMEWORK'):
+                libs.extend(getvar('LINKFORSHARED').split())
+        print(' '.join(libs))
+
+    elif opt == '--extension-suffix':
+        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
+        if ext_suffix is None:
+            ext_suffix = sysconfig.get_config_var('SO')
+        print(ext_suffix)
+
+    elif opt == '--abiflags':
+        if not getattr(sys, 'abiflags', None):
+            exit_with_usage()
+        print(sys.abiflags)
+
+    elif opt == '--configdir':
+        print(sysconfig.get_config_var('LIBPL'))
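
The script above is a thin wrapper over sysconfig; a sketch of the standard-library lookups behind three of its options:

    import sysconfig

    print(sysconfig.get_path('include'))           # source of the --includes -I flags
    print(sysconfig.get_config_var('EXT_SUFFIX'))  # what --extension-suffix prints
    print(sysconfig.get_config_var('LIBPL'))       # what --configdir prints
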
diff --git a/bin/python3 b/bin/python3
new file mode 120000
index 0000000000000000000000000000000000000000..d8654aa0e2f2f3c1760e0fcbcbb52c1c5941fba7
--- /dev/null
+++ b/bin/python3
@@ -0,0 +1 @@
+python
\ No newline at end of file
diff --git a/bin/python3.7 b/bin/python3.7
new file mode 120000
index 0000000000000000000000000000000000000000..d8654aa0e2f2f3c1760e0fcbcbb52c1c5941fba7
--- /dev/null
+++ b/bin/python3.7
@@ -0,0 +1 @@
+python
\ No newline at end of file
diff --git a/bin/wheel b/bin/wheel
new file mode 100755
index 0000000000000000000000000000000000000000..21720469dbe51c9d2b2f89795238425ff8edb14f
--- /dev/null
+++ b/bin/wheel
@@ -0,0 +1,11 @@
+#!/home/reverend/Programmierung/git_projects/chaos_familien_duell_frontend/bin/python
+
+# -*- coding: utf-8 -*-
+import re
+import sys
+
+from wheel.cli import main
+
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+    sys.exit(main())
diff --git a/include/python3.7m b/include/python3.7m
new file mode 120000
index 0000000000000000000000000000000000000000..4fe7f6cf9fba788818066956711299381de9403f
--- /dev/null
+++ b/include/python3.7m
@@ -0,0 +1 @@
+/usr/include/python3.7m
\ No newline at end of file
diff --git a/lib/python3.7/__future__.py b/lib/python3.7/__future__.py
new file mode 120000
index 0000000000000000000000000000000000000000..f8b4fc852c49f9b7f4810fc5e30fdebee9fa3250
--- /dev/null
+++ b/lib/python3.7/__future__.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/__future__.py
\ No newline at end of file
diff --git a/lib/python3.7/__pycache__/__future__.cpython-37.pyc b/lib/python3.7/__pycache__/__future__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..243d5280e0cfc123d326653037edf4b611fb302e
Binary files /dev/null and b/lib/python3.7/__pycache__/__future__.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/_bootlocale.cpython-37.pyc b/lib/python3.7/__pycache__/_bootlocale.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..555603ceef7505383656f3fbe844437f8171a70f
Binary files /dev/null and b/lib/python3.7/__pycache__/_bootlocale.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/_collections_abc.cpython-37.pyc b/lib/python3.7/__pycache__/_collections_abc.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e9cd94f3d3476c190f74011da94bda63401f945
Binary files /dev/null and b/lib/python3.7/__pycache__/_collections_abc.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/_weakrefset.cpython-37.pyc b/lib/python3.7/__pycache__/_weakrefset.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9ecf6ec5db9b2f7952a10a3f0513ddfe1e4e1cd
Binary files /dev/null and b/lib/python3.7/__pycache__/_weakrefset.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/abc.cpython-37.pyc b/lib/python3.7/__pycache__/abc.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c236ceb31923aa4af186dabc7027f78614926775
Binary files /dev/null and b/lib/python3.7/__pycache__/abc.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/base64.cpython-37.pyc b/lib/python3.7/__pycache__/base64.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b14842192c4e5f480d482b7d3c95aebecff5072a
Binary files /dev/null and b/lib/python3.7/__pycache__/base64.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/bisect.cpython-37.pyc b/lib/python3.7/__pycache__/bisect.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18027a78fd7a681378ddef4daf469a5e4f065fc5
Binary files /dev/null and b/lib/python3.7/__pycache__/bisect.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/codecs.cpython-37.pyc b/lib/python3.7/__pycache__/codecs.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..547f1f3ebd9ab4fb63e19b82f4d448dd7f3b6bee
Binary files /dev/null and b/lib/python3.7/__pycache__/codecs.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/copy.cpython-37.pyc b/lib/python3.7/__pycache__/copy.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4cf96d4b780392d45172a162c410a1fb9ffa865f
Binary files /dev/null and b/lib/python3.7/__pycache__/copy.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/copyreg.cpython-37.pyc b/lib/python3.7/__pycache__/copyreg.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..db1ae6604c379e40e3b102737a9ab64b2a97d202
Binary files /dev/null and b/lib/python3.7/__pycache__/copyreg.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/enum.cpython-37.pyc b/lib/python3.7/__pycache__/enum.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..73e91cdab508d1e67bdfdc0e5642b84d73edbee9
Binary files /dev/null and b/lib/python3.7/__pycache__/enum.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/fnmatch.cpython-37.pyc b/lib/python3.7/__pycache__/fnmatch.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3dae8c0def96bfff2a042c1048ce53c99f0abdf
Binary files /dev/null and b/lib/python3.7/__pycache__/fnmatch.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/functools.cpython-37.pyc b/lib/python3.7/__pycache__/functools.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10c06a03d4db6ef1341652db04a7a3367838bdcc
Binary files /dev/null and b/lib/python3.7/__pycache__/functools.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/genericpath.cpython-37.pyc b/lib/python3.7/__pycache__/genericpath.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb9f8dfd78d71d23b4570784dec82cb3b73d8a62
Binary files /dev/null and b/lib/python3.7/__pycache__/genericpath.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/hashlib.cpython-37.pyc b/lib/python3.7/__pycache__/hashlib.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..efc4128dbf46d02b1afd987523e8bd08fde817b6
Binary files /dev/null and b/lib/python3.7/__pycache__/hashlib.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/heapq.cpython-37.pyc b/lib/python3.7/__pycache__/heapq.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e4d03188d749e90e6f54ab17610c0a20e47726b
Binary files /dev/null and b/lib/python3.7/__pycache__/heapq.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/hmac.cpython-37.pyc b/lib/python3.7/__pycache__/hmac.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a38e600185eba40c2672f74f1659dbe1ba20a66
Binary files /dev/null and b/lib/python3.7/__pycache__/hmac.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/imp.cpython-37.pyc b/lib/python3.7/__pycache__/imp.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..56d477569bac65dc4833481a8610e7c8fd80304a
Binary files /dev/null and b/lib/python3.7/__pycache__/imp.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/io.cpython-37.pyc b/lib/python3.7/__pycache__/io.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e5495e58346437a3a82d43159f214beda777d59f
Binary files /dev/null and b/lib/python3.7/__pycache__/io.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/keyword.cpython-37.pyc b/lib/python3.7/__pycache__/keyword.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba64f62b329525d4c9b4a75c16650e6fef7422b0
Binary files /dev/null and b/lib/python3.7/__pycache__/keyword.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/linecache.cpython-37.pyc b/lib/python3.7/__pycache__/linecache.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6f30cb3e2afdcbd926cbfaf0cb0848f281ef5f32
Binary files /dev/null and b/lib/python3.7/__pycache__/linecache.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/locale.cpython-37.pyc b/lib/python3.7/__pycache__/locale.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d0957ecd5a4f8a287995928ebabab5b5fdf0dcb7
Binary files /dev/null and b/lib/python3.7/__pycache__/locale.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/ntpath.cpython-37.pyc b/lib/python3.7/__pycache__/ntpath.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..06815edd62e6719f44eb7f4acab6a3bc12090811
Binary files /dev/null and b/lib/python3.7/__pycache__/ntpath.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/operator.cpython-37.pyc b/lib/python3.7/__pycache__/operator.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e6ccef05d7c06ccb31ca5cf24a39c3a252747fa
Binary files /dev/null and b/lib/python3.7/__pycache__/operator.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/os.cpython-37.pyc b/lib/python3.7/__pycache__/os.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..07a7e9b0453706e9e81da25091003cd40708701e
Binary files /dev/null and b/lib/python3.7/__pycache__/os.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/posixpath.cpython-37.pyc b/lib/python3.7/__pycache__/posixpath.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5bc6dbe36396544a784b3b78e4294be456ebd601
Binary files /dev/null and b/lib/python3.7/__pycache__/posixpath.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/random.cpython-37.pyc b/lib/python3.7/__pycache__/random.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bfb26f9b0cee2a7a9c38d080a61815809f665e4e
Binary files /dev/null and b/lib/python3.7/__pycache__/random.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/re.cpython-37.pyc b/lib/python3.7/__pycache__/re.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b96b612d90f91aea2511a044f69b7a21c89269a
Binary files /dev/null and b/lib/python3.7/__pycache__/re.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/reprlib.cpython-37.pyc b/lib/python3.7/__pycache__/reprlib.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..359c7bc89ec7ea0e048eb66204e117ad762e4d80
Binary files /dev/null and b/lib/python3.7/__pycache__/reprlib.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/shutil.cpython-37.pyc b/lib/python3.7/__pycache__/shutil.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..82855731b3c83fda5c307c0c5f4abcae65a37691
Binary files /dev/null and b/lib/python3.7/__pycache__/shutil.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/site.cpython-37.pyc b/lib/python3.7/__pycache__/site.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f05e060c5ad50cf9c1342efe1a5f8a927e5c405
Binary files /dev/null and b/lib/python3.7/__pycache__/site.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/sre_compile.cpython-37.pyc b/lib/python3.7/__pycache__/sre_compile.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e9387f4bbdbb1dab26fe2d1e50dcfc745882d7f
Binary files /dev/null and b/lib/python3.7/__pycache__/sre_compile.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/sre_constants.cpython-37.pyc b/lib/python3.7/__pycache__/sre_constants.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..969b6b86c4ea72f2ed64fdea8961d7df5353d2f2
Binary files /dev/null and b/lib/python3.7/__pycache__/sre_constants.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/sre_parse.cpython-37.pyc b/lib/python3.7/__pycache__/sre_parse.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a7149f910cb45e33e165ffa97929292ce7005d6
Binary files /dev/null and b/lib/python3.7/__pycache__/sre_parse.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/stat.cpython-37.pyc b/lib/python3.7/__pycache__/stat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a960003518998f1bbe3f78952b855e84ed0fdc00
Binary files /dev/null and b/lib/python3.7/__pycache__/stat.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/struct.cpython-37.pyc b/lib/python3.7/__pycache__/struct.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31a55bd2994b8a304a6fe4298ca0158b103756f1
Binary files /dev/null and b/lib/python3.7/__pycache__/struct.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/tarfile.cpython-37.pyc b/lib/python3.7/__pycache__/tarfile.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e0228b6100a4dbde57ffb6a11ac243a9d3bde4d3
Binary files /dev/null and b/lib/python3.7/__pycache__/tarfile.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/tempfile.cpython-37.pyc b/lib/python3.7/__pycache__/tempfile.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c214251c5f5471a40837ca62e909194e9790a63
Binary files /dev/null and b/lib/python3.7/__pycache__/tempfile.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/token.cpython-37.pyc b/lib/python3.7/__pycache__/token.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7103b8210c38f2c4810a92c8352abccf2561ad72
Binary files /dev/null and b/lib/python3.7/__pycache__/token.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/tokenize.cpython-37.pyc b/lib/python3.7/__pycache__/tokenize.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df29a4b74980c25d939fc6cacc7546a06e960f87
Binary files /dev/null and b/lib/python3.7/__pycache__/tokenize.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/types.cpython-37.pyc b/lib/python3.7/__pycache__/types.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..12b5df71e5e4f4fff8b01e1256e48f369db0bf91
Binary files /dev/null and b/lib/python3.7/__pycache__/types.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/warnings.cpython-37.pyc b/lib/python3.7/__pycache__/warnings.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f57cf120180450d7709c618b15c96bd66a62868e
Binary files /dev/null and b/lib/python3.7/__pycache__/warnings.cpython-37.pyc differ
diff --git a/lib/python3.7/__pycache__/weakref.cpython-37.pyc b/lib/python3.7/__pycache__/weakref.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..14335f5b256045cca698f9df600ade0297025280
Binary files /dev/null and b/lib/python3.7/__pycache__/weakref.cpython-37.pyc differ
diff --git a/lib/python3.7/_bootlocale.py b/lib/python3.7/_bootlocale.py
new file mode 120000
index 0000000000000000000000000000000000000000..e04cb131dc2e353f30b98a41de671d80e7f5dec5
--- /dev/null
+++ b/lib/python3.7/_bootlocale.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/_bootlocale.py
\ No newline at end of file
diff --git a/lib/python3.7/_collections_abc.py b/lib/python3.7/_collections_abc.py
new file mode 120000
index 0000000000000000000000000000000000000000..e72b39989e7b5aeb15ba232788e7905c38785423
--- /dev/null
+++ b/lib/python3.7/_collections_abc.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/_collections_abc.py
\ No newline at end of file
diff --git a/lib/python3.7/_dummy_thread.py b/lib/python3.7/_dummy_thread.py
new file mode 120000
index 0000000000000000000000000000000000000000..ef6d8b0952395655909e4357c846ce8014c5a449
--- /dev/null
+++ b/lib/python3.7/_dummy_thread.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/_dummy_thread.py
\ No newline at end of file
diff --git a/lib/python3.7/_weakrefset.py b/lib/python3.7/_weakrefset.py
new file mode 120000
index 0000000000000000000000000000000000000000..922c8c5021c436336e5b43c5a3c7bb1e139a46cd
--- /dev/null
+++ b/lib/python3.7/_weakrefset.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/_weakrefset.py
\ No newline at end of file
diff --git a/lib/python3.7/abc.py b/lib/python3.7/abc.py
new file mode 120000
index 0000000000000000000000000000000000000000..f95e00f0e4d1ad0e90346c2486cec8fb4412f3cb
--- /dev/null
+++ b/lib/python3.7/abc.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/abc.py
\ No newline at end of file
diff --git a/lib/python3.7/base64.py b/lib/python3.7/base64.py
new file mode 120000
index 0000000000000000000000000000000000000000..5a53c06aa5521012a5f881c3858d3afe2aabc4e8
--- /dev/null
+++ b/lib/python3.7/base64.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/base64.py
\ No newline at end of file
diff --git a/lib/python3.7/bisect.py b/lib/python3.7/bisect.py
new file mode 120000
index 0000000000000000000000000000000000000000..06caa654672f1e84f812cfb2cb13c6fa316bbd34
--- /dev/null
+++ b/lib/python3.7/bisect.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/bisect.py
\ No newline at end of file
diff --git a/lib/python3.7/codecs.py b/lib/python3.7/codecs.py
new file mode 120000
index 0000000000000000000000000000000000000000..06b45200e2ad5d880d9cff8312f071e192cf5ce2
--- /dev/null
+++ b/lib/python3.7/codecs.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/codecs.py
\ No newline at end of file
diff --git a/lib/python3.7/collections b/lib/python3.7/collections
new file mode 120000
index 0000000000000000000000000000000000000000..9b59f26290369960dee6888bb799dcc7eaf1b8c1
--- /dev/null
+++ b/lib/python3.7/collections
@@ -0,0 +1 @@
+/usr/lib/python3.7/collections
\ No newline at end of file
diff --git a/lib/python3.7/config-3.7m-x86_64-linux-gnu b/lib/python3.7/config-3.7m-x86_64-linux-gnu
new file mode 120000
index 0000000000000000000000000000000000000000..9e6ede4a40c5f7495ca1986780aff241f749cdae
--- /dev/null
+++ b/lib/python3.7/config-3.7m-x86_64-linux-gnu
@@ -0,0 +1 @@
+/usr/lib/python3.7/config-3.7m-x86_64-linux-gnu
\ No newline at end of file
diff --git a/lib/python3.7/copy.py b/lib/python3.7/copy.py
new file mode 120000
index 0000000000000000000000000000000000000000..3c1771fba6a5702dabc333588f005a2d451a6c4d
--- /dev/null
+++ b/lib/python3.7/copy.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/copy.py
\ No newline at end of file
diff --git a/lib/python3.7/copyreg.py b/lib/python3.7/copyreg.py
new file mode 120000
index 0000000000000000000000000000000000000000..f1c47ed203eca6a1934ba88f4096bcc9363e9d26
--- /dev/null
+++ b/lib/python3.7/copyreg.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/copyreg.py
\ No newline at end of file
diff --git a/lib/python3.7/distutils/__init__.py b/lib/python3.7/distutils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..59f55f17db9a8a68addf8751d50f5bc96548be53
--- /dev/null
+++ b/lib/python3.7/distutils/__init__.py
@@ -0,0 +1,116 @@
+import imp
+import os
+import sys
+import warnings
+
+# opcode is not a virtualenv module, so we can use it to find the stdlib
+# Important! To work on pypy, this must be a module that resides in the
+# lib-python/modified-x.y.z directory
+import opcode
+
+dirname = os.path.dirname
+
+distutils_path = os.path.join(os.path.dirname(opcode.__file__), "distutils")
+if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
+    warnings.warn("The virtualenv distutils package at %s appears to be in the same location as the system distutils?" % __file__)
+else:
+    __path__.insert(0, distutils_path)  # noqa: F821
+    real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ("", "", imp.PKG_DIRECTORY))
+    # Copy the relevant attributes
+    try:
+        __revision__ = real_distutils.__revision__
+    except AttributeError:
+        pass
+    __version__ = real_distutils.__version__
+
+from distutils import dist, sysconfig  # isort:skip
+
+try:
+    basestring
+except NameError:
+    basestring = str
+
+# patch build_ext (distutils doesn't know how to get the libs directory
+# path on windows - it hardcodes the paths around the patched sys.prefix)
+
+if sys.platform == "win32":
+    from distutils.command.build_ext import build_ext as old_build_ext
+
+    class build_ext(old_build_ext):
+        def finalize_options(self):
+            if self.library_dirs is None:
+                self.library_dirs = []
+            elif isinstance(self.library_dirs, basestring):
+                self.library_dirs = self.library_dirs.split(os.pathsep)
+
+            self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
+            old_build_ext.finalize_options(self)
+
+    from distutils.command import build_ext as build_ext_module
+
+    build_ext_module.build_ext = build_ext
+
+# distutils.dist patches:
+
+old_find_config_files = dist.Distribution.find_config_files
+
+
+def find_config_files(self):
+    found = old_find_config_files(self)
+    if os.name == "posix":
+        user_filename = ".pydistutils.cfg"
+    else:
+        user_filename = "pydistutils.cfg"
+    user_filename = os.path.join(sys.prefix, user_filename)
+    if os.path.isfile(user_filename):
+        for item in list(found):
+            if item.endswith("pydistutils.cfg"):
+                found.remove(item)
+        found.append(user_filename)
+    return found
+
+
+dist.Distribution.find_config_files = find_config_files
+
+# distutils.sysconfig patches:
+
+old_get_python_inc = sysconfig.get_python_inc
+
+
+def sysconfig_get_python_inc(plat_specific=0, prefix=None):
+    if prefix is None:
+        prefix = sys.real_prefix
+    return old_get_python_inc(plat_specific, prefix)
+
+
+sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
+sysconfig.get_python_inc = sysconfig_get_python_inc
+
+old_get_python_lib = sysconfig.get_python_lib
+
+
+def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
+    if standard_lib and prefix is None:
+        prefix = sys.real_prefix
+    return old_get_python_lib(plat_specific, standard_lib, prefix)
+
+
+sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
+sysconfig.get_python_lib = sysconfig_get_python_lib
+
+old_get_config_vars = sysconfig.get_config_vars
+
+
+def sysconfig_get_config_vars(*args):
+    real_vars = old_get_config_vars(*args)
+    if sys.platform == "win32":
+        lib_dir = os.path.join(sys.real_prefix, "libs")
+        if isinstance(real_vars, dict) and "LIBDIR" not in real_vars:
+            real_vars["LIBDIR"] = lib_dir  # asked for all
+        elif isinstance(real_vars, list) and "LIBDIR" in args:
+            real_vars = real_vars + [lib_dir]  # asked for list
+    return real_vars
+
+
+sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
+sysconfig.get_config_vars = sysconfig_get_config_vars
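
A quick sketch to check the effect of the sysconfig patches above: inside a virtualenv that sets sys.real_prefix, the patched get_python_inc() defaults its prefix to the real interpreter's prefix rather than the virtualenv's.

    import sys
    from distutils import sysconfig

    if hasattr(sys, "real_prefix"):
        # Resolves against sys.real_prefix thanks to the patch above.
        print(sysconfig.get_python_inc())
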
diff --git a/lib/python3.7/distutils/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/distutils/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10831e130a661cc60f2b99199e98b1910ad6211f
Binary files /dev/null and b/lib/python3.7/distutils/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/distutils/distutils.cfg b/lib/python3.7/distutils/distutils.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..1af230ec911a5d31aa8c4bed50bff2e8e1af8512
--- /dev/null
+++ b/lib/python3.7/distutils/distutils.cfg
@@ -0,0 +1,6 @@
+# This is a config file local to this virtualenv installation
+# You may include options that will be used by all distutils commands,
+# and by easy_install.  For instance:
+#
+#   [easy_install]
+#   find_links = http://mylocalsite
diff --git a/lib/python3.7/encodings b/lib/python3.7/encodings
new file mode 120000
index 0000000000000000000000000000000000000000..bc6d310fb4ac4908550c8e7700531d63807ebd1d
--- /dev/null
+++ b/lib/python3.7/encodings
@@ -0,0 +1 @@
+/usr/lib/python3.7/encodings
\ No newline at end of file
diff --git a/lib/python3.7/enum.py b/lib/python3.7/enum.py
new file mode 120000
index 0000000000000000000000000000000000000000..77ccb8a1e34e3ecd2819456e75112909f73bf93d
--- /dev/null
+++ b/lib/python3.7/enum.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/enum.py
\ No newline at end of file
diff --git a/lib/python3.7/fnmatch.py b/lib/python3.7/fnmatch.py
new file mode 120000
index 0000000000000000000000000000000000000000..2063ce7ef2c2d4d6ac8caaa196b5f53deb6ce18d
--- /dev/null
+++ b/lib/python3.7/fnmatch.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/fnmatch.py
\ No newline at end of file
diff --git a/lib/python3.7/functools.py b/lib/python3.7/functools.py
new file mode 120000
index 0000000000000000000000000000000000000000..516834e94481ffe7e8ece6bceb290dca6b220778
--- /dev/null
+++ b/lib/python3.7/functools.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/functools.py
\ No newline at end of file
diff --git a/lib/python3.7/genericpath.py b/lib/python3.7/genericpath.py
new file mode 120000
index 0000000000000000000000000000000000000000..56b9556781d063311bc8c2793682e075780a420f
--- /dev/null
+++ b/lib/python3.7/genericpath.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/genericpath.py
\ No newline at end of file
diff --git a/lib/python3.7/hashlib.py b/lib/python3.7/hashlib.py
new file mode 120000
index 0000000000000000000000000000000000000000..90e4cebb564886811233ef595e54ea4b2cb419ea
--- /dev/null
+++ b/lib/python3.7/hashlib.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/hashlib.py
\ No newline at end of file
diff --git a/lib/python3.7/heapq.py b/lib/python3.7/heapq.py
new file mode 120000
index 0000000000000000000000000000000000000000..3c076dc021de07d1d047a3b12d306fb8a8ab29b7
--- /dev/null
+++ b/lib/python3.7/heapq.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/heapq.py
\ No newline at end of file
diff --git a/lib/python3.7/hmac.py b/lib/python3.7/hmac.py
new file mode 120000
index 0000000000000000000000000000000000000000..57e88c32657a4081739ffa381c6778415e96b8b2
--- /dev/null
+++ b/lib/python3.7/hmac.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/hmac.py
\ No newline at end of file
diff --git a/lib/python3.7/imp.py b/lib/python3.7/imp.py
new file mode 120000
index 0000000000000000000000000000000000000000..56ec73e2679ff6404dbb8151961e501bdcf89565
--- /dev/null
+++ b/lib/python3.7/imp.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/imp.py
\ No newline at end of file
diff --git a/lib/python3.7/importlib b/lib/python3.7/importlib
new file mode 120000
index 0000000000000000000000000000000000000000..4c5f4cab0229e8d9a786dfec577f9412f2c0b2c0
--- /dev/null
+++ b/lib/python3.7/importlib
@@ -0,0 +1 @@
+/usr/lib/python3.7/importlib
\ No newline at end of file
diff --git a/lib/python3.7/io.py b/lib/python3.7/io.py
new file mode 120000
index 0000000000000000000000000000000000000000..339c817308dd4ffd9b410332c8319b9d1137e287
--- /dev/null
+++ b/lib/python3.7/io.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/io.py
\ No newline at end of file
diff --git a/lib/python3.7/keyword.py b/lib/python3.7/keyword.py
new file mode 120000
index 0000000000000000000000000000000000000000..af7e802e7dc8df4052c3900dc5984e1e7d838193
--- /dev/null
+++ b/lib/python3.7/keyword.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/keyword.py
\ No newline at end of file
diff --git a/lib/python3.7/lib-dynload b/lib/python3.7/lib-dynload
new file mode 120000
index 0000000000000000000000000000000000000000..12d11dbd7617122922130214b662c8f865850300
--- /dev/null
+++ b/lib/python3.7/lib-dynload
@@ -0,0 +1 @@
+/usr/lib/python3.7/lib-dynload
\ No newline at end of file
diff --git a/lib/python3.7/linecache.py b/lib/python3.7/linecache.py
new file mode 120000
index 0000000000000000000000000000000000000000..fa0de008fabc5b4b64bae2726a433a1a14f89b9d
--- /dev/null
+++ b/lib/python3.7/linecache.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/linecache.py
\ No newline at end of file
diff --git a/lib/python3.7/locale.py b/lib/python3.7/locale.py
new file mode 120000
index 0000000000000000000000000000000000000000..1742884a9131a712a55ca1cdf4d68dcdddced4fa
--- /dev/null
+++ b/lib/python3.7/locale.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/locale.py
\ No newline at end of file
diff --git a/lib/python3.7/no-global-site-packages.txt b/lib/python3.7/no-global-site-packages.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/python3.7/ntpath.py b/lib/python3.7/ntpath.py
new file mode 120000
index 0000000000000000000000000000000000000000..ac3e5993dfb551cba6a5d0b80de22bdd17044b3f
--- /dev/null
+++ b/lib/python3.7/ntpath.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/ntpath.py
\ No newline at end of file
diff --git a/lib/python3.7/operator.py b/lib/python3.7/operator.py
new file mode 120000
index 0000000000000000000000000000000000000000..20aa91f776b69f850c89845444c573ba824700a8
--- /dev/null
+++ b/lib/python3.7/operator.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/operator.py
\ No newline at end of file
diff --git a/lib/python3.7/orig-prefix.txt b/lib/python3.7/orig-prefix.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e25db584b91486de5db5f56a271923324202d338
--- /dev/null
+++ b/lib/python3.7/orig-prefix.txt
@@ -0,0 +1 @@
+/usr
\ No newline at end of file
diff --git a/lib/python3.7/os.py b/lib/python3.7/os.py
new file mode 120000
index 0000000000000000000000000000000000000000..3f4e638fb88f1c567b417d48c30e490da31185a5
--- /dev/null
+++ b/lib/python3.7/os.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/os.py
\ No newline at end of file
diff --git a/lib/python3.7/posixpath.py b/lib/python3.7/posixpath.py
new file mode 120000
index 0000000000000000000000000000000000000000..fae2a64bf97e5c2bd9a61ce053d63a48974f439c
--- /dev/null
+++ b/lib/python3.7/posixpath.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/posixpath.py
\ No newline at end of file
diff --git a/lib/python3.7/random.py b/lib/python3.7/random.py
new file mode 120000
index 0000000000000000000000000000000000000000..904f9fab6b84c3ed9c863a45e226645cdee02b17
--- /dev/null
+++ b/lib/python3.7/random.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/random.py
\ No newline at end of file
diff --git a/lib/python3.7/re.py b/lib/python3.7/re.py
new file mode 120000
index 0000000000000000000000000000000000000000..f92bf3435cf1a9ba9600e5a940f786a2bdb682a4
--- /dev/null
+++ b/lib/python3.7/re.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/re.py
\ No newline at end of file
diff --git a/lib/python3.7/reprlib.py b/lib/python3.7/reprlib.py
new file mode 120000
index 0000000000000000000000000000000000000000..26e0a4194c7becb6f9eba01c90c150bc1cc67bb4
--- /dev/null
+++ b/lib/python3.7/reprlib.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/reprlib.py
\ No newline at end of file
diff --git a/lib/python3.7/rlcompleter.py b/lib/python3.7/rlcompleter.py
new file mode 120000
index 0000000000000000000000000000000000000000..a823cd0db405aef6b391a90a5b5c3f6d20e3c9f8
--- /dev/null
+++ b/lib/python3.7/rlcompleter.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/rlcompleter.py
\ No newline at end of file
diff --git a/lib/python3.7/shutil.py b/lib/python3.7/shutil.py
new file mode 120000
index 0000000000000000000000000000000000000000..c2f4a48537bd034c26924cd9dced79eff99f3289
--- /dev/null
+++ b/lib/python3.7/shutil.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/shutil.py
\ No newline at end of file
diff --git a/lib/python3.7/site-packages/Click-7.0.dist-info/INSTALLER b/lib/python3.7/site-packages/Click-7.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/lib/python3.7/site-packages/Click-7.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/python3.7/site-packages/Click-7.0.dist-info/LICENSE.txt b/lib/python3.7/site-packages/Click-7.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..87ce152aafba76f0200eae513fd0541f4f767acd
--- /dev/null
+++ b/lib/python3.7/site-packages/Click-7.0.dist-info/LICENSE.txt
@@ -0,0 +1,39 @@
+Copyright © 2014 by the Pallets team.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms of the software as
+well as documentation, with or without modification, are permitted
+provided that the following conditions are met:
+
+-   Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+-   Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+-   Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+----
+
+Click uses parts of optparse written by Gregory P. Ward and maintained
+by the Python Software Foundation. This is limited to code in parser.py.
+
+Copyright © 2001-2006 Gregory P. Ward. All rights reserved.
+Copyright © 2002-2006 Python Software Foundation. All rights reserved.
diff --git a/lib/python3.7/site-packages/Click-7.0.dist-info/METADATA b/lib/python3.7/site-packages/Click-7.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..625bdaddbba169ae3b25b492a591eff6cc2e4013
--- /dev/null
+++ b/lib/python3.7/site-packages/Click-7.0.dist-info/METADATA
@@ -0,0 +1,121 @@
+Metadata-Version: 2.1
+Name: Click
+Version: 7.0
+Summary: Composable command line interface toolkit
+Home-page: https://palletsprojects.com/p/click/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+Maintainer: Pallets Team
+Maintainer-email: contact@palletsprojects.com
+License: BSD
+Project-URL: Documentation, https://click.palletsprojects.com/
+Project-URL: Code, https://github.com/pallets/click
+Project-URL: Issue tracker, https://github.com/pallets/click/issues
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+
+\$ click\_
+==========
+
+Click is a Python package for creating beautiful command line interfaces
+in a composable way with as little code as necessary. It's the "Command
+Line Interface Creation Kit". It's highly configurable but comes with
+sensible defaults out of the box.
+
+It aims to make the process of writing command line tools quick and fun
+while also preventing any frustration caused by the inability to
+implement an intended CLI API.
+
+Click in three points:
+
+-   Arbitrary nesting of commands
+-   Automatic help page generation
+-   Support for lazy loading of subcommands at runtime
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+    $ pip install click
+
+Click supports Python 3.4 and newer, Python 2.7, and PyPy.
+
+.. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
+A Simple Example
+----------------
+
+What does it look like? Here is an example of a simple Click program:
+
+.. code-block:: python
+
+    import click
+
+    @click.command()
+    @click.option("--count", default=1, help="Number of greetings.")
+    @click.option("--name", prompt="Your name",
+                  help="The person to greet.")
+    def hello(count, name):
+        """Simple program that greets NAME for a total of COUNT times."""
+        for _ in range(count):
+            click.echo("Hello, %s!" % name)
+
+    if __name__ == '__main__':
+        hello()
+
+And what it looks like when run:
+
+.. code-block:: text
+
+    $ python hello.py --count=3
+    Your name: Click
+    Hello, Click!
+    Hello, Click!
+    Hello, Click!
+
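+Commands compose by nesting them under a group. The following sketch is
+illustrative rather than part of the original README; the ``cli``,
+``init``, and ``greet`` names are made up, but ``click.group`` is the
+standard API for this:
+
+.. code-block:: python
+
+    import click
+
+    @click.group()
+    def cli():
+        """Root command that dispatches to subcommands."""
+
+    @cli.command()
+    def init():
+        """Hypothetical subcommand."""
+        click.echo("Initialized")
+
+    @cli.command()
+    @click.argument("name")
+    def greet(name):
+        """Hypothetical subcommand taking one argument."""
+        click.echo("Hello, %s!" % name)
+
+    if __name__ == "__main__":
+        cli()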
+
+Donate
+------
+
+The Pallets organization develops and supports Click and other popular
+packages. In order to grow the community of contributors and users, and
+allow the maintainers to devote more time to the projects, `please
+donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+*   Website: https://palletsprojects.com/p/click/
+*   Documentation: https://click.palletsprojects.com/
+*   License: `BSD <https://github.com/pallets/click/blob/master/LICENSE.rst>`_
+*   Releases: https://pypi.org/project/click/
+*   Code: https://github.com/pallets/click
+*   Issue tracker: https://github.com/pallets/click/issues
+*   Test status:
+
+    *   Linux, Mac: https://travis-ci.org/pallets/click
+    *   Windows: https://ci.appveyor.com/project/pallets/click
+
+*   Test coverage: https://codecov.io/gh/pallets/click
+
+
diff --git a/lib/python3.7/site-packages/Click-7.0.dist-info/RECORD b/lib/python3.7/site-packages/Click-7.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..f17ac66fc4da4ed55a2763cd3c0c0c30756a08b2
--- /dev/null
+++ b/lib/python3.7/site-packages/Click-7.0.dist-info/RECORD
@@ -0,0 +1,40 @@
+Click-7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Click-7.0.dist-info/LICENSE.txt,sha256=4hIxn676T0Wcisk3_chVcECjyrivKTZsoqSNI5AlIlw,1876
+Click-7.0.dist-info/METADATA,sha256=-r8jeke3Zer4diRvT1MjFZuiJ6yTT_qFP39svLqdaLI,3516
+Click-7.0.dist-info/RECORD,,
+Click-7.0.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110
+Click-7.0.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6
+click/__init__.py,sha256=HjGThQ7tef9kkwCV371TBnrf0SAi6fKfU_jtEnbYTvQ,2789
+click/__pycache__/__init__.cpython-37.pyc,,
+click/__pycache__/_bashcomplete.cpython-37.pyc,,
+click/__pycache__/_compat.cpython-37.pyc,,
+click/__pycache__/_termui_impl.cpython-37.pyc,,
+click/__pycache__/_textwrap.cpython-37.pyc,,
+click/__pycache__/_unicodefun.cpython-37.pyc,,
+click/__pycache__/_winconsole.cpython-37.pyc,,
+click/__pycache__/core.cpython-37.pyc,,
+click/__pycache__/decorators.cpython-37.pyc,,
+click/__pycache__/exceptions.cpython-37.pyc,,
+click/__pycache__/formatting.cpython-37.pyc,,
+click/__pycache__/globals.cpython-37.pyc,,
+click/__pycache__/parser.cpython-37.pyc,,
+click/__pycache__/termui.cpython-37.pyc,,
+click/__pycache__/testing.cpython-37.pyc,,
+click/__pycache__/types.cpython-37.pyc,,
+click/__pycache__/utils.cpython-37.pyc,,
+click/_bashcomplete.py,sha256=iaNUmtxag0YPfxba3TDYCNietiTMQIrvhRLj-H8okFU,11014
+click/_compat.py,sha256=vYmvoj4opPxo-c-2GMQQjYT_r_QkOKybkfGoeVrt0dA,23399
+click/_termui_impl.py,sha256=xHmLtOJhKUCVD6168yucJ9fknUJPAMs0eUTPgVUO-GQ,19611
+click/_textwrap.py,sha256=gwS4m7bdQiJnzaDG8osFcRb-5vn4t4l2qSCy-5csCEc,1198
+click/_unicodefun.py,sha256=QHy2_5jYlX-36O-JVrTHNnHOqg8tquUR0HmQFev7Ics,4364
+click/_winconsole.py,sha256=PPWVak8Iikm_gAPsxMrzwsVFCvHgaW3jPaDWZ1JBl3U,8965
+click/core.py,sha256=q8FLcDZsagBGSRe5Y9Hi_FGvAeZvusNfoO5EkhkSQ8Y,75305
+click/decorators.py,sha256=idKt6duLUUfAFftrHoREi8MJSd39XW36pUVHthdglwk,11226
+click/exceptions.py,sha256=CNpAjBAE7qjaV4WChxQeak95e5yUOau8AsvT-8m6wss,7663
+click/formatting.py,sha256=eh-cypTUAhpI3HD-K4ZpR3vCiURIO62xXvKkR3tNUTM,8889
+click/globals.py,sha256=oQkou3ZQ5DgrbVM6BwIBirwiqozbjfirzsLGAlLRRdg,1514
+click/parser.py,sha256=m-nGZz4VwprM42_qtFlWFGo7yRJQxkBlRcZodoH593Y,15510
+click/termui.py,sha256=o_ZXB2jyvL2Rce7P_bFGq452iyBq9ykJyRApIPMCZO0,23207
+click/testing.py,sha256=aYGqY_iWLu2p4k7lkuJ6t3fqpf6aPGqTsyLzNY_ngKg,13062
+click/types.py,sha256=2Q929p-aBP_ZYuMFJqJR-Ipucofv3fmDc5JzBDPmzJU,23287
+click/utils.py,sha256=6-D0WkAxvv9FkgHXSHwDIv0l9Gdx9Mm6Z5vuKNLIfZI,15763
diff --git a/lib/python3.7/site-packages/Click-7.0.dist-info/WHEEL b/lib/python3.7/site-packages/Click-7.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..1316c41d0706f2dbe6e8d73a809c31d67f02954a
--- /dev/null
+++ b/lib/python3.7/site-packages/Click-7.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.31.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/lib/python3.7/site-packages/Click-7.0.dist-info/top_level.txt b/lib/python3.7/site-packages/Click-7.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dca9a909647e3b066931de2909c2d1e65c78c995
--- /dev/null
+++ b/lib/python3.7/site-packages/Click-7.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+click
diff --git a/lib/python3.7/site-packages/Flask-1.0.2.dist-info/INSTALLER b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/python3.7/site-packages/Flask-1.0.2.dist-info/LICENSE.txt b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8f9252f4527debb8ec5ae57df4ffd134cfd3c643
--- /dev/null
+++ b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/LICENSE.txt
@@ -0,0 +1,31 @@
+Copyright © 2010 by the Pallets team.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms of the software as
+well as documentation, with or without modification, are permitted
+provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice,
+  this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holder nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
diff --git a/lib/python3.7/site-packages/Flask-1.0.2.dist-info/METADATA b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..c600e730a953ee3a109ededd74fb36650b601a69
--- /dev/null
+++ b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/METADATA
@@ -0,0 +1,130 @@
+Metadata-Version: 2.1
+Name: Flask
+Version: 1.0.2
+Summary: A simple framework for building complex web applications.
+Home-page: https://www.palletsprojects.com/p/flask/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+Maintainer: Pallets team
+Maintainer-email: contact@palletsprojects.com
+License: BSD
+Project-URL: Documentation, http://flask.pocoo.org/docs/
+Project-URL: Code, https://github.com/pallets/flask
+Project-URL: Issue tracker, https://github.com/pallets/flask/issues
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Framework :: Flask
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Application
+Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Provides-Extra: dev
+Provides-Extra: docs
+Provides-Extra: dotenv
+Requires-Dist: Werkzeug (>=0.14)
+Requires-Dist: Jinja2 (>=2.10)
+Requires-Dist: itsdangerous (>=0.24)
+Requires-Dist: click (>=5.1)
+Provides-Extra: dev
+Requires-Dist: pytest (>=3); extra == 'dev'
+Requires-Dist: coverage; extra == 'dev'
+Requires-Dist: tox; extra == 'dev'
+Requires-Dist: sphinx; extra == 'dev'
+Requires-Dist: pallets-sphinx-themes; extra == 'dev'
+Requires-Dist: sphinxcontrib-log-cabinet; extra == 'dev'
+Provides-Extra: docs
+Requires-Dist: sphinx; extra == 'docs'
+Requires-Dist: pallets-sphinx-themes; extra == 'docs'
+Requires-Dist: sphinxcontrib-log-cabinet; extra == 'docs'
+Provides-Extra: dotenv
+Requires-Dist: python-dotenv; extra == 'dotenv'
+
+Flask
+=====
+
+Flask is a lightweight `WSGI`_ web application framework. It is designed
+to make getting started quick and easy, with the ability to scale up to
+complex applications. It began as a simple wrapper around `Werkzeug`_
+and `Jinja`_ and has become one of the most popular Python web
+application frameworks.
+
+Flask offers suggestions, but doesn't enforce any dependencies or
+project layout. It is up to the developer to choose the tools and
+libraries they want to use. There are many extensions provided by the
+community that make adding new functionality easy.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+    pip install -U Flask
+
+
+A Simple Example
+----------------
+
+.. code-block:: python
+
+    from flask import Flask
+
+    app = Flask(__name__)
+
+    @app.route('/')
+    def hello():
+        return 'Hello, World!'
+
+.. code-block:: text
+
+    $ FLASK_APP=hello.py flask run
+     * Serving Flask app "hello"
+     * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
+
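+Flask routes can also capture parts of the URL. A hypothetical
+continuation of the example above (the ``/user/<name>`` rule and the
+``jsonify`` response are illustrative additions, not from the upstream
+README):
+
+.. code-block:: python
+
+    from flask import Flask, jsonify
+
+    app = Flask(__name__)
+
+    @app.route('/user/<name>')
+    def user(name):
+        # The <name> segment is captured and passed in as an argument.
+        return jsonify(greeting='Hello, %s!' % name)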
+
+Donate
+------
+
+The Pallets organization develops and supports Flask and the libraries
+it uses. In order to grow the community of contributors and users, and
+allow the maintainers to devote more time to the projects, `please
+donate today`_.
+
+.. _please donate today: https://psfmember.org/civicrm/contribute/transact?reset=1&id=20
+
+
+Links
+-----
+
+* Website: https://www.palletsprojects.com/p/flask/
+* Documentation: http://flask.pocoo.org/docs/
+* License: `BSD <https://github.com/pallets/flask/blob/master/LICENSE>`_
+* Releases: https://pypi.org/project/Flask/
+* Code: https://github.com/pallets/flask
+* Issue tracker: https://github.com/pallets/flask/issues
+* Test status:
+
+  * Linux, Mac: https://travis-ci.org/pallets/flask
+  * Windows: https://ci.appveyor.com/project/pallets/flask
+
+* Test coverage: https://codecov.io/gh/pallets/flask
+
+.. _WSGI: https://wsgi.readthedocs.io
+.. _Werkzeug: https://www.palletsprojects.com/p/werkzeug/
+.. _Jinja: https://www.palletsprojects.com/p/jinja/
+.. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
diff --git a/lib/python3.7/site-packages/Flask-1.0.2.dist-info/RECORD b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..3cb2ce027d13d03666d48e6ffdb31edb5cea555d
--- /dev/null
+++ b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/RECORD
@@ -0,0 +1,48 @@
+../../../bin/flask,sha256=JC7i-_SHfLL6gBof9RZ0LuoSTN2q35U1UMxLbs4uIes,278
+Flask-1.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Flask-1.0.2.dist-info/LICENSE.txt,sha256=ziEXA3AIuaiUn1qe4cd1XxCESWTYrk4TjN7Qb06J3l8,1575
+Flask-1.0.2.dist-info/METADATA,sha256=iA5tiNWzTtgCVe80aTZGNWsckj853fJyfvHs9U-WZRk,4182
+Flask-1.0.2.dist-info/RECORD,,
+Flask-1.0.2.dist-info/WHEEL,sha256=J3CsTk7Mf2JNUyhImI-mjX-fmI4oDjyiXgWT4qgZiCE,110
+Flask-1.0.2.dist-info/entry_points.txt,sha256=gBLA1aKg0OYR8AhbAfg8lnburHtKcgJLDU52BBctN0k,42
+Flask-1.0.2.dist-info/top_level.txt,sha256=dvi65F6AeGWVU0TBpYiC04yM60-FX1gJFkK31IKQr5c,6
+flask/__init__.py,sha256=qq8lK6QQbxJALf1igz7qsvUwOTAoKvFGfdLm7jPNsso,1673
+flask/__main__.py,sha256=pgIXrHhxM5MAMvgzAqWpw_t6AXZ1zG38us4JRgJKtxk,291
+flask/__pycache__/__init__.cpython-37.pyc,,
+flask/__pycache__/__main__.cpython-37.pyc,,
+flask/__pycache__/_compat.cpython-37.pyc,,
+flask/__pycache__/app.cpython-37.pyc,,
+flask/__pycache__/blueprints.cpython-37.pyc,,
+flask/__pycache__/cli.cpython-37.pyc,,
+flask/__pycache__/config.cpython-37.pyc,,
+flask/__pycache__/ctx.cpython-37.pyc,,
+flask/__pycache__/debughelpers.cpython-37.pyc,,
+flask/__pycache__/globals.cpython-37.pyc,,
+flask/__pycache__/helpers.cpython-37.pyc,,
+flask/__pycache__/logging.cpython-37.pyc,,
+flask/__pycache__/sessions.cpython-37.pyc,,
+flask/__pycache__/signals.cpython-37.pyc,,
+flask/__pycache__/templating.cpython-37.pyc,,
+flask/__pycache__/testing.cpython-37.pyc,,
+flask/__pycache__/views.cpython-37.pyc,,
+flask/__pycache__/wrappers.cpython-37.pyc,,
+flask/_compat.py,sha256=UDFGhosh6mOdNB-4evKPuneHum1OpcAlwTNJCRm0irQ,2892
+flask/app.py,sha256=ahpe3T8w98rQd_Er5d7uDxK57S1nnqGQx3V3hirBovU,94147
+flask/blueprints.py,sha256=Cyhl_x99tgwqEZPtNDJUFneAfVJxWfEU4bQA7zWS6VU,18331
+flask/cli.py,sha256=30QYAO10Do9LbZYCLgfI_xhKjASdLopL8wKKVUGS2oA,29442
+flask/config.py,sha256=kznUhj4DLYxsTF_4kfDG8GEHto1oZG_kqblyrLFtpqQ,9951
+flask/ctx.py,sha256=leFzS9fzmo0uaLCdxpHc5_iiJZ1H0X_Ig4yPCOvT--g,16224
+flask/debughelpers.py,sha256=1ceC-UyqZTd4KsJkf0OObHPsVt5R3T6vnmYhiWBjV-w,6479
+flask/globals.py,sha256=pGg72QW_-4xUfsI33I5L_y76c21AeqfSqXDcbd8wvXU,1649
+flask/helpers.py,sha256=YCl8D1plTO1evEYP4KIgaY3H8Izww5j4EdgRJ89oHTw,40106
+flask/json/__init__.py,sha256=Ns1Hj805XIxuBMh2z0dYnMVfb_KUgLzDmP3WoUYaPhw,10729
+flask/json/__pycache__/__init__.cpython-37.pyc,,
+flask/json/__pycache__/tag.cpython-37.pyc,,
+flask/json/tag.py,sha256=9ehzrmt5k7hxf7ZEK0NOs3swvQyU9fWNe-pnYe69N60,8223
+flask/logging.py,sha256=qV9h0vt7NIRkKM9OHDWndzO61E5CeBMlqPJyTt-W2Wc,2231
+flask/sessions.py,sha256=2XHV4ASREhSEZ8bsPQW6pNVNuFtbR-04BzfKg0AfvHo,14452
+flask/signals.py,sha256=BGQbVyCYXnzKK2DVCzppKFyWN1qmrtW1QMAYUs-1Nr8,2211
+flask/templating.py,sha256=FDfWMbpgpC3qObW8GGXRAVrkHFF8K4CHOJymB1wvULI,4914
+flask/testing.py,sha256=XD3gWNvLUV8dqVHwKd9tZzsj81fSHtjOphQ1wTNtlMs,9379
+flask/views.py,sha256=Wy-_WkUVtCfE2zCXYeJehNgHuEtviE4v3HYfJ--MpbY,5733
+flask/wrappers.py,sha256=1Z9hF5-hXQajn_58XITQFRY8efv3Vy3uZ0avBfZu6XI,7511
diff --git a/lib/python3.7/site-packages/Flask-1.0.2.dist-info/WHEEL b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..f21b51cd8af46f70e8fc3a7bd678e2db8baf104c
--- /dev/null
+++ b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.31.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/lib/python3.7/site-packages/Flask-1.0.2.dist-info/entry_points.txt b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1eb025200e62eb7e4f4c5d27a8498df07b84c5f2
--- /dev/null
+++ b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+flask = flask.cli:main
+
diff --git a/lib/python3.7/site-packages/Flask-1.0.2.dist-info/top_level.txt b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7e1060246fd6746a14204539a72e199a25469a05
--- /dev/null
+++ b/lib/python3.7/site-packages/Flask-1.0.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+flask
diff --git a/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/INSTALLER b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/LICENSE b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..10145a264342b7888ec6accfedc4f2808fb67a0e
--- /dev/null
+++ b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/LICENSE
@@ -0,0 +1,31 @@
+Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+
+    * The names of the contributors may not be used to endorse or
+      promote products derived from this software without specific
+      prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/METADATA b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..fb4a867d0fadad267ab2ee901695ab43caf00144
--- /dev/null
+++ b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/METADATA
@@ -0,0 +1,67 @@
+Metadata-Version: 2.1
+Name: Jinja2
+Version: 2.10.1
+Summary: A small but fast and easy-to-use stand-alone template engine written in pure Python.
+Home-page: http://jinja.pocoo.org/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+License: BSD
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Dist: MarkupSafe (>=0.23)
+Provides-Extra: i18n
+Requires-Dist: Babel (>=0.8) ; extra == 'i18n'
+
+
+Jinja2
+~~~~~~
+
+Jinja2 is a template engine written in pure Python. It provides a
+non-XML syntax inspired by `Django`_, supports inline expressions, and
+offers an optional `sandboxed`_ environment.
+
+Nutshell
+--------
+
+Here is a small example of a Jinja template::
+
+    {% extends 'base.html' %}
+    {% block title %}Memberlist{% endblock %}
+    {% block content %}
+      <ul>
+      {% for user in users %}
+        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+      {% endfor %}
+      </ul>
+    {% endblock %}
+
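+Rendering such a template from Python is a one-liner; here is a minimal
+sketch (an illustration added for this description, with made-up
+``users`` data)::
+
+    from jinja2 import Template
+
+    template = Template("{% for user in users %}{{ user }} {% endfor %}")
+    print(template.render(users=["alice", "bob"]))  # prints "alice bob "
+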
+Philosophy
+----------
+
+Application logic belongs in the controller, but don't make life too
+hard for the template designer by giving them too little functionality.
+
+For more information, visit the `Jinja2 webpage`_ and the `documentation`_.
+
+.. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security)
+.. _Django: https://www.djangoproject.com/
+.. _Jinja2 webpage: http://jinja.pocoo.org/
+.. _documentation: http://jinja.pocoo.org/2/documentation/
+
+
diff --git a/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/RECORD b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..914594a8f81b38069142c60c919d9d7735bcb4d0
--- /dev/null
+++ b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/RECORD
@@ -0,0 +1,61 @@
+Jinja2-2.10.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Jinja2-2.10.1.dist-info/LICENSE,sha256=JvzUNv3Io51EiWrAPm8d_SXjhJnEjyDYvB3Tvwqqils,1554
+Jinja2-2.10.1.dist-info/METADATA,sha256=rx0eN8lX8iq8-YVppmCzV1Qx4y3Pj9IWi08mXUCrewI,2227
+Jinja2-2.10.1.dist-info/RECORD,,
+Jinja2-2.10.1.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110
+Jinja2-2.10.1.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72
+Jinja2-2.10.1.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
+jinja2/__init__.py,sha256=V1D-JHQKklZseXOMA-uAW7-BeKe_TfPpOFi9-dV04ZA,2616
+jinja2/__pycache__/__init__.cpython-37.pyc,,
+jinja2/__pycache__/_compat.cpython-37.pyc,,
+jinja2/__pycache__/_identifier.cpython-37.pyc,,
+jinja2/__pycache__/asyncfilters.cpython-37.pyc,,
+jinja2/__pycache__/asyncsupport.cpython-37.pyc,,
+jinja2/__pycache__/bccache.cpython-37.pyc,,
+jinja2/__pycache__/compiler.cpython-37.pyc,,
+jinja2/__pycache__/constants.cpython-37.pyc,,
+jinja2/__pycache__/debug.cpython-37.pyc,,
+jinja2/__pycache__/defaults.cpython-37.pyc,,
+jinja2/__pycache__/environment.cpython-37.pyc,,
+jinja2/__pycache__/exceptions.cpython-37.pyc,,
+jinja2/__pycache__/ext.cpython-37.pyc,,
+jinja2/__pycache__/filters.cpython-37.pyc,,
+jinja2/__pycache__/idtracking.cpython-37.pyc,,
+jinja2/__pycache__/lexer.cpython-37.pyc,,
+jinja2/__pycache__/loaders.cpython-37.pyc,,
+jinja2/__pycache__/meta.cpython-37.pyc,,
+jinja2/__pycache__/nativetypes.cpython-37.pyc,,
+jinja2/__pycache__/nodes.cpython-37.pyc,,
+jinja2/__pycache__/optimizer.cpython-37.pyc,,
+jinja2/__pycache__/parser.cpython-37.pyc,,
+jinja2/__pycache__/runtime.cpython-37.pyc,,
+jinja2/__pycache__/sandbox.cpython-37.pyc,,
+jinja2/__pycache__/tests.cpython-37.pyc,,
+jinja2/__pycache__/utils.cpython-37.pyc,,
+jinja2/__pycache__/visitor.cpython-37.pyc,,
+jinja2/_compat.py,sha256=xP60CE5Qr8FTYcDE1f54tbZLKGvMwYml4-8T7Q4KG9k,2596
+jinja2/_identifier.py,sha256=W1QBSY-iJsyt6oR_nKSuNNCzV95vLIOYgUNPUI1d5gU,1726
+jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144
+jinja2/asyncsupport.py,sha256=UErQ3YlTLaSjFb94P4MVn08-aVD9jJxty2JVfMRb-1M,7878
+jinja2/bccache.py,sha256=nQldx0ZRYANMyfvOihRoYFKSlUdd5vJkS7BjxNwlOZM,12794
+jinja2/compiler.py,sha256=BqC5U6JxObSRhblyT_a6Tp5GtEU5z3US1a4jLQaxxgo,65386
+jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626
+jinja2/debug.py,sha256=WTVeUFGUa4v6ReCsYv-iVPa3pkNB75OinJt3PfxNdXs,12045
+jinja2/defaults.py,sha256=Em-95hmsJxIenDCZFB1YSvf9CNhe9rBmytN3yUrBcWA,1400
+jinja2/environment.py,sha256=VnkAkqw8JbjZct4tAyHlpBrka2vqB-Z58RAP-32P1ZY,50849
+jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428
+jinja2/ext.py,sha256=atMQydEC86tN1zUsdQiHw5L5cF62nDbqGue25Yiu3N4,24500
+jinja2/filters.py,sha256=yOAJk0MsH-_gEC0i0U6NweVQhbtYaC-uE8xswHFLF4w,36528
+jinja2/idtracking.py,sha256=2GbDSzIvGArEBGLkovLkqEfmYxmWsEf8c3QZwM4uNsw,9197
+jinja2/lexer.py,sha256=ySEPoXd1g7wRjsuw23uimS6nkGN5aqrYwcOKxCaVMBQ,28559
+jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382
+jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340
+jinja2/nativetypes.py,sha256=_sJhS8f-8Q0QMIC0dm1YEdLyxEyoO-kch8qOL5xUDfE,7308
+jinja2/nodes.py,sha256=L10L_nQDfubLhO3XjpF9qz46FSh2clL-3e49ogVlMmA,30853
+jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722
+jinja2/parser.py,sha256=lPzTEbcpTRBLw8ii6OYyExHeAhaZLMA05Hpv4ll3ULk,35875
+jinja2/runtime.py,sha256=DHdD38Pq8gj7uWQC5usJyWFoNWL317A9AvXOW_CLB34,27755
+jinja2/sandbox.py,sha256=UmX8hVjdaERCbA3RXBwrV1f-beA23KmijG5kzPJyU4A,17106
+jinja2/tests.py,sha256=iJQLwbapZr-EKquTG_fVOVdwHUUKf3SX9eNkjQDF8oU,4237
+jinja2/utils.py,sha256=q24VupGZotQ-uOyrJxCaXtDWhZC1RgsQG7kcdmjck2Q,20629
+jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316
diff --git a/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/WHEEL b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..c8240f03e87fdc556c4cb9a1f721a73138177cc5
--- /dev/null
+++ b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/entry_points.txt b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..32e6b7530284307358fa869cf232318221f791fd
--- /dev/null
+++ b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/entry_points.txt
@@ -0,0 +1,4 @@
+
+    [babel.extractors]
+    jinja2 = jinja2.ext:babel_extract[i18n]
+    
\ No newline at end of file
diff --git a/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/top_level.txt b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7f7afbf3bf54b346092be6a72070fcbd305ead1e
--- /dev/null
+++ b/lib/python3.7/site-packages/Jinja2-2.10.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+jinja2
diff --git a/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER b/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.txt b/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9d227a0cc43c3268d15722b763bd94ad298645a1
--- /dev/null
+++ b/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.txt
@@ -0,0 +1,28 @@
+Copyright 2010 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1.  Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+2.  Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+3.  Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/METADATA b/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..b208d936fe8302367ad469a3eb470a52c86b45bf
--- /dev/null
+++ b/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/METADATA
@@ -0,0 +1,103 @@
+Metadata-Version: 2.1
+Name: MarkupSafe
+Version: 1.1.1
+Summary: Safely add untrusted strings to HTML/XML markup.
+Home-page: https://palletsprojects.com/p/markupsafe/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+Maintainer: The Pallets Team
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Documentation, https://markupsafe.palletsprojects.com/
+Project-URL: Code, https://github.com/pallets/markupsafe
+Project-URL: Issue tracker, https://github.com/pallets/markupsafe/issues
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*
+
+MarkupSafe
+==========
+
+MarkupSafe implements a text object that escapes characters so it is
+safe to use in HTML and XML. Characters that have special meanings are
+replaced so that they display as the actual characters. This mitigates
+injection attacks, meaning untrusted user input can safely be displayed
+on a page.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+    pip install -U MarkupSafe
+
+.. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
+Examples
+--------
+
+.. code-block:: pycon
+
+    >>> from markupsafe import Markup, escape
+    >>> # escape replaces special characters and wraps in Markup
+    >>> escape('<script>alert(document.cookie);</script>')
+    Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+    >>> # wrap in Markup to mark text "safe" and prevent escaping
+    >>> Markup('<strong>Hello</strong>')
+    Markup('<strong>Hello</strong>')
+    >>> escape(Markup('<strong>Hello</strong>'))
+    Markup('<strong>Hello</strong>')
+    >>> # Markup is a text subclass (str on Python 3, unicode on Python 2)
+    >>> # methods and operators escape their arguments
+    >>> template = Markup("Hello <em>%s</em>")
+    >>> template % '"World"'
+    Markup('Hello <em>&#34;World&#34;</em>')
+
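+The same rules apply when composing markup from pieces. A hypothetical
+continuation of the session above (not in the upstream README):
+
+.. code-block:: pycon
+
+    >>> # str.format on a Markup escapes its arguments too
+    >>> Markup('<em>{}</em>').format('<unsafe>')
+    Markup('<em>&lt;unsafe&gt;</em>')
+    >>> # values already wrapped in Markup pass through unescaped
+    >>> Markup('<em>{}</em>').format(Markup('<b>safe</b>'))
+    Markup('<em><b>safe</b></em>')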
+
+Donate
+------
+
+The Pallets organization develops and supports MarkupSafe and other
+libraries that use it. In order to grow the community of contributors
+and users, and allow the maintainers to devote more time to the
+projects, `please donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+*   Website: https://palletsprojects.com/p/markupsafe/
+*   Documentation: https://markupsafe.palletsprojects.com/
+*   License: `BSD-3-Clause <https://github.com/pallets/markupsafe/blob/master/LICENSE.rst>`_
+*   Releases: https://pypi.org/project/MarkupSafe/
+*   Code: https://github.com/pallets/markupsafe
+*   Issue tracker: https://github.com/pallets/markupsafe/issues
+*   Test status:
+
+    *   Linux, Mac: https://travis-ci.org/pallets/markupsafe
+    *   Windows: https://ci.appveyor.com/project/pallets/markupsafe
+
+*   Test coverage: https://codecov.io/gh/pallets/markupsafe
+
+
diff --git a/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/RECORD b/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..207f391891c36380ed4fbcaac9e99df50878251f
--- /dev/null
+++ b/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/RECORD
@@ -0,0 +1,16 @@
+MarkupSafe-1.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+MarkupSafe-1.1.1.dist-info/LICENSE.txt,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475
+MarkupSafe-1.1.1.dist-info/METADATA,sha256=nJHwJ4_4ka-V39QH883jPrslj6inNdyyNASBXbYgHXQ,3570
+MarkupSafe-1.1.1.dist-info/RECORD,,
+MarkupSafe-1.1.1.dist-info/WHEEL,sha256=AhV6RMqZ2IDfreRJKo44QWYxYeP-0Jr0bezzBLQ1eog,109
+MarkupSafe-1.1.1.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
+markupsafe/__init__.py,sha256=oTblO5f9KFM-pvnq9bB0HgElnqkJyqHnFN1Nx2NIvnY,10126
+markupsafe/__pycache__/__init__.cpython-37.pyc,,
+markupsafe/__pycache__/_compat.cpython-37.pyc,,
+markupsafe/__pycache__/_constants.cpython-37.pyc,,
+markupsafe/__pycache__/_native.cpython-37.pyc,,
+markupsafe/_compat.py,sha256=uEW1ybxEjfxIiuTbRRaJpHsPFf4yQUMMKaPgYEC5XbU,558
+markupsafe/_constants.py,sha256=zo2ajfScG-l1Sb_52EP3MlDCqO7Y1BVHUXXKRsVDRNk,4690
+markupsafe/_native.py,sha256=d-8S_zzYt2y512xYcuSxq0NeG2DUUvG80wVdTn-4KI8,1873
+markupsafe/_speedups.c,sha256=k0fzEIK3CP6MmMqeY0ob43TP90mVN0DTyn7BAl3RqSg,9884
+markupsafe/_speedups.cpython-37m-x86_64-linux-gnu.so,sha256=pz-ucGdAq6kJtq9lEY1kY2Ed6LQjbRrIicdu_i4HFqU,38875
diff --git a/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL b/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..697e4324518901b69631cacf7491e74949229a9d
--- /dev/null
+++ b/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.31.1)
+Root-Is-Purelib: false
+Tag: cp37-cp37m-manylinux1_x86_64
+
diff --git a/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt b/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..75bf729258f9daef77370b6df1a57940f90fc23f
--- /dev/null
+++ b/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+markupsafe
diff --git a/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/INSTALLER b/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/LICENSE.rst b/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/LICENSE.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c37cae49ec77ad6ebb25568c1605f1fee5313cfb
--- /dev/null
+++ b/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2007 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1.  Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+2.  Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+3.  Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/METADATA b/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..2c07a511f2726244fa397ac025e7366263317c4f
--- /dev/null
+++ b/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/METADATA
@@ -0,0 +1,132 @@
+Metadata-Version: 2.1
+Name: Werkzeug
+Version: 0.15.2
+Summary: The comprehensive WSGI web application library.
+Home-page: https://palletsprojects.com/p/werkzeug/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+Maintainer: The Pallets Team
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Documentation, https://werkzeug.palletsprojects.com/
+Project-URL: Code, https://github.com/pallets/werkzeug
+Project-URL: Issue tracker, https://github.com/pallets/werkzeug/issues
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Internet :: WWW/HTTP :: WSGI
+Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Application
+Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware
+Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+Provides-Extra: dev
+Requires-Dist: pytest ; extra == 'dev'
+Requires-Dist: coverage ; extra == 'dev'
+Requires-Dist: tox ; extra == 'dev'
+Requires-Dist: sphinx ; extra == 'dev'
+Requires-Dist: pallets-sphinx-themes ; extra == 'dev'
+Requires-Dist: sphinx-issues ; extra == 'dev'
+Provides-Extra: termcolor
+Requires-Dist: termcolor ; extra == 'termcolor'
+Provides-Extra: watchdog
+Requires-Dist: watchdog ; extra == 'watchdog'
+
+Werkzeug
+========
+
+*werkzeug* German noun: "tool". Etymology: *werk* ("work"), *zeug* ("stuff")
+
+Werkzeug is a comprehensive `WSGI`_ web application library. It began as
+a simple collection of various utilities for WSGI applications and has
+become one of the most advanced WSGI utility libraries.
+
+It includes:
+
+-   An interactive debugger that allows inspecting stack traces and
+    source code in the browser with an interactive interpreter for any
+    frame in the stack.
+-   A full-featured request object with objects to interact with
+    headers, query args, form data, files, and cookies.
+-   A response object that can wrap other WSGI applications and handle
+    streaming data.
+-   A routing system for matching URLs to endpoints and generating URLs
+    for endpoints, with an extensible system for capturing variables
+    from URLs.
+-   HTTP utilities to handle entity tags, cache control, dates, user
+    agents, cookies, files, and more.
+-   A threaded WSGI server for use while developing applications
+    locally.
+-   A test client for simulating HTTP requests during testing without
+    requiring running a server.
+
+Werkzeug is Unicode aware and doesn't enforce any dependencies. It is up
+to the developer to choose a template engine, database adapter, and even
+how to handle requests. It can be used to build all sorts of end user
+applications such as blogs, wikis, or bulletin boards.
+
+`Flask`_ wraps Werkzeug, using it to handle the details of WSGI while
+providing more structure and patterns for defining powerful
+applications.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+    pip install -U Werkzeug
+
+
+A Simple Example
+----------------
+
+.. code-block:: python
+
+    from werkzeug.wrappers import Request, Response
+
+    @Request.application
+    def application(request):
+        return Response('Hello, World!')
+
+    if __name__ == '__main__':
+        from werkzeug.serving import run_simple
+        run_simple('localhost', 4000, application)
+
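+The routing system mentioned above is usable on its own as well. A
+minimal sketch (an illustration, not from the upstream README; the
+endpoint names are made up):
+
+.. code-block:: python
+
+    from werkzeug.routing import Map, Rule
+
+    url_map = Map([
+        Rule('/', endpoint='index'),
+        Rule('/user/<int:user_id>', endpoint='user'),
+    ])
+
+    # Binding to a host yields an adapter that can match concrete paths.
+    adapter = url_map.bind('localhost')
+    print(adapter.match('/user/42'))  # ('user', {'user_id': 42})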
+
+Links
+-----
+
+-   Website: https://www.palletsprojects.com/p/werkzeug/
+-   Documentation: https://werkzeug.palletsprojects.com/
+-   Releases: https://pypi.org/project/Werkzeug/
+-   Code: https://github.com/pallets/werkzeug
+-   Issue tracker: https://github.com/pallets/werkzeug/issues
+-   Test status:
+
+    -   Linux, Mac: https://travis-ci.org/pallets/werkzeug
+    -   Windows: https://ci.appveyor.com/project/pallets/werkzeug
+
+-   Test coverage: https://codecov.io/gh/pallets/werkzeug
+
+.. _WSGI: https://wsgi.readthedocs.io/en/latest/
+.. _Flask: https://www.palletsprojects.com/p/flask/
+.. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
diff --git a/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/RECORD b/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..61cf975e8e0ab28c76977aebb11f7e23104c17b2
--- /dev/null
+++ b/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/RECORD
@@ -0,0 +1,119 @@
+Werkzeug-0.15.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Werkzeug-0.15.2.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
+Werkzeug-0.15.2.dist-info/METADATA,sha256=YmEsyyRecto-hVeBAWJ6QXzxjMIjCLWXMGPKGQpnc4E,4818
+Werkzeug-0.15.2.dist-info/RECORD,,
+Werkzeug-0.15.2.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110
+Werkzeug-0.15.2.dist-info/top_level.txt,sha256=QRyj2VjwJoQkrwjwFIOlB8Xg3r9un0NtqVHQF-15xaw,9
+werkzeug/__init__.py,sha256=CxWnTwGG-pO1kcwNE3EKdb73DrCXyhY9cF5mIkp8DMs,6805
+werkzeug/__pycache__/__init__.cpython-37.pyc,,
+werkzeug/__pycache__/_compat.cpython-37.pyc,,
+werkzeug/__pycache__/_internal.cpython-37.pyc,,
+werkzeug/__pycache__/_reloader.cpython-37.pyc,,
+werkzeug/__pycache__/datastructures.cpython-37.pyc,,
+werkzeug/__pycache__/exceptions.cpython-37.pyc,,
+werkzeug/__pycache__/filesystem.cpython-37.pyc,,
+werkzeug/__pycache__/formparser.cpython-37.pyc,,
+werkzeug/__pycache__/http.cpython-37.pyc,,
+werkzeug/__pycache__/local.cpython-37.pyc,,
+werkzeug/__pycache__/posixemulation.cpython-37.pyc,,
+werkzeug/__pycache__/routing.cpython-37.pyc,,
+werkzeug/__pycache__/security.cpython-37.pyc,,
+werkzeug/__pycache__/serving.cpython-37.pyc,,
+werkzeug/__pycache__/test.cpython-37.pyc,,
+werkzeug/__pycache__/testapp.cpython-37.pyc,,
+werkzeug/__pycache__/urls.cpython-37.pyc,,
+werkzeug/__pycache__/useragents.cpython-37.pyc,,
+werkzeug/__pycache__/utils.cpython-37.pyc,,
+werkzeug/__pycache__/wsgi.cpython-37.pyc,,
+werkzeug/_compat.py,sha256=oBEVVrJT4sqYdIZbUWmgV9T9w257RhTSDBlTjh0Zbb0,6431
+werkzeug/_internal.py,sha256=Wx7cpTRWqeBd0LAqobo0lCO4pNUW4oav6XKf7Taumgk,14590
+werkzeug/_reloader.py,sha256=8B8T1npsQT-96nGeVJjV1KXWK_ong6ZlTXOWgxfRLpg,11241
+werkzeug/contrib/__init__.py,sha256=EvNyiiCF49j5P0fZYJ3ZGe82ofXdSBvUNqWFwwBMibQ,553
+werkzeug/contrib/__pycache__/__init__.cpython-37.pyc,,
+werkzeug/contrib/__pycache__/atom.cpython-37.pyc,,
+werkzeug/contrib/__pycache__/cache.cpython-37.pyc,,
+werkzeug/contrib/__pycache__/fixers.cpython-37.pyc,,
+werkzeug/contrib/__pycache__/iterio.cpython-37.pyc,,
+werkzeug/contrib/__pycache__/lint.cpython-37.pyc,,
+werkzeug/contrib/__pycache__/profiler.cpython-37.pyc,,
+werkzeug/contrib/__pycache__/securecookie.cpython-37.pyc,,
+werkzeug/contrib/__pycache__/sessions.cpython-37.pyc,,
+werkzeug/contrib/__pycache__/wrappers.cpython-37.pyc,,
+werkzeug/contrib/atom.py,sha256=KpPJcTfzNW1J0VNQckCbVtVGBe3V8s451tOUya4qByI,15415
+werkzeug/contrib/cache.py,sha256=AEh5UIw-Ui7sHZnlpvrD7ueOKUhCaAD55FXiPtXbbRs,32115
+werkzeug/contrib/fixers.py,sha256=peEtAiIWYT5bh00EWEPOGKzGZXivOzVhhzKPvvzk1RM,9193
+werkzeug/contrib/iterio.py,sha256=KKHa_8aCF_uhoeQVyPGUwrivuB6y6nNdXYo2D2vzOA8,10928
+werkzeug/contrib/lint.py,sha256=NdIxP0E2kVt1xDIxoaIz3Rcl8ZdgmHaFbGTOaybGpN4,296
+werkzeug/contrib/profiler.py,sha256=k_oMLU-AtsVvQ9TxNdermY6FuzSTYr-WE-ZmWb_DMyU,1229
+werkzeug/contrib/securecookie.py,sha256=xbtElskGmtbiApgOJ5WhGgqGDs_68_PcWzqDIAY_QZY,13076
+werkzeug/contrib/sessions.py,sha256=oVXh_7-6_CWOMxDKqcaK05H8RpYoWqAd3al-KzMFPYs,13042
+werkzeug/contrib/wrappers.py,sha256=ZmNk0wpzD66yomPnQxapndZQs4c0kNJaRzqI-BVxeQk,13199
+werkzeug/datastructures.py,sha256=8HoA4Gu9i7ZWi5OBjx244OLWvDEE4JTQQUUTRoAYKog,91761
+werkzeug/debug/__init__.py,sha256=bpogxaNQ_0oOnkDZV7D38CeVvxlCsYCk0H2D7TYrcIM,17939
+werkzeug/debug/__pycache__/__init__.cpython-37.pyc,,
+werkzeug/debug/__pycache__/console.cpython-37.pyc,,
+werkzeug/debug/__pycache__/repr.cpython-37.pyc,,
+werkzeug/debug/__pycache__/tbtools.cpython-37.pyc,,
+werkzeug/debug/console.py,sha256=HoBL21bbcmtiCLqiLDJLZi1LYnWMZxjoXYH5WaZB1XY,5469
+werkzeug/debug/repr.py,sha256=lIwuhbyrMwVe3P_cFqNyqzHL7P93TLKod7lw9clydEw,9621
+werkzeug/debug/shared/FONT_LICENSE,sha256=LwAVEI1oYnvXiNMT9SnCH_TaLCxCpeHziDrMg0gPkAI,4673
+werkzeug/debug/shared/console.png,sha256=bxax6RXXlvOij_KeqvSNX0ojJf83YbnZ7my-3Gx9w2A,507
+werkzeug/debug/shared/debugger.js,sha256=rOhqZMRfpZnnu6_XCGn6wMWPhtfwRAcyZKksdIxPJas,6400
+werkzeug/debug/shared/jquery.js,sha256=FgpCb_KJQlLNfOu91ta32o_NMZxltwRo8QtmkMRdAu8,86927
+werkzeug/debug/shared/less.png,sha256=-4-kNRaXJSONVLahrQKUxMwXGm9R4OnZ9SxDGpHlIR4,191
+werkzeug/debug/shared/more.png,sha256=GngN7CioHQoV58rH6ojnkYi8c_qED2Aka5FO5UXrReY,200
+werkzeug/debug/shared/source.png,sha256=RoGcBTE4CyCB85GBuDGTFlAnUqxwTBiIfDqW15EpnUQ,818
+werkzeug/debug/shared/style.css,sha256=_Y98F6dR2CBUZNKylsOdgSHjwVaVy717WqE3-xJVcmE,6581
+werkzeug/debug/shared/ubuntu.ttf,sha256=1eaHFyepmy4FyDvjLVzpITrGEBu_CZYY94jE0nED1c0,70220
+werkzeug/debug/tbtools.py,sha256=HstooKsBY2a3iy6bzplU68JwmYHpn-iblnBcy5o4vA0,20236
+werkzeug/exceptions.py,sha256=YQSIZDq-_xpUvGklLVlehso1mcGMzF2AJaOhWUTIs68,22933
+werkzeug/filesystem.py,sha256=HzKl-j0Hd8Jl66j778UbPTAYNnY6vUZgYLlBZ0e7uw0,2101
+werkzeug/formparser.py,sha256=tN6SO4mn6RUsxRZq4qVBWXbNWNuasn2KaBznTieMaVk,21790
+werkzeug/http.py,sha256=t0ET2tySAf9ZWdEelVWJoLaZzFViYpjoUmiYHPz10-E,43304
+werkzeug/local.py,sha256=USVEcgIg-oCiUJFPIecFIW9jkIejfw4Fjf1u5yN-Np4,14456
+werkzeug/middleware/__init__.py,sha256=f1SFZo67IlW4k1uqKzNHxYQlsakUS-D6KK_j0e3jjwQ,549
+werkzeug/middleware/__pycache__/__init__.cpython-37.pyc,,
+werkzeug/middleware/__pycache__/dispatcher.cpython-37.pyc,,
+werkzeug/middleware/__pycache__/http_proxy.cpython-37.pyc,,
+werkzeug/middleware/__pycache__/lint.cpython-37.pyc,,
+werkzeug/middleware/__pycache__/profiler.cpython-37.pyc,,
+werkzeug/middleware/__pycache__/proxy_fix.cpython-37.pyc,,
+werkzeug/middleware/__pycache__/shared_data.cpython-37.pyc,,
+werkzeug/middleware/dispatcher.py,sha256=_-KoMzHtcISHS7ouWKAOraqlCLprdh83YOAn_8DjLp8,2240
+werkzeug/middleware/http_proxy.py,sha256=lRjTdMmghHiZuZrS7_UJ3gZc-vlFizhBbFZ-XZPLwIA,7117
+werkzeug/middleware/lint.py,sha256=6fmVw6-pLEKtfkF9VwgtGsZhSzoSFl6D466rqJeNL1M,12791
+werkzeug/middleware/profiler.py,sha256=2BhLPLFGwEmXQ1nVDr9F5Dijf7vvqQHXZGPLZ1vh110,4468
+werkzeug/middleware/proxy_fix.py,sha256=Y86VcU2oAQ--x0mi4iFVJyEFMzp3Ao8q0zvr_SsrpNw,8506
+werkzeug/middleware/shared_data.py,sha256=6aUzMABeOLul0Krf5S_hs-T7oUc7ZIQ3B8tAO4p8C7E,8541
+werkzeug/posixemulation.py,sha256=gSSiv1SCmOyzOM_nq1ZaZCtxP__C5MeDJl_4yXJmi4Q,3541
+werkzeug/routing.py,sha256=w36bDJ4-LFo56aY6Uv_FfZ69id7qsB9AmKdQ4j8Ihj0,80733
+werkzeug/security.py,sha256=mfxfcM-D6U8LhsyDK5W_rnL1oVTZWgyt-E8E4FlSdrI,8026
+werkzeug/serving.py,sha256=xMxg69-SL7I7cIQqA9c2Znb1JvnVX12EHZCu0xm_gu8,35668
+werkzeug/test.py,sha256=jTWQtkfYpmopDGB6SKqDTe7H1X-WiSYq98AkYimyhXo,40641
+werkzeug/testapp.py,sha256=hcKBzorVlSHC-uGvGXXjCm3FzCwGWq4yjbTG3Pr7MV8,9301
+werkzeug/urls.py,sha256=8yHdYI99N__-isoTwvGqvuj9QhOh66dd1Xh1DIp0q0g,39261
+werkzeug/useragents.py,sha256=FIonyUF790Ro8OG8cJqG1zixhg5YzXdHmkZbrnK0QRo,5965
+werkzeug/utils.py,sha256=O20Y0qWk5O1IWamC_A5gkmzR5cgBd3yDIHviwBTfNB0,27387
+werkzeug/wrappers/__init__.py,sha256=S4VioKAmF_av9Ec9zQvG71X1EOkYfPx1TYck9jyDiyY,1384
+werkzeug/wrappers/__pycache__/__init__.cpython-37.pyc,,
+werkzeug/wrappers/__pycache__/accept.cpython-37.pyc,,
+werkzeug/wrappers/__pycache__/auth.cpython-37.pyc,,
+werkzeug/wrappers/__pycache__/base_request.cpython-37.pyc,,
+werkzeug/wrappers/__pycache__/base_response.cpython-37.pyc,,
+werkzeug/wrappers/__pycache__/common_descriptors.cpython-37.pyc,,
+werkzeug/wrappers/__pycache__/etag.cpython-37.pyc,,
+werkzeug/wrappers/__pycache__/json.cpython-37.pyc,,
+werkzeug/wrappers/__pycache__/request.cpython-37.pyc,,
+werkzeug/wrappers/__pycache__/response.cpython-37.pyc,,
+werkzeug/wrappers/__pycache__/user_agent.cpython-37.pyc,,
+werkzeug/wrappers/accept.py,sha256=TIvjUc0g73fhTWX54wg_D9NNzKvpnG1X8u1w26tK1o8,1760
+werkzeug/wrappers/auth.py,sha256=Pmn6iaGHBrUyHbJpW0lZhO_q9RVoAa5QalaTqcavdAI,1158
+werkzeug/wrappers/base_request.py,sha256=k5mu1UU99X_xrPqmXj44pzJbkPRpgvwMuP2j9vl8QFU,26873
+werkzeug/wrappers/base_response.py,sha256=ZA1XlxtsbvG4SpbdOEMT5--z7aZM0w6C5y33W8wOXa4,27906
+werkzeug/wrappers/common_descriptors.py,sha256=OJ8jOwMun4L-BxCuFPkK1vaefx_-Y5IndVXvvn_ems4,12089
+werkzeug/wrappers/etag.py,sha256=TwMO1fvluXbBqnFTj2DvrCNa3mYhbHYe1UZAVzfXvuU,12533
+werkzeug/wrappers/json.py,sha256=HvK_A4NpO0sLqgb10sTJcoZydYOwyNiPCJPV7SVgcgE,4343
+werkzeug/wrappers/request.py,sha256=qPo2zmmBv4HxboywtWZb2pJL8OPXo07BUXBKw2j9Fi8,1338
+werkzeug/wrappers/response.py,sha256=vDZFEGzDOG0jjmS0uVVjeT3hqRt1hFaf15npnx7RD28,2329
+werkzeug/wrappers/user_agent.py,sha256=4bTgQKTLQmGUyxOREYOzbeiFP2VwIOE7E14AhUB5NqM,444
+werkzeug/wsgi.py,sha256=h-zyAeInwE6X6ciSnHI14ImA85adV-F861PmR7UGtRk,36681
diff --git a/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/WHEEL b/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..c8240f03e87fdc556c4cb9a1f721a73138177cc5
--- /dev/null
+++ b/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/top_level.txt b/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6fe8da8499399d7c0484847967ad49e4b165589b
--- /dev/null
+++ b/lib/python3.7/site-packages/Werkzeug-0.15.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+werkzeug
diff --git a/lib/python3.7/site-packages/__pycache__/easy_install.cpython-37.pyc b/lib/python3.7/site-packages/__pycache__/easy_install.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1eace2ae380d3f3c4309b888b49898cc44122511
Binary files /dev/null and b/lib/python3.7/site-packages/__pycache__/easy_install.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/__pycache__/peewee.cpython-37.pyc b/lib/python3.7/site-packages/__pycache__/peewee.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4de94585597b7e1a023ea91d66e7df71983012de
Binary files /dev/null and b/lib/python3.7/site-packages/__pycache__/peewee.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/__pycache__/pwiz.cpython-37.pyc b/lib/python3.7/site-packages/__pycache__/pwiz.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d35e236483d3379234cc5675ebdaafa90ecdbf51
Binary files /dev/null and b/lib/python3.7/site-packages/__pycache__/pwiz.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__init__.py b/lib/python3.7/site-packages/click/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3c33660a9e03096c57d7cbec2a9909fcdfa2080
--- /dev/null
+++ b/lib/python3.7/site-packages/click/__init__.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+"""
+click
+~~~~~
+
+Click is a simple Python module inspired by the stdlib optparse to make
+writing command line scripts fun. Unlike other modules, it's based
+around a simple API that does not come with too much magic and is
+composable.
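+
+A minimal usage sketch (the ``hello`` command and its option are
+illustrative, not part of this module)::
+
+    import click
+
+    @click.command()
+    @click.option('--count', default=1, help='Number of greetings.')
+    def hello(count):
+        for _ in range(count):
+            click.echo('Hello!')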
+
+:copyright: © 2014 by the Pallets team.
+:license: BSD, see LICENSE.rst for more details.
+"""
+
+# Core classes
+from .core import Context, BaseCommand, Command, MultiCommand, Group, \
+     CommandCollection, Parameter, Option, Argument
+
+# Globals
+from .globals import get_current_context
+
+# Decorators
+from .decorators import pass_context, pass_obj, make_pass_decorator, \
+     command, group, argument, option, confirmation_option, \
+     password_option, version_option, help_option
+
+# Types
+from .types import ParamType, File, Path, Choice, IntRange, Tuple, \
+     DateTime, STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED, FloatRange
+
+# Utilities
+from .utils import echo, get_binary_stream, get_text_stream, open_file, \
+     format_filename, get_app_dir, get_os_args
+
+# Terminal functions
+from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \
+     progressbar, clear, style, unstyle, secho, edit, launch, getchar, \
+     pause
+
+# Exceptions
+from .exceptions import ClickException, UsageError, BadParameter, \
+     FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \
+     MissingParameter
+
+# Formatting
+from .formatting import HelpFormatter, wrap_text
+
+# Parsing
+from .parser import OptionParser
+
+
+__all__ = [
+    # Core classes
+    'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group',
+    'CommandCollection', 'Parameter', 'Option', 'Argument',
+
+    # Globals
+    'get_current_context',
+
+    # Decorators
+    'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group',
+    'argument', 'option', 'confirmation_option', 'password_option',
+    'version_option', 'help_option',
+
+    # Types
+    'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple',
+    'DateTime', 'STRING', 'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED',
+    'FloatRange',
+
+    # Utilities
+    'echo', 'get_binary_stream', 'get_text_stream', 'open_file',
+    'format_filename', 'get_app_dir', 'get_os_args',
+
+    # Terminal functions
+    'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager',
+    'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch',
+    'getchar', 'pause',
+
+    # Exceptions
+    'ClickException', 'UsageError', 'BadParameter', 'FileError',
+    'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage',
+    'MissingParameter',
+
+    # Formatting
+    'HelpFormatter', 'wrap_text',
+
+    # Parsing
+    'OptionParser',
+]
+
+
+# Controls if click should emit the warning about the use of unicode
+# literals.
+disable_unicode_literals_warning = False
+
+
+__version__ = '7.0'
diff --git a/lib/python3.7/site-packages/click/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..178edba70c4d0247d85fac9a198a5f15da9dbe5b
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/_bashcomplete.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/_bashcomplete.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..434bc921682974aa3e4383ee3dbaf13cc4d2a468
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/_bashcomplete.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/_compat.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/_compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc201a34dca01d1220a34f4c56efea534fd26f83
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/_compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/_termui_impl.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/_termui_impl.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c691c1c1d4fb7a283dce49184544bb0a54addaf7
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/_termui_impl.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/_textwrap.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/_textwrap.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31572385b376d376e451a57861700bb5f36a792f
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/_textwrap.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/_unicodefun.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/_unicodefun.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ece172f028b1bec985b28a450c136386c4c36fde
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/_unicodefun.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/_winconsole.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/_winconsole.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6a1d113697c051b7da7de944c6686ed77ab0c4a0
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/_winconsole.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/core.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/core.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec3318190c56b8060201278894ce2c8fbbe94186
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/core.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/decorators.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/decorators.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c2f99fa2e3dd5e960b7d9d16cdfc026823f3483e
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/decorators.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/exceptions.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/exceptions.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4297a32710b76f8c4d224272ec015c7b335b95af
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/exceptions.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/formatting.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/formatting.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5fe497da57ee45cab66a9fa735273fb04b82395f
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/formatting.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/globals.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/globals.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ffad39e6faa56e8182c0441cf7877ce5d473d06
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/globals.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/parser.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/parser.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ffe1ba34a3e8672010f61c2cc511b31d1c941cdd
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/parser.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/termui.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/termui.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fa41478d9edd2b3378629d7fb7139a2c25716f15
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/termui.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/testing.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/testing.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f06e4f64b5e0c3620a03184655462156d85146d0
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/testing.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/types.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/types.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e9cce172ab387eefc3e9f0b4bb772a3ef581994
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/types.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/__pycache__/utils.cpython-37.pyc b/lib/python3.7/site-packages/click/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7e41adecee4abd511b3ad2d9c1f8713791dddd1
Binary files /dev/null and b/lib/python3.7/site-packages/click/__pycache__/utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/click/_bashcomplete.py b/lib/python3.7/site-packages/click/_bashcomplete.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5f1084c9abb4ffd848b2f59f1bffd714713062c
--- /dev/null
+++ b/lib/python3.7/site-packages/click/_bashcomplete.py
@@ -0,0 +1,293 @@
+import copy
+import os
+import re
+
+from .utils import echo
+from .parser import split_arg_string
+from .core import MultiCommand, Option, Argument
+from .types import Choice
+
+try:
+    from collections import abc
+except ImportError:
+    import collections as abc
+
+WORDBREAK = '='
+
+# Note: only BASH version 4.4 and later have the nosort option.
+COMPLETION_SCRIPT_BASH = '''
+%(complete_func)s() {
+    local IFS=$'\n'
+    COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
+                   COMP_CWORD=$COMP_CWORD \\
+                   %(autocomplete_var)s=complete $1 ) )
+    return 0
+}
+
+%(complete_func)setup() {
+    local COMPLETION_OPTIONS=""
+    local BASH_VERSION_ARR=(${BASH_VERSION//./ })
+    # Only BASH version 4.4 and later have the nosort option.
+    if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then
+        COMPLETION_OPTIONS="-o nosort"
+    fi
+
+    complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s
+}
+
+%(complete_func)setup
+'''
+
+COMPLETION_SCRIPT_ZSH = '''
+%(complete_func)s() {
+    local -a completions
+    local -a completions_with_descriptions
+    local -a response
+    response=("${(@f)$( env COMP_WORDS=\"${words[*]}\" \\
+                        COMP_CWORD=$((CURRENT-1)) \\
+                        %(autocomplete_var)s=\"complete_zsh\" \\
+                        %(script_names)s )}")
+
+    for key descr in ${(kv)response}; do
+      if [[ "$descr" == "_" ]]; then
+          completions+=("$key")
+      else
+          completions_with_descriptions+=("$key":"$descr")
+      fi
+    done
+
+    if [ -n "$completions_with_descriptions" ]; then
+        _describe -V unsorted completions_with_descriptions -U -Q
+    fi
+
+    if [ -n "$completions" ]; then
+        compadd -U -V unsorted -Q -a completions
+    fi
+    compstate[insert]="automenu"
+}
+
+compdef %(complete_func)s %(script_names)s
+'''
+
+_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]')
+
+
+def get_completion_script(prog_name, complete_var, shell):
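+    """Return the completion script for *prog_name* in the given shell.
+
+    Illustrative call (the names are hypothetical)::
+
+        get_completion_script('my-tool', '_MY_TOOL_COMPLETE', 'bash')
+        # renders COMPLETION_SCRIPT_BASH with
+        # complete_func='_my_tool_completion' and script_names='my-tool'
+    """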
+    cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_'))
+    script = COMPLETION_SCRIPT_ZSH if shell == 'zsh' else COMPLETION_SCRIPT_BASH
+    return (script % {
+        'complete_func': '_%s_completion' % cf_name,
+        'script_names': prog_name,
+        'autocomplete_var': complete_var,
+    }).strip() + ';'
+
+
+def resolve_ctx(cli, prog_name, args):
+    """
+    Parse into a hierarchy of contexts. Contexts are connected through the parent variable.
+    :param cli: command definition
+    :param prog_name: the program that is running
+    :param args: full list of args
+    :return: the final context/command parsed
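+
+    Example (a sketch; assumes ``cli`` is a Group with a ``sub``
+    subcommand)::
+
+        ctx = resolve_ctx(cli, 'tool', ['sub'])
+        # ctx.command is now the ``sub`` command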
+    """
+    ctx = cli.make_context(prog_name, args, resilient_parsing=True)
+    args = ctx.protected_args + ctx.args
+    while args:
+        if isinstance(ctx.command, MultiCommand):
+            if not ctx.command.chain:
+                cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
+                if cmd is None:
+                    return ctx
+                ctx = cmd.make_context(cmd_name, args, parent=ctx,
+                                       resilient_parsing=True)
+                args = ctx.protected_args + ctx.args
+            else:
+                # Walk chained subcommand contexts saving the last one.
+                while args:
+                    cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
+                    if cmd is None:
+                        return ctx
+                    sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,
+                                               allow_extra_args=True,
+                                               allow_interspersed_args=False,
+                                               resilient_parsing=True)
+                    args = sub_ctx.args
+                ctx = sub_ctx
+                args = sub_ctx.protected_args + sub_ctx.args
+        else:
+            break
+    return ctx
+
+
+def start_of_option(param_str):
+    """
+    :param param_str: param_str to check
+    :return: whether or not this is the start of an option declaration (i.e. starts "-" or "--")
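+
+    For example, ``start_of_option('--name')`` and ``start_of_option('-n')``
+    are truthy, while ``start_of_option('value')`` is falsy.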
+    """
+    return param_str and param_str[:1] == '-'
+
+
+def is_incomplete_option(all_args, cmd_param):
+    """
+    :param all_args: the full original list of args supplied
+    :param cmd_param: the current command parameter
+    :return: whether or not the last option declaration (i.e. starts
+    "-" or "--") is incomplete and corresponds to this cmd_param. In
+    other words, whether this cmd_param option can still accept values
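+
+    For example, with ``all_args == ['--name']`` and a ``--name`` Option
+    that takes a value, the option is still waiting for that value, so
+    this returns True.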
+    """
+    if not isinstance(cmd_param, Option):
+        return False
+    if cmd_param.is_flag:
+        return False
+    last_option = None
+    for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):
+        if index + 1 > cmd_param.nargs:
+            break
+        if start_of_option(arg_str):
+            last_option = arg_str
+
+    return True if last_option and last_option in cmd_param.opts else False
+
+
+def is_incomplete_argument(current_params, cmd_param):
+    """
+    :param current_params: the current params and values for this argument as already entered
+    :param cmd_param: the current command parameter
+    :return: whether or not the last argument is incomplete and
+    corresponds to this cmd_param. In other words, whether or not this
+    cmd_param argument can still accept values
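+
+    For example, an Argument with ``nargs=2`` whose currently collected
+    value holds a single item can still accept one more, so this
+    returns True.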
+    """
+    if not isinstance(cmd_param, Argument):
+        return False
+    current_param_values = current_params[cmd_param.name]
+    if current_param_values is None:
+        return True
+    if cmd_param.nargs == -1:
+        return True
+    if isinstance(current_param_values, abc.Iterable) \
+            and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:
+        return True
+    return False
+
+
+def get_user_autocompletions(ctx, args, incomplete, cmd_param):
+    """
+    :param ctx: context associated with the parsed command
+    :param args: full list of args
+    :param incomplete: the incomplete text to autocomplete
+    :param cmd_param: command definition
+    :return: all the possible user-specified completions for the param
+    """
+    results = []
+    if isinstance(cmd_param.type, Choice):
+        # Choices don't support descriptions.
+        results = [(c, None)
+                   for c in cmd_param.type.choices if str(c).startswith(incomplete)]
+    elif cmd_param.autocompletion is not None:
+        dynamic_completions = cmd_param.autocompletion(ctx=ctx,
+                                                       args=args,
+                                                       incomplete=incomplete)
+        results = [c if isinstance(c, tuple) else (c, None)
+                   for c in dynamic_completions]
+    return results
+
+
+def get_visible_commands_starting_with(ctx, starts_with):
+    """
+    :param ctx: context associated with the parsed command
+    :param starts_with: string that visible commands must start with.
+    :return: all visible (not hidden) commands that start with starts_with.
+    """
+    for c in ctx.command.list_commands(ctx):
+        if c.startswith(starts_with):
+            command = ctx.command.get_command(ctx, c)
+            if not command.hidden:
+                yield command
+
+
+def add_subcommand_completions(ctx, incomplete, completions_out):
+    # Add subcommand completions.
+    if isinstance(ctx.command, MultiCommand):
+        completions_out.extend(
+            [(c.name, c.get_short_help_str()) for c in get_visible_commands_starting_with(ctx, incomplete)])
+
+    # Walk up the context list and add any other completion possibilities from chained commands
+    while ctx.parent is not None:
+        ctx = ctx.parent
+        if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
+            remaining_commands = [c for c in get_visible_commands_starting_with(ctx, incomplete)
+                                  if c.name not in ctx.protected_args]
+            completions_out.extend([(c.name, c.get_short_help_str()) for c in remaining_commands])
+
+
+def get_choices(cli, prog_name, args, incomplete):
+    """
+    :param cli: command definition
+    :param prog_name: the program that is running
+    :param args: full list of args
+    :param incomplete: the incomplete text to autocomplete
+    :return: all the possible completions for the incomplete
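+
+    Example (a sketch; assumes ``cli`` defines a ``--verbose`` option)::
+
+        get_choices(cli, 'tool', [], '--ver')
+        # -> [('--verbose', <help text or None>)]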
+    """
+    all_args = copy.deepcopy(args)
+
+    ctx = resolve_ctx(cli, prog_name, args)
+    if ctx is None:
+        return []
+
+    # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse
+    # without the '='
+    if start_of_option(incomplete) and WORDBREAK in incomplete:
+        partition_incomplete = incomplete.partition(WORDBREAK)
+        all_args.append(partition_incomplete[0])
+        incomplete = partition_incomplete[2]
+    elif incomplete == WORDBREAK:
+        incomplete = ''
+
+    completions = []
+    if start_of_option(incomplete):
+        # completions for partial options
+        for param in ctx.command.params:
+            if isinstance(param, Option) and not param.hidden:
+                param_opts = [param_opt for param_opt in param.opts +
+                              param.secondary_opts if param_opt not in all_args or param.multiple]
+                completions.extend([(o, param.help) for o in param_opts if o.startswith(incomplete)])
+        return completions
+    # completion for option values from user supplied values
+    for param in ctx.command.params:
+        if is_incomplete_option(all_args, param):
+            return get_user_autocompletions(ctx, all_args, incomplete, param)
+    # completion for argument values from user supplied values
+    for param in ctx.command.params:
+        if is_incomplete_argument(ctx.params, param):
+            return get_user_autocompletions(ctx, all_args, incomplete, param)
+
+    add_subcommand_completions(ctx, incomplete, completions)
+    # Sort before returning so that proper ordering can be enforced in custom types.
+    return sorted(completions)
+
+
+def do_complete(cli, prog_name, include_descriptions):
+    cwords = split_arg_string(os.environ['COMP_WORDS'])
+    cword = int(os.environ['COMP_CWORD'])
+    args = cwords[1:cword]
+    try:
+        incomplete = cwords[cword]
+    except IndexError:
+        incomplete = ''
+
+    for item in get_choices(cli, prog_name, args, incomplete):
+        echo(item[0])
+        if include_descriptions:
+            # ZSH has trouble dealing with empty array parameters when
+            # returned from commands, so use a well defined character
+            # '_' to indicate no description is present.
+            echo(item[1] if item[1] else '_')
+
+    return True
+
+
+def bashcomplete(cli, prog_name, complete_var, complete_instr):
+    if complete_instr.startswith('source'):
+        shell = 'zsh' if complete_instr == 'source_zsh' else 'bash'
+        echo(get_completion_script(prog_name, complete_var, shell))
+        return True
+    elif complete_instr == 'complete' or complete_instr == 'complete_zsh':
+        return do_complete(cli, prog_name, complete_instr == 'complete_zsh')
+    return False
diff --git a/lib/python3.7/site-packages/click/_compat.py b/lib/python3.7/site-packages/click/_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..937e2301d493591b5620ed1efb100098741c8c4e
--- /dev/null
+++ b/lib/python3.7/site-packages/click/_compat.py
@@ -0,0 +1,703 @@
+import re
+import io
+import os
+import sys
+import codecs
+from weakref import WeakKeyDictionary
+
+
+PY2 = sys.version_info[0] == 2
+CYGWIN = sys.platform.startswith('cygwin')
+# Determine local App Engine environment, per Google's own suggestion
+APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and
+              'Development/' in os.environ.get('SERVER_SOFTWARE', ''))
+WIN = sys.platform.startswith('win') and not APP_ENGINE
+DEFAULT_COLUMNS = 80
+
+
+_ansi_re = re.compile(r'\033\[((?:\d|;)*)([a-zA-Z])')
+
+
+def get_filesystem_encoding():
+    return sys.getfilesystemencoding() or sys.getdefaultencoding()
+
+
+def _make_text_stream(stream, encoding, errors,
+                      force_readable=False, force_writable=False):
+    if encoding is None:
+        encoding = get_best_encoding(stream)
+    if errors is None:
+        errors = 'replace'
+    return _NonClosingTextIOWrapper(stream, encoding, errors,
+                                    line_buffering=True,
+                                    force_readable=force_readable,
+                                    force_writable=force_writable)
+
+
+def is_ascii_encoding(encoding):
+    """Checks if a given encoding is ascii."""
+    try:
+        return codecs.lookup(encoding).name == 'ascii'
+    except LookupError:
+        return False
+
+
+def get_best_encoding(stream):
+    """Returns the default stream encoding if not found."""
+    rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
+    if is_ascii_encoding(rv):
+        return 'utf-8'
+    return rv
+
+
+class _NonClosingTextIOWrapper(io.TextIOWrapper):
+
+    def __init__(self, stream, encoding, errors,
+                 force_readable=False, force_writable=False, **extra):
+        self._stream = stream = _FixupStream(stream, force_readable,
+                                             force_writable)
+        io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
+
+    # The io module is a place where the Python 3 text behavior
+    # was forced upon Python 2, so we need to unbreak
+    # it to look like Python 2.
+    if PY2:
+        def write(self, x):
+            if isinstance(x, str) or is_bytes(x):
+                try:
+                    self.flush()
+                except Exception:
+                    pass
+                return self.buffer.write(str(x))
+            return io.TextIOWrapper.write(self, x)
+
+        def writelines(self, lines):
+            for line in lines:
+                self.write(line)
+
+    def __del__(self):
+        try:
+            self.detach()
+        except Exception:
+            pass
+
+    def isatty(self):
+        # https://bitbucket.org/pypy/pypy/issue/1803
+        return self._stream.isatty()
+
+
+class _FixupStream(object):
+    """The new io interface needs more from streams than streams
+    traditionally implement.  As such, this fix-up code is necessary in
+    some circumstances.
+
+    The forcing of readable and writable flags is there because some tools
+    put badly patched objects on sys (one such offender being certain
+    versions of jupyter notebook).
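+
+    A sketch of the effect (``Dummy`` is illustrative)::
+
+        class Dummy(object):
+            def write(self, x):
+                pass
+
+        _FixupStream(Dummy(), force_writable=True).writable()  # -> True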
+    """
+
+    def __init__(self, stream, force_readable=False, force_writable=False):
+        self._stream = stream
+        self._force_readable = force_readable
+        self._force_writable = force_writable
+
+    def __getattr__(self, name):
+        return getattr(self._stream, name)
+
+    def read1(self, size):
+        f = getattr(self._stream, 'read1', None)
+        if f is not None:
+            return f(size)
+        # We only dispatch to readline instead of read in Python 2 as we
+        # do not want to cause problems with the different implementation
+        # of line buffering.
+        if PY2:
+            return self._stream.readline(size)
+        return self._stream.read(size)
+
+    def readable(self):
+        if self._force_readable:
+            return True
+        x = getattr(self._stream, 'readable', None)
+        if x is not None:
+            return x()
+        try:
+            self._stream.read(0)
+        except Exception:
+            return False
+        return True
+
+    def writable(self):
+        if self._force_writable:
+            return True
+        x = getattr(self._stream, 'writable', None)
+        if x is not None:
+            return x()
+        try:
+            self._stream.write('')
+        except Exception:
+            try:
+                self._stream.write(b'')
+            except Exception:
+                return False
+        return True
+
+    def seekable(self):
+        x = getattr(self._stream, 'seekable', None)
+        if x is not None:
+            return x()
+        try:
+            self._stream.seek(self._stream.tell())
+        except Exception:
+            return False
+        return True
+
+
+if PY2:
+    text_type = unicode
+    bytes = str
+    raw_input = raw_input
+    string_types = (str, unicode)
+    int_types = (int, long)
+    iteritems = lambda x: x.iteritems()
+    range_type = xrange
+
+    def is_bytes(x):
+        return isinstance(x, (buffer, bytearray))
+
+    _identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
+
+    # For Windows, we need to force stdout/stdin/stderr to binary if it's
+    # fetched for that.  This obviously is not the most correct way to do
+    # it as it changes global state.  Unfortunately, there does not seem to
+    # be a clear better way to do it as just reopening the file in binary
+    # mode does not change anything.
+    #
+    # An option would be to do what Python 3 does and to open the file as
+    # binary only, patch it back to the system, and then use a wrapper
+    # stream that converts newlines.  It's not quite clear what's the
+    # correct option here.
+    #
+    # This code also lives in _winconsole for the fallback to the console
+    # emulation stream.
+    #
+    # There are also Windows environments where the `msvcrt` module is not
+    # available (which is why we use try/except instead of the WIN variable
+    # here), such as the Google App Engine development server on Windows. In
+    # those cases there is just nothing we can do.
+    def set_binary_mode(f):
+        return f
+
+    try:
+        import msvcrt
+    except ImportError:
+        pass
+    else:
+        def set_binary_mode(f):
+            try:
+                fileno = f.fileno()
+            except Exception:
+                pass
+            else:
+                msvcrt.setmode(fileno, os.O_BINARY)
+            return f
+
+    try:
+        import fcntl
+    except ImportError:
+        pass
+    else:
+        def set_binary_mode(f):
+            try:
+                fileno = f.fileno()
+            except Exception:
+                pass
+            else:
+                flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
+                fcntl.fcntl(fileno, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
+            return f
+
+    def isidentifier(x):
+        return _identifier_re.search(x) is not None
+
+    def get_binary_stdin():
+        return set_binary_mode(sys.stdin)
+
+    def get_binary_stdout():
+        _wrap_std_stream('stdout')
+        return set_binary_mode(sys.stdout)
+
+    def get_binary_stderr():
+        _wrap_std_stream('stderr')
+        return set_binary_mode(sys.stderr)
+
+    def get_text_stdin(encoding=None, errors=None):
+        rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+        if rv is not None:
+            return rv
+        return _make_text_stream(sys.stdin, encoding, errors,
+                                 force_readable=True)
+
+    def get_text_stdout(encoding=None, errors=None):
+        _wrap_std_stream('stdout')
+        rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+        if rv is not None:
+            return rv
+        return _make_text_stream(sys.stdout, encoding, errors,
+                                 force_writable=True)
+
+    def get_text_stderr(encoding=None, errors=None):
+        _wrap_std_stream('stderr')
+        rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+        if rv is not None:
+            return rv
+        return _make_text_stream(sys.stderr, encoding, errors,
+                                 force_writable=True)
+
+    def filename_to_ui(value):
+        if isinstance(value, bytes):
+            value = value.decode(get_filesystem_encoding(), 'replace')
+        return value
+else:
+    import io
+    text_type = str
+    raw_input = input
+    string_types = (str,)
+    int_types = (int,)
+    range_type = range
+    isidentifier = lambda x: x.isidentifier()
+    iteritems = lambda x: iter(x.items())
+
+    def is_bytes(x):
+        return isinstance(x, (bytes, memoryview, bytearray))
+
+    def _is_binary_reader(stream, default=False):
+        try:
+            return isinstance(stream.read(0), bytes)
+        except Exception:
+            # This happens in some cases where the stream was already
+            # closed.  In this case, we assume the default.
+            return default
+
+    def _is_binary_writer(stream, default=False):
+        try:
+            stream.write(b'')
+        except Exception:
+            try:
+                stream.write('')
+                return False
+            except Exception:
+                pass
+            return default
+        return True
+
+    def _find_binary_reader(stream):
+        # We need to figure out if the given stream is already binary.
+        # This can happen because the official docs recommend detaching
+        # the streams to get binary streams.  Some code might do this, so
+        # we need to deal with this case explicitly.
+        if _is_binary_reader(stream, False):
+            return stream
+
+        buf = getattr(stream, 'buffer', None)
+
+        # Same situation here; this time we assume that the buffer is
+        # actually binary in case it's closed.
+        if buf is not None and _is_binary_reader(buf, True):
+            return buf
+
+    def _find_binary_writer(stream):
+        # We need to figure out if the given stream is already binary.
+        # This can happen because the official docs recommend detaching
+        # the streams to get binary streams.  Some code might do this, so
+        # we need to deal with this case explicitly.
+        if _is_binary_writer(stream, False):
+            return stream
+
+        buf = getattr(stream, 'buffer', None)
+
+        # Same situation here; this time we assume that the buffer is
+        # actually binary in case it's closed.
+        if buf is not None and _is_binary_writer(buf, True):
+            return buf
+
+    def _stream_is_misconfigured(stream):
+        """A stream is misconfigured if its encoding is ASCII."""
+        # If the stream does not have an encoding set, we assume it's set
+        # to ASCII.  This appears to happen in certain unittest
+        # environments.  It's not quite clear what the correct behavior is
+        # but this at least will force Click to recover somehow.
+        return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii')
+
+    def _is_compatible_text_stream(stream, encoding, errors):
+        stream_encoding = getattr(stream, 'encoding', None)
+        stream_errors = getattr(stream, 'errors', None)
+
+        # Perfect match.
+        if stream_encoding == encoding and stream_errors == errors:
+            return True
+
+        # Otherwise, it's only a compatible stream if we did not ask for
+        # an encoding.
+        if encoding is None:
+            return stream_encoding is not None
+
+        return False
+
+    def _force_correct_text_reader(text_reader, encoding, errors,
+                                   force_readable=False):
+        if _is_binary_reader(text_reader, False):
+            binary_reader = text_reader
+        else:
+            # If there is no target encoding set, we need to verify that the
+            # reader is not actually misconfigured.
+            if encoding is None and not _stream_is_misconfigured(text_reader):
+                return text_reader
+
+            if _is_compatible_text_stream(text_reader, encoding, errors):
+                return text_reader
+
+            # If the reader has no encoding, we try to find the underlying
+            # binary reader for it.  If that fails because the environment is
+            # misconfigured, we silently go with the same reader because this
+            # is too common to happen.  In that case, mojibake is better than
+            # exceptions.
+            binary_reader = _find_binary_reader(text_reader)
+            if binary_reader is None:
+                return text_reader
+
+        # At this point, we default the errors to replace instead of strict
+        # because nobody handles those errors anyways and at this point
+        # we're so fundamentally fucked that nothing can repair it.
+        if errors is None:
+            errors = 'replace'
+        return _make_text_stream(binary_reader, encoding, errors,
+                                 force_readable=force_readable)
+
+    def _force_correct_text_writer(text_writer, encoding, errors,
+                                   force_writable=False):
+        if _is_binary_writer(text_writer, False):
+            binary_writer = text_writer
+        else:
+            # If there is no target encoding set, we need to verify that the
+            # writer is not actually misconfigured.
+            if encoding is None and not _stream_is_misconfigured(text_writer):
+                return text_writer
+
+            if _is_compatible_text_stream(text_writer, encoding, errors):
+                return text_writer
+
+            # If the writer has no encoding, we try to find the underlying
+            # binary writer for it.  If that fails because the environment is
+            # misconfigured, we silently go with the same writer because this
+            # is too common to happen.  In that case, mojibake is better than
+            # exceptions.
+            binary_writer = _find_binary_writer(text_writer)
+            if binary_writer is None:
+                return text_writer
+
+        # At this point, we default the errors to replace instead of strict
+        # because nobody handles those errors anyways and at this point
+        # we're so fundamentally fucked that nothing can repair it.
+        if errors is None:
+            errors = 'replace'
+        return _make_text_stream(binary_writer, encoding, errors,
+                                 force_writable=force_writable)
+
+    def get_binary_stdin():
+        reader = _find_binary_reader(sys.stdin)
+        if reader is None:
+            raise RuntimeError('Was not able to determine binary '
+                               'stream for sys.stdin.')
+        return reader
+
+    def get_binary_stdout():
+        writer = _find_binary_writer(sys.stdout)
+        if writer is None:
+            raise RuntimeError('Was not able to determine binary '
+                               'stream for sys.stdout.')
+        return writer
+
+    def get_binary_stderr():
+        writer = _find_binary_writer(sys.stderr)
+        if writer is None:
+            raise RuntimeError('Was not able to determine binary '
+                               'stream for sys.stderr.')
+        return writer
+
+    def get_text_stdin(encoding=None, errors=None):
+        rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+        if rv is not None:
+            return rv
+        return _force_correct_text_reader(sys.stdin, encoding, errors,
+                                          force_readable=True)
+
+    def get_text_stdout(encoding=None, errors=None):
+        rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+        if rv is not None:
+            return rv
+        return _force_correct_text_writer(sys.stdout, encoding, errors,
+                                          force_writable=True)
+
+    def get_text_stderr(encoding=None, errors=None):
+        rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+        if rv is not None:
+            return rv
+        return _force_correct_text_writer(sys.stderr, encoding, errors,
+                                          force_writable=True)
+
+    def filename_to_ui(value):
+        if isinstance(value, bytes):
+            value = value.decode(get_filesystem_encoding(), 'replace')
+        else:
+            value = value.encode('utf-8', 'surrogateescape') \
+                .decode('utf-8', 'replace')
+        return value
+
+
+def get_streerror(e, default=None):
+    if hasattr(e, 'strerror'):
+        msg = e.strerror
+    else:
+        if default is not None:
+            msg = default
+        else:
+            msg = str(e)
+    if isinstance(msg, bytes):
+        msg = msg.decode('utf-8', 'replace')
+    return msg
+
+
+def open_stream(filename, mode='r', encoding=None, errors='strict',
+                atomic=False):
+    # Standard streams first.  These are simple because they don't need
+    # special handling for the atomic flag.  It's entirely ignored.
+    if filename == '-':
+        if any(m in mode for m in ['w', 'a', 'x']):
+            if 'b' in mode:
+                return get_binary_stdout(), False
+            return get_text_stdout(encoding=encoding, errors=errors), False
+        if 'b' in mode:
+            return get_binary_stdin(), False
+        return get_text_stdin(encoding=encoding, errors=errors), False
+
+    # Non-atomic writes directly go out through the regular open functions.
+    if not atomic:
+        if encoding is None:
+            return open(filename, mode), True
+        return io.open(filename, mode, encoding=encoding, errors=errors), True
+
+    # Some usability stuff for atomic writes
+    if 'a' in mode:
+        raise ValueError(
+            'Appending to an existing file is not supported, because that '
+            'would involve an expensive `copy`-operation to a temporary '
+            'file. Open the file in normal `w`-mode and copy explicitly '
+            'if that\'s what you\'re after.'
+        )
+    if 'x' in mode:
+        raise ValueError('Use the `overwrite`-parameter instead.')
+    if 'w' not in mode:
+        raise ValueError('Atomic writes only make sense with `w`-mode.')
+
+    # Atomic writes are more complicated.  They work by opening a file
+    # as a proxy in the same folder and then using the fdopen
+    # functionality to wrap it in a Python file.  Then we wrap it in an
+    # atomic file that moves the file over on close.
+    import tempfile
+    fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename),
+                                        prefix='.__atomic-write')
+
+    if encoding is not None:
+        f = io.open(fd, mode, encoding=encoding, errors=errors)
+    else:
+        f = os.fdopen(fd, mode)
+
+    return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True
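+
+# Illustrative usage of the atomic path above ('out.txt' is a made-up
+# name): open_stream('out.txt', 'w', atomic=True) returns an
+# (_AtomicFile, True) pair; the temporary file only replaces 'out.txt'
+# once the wrapper is closed.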
+
+
+# Used in a destructor call, needs extra protection from interpreter cleanup.
+if hasattr(os, 'replace'):
+    _replace = os.replace
+    _can_replace = True
+else:
+    _replace = os.rename
+    _can_replace = not WIN
+
+
+class _AtomicFile(object):
+
+    def __init__(self, f, tmp_filename, real_filename):
+        self._f = f
+        self._tmp_filename = tmp_filename
+        self._real_filename = real_filename
+        self.closed = False
+
+    @property
+    def name(self):
+        return self._real_filename
+
+    def close(self, delete=False):
+        if self.closed:
+            return
+        self._f.close()
+        if not _can_replace:
+            try:
+                os.remove(self._real_filename)
+            except OSError:
+                pass
+        _replace(self._tmp_filename, self._real_filename)
+        self.closed = True
+
+    def __getattr__(self, name):
+        return getattr(self._f, name)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self.close(delete=exc_type is not None)
+
+    def __repr__(self):
+        return repr(self._f)
+
+
+auto_wrap_for_ansi = None
+colorama = None
+get_winterm_size = None
+
+
+def strip_ansi(value):
+    return _ansi_re.sub('', value)
+
+
+def should_strip_ansi(stream=None, color=None):
+    if color is None:
+        if stream is None:
+            stream = sys.stdin
+        return not isatty(stream)
+    return not color
+
+
+# If we're on Windows, we provide transparent integration through
+# colorama.  This will make ANSI colors through the echo function
+# work automatically.
+if WIN:
+    # Windows has a smaller terminal
+    DEFAULT_COLUMNS = 79
+
+    from ._winconsole import _get_windows_console_stream, _wrap_std_stream
+
+    def _get_argv_encoding():
+        import locale
+        return locale.getpreferredencoding()
+
+    if PY2:
+        def raw_input(prompt=''):
+            sys.stderr.flush()
+            if prompt:
+                stdout = _default_text_stdout()
+                stdout.write(prompt)
+            stdin = _default_text_stdin()
+            return stdin.readline().rstrip('\r\n')
+
+    try:
+        import colorama
+    except ImportError:
+        pass
+    else:
+        _ansi_stream_wrappers = WeakKeyDictionary()
+
+        def auto_wrap_for_ansi(stream, color=None):
+            """This function wraps a stream so that calls through colorama
+            are issued to the win32 console API to recolor on demand.  It
+            also makes sure to reset the colors if a write call is
+            interrupted, so the console is not left broken afterwards.
+            """
+            try:
+                cached = _ansi_stream_wrappers.get(stream)
+            except Exception:
+                cached = None
+            if cached is not None:
+                return cached
+            strip = should_strip_ansi(stream, color)
+            ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
+            rv = ansi_wrapper.stream
+            _write = rv.write
+
+            def _safe_write(s):
+                try:
+                    return _write(s)
+                except:
+                    ansi_wrapper.reset_all()
+                    raise
+
+            rv.write = _safe_write
+            try:
+                _ansi_stream_wrappers[stream] = rv
+            except Exception:
+                pass
+            return rv
+
+        def get_winterm_size():
+            win = colorama.win32.GetConsoleScreenBufferInfo(
+                colorama.win32.STDOUT).srWindow
+            return win.Right - win.Left, win.Bottom - win.Top
+else:
+    def _get_argv_encoding():
+        return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding()
+
+    _get_windows_console_stream = lambda *x: None
+    _wrap_std_stream = lambda *x: None
+
+
+def term_len(x):
+    return len(strip_ansi(x))
+
+
+def isatty(stream):
+    try:
+        return stream.isatty()
+    except Exception:
+        return False
+
+
+def _make_cached_stream_func(src_func, wrapper_func):
+    cache = WeakKeyDictionary()
+    def func():
+        stream = src_func()
+        try:
+            rv = cache.get(stream)
+        except Exception:
+            rv = None
+        if rv is not None:
+            return rv
+        rv = wrapper_func()
+        try:
+            stream = src_func()  # In case wrapper_func() modified the stream
+            cache[stream] = rv
+        except Exception:
+            pass
+        return rv
+    return func
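+
+# The cached wrappers below return the same wrapped stream on repeated
+# calls as long as the underlying sys stream object is unchanged, e.g.
+# _default_text_stdout() is _default_text_stdout() holds in that case.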
+
+
+_default_text_stdin = _make_cached_stream_func(
+    lambda: sys.stdin, get_text_stdin)
+_default_text_stdout = _make_cached_stream_func(
+    lambda: sys.stdout, get_text_stdout)
+_default_text_stderr = _make_cached_stream_func(
+    lambda: sys.stderr, get_text_stderr)
+
+
+binary_streams = {
+    'stdin': get_binary_stdin,
+    'stdout': get_binary_stdout,
+    'stderr': get_binary_stderr,
+}
+
+text_streams = {
+    'stdin': get_text_stdin,
+    'stdout': get_text_stdout,
+    'stderr': get_text_stderr,
+}
diff --git a/lib/python3.7/site-packages/click/_termui_impl.py b/lib/python3.7/site-packages/click/_termui_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..00a8e5ef1ca817fea214246d8f045d45ae5edde3
--- /dev/null
+++ b/lib/python3.7/site-packages/click/_termui_impl.py
@@ -0,0 +1,621 @@
+# -*- coding: utf-8 -*-
+"""
+click._termui_impl
+~~~~~~~~~~~~~~~~~~
+
+This module contains implementations for the termui module. To keep the
+import time of Click down, some infrequently used functionality is
+placed in this module and only imported as needed.
+
+:copyright: © 2014 by the Pallets team.
+:license: BSD, see LICENSE.rst for more details.
+"""
+
+import os
+import sys
+import time
+import math
+import contextlib
+from ._compat import _default_text_stdout, range_type, PY2, isatty, \
+     open_stream, strip_ansi, term_len, get_best_encoding, WIN, int_types, \
+     CYGWIN
+from .utils import echo
+from .exceptions import ClickException
+
+
+if os.name == 'nt':
+    BEFORE_BAR = '\r'
+    AFTER_BAR = '\n'
+else:
+    BEFORE_BAR = '\r\033[?25l'
+    AFTER_BAR = '\033[?25h\n'
+
+
+def _length_hint(obj):
+    """Returns the length hint of an object."""
+    try:
+        return len(obj)
+    except (AttributeError, TypeError):
+        try:
+            get_hint = type(obj).__length_hint__
+        except AttributeError:
+            return None
+        try:
+            hint = get_hint(obj)
+        except TypeError:
+            return None
+        if hint is NotImplemented or \
+           not isinstance(hint, int_types) or \
+           hint < 0:
+            return None
+        return hint
+
+
+class ProgressBar(object):
+
+    def __init__(self, iterable, length=None, fill_char='#', empty_char=' ',
+                 bar_template='%(bar)s', info_sep='  ', show_eta=True,
+                 show_percent=None, show_pos=False, item_show_func=None,
+                 label=None, file=None, color=None, width=30):
+        self.fill_char = fill_char
+        self.empty_char = empty_char
+        self.bar_template = bar_template
+        self.info_sep = info_sep
+        self.show_eta = show_eta
+        self.show_percent = show_percent
+        self.show_pos = show_pos
+        self.item_show_func = item_show_func
+        self.label = label or ''
+        if file is None:
+            file = _default_text_stdout()
+        self.file = file
+        self.color = color
+        self.width = width
+        self.autowidth = width == 0
+
+        if length is None:
+            length = _length_hint(iterable)
+        if iterable is None:
+            if length is None:
+                raise TypeError('iterable or length is required')
+            iterable = range_type(length)
+        self.iter = iter(iterable)
+        self.length = length
+        self.length_known = length is not None
+        self.pos = 0
+        self.avg = []
+        self.start = self.last_eta = time.time()
+        self.eta_known = False
+        self.finished = False
+        self.max_width = None
+        self.entered = False
+        self.current_item = None
+        self.is_hidden = not isatty(self.file)
+        self._last_line = None
+        self.short_limit = 0.5
+
+    def __enter__(self):
+        self.entered = True
+        self.render_progress()
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self.render_finish()
+
+    def __iter__(self):
+        if not self.entered:
+            raise RuntimeError('You need to use progress bars in a with block.')
+        self.render_progress()
+        return self.generator()
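+
+    # Typical use goes through a ``with`` block, for example (where
+    # ``process`` is a hypothetical per-item callback):
+    #
+    #     with ProgressBar(items, label='working') as bar:
+    #         for item in bar:
+    #             process(item)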
+
+    def is_fast(self):
+        return time.time() - self.start <= self.short_limit
+
+    def render_finish(self):
+        if self.is_hidden or self.is_fast():
+            return
+        self.file.write(AFTER_BAR)
+        self.file.flush()
+
+    @property
+    def pct(self):
+        if self.finished:
+            return 1.0
+        return min(self.pos / (float(self.length) or 1), 1.0)
+
+    @property
+    def time_per_iteration(self):
+        if not self.avg:
+            return 0.0
+        return sum(self.avg) / float(len(self.avg))
+
+    @property
+    def eta(self):
+        if self.length_known and not self.finished:
+            return self.time_per_iteration * (self.length - self.pos)
+        return 0.0
+
+    def format_eta(self):
+        if self.eta_known:
+            t = int(self.eta)
+            seconds = t % 60
+            t //= 60
+            minutes = t % 60
+            t //= 60
+            hours = t % 24
+            t //= 24
+            if t > 0:
+                days = t
+                return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
+            else:
+                return '%02d:%02d:%02d' % (hours, minutes, seconds)
+        return ''
+
+    def format_pos(self):
+        pos = str(self.pos)
+        if self.length_known:
+            pos += '/%s' % self.length
+        return pos
+
+    def format_pct(self):
+        return ('% 4d%%' % int(self.pct * 100))[1:]
+
+    def format_bar(self):
+        if self.length_known:
+            bar_length = int(self.pct * self.width)
+            bar = self.fill_char * bar_length
+            bar += self.empty_char * (self.width - bar_length)
+        elif self.finished:
+            bar = self.fill_char * self.width
+        else:
+            bar = list(self.empty_char * (self.width or 1))
+            if self.time_per_iteration != 0:
+                bar[int((math.cos(self.pos * self.time_per_iteration)
+                    / 2.0 + 0.5) * self.width)] = self.fill_char
+            bar = ''.join(bar)
+        return bar
+
+    def format_progress_line(self):
+        show_percent = self.show_percent
+
+        info_bits = []
+        if self.length_known and show_percent is None:
+            show_percent = not self.show_pos
+
+        if self.show_pos:
+            info_bits.append(self.format_pos())
+        if show_percent:
+            info_bits.append(self.format_pct())
+        if self.show_eta and self.eta_known and not self.finished:
+            info_bits.append(self.format_eta())
+        if self.item_show_func is not None:
+            item_info = self.item_show_func(self.current_item)
+            if item_info is not None:
+                info_bits.append(item_info)
+
+        return (self.bar_template % {
+            'label': self.label,
+            'bar': self.format_bar(),
+            'info': self.info_sep.join(info_bits)
+        }).rstrip()
+
+    def render_progress(self):
+        from .termui import get_terminal_size
+
+        if self.is_hidden:
+            return
+
+        buf = []
+        # Update width in case the terminal has been resized
+        if self.autowidth:
+            old_width = self.width
+            self.width = 0
+            clutter_length = term_len(self.format_progress_line())
+            new_width = max(0, get_terminal_size()[0] - clutter_length)
+            if new_width < old_width:
+                buf.append(BEFORE_BAR)
+                buf.append(' ' * self.max_width)
+                self.max_width = new_width
+            self.width = new_width
+
+        clear_width = self.width
+        if self.max_width is not None:
+            clear_width = self.max_width
+
+        buf.append(BEFORE_BAR)
+        line = self.format_progress_line()
+        line_len = term_len(line)
+        if self.max_width is None or self.max_width < line_len:
+            self.max_width = line_len
+
+        buf.append(line)
+        buf.append(' ' * (clear_width - line_len))
+        line = ''.join(buf)
+        # Render the line only if it changed.
+
+        if line != self._last_line and not self.is_fast():
+            self._last_line = line
+            echo(line, file=self.file, color=self.color, nl=False)
+            self.file.flush()
+
+    def make_step(self, n_steps):
+        self.pos += n_steps
+        if self.length_known and self.pos >= self.length:
+            self.finished = True
+
+        if (time.time() - self.last_eta) < 1.0:
+            return
+
+        self.last_eta = time.time()
+
+        # self.avg is a rolling list (length <= 7) of per-item times,
+        # where each entry is the time elapsed so far divided by the
+        # number of items processed so far (self.pos).
+        if self.pos:
+            step = (time.time() - self.start) / self.pos
+        else:
+            step = time.time() - self.start
+
+        self.avg = self.avg[-6:] + [step]
+
+        self.eta_known = self.length_known
+
+    def update(self, n_steps):
+        self.make_step(n_steps)
+        self.render_progress()
+
+    def finish(self):
+        self.eta_known = False
+        self.current_item = None
+        self.finished = True
+
+    def generator(self):
+        """
+        Returns a generator which yields the items added to the bar during
+        construction, and updates the progress bar *after* the yielded block
+        returns.
+        """
+        if not self.entered:
+            raise RuntimeError('You need to use progress bars in a with block.')
+
+        if self.is_hidden:
+            for rv in self.iter:
+                yield rv
+        else:
+            for rv in self.iter:
+                self.current_item = rv
+                yield rv
+                self.update(1)
+            self.finish()
+            self.render_progress()
+
+
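+# Editor's note: an illustrative sketch of driving ``ProgressBar``
+# directly; real code would normally go through the public
+# ``click.progressbar()`` wrapper.  The demo function is hypothetical and
+# only shows the required ``with`` protocol: items are yielded one at a
+# time and the bar advances after each loop body completes.
+def _demo_progressbar():
+    import time as _time
+    with ProgressBar(range(5), label='demo', width=20) as bar:
+        for _item in bar:
+            _time.sleep(0.01)  # simulate per-item work
+
+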
+def pager(generator, color=None):
+    """Decide what method to use for paging through text."""
+    stdout = _default_text_stdout()
+    if not isatty(sys.stdin) or not isatty(stdout):
+        return _nullpager(stdout, generator, color)
+    pager_cmd = (os.environ.get('PAGER', None) or '').strip()
+    if pager_cmd:
+        if WIN:
+            return _tempfilepager(generator, pager_cmd, color)
+        return _pipepager(generator, pager_cmd, color)
+    if os.environ.get('TERM') in ('dumb', 'emacs'):
+        return _nullpager(stdout, generator, color)
+    if WIN or sys.platform.startswith('os2'):
+        return _tempfilepager(generator, 'more <', color)
+    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
+        return _pipepager(generator, 'less', color)
+
+    import tempfile
+    fd, filename = tempfile.mkstemp()
+    os.close(fd)
+    try:
+        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
+            return _pipepager(generator, 'more', color)
+        return _nullpager(stdout, generator, color)
+    finally:
+        os.unlink(filename)
+
+
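+# Editor's note: a hypothetical usage sketch.  ``pager`` consumes a
+# generator of text chunks; whether it pipes through $PAGER/less, spools
+# to a temporary file, or just prints is decided by the logic above based
+# on TTY status and the environment.
+def _demo_pager():
+    pager('line %d\n' % i for i in range(200))
+
+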
+def _pipepager(generator, cmd, color):
+    """Page through text by feeding it to another program.  Invoking a
+    pager through this might support colors.
+    """
+    import subprocess
+    env = dict(os.environ)
+
+    # If we're piping to less we might support colors under the
+    # condition that the -R flag is in effect, either passed explicitly
+    # or via the LESS environment variable.
+    cmd_detail = cmd.rsplit('/', 1)[-1].split()
+    if color is None and cmd_detail[0] == 'less':
+        less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:])
+        if not less_flags:
+            env['LESS'] = '-R'
+            color = True
+        elif 'r' in less_flags or 'R' in less_flags:
+            color = True
+
+    c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
+                         env=env)
+    encoding = get_best_encoding(c.stdin)
+    try:
+        for text in generator:
+            if not color:
+                text = strip_ansi(text)
+
+            c.stdin.write(text.encode(encoding, 'replace'))
+    except (IOError, KeyboardInterrupt):
+        pass
+    else:
+        c.stdin.close()
+
+    # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
+    # search or other commands inside less).
+    #
+    # That means when the user hits ^C, the parent process (click) terminates,
+    # but less is still alive, paging the output and messing up the terminal.
+    #
+    # If the user wants to make the pager exit on ^C, they should set
+    # `LESS='-K'`. It's not our decision to make.
+    while True:
+        try:
+            c.wait()
+        except KeyboardInterrupt:
+            pass
+        else:
+            break
+
+
+def _tempfilepager(generator, cmd, color):
+    """Page through text by invoking a program on a temporary file."""
+    import tempfile
+    filename = tempfile.mktemp()
+    # TODO: This never terminates if the passed generator never terminates.
+    text = "".join(generator)
+    if not color:
+        text = strip_ansi(text)
+    encoding = get_best_encoding(sys.stdout)
+    with open_stream(filename, 'wb')[0] as f:
+        f.write(text.encode(encoding))
+    try:
+        os.system(cmd + ' "' + filename + '"')
+    finally:
+        os.unlink(filename)
+
+
+def _nullpager(stream, generator, color):
+    """Simply print unformatted text.  This is the ultimate fallback."""
+    for text in generator:
+        if not color:
+            text = strip_ansi(text)
+        stream.write(text)
+
+
+class Editor(object):
+
+    def __init__(self, editor=None, env=None, require_save=True,
+                 extension='.txt'):
+        self.editor = editor
+        self.env = env
+        self.require_save = require_save
+        self.extension = extension
+
+    def get_editor(self):
+        if self.editor is not None:
+            return self.editor
+        for key in 'VISUAL', 'EDITOR':
+            rv = os.environ.get(key)
+            if rv:
+                return rv
+        if WIN:
+            return 'notepad'
+        for editor in 'vim', 'nano':
+            if os.system('which %s >/dev/null 2>&1' % editor) == 0:
+                return editor
+        return 'vi'
+
+    def edit_file(self, filename):
+        import subprocess
+        editor = self.get_editor()
+        if self.env:
+            environ = os.environ.copy()
+            environ.update(self.env)
+        else:
+            environ = None
+        try:
+            c = subprocess.Popen('%s "%s"' % (editor, filename),
+                                 env=environ, shell=True)
+            exit_code = c.wait()
+            if exit_code != 0:
+                raise ClickException('%s: Editing failed!' % editor)
+        except OSError as e:
+            raise ClickException('%s: Editing failed: %s' % (editor, e))
+
+    def edit(self, text):
+        import tempfile
+
+        text = text or ''
+        if text and not text.endswith('\n'):
+            text += '\n'
+
+        fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension)
+        try:
+            if WIN:
+                encoding = 'utf-8-sig'
+                text = text.replace('\n', '\r\n')
+            else:
+                encoding = 'utf-8'
+            text = text.encode(encoding)
+
+            f = os.fdopen(fd, 'wb')
+            f.write(text)
+            f.close()
+            timestamp = os.path.getmtime(name)
+
+            self.edit_file(name)
+
+            if self.require_save \
+               and os.path.getmtime(name) == timestamp:
+                return None
+
+            f = open(name, 'rb')
+            try:
+                rv = f.read()
+            finally:
+                f.close()
+            return rv.decode('utf-8-sig').replace('\r\n', '\n')
+        finally:
+            os.unlink(name)
+
+
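+# Editor's note: an illustrative sketch of the ``Editor`` round trip.
+# ``edit`` spools the text to a temp file, launches $VISUAL/$EDITOR (or a
+# fallback), and returns the edited text, or ``None`` when nothing was
+# saved and ``require_save`` is set.  The demo function is hypothetical.
+def _demo_editor():
+    edited = Editor(require_save=True).edit('initial text\n')
+    if edited is None:
+        print('no changes saved')
+    else:
+        print('got %d characters back' % len(edited))
+
+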
+def open_url(url, wait=False, locate=False):
+    import subprocess
+
+    def _unquote_file(url):
+        try:
+            from urllib.parse import unquote  # Python 3
+        except ImportError:
+            from urllib import unquote  # Python 2
+        if url.startswith('file://'):
+            url = unquote(url[7:])
+        return url
+
+    if sys.platform == 'darwin':
+        args = ['open']
+        if wait:
+            args.append('-W')
+        if locate:
+            args.append('-R')
+        args.append(_unquote_file(url))
+        null = open('/dev/null', 'w')
+        try:
+            return subprocess.Popen(args, stderr=null).wait()
+        finally:
+            null.close()
+    elif WIN:
+        if locate:
+            url = _unquote_file(url)
+            args = 'explorer /select,"%s"' % _unquote_file(
+                url.replace('"', ''))
+        else:
+            args = 'start %s "" "%s"' % (
+                wait and '/WAIT' or '', url.replace('"', ''))
+        return os.system(args)
+    elif CYGWIN:
+        if locate:
+            url = _unquote_file(url)
+            args = 'cygstart "%s"' % (os.path.dirname(url).replace('"', ''))
+        else:
+            args = 'cygstart %s "%s"' % (
+                wait and '-w' or '', url.replace('"', ''))
+        return os.system(args)
+
+    try:
+        if locate:
+            url = os.path.dirname(_unquote_file(url)) or '.'
+        else:
+            url = _unquote_file(url)
+        c = subprocess.Popen(['xdg-open', url])
+        if wait:
+            return c.wait()
+        return 0
+    except OSError:
+        if url.startswith(('http://', 'https://')) and not locate and not wait:
+            import webbrowser
+            webbrowser.open(url)
+            return 0
+        return 1
+
+
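+# Editor's note: hypothetical usage sketch.  ``open_url`` dispatches to
+# ``open`` on macOS, ``start``/``explorer`` on Windows, ``cygstart`` on
+# Cygwin, and ``xdg-open`` elsewhere; ``locate=True`` reveals the file in
+# a file manager instead of opening it.  The file path is made up.
+def _demo_open_url():
+    open_url('https://click.palletsprojects.com/')   # open in a browser
+    open_url('file:///tmp/report.txt', locate=True)  # reveal in file manager
+
+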
+def _translate_ch_to_exc(ch):
+    if ch == u'\x03':
+        raise KeyboardInterrupt()
+    if ch == u'\x04' and not WIN:  # Unix-like, Ctrl+D
+        raise EOFError()
+    if ch == u'\x1a' and WIN:      # Windows, Ctrl+Z
+        raise EOFError()
+
+
+if WIN:
+    import msvcrt
+
+    @contextlib.contextmanager
+    def raw_terminal():
+        yield
+
+    def getchar(echo):
+        # The function `getch` will return a bytes object corresponding to
+        # the pressed character. Since Windows 10 build 1803, it will also
+        # return \x00 when called a second time after pressing a regular key.
+        #
+        # `getwch` does not share this probably-bugged behavior. Moreover, it
+        # returns a Unicode object by default, which is what we want.
+        #
+        # Either of these functions will return \x00 or \xe0 to indicate
+        # a special key, and you need to call the same function again to get
+        # the "rest" of the code. The fun part is that \u00e0 is
+        # "latin small letter a with grave", so if you type that on a French
+        # keyboard, you _also_ get a \xe0.
+        # E.g., consider the Up arrow. This returns \xe0 and then \x48. The
+        # resulting Unicode string reads as "a with grave" + "capital H".
+        # This is indistinguishable from when the user actually types
+        # "a with grave" and then "capital H".
+        #
+        # When \xe0 is returned, we assume it's part of a special-key sequence
+        # and call `getwch` again, but that means that when the user types
+        # the \u00e0 character, `getchar` doesn't return until a second
+        # character is typed.
+        # The alternative is returning immediately, but that would mess up
+        # cross-platform handling of arrow keys and others that start with
+        # \xe0. Another option is using `getch`, but then we can't reliably
+        # read non-ASCII characters, because return values of `getch` are
+        # limited to the current 8-bit codepage.
+        #
+        # Anyway, Click doesn't claim to do this Right(tm), and using `getwch`
+        # is doing the right thing in more situations than with `getch`.
+        if echo:
+            func = msvcrt.getwche
+        else:
+            func = msvcrt.getwch
+
+        rv = func()
+        if rv in (u'\x00', u'\xe0'):
+            # \x00 and \xe0 are control characters that indicate a
+            # special key, see above.
+            rv += func()
+        _translate_ch_to_exc(rv)
+        return rv
+else:
+    import tty
+    import termios
+
+    @contextlib.contextmanager
+    def raw_terminal():
+        if not isatty(sys.stdin):
+            f = open('/dev/tty')
+            fd = f.fileno()
+        else:
+            fd = sys.stdin.fileno()
+            f = None
+        try:
+            old_settings = termios.tcgetattr(fd)
+            try:
+                tty.setraw(fd)
+                yield fd
+            finally:
+                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+                sys.stdout.flush()
+                if f is not None:
+                    f.close()
+        except termios.error:
+            pass
+
+    def getchar(echo):
+        with raw_terminal() as fd:
+            ch = os.read(fd, 32)
+            ch = ch.decode(get_best_encoding(sys.stdin), 'replace')
+            if echo and isatty(sys.stdout):
+                sys.stdout.write(ch)
+            _translate_ch_to_exc(ch)
+            return ch
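+
+
+# Editor's note: an illustrative sketch of single-keypress input on top of
+# the platform-appropriate ``getchar`` defined above.  Ctrl+C and Ctrl+D
+# (Ctrl+Z on Windows) surface as KeyboardInterrupt/EOFError via
+# ``_translate_ch_to_exc``.  The demo function is hypothetical.
+def _demo_getchar():
+    print('press keys, q to quit ...')
+    while True:
+        if getchar(echo=False) == u'q':
+            break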
diff --git a/lib/python3.7/site-packages/click/_textwrap.py b/lib/python3.7/site-packages/click/_textwrap.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e776031eaa92904fc77e934a1125346bc9fed6a
--- /dev/null
+++ b/lib/python3.7/site-packages/click/_textwrap.py
@@ -0,0 +1,38 @@
+import textwrap
+from contextlib import contextmanager
+
+
+class TextWrapper(textwrap.TextWrapper):
+
+    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+        space_left = max(width - cur_len, 1)
+
+        if self.break_long_words:
+            last = reversed_chunks[-1]
+            cut = last[:space_left]
+            res = last[space_left:]
+            cur_line.append(cut)
+            reversed_chunks[-1] = res
+        elif not cur_line:
+            cur_line.append(reversed_chunks.pop())
+
+    @contextmanager
+    def extra_indent(self, indent):
+        old_initial_indent = self.initial_indent
+        old_subsequent_indent = self.subsequent_indent
+        self.initial_indent += indent
+        self.subsequent_indent += indent
+        try:
+            yield
+        finally:
+            self.initial_indent = old_initial_indent
+            self.subsequent_indent = old_subsequent_indent
+
+    def indent_only(self, text):
+        rv = []
+        for idx, line in enumerate(text.splitlines()):
+            indent = self.initial_indent
+            if idx > 0:
+                indent = self.subsequent_indent
+            rv.append(indent + line)
+        return '\n'.join(rv)
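+
+
+# Editor's note: a minimal sketch of the two extensions this subclass adds
+# over ``textwrap.TextWrapper``: temporarily deepening the indent via the
+# ``extra_indent`` context manager, and indenting without re-wrapping via
+# ``indent_only``.  The demo function is hypothetical.
+def _demo_textwrap():
+    w = TextWrapper(width=40, initial_indent='  ', subsequent_indent='  ')
+    with w.extra_indent('  '):  # indent is restored on exit
+        print(w.fill('some long help text that wraps at the given width'))
+    print(w.indent_only('first line\nsecond line'))  # no re-wrapping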
diff --git a/lib/python3.7/site-packages/click/_unicodefun.py b/lib/python3.7/site-packages/click/_unicodefun.py
new file mode 100644
index 0000000000000000000000000000000000000000..620edff37e260e244108bdabddd0b3434a6d3d31
--- /dev/null
+++ b/lib/python3.7/site-packages/click/_unicodefun.py
@@ -0,0 +1,125 @@
+import os
+import sys
+import codecs
+
+from ._compat import PY2
+
+
+# If someone wants to vendor click, we want to ensure the
+# correct package is discovered.  Ideally we could use a
+# relative import here but unfortunately Python does not
+# support that.
+click = sys.modules[__name__.rsplit('.', 1)[0]]
+
+
+def _find_unicode_literals_frame():
+    import __future__
+    if not hasattr(sys, '_getframe'):  # not all Python implementations have it
+        return 0
+    frm = sys._getframe(1)
+    idx = 1
+    while frm is not None:
+        if frm.f_globals.get('__name__', '').startswith('click.'):
+            frm = frm.f_back
+            idx += 1
+        elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag:
+            return idx
+        else:
+            break
+    return 0
+
+
+def _check_for_unicode_literals():
+    if not __debug__:
+        return
+    if not PY2 or click.disable_unicode_literals_warning:
+        return
+    bad_frame = _find_unicode_literals_frame()
+    if bad_frame <= 0:
+        return
+    from warnings import warn
+    warn(Warning('Click detected the use of the unicode_literals '
+                 '__future__ import.  This is heavily discouraged '
+                 'because it can introduce subtle bugs in your '
+                 'code.  You should instead use explicit u"" literals '
+                 'for your unicode strings.  For more information see '
+                 'https://click.palletsprojects.com/python3/'),
+         stacklevel=bad_frame)
+
+
+def _verify_python3_env():
+    """Ensures that the environment is good for unicode on Python 3."""
+    if PY2:
+        return
+    try:
+        import locale
+        fs_enc = codecs.lookup(locale.getpreferredencoding()).name
+    except Exception:
+        fs_enc = 'ascii'
+    if fs_enc != 'ascii':
+        return
+
+    extra = ''
+    if os.name == 'posix':
+        import subprocess
+        try:
+            rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE,
+                                  stderr=subprocess.PIPE).communicate()[0]
+        except OSError:
+            rv = b''
+        good_locales = set()
+        has_c_utf8 = False
+
+        # Make sure we're operating on text here.
+        if isinstance(rv, bytes):
+            rv = rv.decode('ascii', 'replace')
+
+        for line in rv.splitlines():
+            locale = line.strip()
+            if locale.lower().endswith(('.utf-8', '.utf8')):
+                good_locales.add(locale)
+                if locale.lower() in ('c.utf8', 'c.utf-8'):
+                    has_c_utf8 = True
+
+        extra += '\n\n'
+        if not good_locales:
+            extra += (
+                'Additional information: on this system no suitable UTF-8\n'
+                'locales were discovered.  This most likely needs to be\n'
+                'resolved by reconfiguring the locale system.'
+            )
+        elif has_c_utf8:
+            extra += (
+                'This system supports the C.UTF-8 locale which is recommended.\n'
+                'You might be able to resolve your issue by exporting the\n'
+                'following environment variables:\n\n'
+                '    export LC_ALL=C.UTF-8\n'
+                '    export LANG=C.UTF-8'
+            )
+        else:
+            extra += (
+                'This system lists a couple of UTF-8 supporting locales that\n'
+                'you can pick from.  The following suitable locales were\n'
+                'discovered: %s'
+            ) % ', '.join(sorted(good_locales))
+
+        bad_locale = None
+        for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'):
+            if locale and locale.lower().endswith(('.utf-8', '.utf8')):
+                bad_locale = locale
+            if locale is not None:
+                break
+        if bad_locale is not None:
+            extra += (
+                '\n\nClick discovered that you exported a UTF-8 locale\n'
+                'but the locale system could not pick it up because\n'
+                'it does not exist.  The exported locale is "%s" but it\n'
+                'is not supported.'
+            ) % bad_locale
+
+    raise RuntimeError(
+        'Click will abort further execution because Python 3 was'
+        ' configured to use ASCII as encoding for the environment.'
+        ' Consult https://click.palletsprojects.com/en/7.x/python3/ for'
+        ' mitigation steps.' + extra
+    )
diff --git a/lib/python3.7/site-packages/click/_winconsole.py b/lib/python3.7/site-packages/click/_winconsole.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbb080ddaea2b271522cfc7a15329d7259056c56
--- /dev/null
+++ b/lib/python3.7/site-packages/click/_winconsole.py
@@ -0,0 +1,307 @@
+# -*- coding: utf-8 -*-
+# This module is based on the excellent work by Adam Bartoš who
+# provided a lot of what went into the implementation here in
+# the discussion of issue #1602 in the Python bug tracker.
+#
+# There are some general differences with regard to how this works
+# compared to the original patches, as we do not need to patch the
+# entire interpreter but just work in our little world of
+# echo and prompt.
+
+import io
+import os
+import sys
+import zlib
+import time
+import ctypes
+import msvcrt
+from ._compat import _NonClosingTextIOWrapper, text_type, PY2
+from ctypes import byref, POINTER, c_int, c_char, c_char_p, \
+     c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE
+try:
+    from ctypes import pythonapi
+    PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
+    PyBuffer_Release = pythonapi.PyBuffer_Release
+except ImportError:
+    pythonapi = None
+from ctypes.wintypes import LPWSTR, LPCWSTR
+
+
+c_ssize_p = POINTER(c_ssize_t)
+
+kernel32 = windll.kernel32
+GetStdHandle = kernel32.GetStdHandle
+ReadConsoleW = kernel32.ReadConsoleW
+WriteConsoleW = kernel32.WriteConsoleW
+GetLastError = kernel32.GetLastError
+GetCommandLineW = WINFUNCTYPE(LPWSTR)(
+    ('GetCommandLineW', windll.kernel32))
+CommandLineToArgvW = WINFUNCTYPE(
+    POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
+        ('CommandLineToArgvW', windll.shell32))
+
+
+STDIN_HANDLE = GetStdHandle(-10)
+STDOUT_HANDLE = GetStdHandle(-11)
+STDERR_HANDLE = GetStdHandle(-12)
+
+
+PyBUF_SIMPLE = 0
+PyBUF_WRITABLE = 1
+
+ERROR_SUCCESS = 0
+ERROR_NOT_ENOUGH_MEMORY = 8
+ERROR_OPERATION_ABORTED = 995
+
+STDIN_FILENO = 0
+STDOUT_FILENO = 1
+STDERR_FILENO = 2
+
+EOF = b'\x1a'
+MAX_BYTES_WRITTEN = 32767
+
+
+class Py_buffer(ctypes.Structure):
+    _fields_ = [
+        ('buf', c_void_p),
+        ('obj', py_object),
+        ('len', c_ssize_t),
+        ('itemsize', c_ssize_t),
+        ('readonly', c_int),
+        ('ndim', c_int),
+        ('format', c_char_p),
+        ('shape', c_ssize_p),
+        ('strides', c_ssize_p),
+        ('suboffsets', c_ssize_p),
+        ('internal', c_void_p)
+    ]
+
+    if PY2:
+        _fields_.insert(-1, ('smalltable', c_ssize_t * 2))
+
+
+# On PyPy we cannot get buffers so our ability to operate here is
+# severely limited.
+if pythonapi is None:
+    get_buffer = None
+else:
+    def get_buffer(obj, writable=False):
+        buf = Py_buffer()
+        flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
+        PyObject_GetBuffer(py_object(obj), byref(buf), flags)
+        try:
+            buffer_type = c_char * buf.len
+            return buffer_type.from_address(buf.buf)
+        finally:
+            PyBuffer_Release(byref(buf))
+
+
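+# Editor's note: an illustrative sketch of ``get_buffer``, which exposes
+# an object's raw memory as a ctypes ``c_char`` array for the Win32
+# console APIs below.  Writable access needs a mutable object such as
+# ``bytearray``; on PyPy ``get_buffer`` is ``None``.  The demo function
+# is hypothetical.
+def _demo_get_buffer():
+    data = bytearray(b'abcd')
+    buf = get_buffer(data, writable=True)
+    buf[0] = b'X'                  # writes through into ``data``
+    assert bytes(data) == b'Xbcd'
+
+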
+class _WindowsConsoleRawIOBase(io.RawIOBase):
+
+    def __init__(self, handle):
+        self.handle = handle
+
+    def isatty(self):
+        io.RawIOBase.isatty(self)
+        return True
+
+
+class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
+
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        bytes_to_be_read = len(b)
+        if not bytes_to_be_read:
+            return 0
+        elif bytes_to_be_read % 2:
+            raise ValueError('cannot read odd number of bytes from '
+                             'UTF-16-LE encoded console')
+
+        buffer = get_buffer(b, writable=True)
+        code_units_to_be_read = bytes_to_be_read // 2
+        code_units_read = c_ulong()
+
+        rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read,
+                          byref(code_units_read), None)
+        if GetLastError() == ERROR_OPERATION_ABORTED:
+            # wait for KeyboardInterrupt
+            time.sleep(0.1)
+        if not rv:
+            raise OSError('Windows error: %s' % GetLastError())
+
+        if buffer[0] == EOF:
+            return 0
+        return 2 * code_units_read.value
+
+
+class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
+
+    def writable(self):
+        return True
+
+    @staticmethod
+    def _get_error_message(errno):
+        if errno == ERROR_SUCCESS:
+            return 'ERROR_SUCCESS'
+        elif errno == ERROR_NOT_ENOUGH_MEMORY:
+            return 'ERROR_NOT_ENOUGH_MEMORY'
+        return 'Windows error %s' % errno
+
+    def write(self, b):
+        bytes_to_be_written = len(b)
+        buf = get_buffer(b)
+        code_units_to_be_written = min(bytes_to_be_written,
+                                       MAX_BYTES_WRITTEN) // 2
+        code_units_written = c_ulong()
+
+        WriteConsoleW(self.handle, buf, code_units_to_be_written,
+                      byref(code_units_written), None)
+        bytes_written = 2 * code_units_written.value
+
+        if bytes_written == 0 and bytes_to_be_written > 0:
+            raise OSError(self._get_error_message(GetLastError()))
+        return bytes_written
+
+
+class ConsoleStream(object):
+
+    def __init__(self, text_stream, byte_stream):
+        self._text_stream = text_stream
+        self.buffer = byte_stream
+
+    @property
+    def name(self):
+        return self.buffer.name
+
+    def write(self, x):
+        if isinstance(x, text_type):
+            return self._text_stream.write(x)
+        try:
+            self.flush()
+        except Exception:
+            pass
+        return self.buffer.write(x)
+
+    def writelines(self, lines):
+        for line in lines:
+            self.write(line)
+
+    def __getattr__(self, name):
+        return getattr(self._text_stream, name)
+
+    def isatty(self):
+        return self.buffer.isatty()
+
+    def __repr__(self):
+        return '<ConsoleStream name=%r encoding=%r>' % (
+            self.name,
+            self.encoding,
+        )
+
+
+class WindowsChunkedWriter(object):
+    """
+    Wraps a stream (such as stdout), acting as a transparent proxy for all
+    attribute access apart from method 'write()' which we wrap to write in
+    limited chunks due to a Windows limitation on binary console streams.
+    """
+    def __init__(self, wrapped):
+        # double-underscore everything to prevent clashes with names of
+        # attributes on the wrapped stream object.
+        self.__wrapped = wrapped
+
+    def __getattr__(self, name):
+        return getattr(self.__wrapped, name)
+
+    def write(self, text):
+        total_to_write = len(text)
+        written = 0
+
+        while written < total_to_write:
+            to_write = min(total_to_write - written, MAX_BYTES_WRITTEN)
+            self.__wrapped.write(text[written:written+to_write])
+            written += to_write
+
+
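+# Editor's note: a hypothetical sketch of the chunked-write workaround.
+# Writes larger than MAX_BYTES_WRITTEN are split into several smaller
+# calls on the wrapped stream; a ``StringIO`` stands in for stdout here.
+def _demo_chunked_writer():
+    import io as _io
+    out = WindowsChunkedWriter(_io.StringIO())
+    out.write('x' * (MAX_BYTES_WRITTEN * 2 + 10))  # performed as 3 writes
+
+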
+_wrapped_std_streams = set()
+
+
+def _wrap_std_stream(name):
+    # Python 2 & Windows 7 and below
+    if PY2 and sys.getwindowsversion()[:2] <= (6, 1) and name not in _wrapped_std_streams:
+        setattr(sys, name, WindowsChunkedWriter(getattr(sys, name)))
+        _wrapped_std_streams.add(name)
+
+
+def _get_text_stdin(buffer_stream):
+    text_stream = _NonClosingTextIOWrapper(
+        io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
+        'utf-16-le', 'strict', line_buffering=True)
+    return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stdout(buffer_stream):
+    text_stream = _NonClosingTextIOWrapper(
+        io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)),
+        'utf-16-le', 'strict', line_buffering=True)
+    return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stderr(buffer_stream):
+    text_stream = _NonClosingTextIOWrapper(
+        io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)),
+        'utf-16-le', 'strict', line_buffering=True)
+    return ConsoleStream(text_stream, buffer_stream)
+
+
+if PY2:
+    def _hash_py_argv():
+        return zlib.crc32('\x00'.join(sys.argv[1:]))
+
+    _initial_argv_hash = _hash_py_argv()
+
+    def _get_windows_argv():
+        argc = c_int(0)
+        argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
+        argv = [argv_unicode[i] for i in range(0, argc.value)]
+
+        if not hasattr(sys, 'frozen'):
+            argv = argv[1:]
+            while len(argv) > 0:
+                arg = argv[0]
+                if not arg.startswith('-') or arg == '-':
+                    break
+                argv = argv[1:]
+                if arg.startswith(('-c', '-m')):
+                    break
+
+        return argv[1:]
+
+
+_stream_factories = {
+    0: _get_text_stdin,
+    1: _get_text_stdout,
+    2: _get_text_stderr,
+}
+
+
+def _get_windows_console_stream(f, encoding, errors):
+    if get_buffer is not None and \
+       encoding in ('utf-16-le', None) \
+       and errors in ('strict', None) and \
+       hasattr(f, 'isatty') and f.isatty():
+        func = _stream_factories.get(f.fileno())
+        if func is not None:
+            if not PY2:
+                f = getattr(f, 'buffer', None)
+                if f is None:
+                    return None
+            else:
+                # If we are on Python 2 we need to set the stream that we
+                # deal with to binary mode as otherwise the exercise is a
+                # bit moot.  The same problems apply as for
+                # get_binary_stdin and friends from _compat.
+                msvcrt.setmode(f.fileno(), os.O_BINARY)
+            return func(f)
diff --git a/lib/python3.7/site-packages/click/core.py b/lib/python3.7/site-packages/click/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a1e3422bec8c06344985fc6e3cdf2cd488d0ab4
--- /dev/null
+++ b/lib/python3.7/site-packages/click/core.py
@@ -0,0 +1,1856 @@
+import errno
+import inspect
+import os
+import sys
+from contextlib import contextmanager
+from itertools import repeat
+from functools import update_wrapper
+
+from .types import convert_type, IntRange, BOOL
+from .utils import PacifyFlushWrapper, make_str, make_default_short_help, \
+     echo, get_os_args
+from .exceptions import ClickException, UsageError, BadParameter, Abort, \
+     MissingParameter, Exit
+from .termui import prompt, confirm, style
+from .formatting import HelpFormatter, join_options
+from .parser import OptionParser, split_opt
+from .globals import push_context, pop_context
+
+from ._compat import PY2, isidentifier, iteritems, string_types
+from ._unicodefun import _check_for_unicode_literals, _verify_python3_env
+
+
+_missing = object()
+
+
+SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...'
+SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...'
+
+DEPRECATED_HELP_NOTICE = ' (DEPRECATED)'
+DEPRECATED_INVOKE_NOTICE = 'DeprecationWarning: ' + \
+                           'The command %(name)s is deprecated.'
+
+
+def _maybe_show_deprecated_notice(cmd):
+    if cmd.deprecated:
+        echo(style(DEPRECATED_INVOKE_NOTICE % {'name': cmd.name}, fg='red'), err=True)
+
+
+def fast_exit(code):
+    """Exit without garbage collection, this speeds up exit by about 10ms for
+    things like bash completion.
+    """
+    sys.stdout.flush()
+    sys.stderr.flush()
+    os._exit(code)
+
+
+def _bashcomplete(cmd, prog_name, complete_var=None):
+    """Internal handler for the bash completion support."""
+    if complete_var is None:
+        complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper()
+    complete_instr = os.environ.get(complete_var)
+    if not complete_instr:
+        return
+
+    from ._bashcomplete import bashcomplete
+    if bashcomplete(cmd, prog_name, complete_var, complete_instr):
+        fast_exit(1)
+
+
+def _check_multicommand(base_command, cmd_name, cmd, register=False):
+    if not base_command.chain or not isinstance(cmd, MultiCommand):
+        return
+    if register:
+        hint = 'It is not possible to add multi commands as children to ' \
+               'another multi command that is in chain mode'
+    else:
+        hint = 'Found a multi command as subcommand to a multi command ' \
+               'that is in chain mode.  This is not supported'
+    raise RuntimeError('%s.  Command "%s" is set to chain and "%s" was '
+                       'added as a subcommand, but it is itself a '
+                       'multi command.  ("%s" is a %s within a chained '
+                       '%s named "%s").' % (
+                           hint, base_command.name, cmd_name,
+                           cmd_name, cmd.__class__.__name__,
+                           base_command.__class__.__name__,
+                           base_command.name))
+
+
+def batch(iterable, batch_size):
+    return list(zip(*repeat(iter(iterable), batch_size)))
+
+
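+# Editor's note: a minimal sketch of ``batch``'s zip-of-one-iterator
+# trick: items are grouped into fixed-size tuples and a trailing partial
+# batch is silently dropped.  The demo function is hypothetical.
+def _demo_batch():
+    assert batch(range(7), 3) == [(0, 1, 2), (3, 4, 5)]  # 6 is dropped
+
+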
+def invoke_param_callback(callback, ctx, param, value):
+    code = getattr(callback, '__code__', None)
+    args = getattr(code, 'co_argcount', 3)
+
+    if args < 3:
+        # This will become a warning in Click 3.0:
+        from warnings import warn
+        warn(Warning('Invoked legacy parameter callback "%s".  The new '
+                     'signature for such callbacks starting with '
+                     'click 2.0 is (ctx, param, value).'
+                     % callback), stacklevel=3)
+        return callback(ctx, value)
+    return callback(ctx, param, value)
+
+
+@contextmanager
+def augment_usage_errors(ctx, param=None):
+    """Context manager that attaches extra information to exceptions that
+    fly.
+    """
+    try:
+        yield
+    except BadParameter as e:
+        if e.ctx is None:
+            e.ctx = ctx
+        if param is not None and e.param is None:
+            e.param = param
+        raise
+    except UsageError as e:
+        if e.ctx is None:
+            e.ctx = ctx
+        raise
+
+
+def iter_params_for_processing(invocation_order, declaration_order):
+    """Given a sequence of parameters in the order as should be considered
+    for processing and an iterable of parameters that exist, this returns
+    a list in the correct order as they should be processed.
+    """
+    def sort_key(item):
+        try:
+            idx = invocation_order.index(item)
+        except ValueError:
+            idx = float('inf')
+        return (not item.is_eager, idx)
+
+    return sorted(declaration_order, key=sort_key)
+
+
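+# Editor's note: a minimal sketch of the resulting processing order;
+# ``_P`` is a hypothetical stand-in exposing only ``is_eager``.  Eager
+# parameters sort first, and invoked parameters precede non-invoked ones
+# within each group.
+def _demo_param_order():
+    class _P(object):
+        def __init__(self, name, eager):
+            self.name, self.is_eager = name, eager
+    eager, late = _P('eager', True), _P('late', False)
+    order = iter_params_for_processing([late, eager], [late, eager])
+    assert [p.name for p in order] == ['eager', 'late']
+
+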
+class Context(object):
+    """The context is a special internal object that holds state relevant
+    for the script execution at every single level.  It's normally invisible
+    to commands unless they opt-in to getting access to it.
+
+    The context is useful as it can pass internal objects around and can
+    control special execution features such as reading data from
+    environment variables.
+
+    A context can be used as context manager in which case it will call
+    :meth:`close` on teardown.
+
+    .. versionadded:: 2.0
+       Added the `resilient_parsing`, `help_option_names`,
+       `token_normalize_func` parameters.
+
+    .. versionadded:: 3.0
+       Added the `allow_extra_args` and `allow_interspersed_args`
+       parameters.
+
+    .. versionadded:: 4.0
+       Added the `color`, `ignore_unknown_options`, and
+       `max_content_width` parameters.
+
+    :param command: the command class for this context.
+    :param parent: the parent context.
+    :param info_name: the info name for this invocation.  Generally this
+                      is the most descriptive name for the script or
+                      command.  For the toplevel script it is usually
+                      the name of the script, for commands below it it's
+                      the name of the command.
+    :param obj: an arbitrary object of user data.
+    :param auto_envvar_prefix: the prefix to use for automatic environment
+                               variables.  If this is `None` then reading
+                               from environment variables is disabled.  This
+                               does not affect manually set environment
+                               variables which are always read.
+    :param default_map: a dictionary (like object) with default values
+                        for parameters.
+    :param terminal_width: the width of the terminal.  The default is
+                           to inherit from the parent context.  If no
+                           context defines the terminal width then auto
+                           detection will be applied.
+    :param max_content_width: the maximum width for content rendered by
+                              Click (this currently only affects help
+                              pages).  This defaults to 80 characters if
+                              not overridden.  In other words: even if the
+                              terminal is larger than that, Click will not
+                              format things wider than 80 characters by
+                              default.  In addition to that, formatters might
+                              add some safety margin on the right.
+    :param resilient_parsing: if this flag is enabled then Click will
+                              parse without any interactivity or callback
+                              invocation.  Default values will also be
+                              ignored.  This is useful for implementing
+                              things such as completion support.
+    :param allow_extra_args: if this is set to `True` then extra arguments
+                             at the end will not raise an error and will be
+                             kept on the context.  The default is to inherit
+                             from the command.
+    :param allow_interspersed_args: if this is set to `False` then options
+                                    and arguments cannot be mixed.  The
+                                    default is to inherit from the command.
+    :param ignore_unknown_options: instructs click to ignore options it does
+                                   not know and keep them for later
+                                   processing.
+    :param help_option_names: optionally a list of strings that define how
+                              the default help parameter is named.  The
+                              default is ``['--help']``.
+    :param token_normalize_func: an optional function that is used to
+                                 normalize tokens (options, choices,
+                                 etc.).  This for instance can be used to
+                                 implement case insensitive behavior.
+    :param color: controls if the terminal supports ANSI colors or not.  The
+                  default is autodetection.  This is only needed if ANSI
+                  codes are used in texts that Click prints which is by
+                  default not the case.  This for instance would affect
+                  help output.
+    """
+
+    def __init__(self, command, parent=None, info_name=None, obj=None,
+                 auto_envvar_prefix=None, default_map=None,
+                 terminal_width=None, max_content_width=None,
+                 resilient_parsing=False, allow_extra_args=None,
+                 allow_interspersed_args=None,
+                 ignore_unknown_options=None, help_option_names=None,
+                 token_normalize_func=None, color=None):
+        #: the parent context or `None` if none exists.
+        self.parent = parent
+        #: the :class:`Command` for this context.
+        self.command = command
+        #: the descriptive information name
+        self.info_name = info_name
+        #: the parsed parameters except if the value is hidden in which
+        #: case it's not remembered.
+        self.params = {}
+        #: the leftover arguments.
+        self.args = []
+        #: protected arguments.  These are arguments that are prepended
+        #: to `args` when certain parsing scenarios are encountered but
+        #: must never be propagated to other arguments.  This is used
+        #: to implement nested parsing.
+        self.protected_args = []
+        if obj is None and parent is not None:
+            obj = parent.obj
+        #: the user object stored.
+        self.obj = obj
+        self._meta = getattr(parent, 'meta', {})
+
+        #: A dictionary (-like object) with defaults for parameters.
+        if default_map is None \
+           and parent is not None \
+           and parent.default_map is not None:
+            default_map = parent.default_map.get(info_name)
+        self.default_map = default_map
+
+        #: This flag indicates if a subcommand is going to be executed. A
+        #: group callback can use this information to figure out if it's
+        #: being executed directly or because the execution flow passes
+        #: onwards to a subcommand. By default it's None, but it can be
+        #: the name of the subcommand to execute.
+        #:
+        #: If chaining is enabled this will be set to ``'*'`` in case
+        #: any commands are executed.  It is however not possible to
+        #: figure out which ones.  If you require this knowledge you
+        #: should use a :func:`resultcallback`.
+        self.invoked_subcommand = None
+
+        if terminal_width is None and parent is not None:
+            terminal_width = parent.terminal_width
+        #: The width of the terminal (None is autodetection).
+        self.terminal_width = terminal_width
+
+        if max_content_width is None and parent is not None:
+            max_content_width = parent.max_content_width
+        #: The maximum width of formatted content (None implies a sensible
+        #: default which is 80 for most things).
+        self.max_content_width = max_content_width
+
+        if allow_extra_args is None:
+            allow_extra_args = command.allow_extra_args
+        #: Indicates if the context allows extra args or if it should
+        #: fail on parsing.
+        #:
+        #: .. versionadded:: 3.0
+        self.allow_extra_args = allow_extra_args
+
+        if allow_interspersed_args is None:
+            allow_interspersed_args = command.allow_interspersed_args
+        #: Indicates if the context allows mixing of arguments and
+        #: options or not.
+        #:
+        #: .. versionadded:: 3.0
+        self.allow_interspersed_args = allow_interspersed_args
+
+        if ignore_unknown_options is None:
+            ignore_unknown_options = command.ignore_unknown_options
+        #: Instructs click to ignore options that a command does not
+        #: understand and store them on the context for later
+        #: processing.  This is primarily useful for situations where you
+        #: want to call into external programs.  Generally this pattern is
+        #: strongly discouraged because it's not possible to losslessly
+        #: forward all arguments.
+        #:
+        #: .. versionadded:: 4.0
+        self.ignore_unknown_options = ignore_unknown_options
+
+        if help_option_names is None:
+            if parent is not None:
+                help_option_names = parent.help_option_names
+            else:
+                help_option_names = ['--help']
+
+        #: The names for the help options.
+        self.help_option_names = help_option_names
+
+        if token_normalize_func is None and parent is not None:
+            token_normalize_func = parent.token_normalize_func
+
+        #: An optional normalization function for tokens.  This is
+        #: options, choices, commands etc.
+        self.token_normalize_func = token_normalize_func
+
+        #: Indicates if resilient parsing is enabled.  In that case Click
+        #: will do its best to not cause any failures and default values
+        #: will be ignored. Useful for completion.
+        self.resilient_parsing = resilient_parsing
+
+        # If there is no envvar prefix yet, but the parent has one and
+        # the command on this level has a name, we can expand the envvar
+        # prefix automatically.
+        if auto_envvar_prefix is None:
+            if parent is not None \
+               and parent.auto_envvar_prefix is not None and \
+               self.info_name is not None:
+                auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix,
+                                                self.info_name.upper())
+        else:
+            auto_envvar_prefix = auto_envvar_prefix.upper()
+        self.auto_envvar_prefix = auto_envvar_prefix
+
+        if color is None and parent is not None:
+            color = parent.color
+
+        #: Controls if styling output is wanted or not.
+        self.color = color
+
+        self._close_callbacks = []
+        self._depth = 0
+
+    def __enter__(self):
+        self._depth += 1
+        push_context(self)
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self._depth -= 1
+        if self._depth == 0:
+            self.close()
+        pop_context()
+
+    @contextmanager
+    def scope(self, cleanup=True):
+        """This helper method can be used with the context object to promote
+        it to the current thread local (see :func:`get_current_context`).
+        The default behavior of this is to invoke the cleanup functions which
+        can be disabled by setting `cleanup` to `False`.  The cleanup
+        functions are typically used for things such as closing file handles.
+
+        If the cleanup is intended the context object can also be directly
+        used as a context manager.
+
+        Example usage::
+
+            with ctx.scope():
+                assert get_current_context() is ctx
+
+        This is equivalent::
+
+            with ctx:
+                assert get_current_context() is ctx
+
+        .. versionadded:: 5.0
+
+        :param cleanup: controls if the cleanup functions should be run or
+                        not.  The default is to run these functions.  In
+                        some situations the context only wants to be
+                        temporarily pushed in which case this can be disabled.
+                        Nested pushes automatically defer the cleanup.
+        """
+        if not cleanup:
+            self._depth += 1
+        try:
+            with self as rv:
+                yield rv
+        finally:
+            if not cleanup:
+                self._depth -= 1
+
+    @property
+    def meta(self):
+        """This is a dictionary which is shared with all the contexts
+        that are nested.  It exists so that click utilities can store some
+        state here if they need to.  It is however the responsibility of
+        that code to manage this dictionary well.
+
+        The keys are supposed to be unique dotted strings.  For instance
+        module paths are a good choice for it.  What is stored in there is
+        irrelevant for the operation of click.  However what is important is
+        that code that places data here adheres to the general semantics of
+        the system.
+
+        Example usage::
+
+            LANG_KEY = __name__ + '.lang'
+
+            def set_language(value):
+                ctx = get_current_context()
+                ctx.meta[LANG_KEY] = value
+
+            def get_language():
+                return get_current_context().meta.get(LANG_KEY, 'en_US')
+
+        .. versionadded:: 5.0
+        """
+        return self._meta
+
+    def make_formatter(self):
+        """Creates the formatter for the help and usage output."""
+        return HelpFormatter(width=self.terminal_width,
+                             max_width=self.max_content_width)
+
+    def call_on_close(self, f):
+        """This decorator remembers a function as callback that should be
+        executed when the context tears down.  This is most useful to bind
+        resource handling to the script execution.  For instance, file objects
+        opened by the :class:`File` type will register their close callbacks
+        here.
+
+        :param f: the function to execute on teardown.
+        """
+        self._close_callbacks.append(f)
+        return f
+
+    def close(self):
+        """Invokes all close callbacks."""
+        for cb in self._close_callbacks:
+            cb()
+        self._close_callbacks = []
+
+    @property
+    def command_path(self):
+        """The computed command path.  This is used for the ``usage``
+        information on the help page.  It's automatically created by
+        combining the info names of the chain of contexts to the root.
+        """
+        rv = ''
+        if self.info_name is not None:
+            rv = self.info_name
+        if self.parent is not None:
+            rv = self.parent.command_path + ' ' + rv
+        return rv.lstrip()
+
+    def find_root(self):
+        """Finds the outermost context."""
+        node = self
+        while node.parent is not None:
+            node = node.parent
+        return node
+
+    def find_object(self, object_type):
+        """Finds the closest object of a given type."""
+        node = self
+        while node is not None:
+            if isinstance(node.obj, object_type):
+                return node.obj
+            node = node.parent
+
+    def ensure_object(self, object_type):
+        """Like :meth:`find_object` but sets the innermost object to a
+        new instance of `object_type` if it does not exist.
+        """
+        rv = self.find_object(object_type)
+        if rv is None:
+            self.obj = rv = object_type()
+        return rv
+
+    def lookup_default(self, name):
+        """Looks up the default for a parameter name.  This by default
+        looks into the :attr:`default_map` if available.
+        """
+        if self.default_map is not None:
+            rv = self.default_map.get(name)
+            if callable(rv):
+                rv = rv()
+            return rv
+
+    def fail(self, message):
+        """Aborts the execution of the program with a specific error
+        message.
+
+        :param message: the error message to fail with.
+        """
+        raise UsageError(message, self)
+
+    def abort(self):
+        """Aborts the script."""
+        raise Abort()
+
+    def exit(self, code=0):
+        """Exits the application with a given exit code."""
+        raise Exit(code)
+
+    def get_usage(self):
+        """Helper method to get formatted usage string for the current
+        context and command.
+        """
+        return self.command.get_usage(self)
+
+    def get_help(self):
+        """Helper method to get formatted help page for the current
+        context and command.
+        """
+        return self.command.get_help(self)
+
+    def invoke(*args, **kwargs):
+        """Invokes a command callback in exactly the way it expects.  There
+        are two ways to invoke this method:
+
+        1.  the first argument can be a callback and all other arguments and
+            keyword arguments are forwarded directly to the function.
+        2.  the first argument is a click command object.  In that case all
+            arguments are forwarded as well but proper click parameters
+            (options and click arguments) must be keyword arguments and Click
+            will fill in defaults.
+
+        Note that before Click 3.2, keyword arguments were not properly
+        filled in, contrary to the intention of this code, and no context
+        was created.  For more information about this change and why it
+        was done in a bugfix release see :ref:`upgrade-to-3.2`.
+        """
+        self, callback = args[:2]
+        ctx = self
+
+        # It's also possible to invoke another command which might or
+        # might not have a callback.  In that case we also fill
+        # in defaults and make a new context for this command.
+        if isinstance(callback, Command):
+            other_cmd = callback
+            callback = other_cmd.callback
+            ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
+            if callback is None:
+                raise TypeError('The given command does not have a '
+                                'callback that can be invoked.')
+
+            for param in other_cmd.params:
+                if param.name not in kwargs and param.expose_value:
+                    kwargs[param.name] = param.get_default(ctx)
+
+        args = args[2:]
+        with augment_usage_errors(self):
+            with ctx:
+                return callback(*args, **kwargs)
+
+    def forward(*args, **kwargs):
+        """Similar to :meth:`invoke` but fills in default keyword
+        arguments from the current context if the other command expects
+        it.  This cannot invoke callbacks directly, only other commands.
+        """
+        self, cmd = args[:2]
+
+        # It's also possible to invoke another command which might or
+        # might not have a callback.
+        if not isinstance(cmd, Command):
+            raise TypeError('Callback is not a command.')
+
+        for param in self.params:
+            if param not in kwargs:
+                kwargs[param] = self.params[param]
+
+        return self.invoke(cmd, **kwargs)
+
+
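+# Editor's note: an illustrative sketch of ``Context.invoke`` filling in
+# defaults when invoking another command.  ``Command`` and ``Option`` are
+# defined further down in this module; the demo function and its names
+# are hypothetical.
+def _demo_invoke():
+    def greet_cb(name):
+        return 'hello %s' % name
+    greet = Command('greet', callback=greet_cb,
+                    params=[Option(['--name'], default='world')])
+    with Context(greet, info_name='greet') as ctx:
+        assert ctx.invoke(greet, name='click') == 'hello click'
+        assert ctx.invoke(greet) == 'hello world'  # default filled in
+
+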
+class BaseCommand(object):
+    """The base command implements the minimal API contract of commands.
+    Most code will never use this as it does not implement a lot of useful
+    functionality but it can act as the direct subclass of alternative
+    parsing methods that do not depend on the Click parser.
+
+    For instance, this can be used to bridge Click and other systems like
+    argparse or docopt.
+
+    Because base commands do not implement a lot of the API that other
+    parts of Click take for granted, they are not supported for all
+    operations.  For instance, they usually cannot be used with the
+    decorators and they have no built-in callback system.
+
+    .. versionchanged:: 2.0
+       Added the `context_settings` parameter.
+
+    :param name: the name of the command to use unless a group overrides it.
+    :param context_settings: an optional dictionary with defaults that are
+                             passed to the context object.
+    """
+    #: the default for the :attr:`Context.allow_extra_args` flag.
+    allow_extra_args = False
+    #: the default for the :attr:`Context.allow_interspersed_args` flag.
+    allow_interspersed_args = True
+    #: the default for the :attr:`Context.ignore_unknown_options` flag.
+    ignore_unknown_options = False
+
+    def __init__(self, name, context_settings=None):
+        #: the name the command thinks it has.  Upon registering a command
+        #: on a :class:`Group` the group will use this as the default
+        #: command name.  You should instead use the
+        #: :class:`Context`\'s :attr:`~Context.info_name` attribute.
+        self.name = name
+        if context_settings is None:
+            context_settings = {}
+        #: an optional dictionary with defaults passed to the context.
+        self.context_settings = context_settings
+
+    def get_usage(self, ctx):
+        raise NotImplementedError('Base commands cannot get usage')
+
+    def get_help(self, ctx):
+        raise NotImplementedError('Base commands cannot get help')
+
+    def make_context(self, info_name, args, parent=None, **extra):
+        """This function when given an info name and arguments will kick
+        off the parsing and create a new :class:`Context`.  It does not
+        invoke the actual command callback though.
+
+        :param info_name: the info name for this invocation.  Generally this
+                          is the most descriptive name for the script or
+                          command.  For the toplevel script it's usually
+                          the name of the script, for commands below it it's
+                          the name of the command.
+        :param args: the arguments to parse as list of strings.
+        :param parent: the parent context if available.
+        :param extra: extra keyword arguments forwarded to the context
+                      constructor.
+        """
+        for key, value in iteritems(self.context_settings):
+            if key not in extra:
+                extra[key] = value
+        ctx = Context(self, info_name=info_name, parent=parent, **extra)
+        with ctx.scope(cleanup=False):
+            self.parse_args(ctx, args)
+        return ctx
+
+    def parse_args(self, ctx, args):
+        """Given a context and a list of arguments this creates the parser
+        and parses the arguments, then modifies the context as necessary.
+        This is automatically invoked by :meth:`make_context`.
+        """
+        raise NotImplementedError('Base commands do not know how to parse '
+                                  'arguments.')
+
+    def invoke(self, ctx):
+        """Given a context, this invokes the command.  The default
+        implementation is raising a not implemented error.
+        """
+        raise NotImplementedError('Base commands are not invokable by default')
+
+    def main(self, args=None, prog_name=None, complete_var=None,
+             standalone_mode=True, **extra):
+        """This is the way to invoke a script with all the bells and
+        whistles as a command line application.  This will always terminate
+        the application after a call.  If this is not wanted, ``SystemExit``
+        needs to be caught.
+
+        This method is also available by directly calling the instance of
+        a :class:`Command`.
+
+        .. versionadded:: 3.0
+           Added the `standalone_mode` flag to control the standalone mode.
+
+        :param args: the arguments that should be used for parsing.  If not
+                     provided, ``sys.argv[1:]`` is used.
+        :param prog_name: the program name that should be used.  By default
+                          the program name is constructed by taking the file
+                          name from ``sys.argv[0]``.
+        :param complete_var: the environment variable that controls the
+                             bash completion support.  The default is
+                             ``"_<prog_name>_COMPLETE"`` with prog_name in
+                             uppercase.
+        :param standalone_mode: the default behavior is to invoke the script
+                                in standalone mode.  Click will then
+                                handle exceptions and convert them into
+                                error messages and the function will never
+                                return but shut down the interpreter.  If
+                                this is set to `False` they will be
+                                propagated to the caller and the return
+                                value of this function is the return value
+                                of :meth:`invoke`.
+        :param extra: extra keyword arguments are forwarded to the context
+                      constructor.  See :class:`Context` for more information.
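+
+        Example (a sketch; ``cli`` stands in for any command object and
+        ``--verbose`` for one of its options)::
+
+            # With standalone_mode=False the callback's return value is
+            # handed back and exceptions propagate to the caller.
+            rv = cli.main(['--verbose'], standalone_mode=False)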
+        """
+        # If we are in Python 3, we will verify that the environment is
+        # sane at this point or reject further execution to avoid a
+        # broken script.
+        if not PY2:
+            _verify_python3_env()
+        else:
+            _check_for_unicode_literals()
+
+        if args is None:
+            args = get_os_args()
+        else:
+            args = list(args)
+
+        if prog_name is None:
+            prog_name = make_str(os.path.basename(
+                sys.argv and sys.argv[0] or __file__))
+
+        # Hook for the Bash completion.  This only activates if the Bash
+        # completion is actually enabled, otherwise this is quite a fast
+        # noop.
+        _bashcomplete(self, prog_name, complete_var)
+
+        try:
+            try:
+                with self.make_context(prog_name, args, **extra) as ctx:
+                    rv = self.invoke(ctx)
+                    if not standalone_mode:
+                        return rv
+                    # it's not safe to `ctx.exit(rv)` here!
+                    # note that `rv` may actually contain data like "1" which
+                    # has obvious effects
+                    # more subtle case: `rv=[None, None]` can come out of
+                    # chained commands which all returned `None` -- so it's not
+                    # even always obvious that `rv` indicates success/failure
+                    # by its truthiness/falsiness
+                    ctx.exit()
+            except (EOFError, KeyboardInterrupt):
+                echo(file=sys.stderr)
+                raise Abort()
+            except ClickException as e:
+                if not standalone_mode:
+                    raise
+                e.show()
+                sys.exit(e.exit_code)
+            except IOError as e:
+                if e.errno == errno.EPIPE:
+                    sys.stdout = PacifyFlushWrapper(sys.stdout)
+                    sys.stderr = PacifyFlushWrapper(sys.stderr)
+                    sys.exit(1)
+                else:
+                    raise
+        except Exit as e:
+            if standalone_mode:
+                sys.exit(e.exit_code)
+            else:
+                # in non-standalone mode, return the exit code
+                # note that this is only reached if `self.invoke` above raises
+                # an Exit explicitly -- thus bypassing the check there which
+                # would return its result
+                # the results of non-standalone execution may therefore be
+                # somewhat ambiguous: if there are codepaths which lead to
+                # `ctx.exit(1)` and to `return 1`, the caller won't be able to
+                # tell the difference between the two
+                return e.exit_code
+        except Abort:
+            if not standalone_mode:
+                raise
+            echo('Aborted!', file=sys.stderr)
+            sys.exit(1)
+
+    def __call__(self, *args, **kwargs):
+        """Alias for :meth:`main`."""
+        return self.main(*args, **kwargs)
+
+
+class Command(BaseCommand):
+    """Commands are the basic building block of command line interfaces in
+    Click.  A basic command handles command line parsing and might dispatch
+    more parsing to commands nested below it.
+
+    .. versionchanged:: 2.0
+       Added the `context_settings` parameter.
+
+    :param name: the name of the command to use unless a group overrides it.
+    :param context_settings: an optional dictionary with defaults that are
+                             passed to the context object.
+    :param callback: the callback to invoke.  This is optional.
+    :param params: the parameters to register with this command.  This can
+                   be either :class:`Option` or :class:`Argument` objects.
+    :param help: the help string to use for this command.
+    :param epilog: like the help string but it's printed at the end of the
+                   help page after everything else.
+    :param short_help: the short help to use for this command.  This is
+                       shown on the command listing of the parent command.
+    :param add_help_option: by default each command registers a ``--help``
+                            option.  This can be disabled by this parameter.
+    :param hidden: hide this command from help outputs.
+    :param deprecated: issues a message indicating that the command is
+                       deprecated.
+    """
+
+    def __init__(self, name, context_settings=None, callback=None,
+                 params=None, help=None, epilog=None, short_help=None,
+                 options_metavar='[OPTIONS]', add_help_option=True,
+                 hidden=False, deprecated=False):
+        BaseCommand.__init__(self, name, context_settings)
+        #: the callback to execute when the command fires.  This might be
+        #: `None` in which case nothing happens.
+        self.callback = callback
+        #: the list of parameters for this command in the order they
+        #: should show up in the help page and execute.  Eager parameters
+        #: will automatically be handled before non eager ones.
+        self.params = params or []
+        # if a form feed (page break) is found in the help text, truncate help
+        # text to the content preceding the first form feed
+        if help and '\f' in help:
+            help = help.split('\f', 1)[0]
+        self.help = help
+        self.epilog = epilog
+        self.options_metavar = options_metavar
+        self.short_help = short_help
+        self.add_help_option = add_help_option
+        self.hidden = hidden
+        self.deprecated = deprecated
+
+    def get_usage(self, ctx):
+        formatter = ctx.make_formatter()
+        self.format_usage(ctx, formatter)
+        return formatter.getvalue().rstrip('\n')
+
+    def get_params(self, ctx):
+        rv = self.params
+        help_option = self.get_help_option(ctx)
+        if help_option is not None:
+            rv = rv + [help_option]
+        return rv
+
+    def format_usage(self, ctx, formatter):
+        """Writes the usage line into the formatter."""
+        pieces = self.collect_usage_pieces(ctx)
+        formatter.write_usage(ctx.command_path, ' '.join(pieces))
+
+    def collect_usage_pieces(self, ctx):
+        """Returns all the pieces that go into the usage line and returns
+        it as a list of strings.
+        """
+        rv = [self.options_metavar]
+        for param in self.get_params(ctx):
+            rv.extend(param.get_usage_pieces(ctx))
+        return rv
+
+    def get_help_option_names(self, ctx):
+        """Returns the names for the help option."""
+        all_names = set(ctx.help_option_names)
+        for param in self.params:
+            all_names.difference_update(param.opts)
+            all_names.difference_update(param.secondary_opts)
+        return all_names
+
+    def get_help_option(self, ctx):
+        """Returns the help option object."""
+        help_options = self.get_help_option_names(ctx)
+        if not help_options or not self.add_help_option:
+            return
+
+        def show_help(ctx, param, value):
+            if value and not ctx.resilient_parsing:
+                echo(ctx.get_help(), color=ctx.color)
+                ctx.exit()
+        return Option(help_options, is_flag=True,
+                      is_eager=True, expose_value=False,
+                      callback=show_help,
+                      help='Show this message and exit.')
+
+    def make_parser(self, ctx):
+        """Creates the underlying option parser for this command."""
+        parser = OptionParser(ctx)
+        for param in self.get_params(ctx):
+            param.add_to_parser(parser, ctx)
+        return parser
+
+    def get_help(self, ctx):
+        """Formats the help into a string and returns it.  This creates a
+        formatter and will call into the following formatting methods:
+        """
+        formatter = ctx.make_formatter()
+        self.format_help(ctx, formatter)
+        return formatter.getvalue().rstrip('\n')
+
+    def get_short_help_str(self, limit=45):
+        """Gets short help for the command or makes it by shortening the long help string."""
+        return self.short_help or self.help and make_default_short_help(self.help, limit) or ''
+
+    def format_help(self, ctx, formatter):
+        """Writes the help into the formatter if it exists.
+
+        This calls into the following methods:
+
+        -   :meth:`format_usage`
+        -   :meth:`format_help_text`
+        -   :meth:`format_options`
+        -   :meth:`format_epilog`
+        """
+        self.format_usage(ctx, formatter)
+        self.format_help_text(ctx, formatter)
+        self.format_options(ctx, formatter)
+        self.format_epilog(ctx, formatter)
+
+    def format_help_text(self, ctx, formatter):
+        """Writes the help text to the formatter if it exists."""
+        if self.help:
+            formatter.write_paragraph()
+            with formatter.indentation():
+                help_text = self.help
+                if self.deprecated:
+                    help_text += DEPRECATED_HELP_NOTICE
+                formatter.write_text(help_text)
+        elif self.deprecated:
+            formatter.write_paragraph()
+            with formatter.indentation():
+                formatter.write_text(DEPRECATED_HELP_NOTICE)
+
+    def format_options(self, ctx, formatter):
+        """Writes all the options into the formatter if they exist."""
+        opts = []
+        for param in self.get_params(ctx):
+            rv = param.get_help_record(ctx)
+            if rv is not None:
+                opts.append(rv)
+
+        if opts:
+            with formatter.section('Options'):
+                formatter.write_dl(opts)
+
+    def format_epilog(self, ctx, formatter):
+        """Writes the epilog into the formatter if it exists."""
+        if self.epilog:
+            formatter.write_paragraph()
+            with formatter.indentation():
+                formatter.write_text(self.epilog)
+
+    def parse_args(self, ctx, args):
+        parser = self.make_parser(ctx)
+        opts, args, param_order = parser.parse_args(args=args)
+
+        for param in iter_params_for_processing(
+                param_order, self.get_params(ctx)):
+            value, args = param.handle_parse_result(ctx, opts, args)
+
+        if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
+            ctx.fail('Got unexpected extra argument%s (%s)'
+                     % (len(args) != 1 and 's' or '',
+                        ' '.join(map(make_str, args))))
+
+        ctx.args = args
+        return args
+
+    def invoke(self, ctx):
+        """Given a context, this invokes the attached callback (if it exists)
+        in the right way.
+        """
+        _maybe_show_deprecated_notice(self)
+        if self.callback is not None:
+            return ctx.invoke(self.callback, **ctx.params)
+
+
+class MultiCommand(Command):
+    """A multi command is the basic implementation of a command that
+    dispatches to subcommands.  The most common version is the
+    :class:`Group`.
+
+    :param invoke_without_command: this controls how the multi command itself
+                                   is invoked.  By default it's only invoked
+                                   if a subcommand is provided.
+    :param no_args_is_help: this controls what happens if no arguments are
+                            provided.  It defaults to the inverse of
+                            `invoke_without_command`.  If enabled, ``--help``
+                            is added as the argument when no arguments are
+                            passed.
+    :param subcommand_metavar: the string that is used in the documentation
+                               to indicate the subcommand place.
+    :param chain: if this is set to `True` chaining of multiple subcommands
+                  is enabled.  This restricts the form of commands in that
+                  they cannot have optional arguments but it allows
+                  multiple commands to be chained together (see the
+                  example below).
+    :param result_callback: the result callback to attach to this multi
+                            command.
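+
+    Example of chaining (a sketch; the subcommand names are made up)::
+
+        @click.group(chain=True)
+        def cli():
+            pass
+
+        # With chain=True an invocation such as ``cli build upload``
+        # runs both subcommands in sequence.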
+    """
+    allow_extra_args = True
+    allow_interspersed_args = False
+
+    def __init__(self, name=None, invoke_without_command=False,
+                 no_args_is_help=None, subcommand_metavar=None,
+                 chain=False, result_callback=None, **attrs):
+        Command.__init__(self, name, **attrs)
+        if no_args_is_help is None:
+            no_args_is_help = not invoke_without_command
+        self.no_args_is_help = no_args_is_help
+        self.invoke_without_command = invoke_without_command
+        if subcommand_metavar is None:
+            if chain:
+                subcommand_metavar = SUBCOMMANDS_METAVAR
+            else:
+                subcommand_metavar = SUBCOMMAND_METAVAR
+        self.subcommand_metavar = subcommand_metavar
+        self.chain = chain
+        #: The result callback that is stored.  This can be set or
+        #: overridden with the :func:`resultcallback` decorator.
+        self.result_callback = result_callback
+
+        if self.chain:
+            for param in self.params:
+                if isinstance(param, Argument) and not param.required:
+                    raise RuntimeError('Multi commands in chain mode cannot '
+                                       'have optional arguments.')
+
+    def collect_usage_pieces(self, ctx):
+        rv = Command.collect_usage_pieces(self, ctx)
+        rv.append(self.subcommand_metavar)
+        return rv
+
+    def format_options(self, ctx, formatter):
+        Command.format_options(self, ctx, formatter)
+        self.format_commands(ctx, formatter)
+
+    def resultcallback(self, replace=False):
+        """Adds a result callback to the chain command.  By default if a
+        result callback is already registered this will chain them but
+        this can be disabled with the `replace` parameter.  The result
+        callback is invoked with the return value of the subcommand
+        (or the list of return values from all subcommands if chaining
+        is enabled) as well as the parameters as they would be passed
+        to the main callback.
+
+        Example::
+
+            @click.group()
+            @click.option('-i', '--input', default=23)
+            def cli(input):
+                return 42
+
+            @cli.resultcallback()
+            def process_result(result, input):
+                return result + input
+
+        .. versionadded:: 3.0
+
+        :param replace: if set to `True` an already existing result
+                        callback will be removed.
+        """
+        def decorator(f):
+            old_callback = self.result_callback
+            if old_callback is None or replace:
+                self.result_callback = f
+                return f
+            def function(__value, *args, **kwargs):
+                return f(old_callback(__value, *args, **kwargs),
+                         *args, **kwargs)
+            self.result_callback = rv = update_wrapper(function, f)
+            return rv
+        return decorator
+
+    def format_commands(self, ctx, formatter):
+        """Extra format methods for multi methods that adds all the commands
+        after the options.
+        """
+        commands = []
+        for subcommand in self.list_commands(ctx):
+            cmd = self.get_command(ctx, subcommand)
+            # The listing claimed a command that doesn't resolve; ignore it.
+            if cmd is None:
+                continue
+            if cmd.hidden:
+                continue
+
+            commands.append((subcommand, cmd))
+
+        # allow for 3 times the default spacing
+        if len(commands):
+            limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
+
+            rows = []
+            for subcommand, cmd in commands:
+                help = cmd.get_short_help_str(limit)
+                rows.append((subcommand, help))
+
+            if rows:
+                with formatter.section('Commands'):
+                    formatter.write_dl(rows)
+
+    def parse_args(self, ctx, args):
+        if not args and self.no_args_is_help and not ctx.resilient_parsing:
+            echo(ctx.get_help(), color=ctx.color)
+            ctx.exit()
+
+        rest = Command.parse_args(self, ctx, args)
+        if self.chain:
+            ctx.protected_args = rest
+            ctx.args = []
+        elif rest:
+            ctx.protected_args, ctx.args = rest[:1], rest[1:]
+
+        return ctx.args
+
+    def invoke(self, ctx):
+        def _process_result(value):
+            if self.result_callback is not None:
+                value = ctx.invoke(self.result_callback, value,
+                                   **ctx.params)
+            return value
+
+        if not ctx.protected_args:
+            # If we are invoked without command the chain flag controls
+            # how this happens.  If we are not in chain mode, the return
+            # value here is the return value of the command.
+            # If however we are in chain mode, the return value is the
+            # return value of the result processor invoked with an empty
+            # list (which means that no subcommand actually was executed).
+            if self.invoke_without_command:
+                if not self.chain:
+                    return Command.invoke(self, ctx)
+                with ctx:
+                    Command.invoke(self, ctx)
+                    return _process_result([])
+            ctx.fail('Missing command.')
+
+        # Fetch args back out
+        args = ctx.protected_args + ctx.args
+        ctx.args = []
+        ctx.protected_args = []
+
+        # If we're not in chain mode, we only allow the invocation of a
+        # single command but we also inform the current context about the
+        # name of the command to invoke.
+        if not self.chain:
+            # Make sure the context is entered so we do not clean up
+            # resources until the result processor has worked.
+            with ctx:
+                cmd_name, cmd, args = self.resolve_command(ctx, args)
+                ctx.invoked_subcommand = cmd_name
+                Command.invoke(self, ctx)
+                sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
+                with sub_ctx:
+                    return _process_result(sub_ctx.command.invoke(sub_ctx))
+
+        # In chain mode we create the contexts step by step, but after the
+        # base command has been invoked.  Because at that point we do not
+        # know the subcommands yet, the invoked subcommand attribute is
+        # set to ``*`` to inform the command that subcommands are executed
+        # but nothing else.
+        with ctx:
+            ctx.invoked_subcommand = args and '*' or None
+            Command.invoke(self, ctx)
+
+            # Otherwise we make every single context and invoke them in a
+            # chain.  In that case the return value to the result processor
+            # is the list of all invoked subcommand's results.
+            contexts = []
+            while args:
+                cmd_name, cmd, args = self.resolve_command(ctx, args)
+                sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,
+                                           allow_extra_args=True,
+                                           allow_interspersed_args=False)
+                contexts.append(sub_ctx)
+                args, sub_ctx.args = sub_ctx.args, []
+
+            rv = []
+            for sub_ctx in contexts:
+                with sub_ctx:
+                    rv.append(sub_ctx.command.invoke(sub_ctx))
+            return _process_result(rv)
+
+    def resolve_command(self, ctx, args):
+        cmd_name = make_str(args[0])
+        original_cmd_name = cmd_name
+
+        # Get the command
+        cmd = self.get_command(ctx, cmd_name)
+
+        # If we can't find the command but there is a normalization
+        # function available, we try with that one.
+        if cmd is None and ctx.token_normalize_func is not None:
+            cmd_name = ctx.token_normalize_func(cmd_name)
+            cmd = self.get_command(ctx, cmd_name)
+
+        # If we don't find the command we want to show an error message
+        # to the user that it was not provided.  However, there is
+        # something else we should do: if the first argument looks like
+        # an option we want to kick off parsing again for arguments to
+        # resolve things like --help which now should go to the main
+        # place.
+        if cmd is None and not ctx.resilient_parsing:
+            if split_opt(cmd_name)[0]:
+                self.parse_args(ctx, ctx.args)
+            ctx.fail('No such command "%s".' % original_cmd_name)
+
+        return cmd_name, cmd, args[1:]
+
+    def get_command(self, ctx, cmd_name):
+        """Given a context and a command name, this returns a
+        :class:`Command` object if it exists or returns `None`.
+        """
+        raise NotImplementedError()
+
+    def list_commands(self, ctx):
+        """Returns a list of subcommand names in the order they should
+        appear.
+        """
+        return []
+
+
+class Group(MultiCommand):
+    """A group allows a command to have subcommands attached.  This is the
+    most common way to implement nesting in Click.
+
+    :param commands: a dictionary of commands.
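+
+    Example (a minimal sketch)::
+
+        @click.group()
+        def cli():
+            pass
+
+        @cli.command()
+        def init():
+            click.echo('initialized')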
+    """
+
+    def __init__(self, name=None, commands=None, **attrs):
+        MultiCommand.__init__(self, name, **attrs)
+        #: the registered subcommands by their exported names.
+        self.commands = commands or {}
+
+    def add_command(self, cmd, name=None):
+        """Registers another :class:`Command` with this group.  If the name
+        is not provided, the name of the command is used.
+        """
+        name = name or cmd.name
+        if name is None:
+            raise TypeError('Command has no name.')
+        _check_multicommand(self, name, cmd, register=True)
+        self.commands[name] = cmd
+
+    def command(self, *args, **kwargs):
+        """A shortcut decorator for declaring and attaching a command to
+        the group.  This takes the same arguments as :func:`command` but
+        immediately registers the created command with this instance by
+        calling into :meth:`add_command`.
+        """
+        def decorator(f):
+            cmd = command(*args, **kwargs)(f)
+            self.add_command(cmd)
+            return cmd
+        return decorator
+
+    def group(self, *args, **kwargs):
+        """A shortcut decorator for declaring and attaching a group to
+        the group.  This takes the same arguments as :func:`group` but
+        immediately registers the created command with this instance by
+        calling into :meth:`add_command`.
+        """
+        def decorator(f):
+            cmd = group(*args, **kwargs)(f)
+            self.add_command(cmd)
+            return cmd
+        return decorator
+
+    def get_command(self, ctx, cmd_name):
+        return self.commands.get(cmd_name)
+
+    def list_commands(self, ctx):
+        return sorted(self.commands)
+
+
+class CommandCollection(MultiCommand):
+    """A command collection is a multi command that merges multiple multi
+    commands together into one.  This is a straightforward implementation
+    that accepts a list of different multi commands as sources and
+    provides all the commands for each of them.
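+
+    Example (a sketch; ``group1`` and ``group2`` are assumed to be
+    existing :class:`Group` instances)::
+
+        cli = CommandCollection(sources=[group1, group2])
+
+        if __name__ == '__main__':
+            cli()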
+    """
+
+    def __init__(self, name=None, sources=None, **attrs):
+        MultiCommand.__init__(self, name, **attrs)
+        #: The list of registered multi commands.
+        self.sources = sources or []
+
+    def add_source(self, multi_cmd):
+        """Adds a new multi command to the chain dispatcher."""
+        self.sources.append(multi_cmd)
+
+    def get_command(self, ctx, cmd_name):
+        for source in self.sources:
+            rv = source.get_command(ctx, cmd_name)
+            if rv is not None:
+                if self.chain:
+                    _check_multicommand(self, cmd_name, rv)
+                return rv
+
+    def list_commands(self, ctx):
+        rv = set()
+        for source in self.sources:
+            rv.update(source.list_commands(ctx))
+        return sorted(rv)
+
+
+class Parameter(object):
+    r"""A parameter to a command comes in two versions: they are either
+    :class:`Option`\s or :class:`Argument`\s.  Other subclasses are currently
+    not supported by design as some of the internals for parsing are
+    intentionally not finalized.
+
+    Some settings are supported by both options and arguments.
+
+    .. versionchanged:: 2.0
+       Changed signature for parameter callback to also be passed the
+       parameter.  In Click 2.0, the old callback format will still work,
+       but it will raise a warning to give you a chance to migrate the
+       code more easily.
+
+    :param param_decls: the parameter declarations for this option or
+                        argument.  This is a list of flags or argument
+                        names.
+    :param type: the type that should be used.  Either a :class:`ParamType`
+                 or a Python type.  The latter is converted into the former
+                 automatically if supported.
+    :param required: controls if this is optional or not.
+    :param default: the default value if omitted.  This can also be a callable,
+                    in which case it's invoked when the default is needed
+                    without any arguments.
+    :param callback: a callback that should be executed after the parameter
+                     was matched.  This is called as ``fn(ctx, param,
+                     value)`` and needs to return the value.  Before Click
+                     2.0, the signature was ``(ctx, value)``.
+    :param nargs: the number of arguments to match.  If not ``1`` the return
+                  value is a tuple instead of single value.  The default for
+                  nargs is ``1`` (except if the type is a tuple, then it's
+                  the arity of the tuple).
+    :param metavar: how the value is represented in the help page.
+    :param expose_value: if this is `True` then the value is passed onwards
+                         to the command callback and stored on the context,
+                         otherwise it's skipped.
+    :param is_eager: eager values are processed before non eager ones.  This
+                     should not be set for arguments or it will invert the
+                     order of processing.
+    :param envvar: a string or list of strings that are environment variables
+                   that should be checked.
+    """
+    param_type_name = 'parameter'
+
+    def __init__(self, param_decls=None, type=None, required=False,
+                 default=None, callback=None, nargs=None, metavar=None,
+                 expose_value=True, is_eager=False, envvar=None,
+                 autocompletion=None):
+        self.name, self.opts, self.secondary_opts = \
+            self._parse_decls(param_decls or (), expose_value)
+
+        self.type = convert_type(type, default)
+
+        # Default nargs to what the type tells us if we have that
+        # information available.
+        if nargs is None:
+            if self.type.is_composite:
+                nargs = self.type.arity
+            else:
+                nargs = 1
+
+        self.required = required
+        self.callback = callback
+        self.nargs = nargs
+        self.multiple = False
+        self.expose_value = expose_value
+        self.default = default
+        self.is_eager = is_eager
+        self.metavar = metavar
+        self.envvar = envvar
+        self.autocompletion = autocompletion
+
+    @property
+    def human_readable_name(self):
+        """Returns the human readable name of this parameter.  This is the
+        same as the name for options, but the metavar for arguments.
+        """
+        return self.name
+
+    def make_metavar(self):
+        if self.metavar is not None:
+            return self.metavar
+        metavar = self.type.get_metavar(self)
+        if metavar is None:
+            metavar = self.type.name.upper()
+        if self.nargs != 1:
+            metavar += '...'
+        return metavar
+
+    def get_default(self, ctx):
+        """Given a context variable this calculates the default value."""
+        # Otherwise go with the regular default.
+        if callable(self.default):
+            rv = self.default()
+        else:
+            rv = self.default
+        return self.type_cast_value(ctx, rv)
+
+    def add_to_parser(self, parser, ctx):
+        pass
+
+    def consume_value(self, ctx, opts):
+        value = opts.get(self.name)
+        if value is None:
+            value = self.value_from_envvar(ctx)
+        if value is None:
+            value = ctx.lookup_default(self.name)
+        return value
+
+    def type_cast_value(self, ctx, value):
+        """Given a value this runs it properly through the type system.
+        This automatically handles things like `nargs` and `multiple` as
+        well as composite types.
+        """
+        if self.type.is_composite:
+            if self.nargs <= 1:
+                raise TypeError('Attempted to invoke composite type '
+                                'but nargs has been set to %s.  This is '
+                                'not supported; nargs needs to be set to '
+                                'a fixed value > 1.' % self.nargs)
+            if self.multiple:
+                return tuple(self.type(x or (), self, ctx) for x in value or ())
+            return self.type(value or (), self, ctx)
+
+        def _convert(value, level):
+            if level == 0:
+                return self.type(value, self, ctx)
+            return tuple(_convert(x, level - 1) for x in value or ())
+        return _convert(value, (self.nargs != 1) + bool(self.multiple))
+
+    def process_value(self, ctx, value):
+        """Given a value and context this runs the logic to convert the
+        value as necessary.
+        """
+        # If the value we were given is None we do nothing.  This way
+        # code that calls this can easily figure out if something was
+        # not provided.  Otherwise it would be converted into an empty
+        # tuple for multiple invocations which is inconvenient.
+        if value is not None:
+            return self.type_cast_value(ctx, value)
+
+    def value_is_missing(self, value):
+        if value is None:
+            return True
+        if (self.nargs != 1 or self.multiple) and value == ():
+            return True
+        return False
+
+    def full_process_value(self, ctx, value):
+        value = self.process_value(ctx, value)
+
+        if value is None and not ctx.resilient_parsing:
+            value = self.get_default(ctx)
+
+        if self.required and self.value_is_missing(value):
+            raise MissingParameter(ctx=ctx, param=self)
+
+        return value
+
+    def resolve_envvar_value(self, ctx):
+        if self.envvar is None:
+            return
+        if isinstance(self.envvar, (tuple, list)):
+            for envvar in self.envvar:
+                rv = os.environ.get(envvar)
+                if rv is not None:
+                    return rv
+        else:
+            return os.environ.get(self.envvar)
+
+    def value_from_envvar(self, ctx):
+        rv = self.resolve_envvar_value(ctx)
+        if rv is not None and self.nargs != 1:
+            rv = self.type.split_envvar_value(rv)
+        return rv
+
+    def handle_parse_result(self, ctx, opts, args):
+        with augment_usage_errors(ctx, param=self):
+            value = self.consume_value(ctx, opts)
+            try:
+                value = self.full_process_value(ctx, value)
+            except Exception:
+                if not ctx.resilient_parsing:
+                    raise
+                value = None
+            if self.callback is not None:
+                try:
+                    value = invoke_param_callback(
+                        self.callback, ctx, self, value)
+                except Exception:
+                    if not ctx.resilient_parsing:
+                        raise
+
+        if self.expose_value:
+            ctx.params[self.name] = value
+        return value, args
+
+    def get_help_record(self, ctx):
+        pass
+
+    def get_usage_pieces(self, ctx):
+        return []
+
+    def get_error_hint(self, ctx):
+        """Get a stringified version of the param for use in error messages to
+        indicate which param caused the error.
+        """
+        hint_list = self.opts or [self.human_readable_name]
+        return ' / '.join('"%s"' % x for x in hint_list)
+
+
+class Option(Parameter):
+    """Options are usually optional values on the command line and
+    have some extra features that arguments don't have.
+
+    All other parameters are passed onwards to the parameter constructor.
+
+    :param show_default: controls if the default value should be shown on the
+                         help page. Normally, defaults are not shown. If this
+                         value is a string, it shows the string instead of the
+                         value. This is particularly useful for dynamic options.
+    :param show_envvar: controls if an environment variable should be shown on
+                        the help page.  Normally, environment variables
+                        are not shown.
+    :param prompt: if set to `True` or a non empty string then the user will be
+                   prompted for input.  If set to `True` the prompt will be the
+                   option name capitalized.
+    :param confirmation_prompt: if set then the value will need to be confirmed
+                                if it was prompted for.
+    :param hide_input: if this is `True` then the input on the prompt will be
+                       hidden from the user.  This is useful for password
+                       input.
+    :param is_flag: forces this option to act as a flag.  The default is
+                    auto detection.
+    :param flag_value: which value should be used for this flag if it's
+                       enabled.  This is set to a boolean automatically if
+                       the option string contains a slash to mark two options.
+    :param multiple: if this is set to `True` then the argument is accepted
+                     multiple times and recorded.  This is similar to ``nargs``
+                     in how it works but supports an arbitrary number of
+                     arguments.
+    :param count: this flag makes an option increment an integer.
+    :param allow_from_autoenv: if this is enabled then the value of this
+                               parameter will be pulled from an environment
+                               variable in case a prefix is defined on the
+                               context.
+    :param help: the help string.
+    :param hidden: hide this option from help outputs.
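+
+    Example (a minimal sketch of common option styles)::
+
+        @click.command()
+        @click.option('--count', default=1, show_default=True)
+        @click.option('--verbose/--quiet', default=False)
+        @click.option('--password', prompt=True, hide_input=True,
+                      confirmation_prompt=True)
+        def cli(count, verbose, password):
+            pass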
+    """
+    param_type_name = 'option'
+
+    def __init__(self, param_decls=None, show_default=False,
+                 prompt=False, confirmation_prompt=False,
+                 hide_input=False, is_flag=None, flag_value=None,
+                 multiple=False, count=False, allow_from_autoenv=True,
+                 type=None, help=None, hidden=False, show_choices=True,
+                 show_envvar=False, **attrs):
+        default_is_missing = attrs.get('default', _missing) is _missing
+        Parameter.__init__(self, param_decls, type=type, **attrs)
+
+        if prompt is True:
+            prompt_text = self.name.replace('_', ' ').capitalize()
+        elif prompt is False:
+            prompt_text = None
+        else:
+            prompt_text = prompt
+        self.prompt = prompt_text
+        self.confirmation_prompt = confirmation_prompt
+        self.hide_input = hide_input
+        self.hidden = hidden
+
+        # Flags
+        if is_flag is None:
+            if flag_value is not None:
+                is_flag = True
+            else:
+                is_flag = bool(self.secondary_opts)
+        if is_flag and default_is_missing:
+            self.default = False
+        if flag_value is None:
+            flag_value = not self.default
+        self.is_flag = is_flag
+        self.flag_value = flag_value
+        if self.is_flag and isinstance(self.flag_value, bool) \
+           and type is None:
+            self.type = BOOL
+            self.is_bool_flag = True
+        else:
+            self.is_bool_flag = False
+
+        # Counting
+        self.count = count
+        if count:
+            if type is None:
+                self.type = IntRange(min=0)
+            if default_is_missing:
+                self.default = 0
+
+        self.multiple = multiple
+        self.allow_from_autoenv = allow_from_autoenv
+        self.help = help
+        self.show_default = show_default
+        self.show_choices = show_choices
+        self.show_envvar = show_envvar
+
+        # Sanity check for stuff we don't support
+        if __debug__:
+            if self.nargs < 0:
+                raise TypeError('Options cannot have nargs < 0')
+            if self.prompt and self.is_flag and not self.is_bool_flag:
+                raise TypeError('Cannot prompt for flags that are not bools.')
+            if not self.is_bool_flag and self.secondary_opts:
+                raise TypeError('Got secondary option for non boolean flag.')
+            if self.is_bool_flag and self.hide_input \
+               and self.prompt is not None:
+                raise TypeError('Hidden input does not work with boolean '
+                                'flag prompts.')
+            if self.count:
+                if self.multiple:
+                    raise TypeError('Options cannot be multiple and count '
+                                    'at the same time.')
+                elif self.is_flag:
+                    raise TypeError('Options cannot be count and flags at '
+                                    'the same time.')
+
+    def _parse_decls(self, decls, expose_value):
+        opts = []
+        secondary_opts = []
+        name = None
+        possible_names = []
+
+        for decl in decls:
+            if isidentifier(decl):
+                if name is not None:
+                    raise TypeError('Name defined twice')
+                name = decl
+            else:
+                split_char = decl[:1] == '/' and ';' or '/'
+                if split_char in decl:
+                    first, second = decl.split(split_char, 1)
+                    first = first.rstrip()
+                    if first:
+                        possible_names.append(split_opt(first))
+                        opts.append(first)
+                    second = second.lstrip()
+                    if second:
+                        secondary_opts.append(second)
+                else:
+                    possible_names.append(split_opt(decl))
+                    opts.append(decl)
+
+        if name is None and possible_names:
+            possible_names.sort(key=lambda x: -len(x[0]))  # group long options first
+            name = possible_names[0][1].replace('-', '_').lower()
+            if not isidentifier(name):
+                name = None
+
+        if name is None:
+            if not expose_value:
+                return None, opts, secondary_opts
+            raise TypeError('Could not determine name for option')
+
+        if not opts and not secondary_opts:
+            raise TypeError('No options defined but a name was passed (%s). '
+                            'Did you mean to declare an argument instead '
+                            'of an option?' % name)
+
+        return name, opts, secondary_opts
+
+    def add_to_parser(self, parser, ctx):
+        kwargs = {
+            'dest': self.name,
+            'nargs': self.nargs,
+            'obj': self,
+        }
+
+        if self.multiple:
+            action = 'append'
+        elif self.count:
+            action = 'count'
+        else:
+            action = 'store'
+
+        if self.is_flag:
+            kwargs.pop('nargs', None)
+            if self.is_bool_flag and self.secondary_opts:
+                parser.add_option(self.opts, action=action + '_const',
+                                  const=True, **kwargs)
+                parser.add_option(self.secondary_opts, action=action +
+                                  '_const', const=False, **kwargs)
+            else:
+                parser.add_option(self.opts, action=action + '_const',
+                                  const=self.flag_value,
+                                  **kwargs)
+        else:
+            kwargs['action'] = action
+            parser.add_option(self.opts, **kwargs)
+
+    def get_help_record(self, ctx):
+        if self.hidden:
+            return
+        any_prefix_is_slash = []
+
+        def _write_opts(opts):
+            rv, any_slashes = join_options(opts)
+            if any_slashes:
+                any_prefix_is_slash[:] = [True]
+            if not self.is_flag and not self.count:
+                rv += ' ' + self.make_metavar()
+            return rv
+
+        rv = [_write_opts(self.opts)]
+        if self.secondary_opts:
+            rv.append(_write_opts(self.secondary_opts))
+
+        help = self.help or ''
+        extra = []
+        if self.show_envvar:
+            envvar = self.envvar
+            if envvar is None:
+                if self.allow_from_autoenv and \
+                   ctx.auto_envvar_prefix is not None:
+                    envvar = '%s_%s' % (ctx.auto_envvar_prefix,
+                                        self.name.upper())
+            if envvar is not None:
+                extra.append('env var: %s' % (
+                    ', '.join('%s' % d for d in envvar)
+                    if isinstance(envvar, (list, tuple)) else envvar,))
+        if self.default is not None and self.show_default:
+            if isinstance(self.show_default, string_types):
+                default_string = '({})'.format(self.show_default)
+            elif isinstance(self.default, (list, tuple)):
+                default_string = ', '.join('%s' % d for d in self.default)
+            elif inspect.isfunction(self.default):
+                default_string = "(dynamic)"
+            else:
+                default_string = self.default
+            extra.append('default: {}'.format(default_string))
+
+        if self.required:
+            extra.append('required')
+        if extra:
+            help = '%s[%s]' % (help and help + '  ' or '', '; '.join(extra))
+
+        return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help)
+
+    def get_default(self, ctx):
+        # If we're a non boolean flag our default is more complex because
+        # we need to look at all flags in the same group to figure out
+        # if we're the default one in which case we return the flag
+        # value as default.
+        if self.is_flag and not self.is_bool_flag:
+            for param in ctx.command.params:
+                if param.name == self.name and param.default:
+                    return param.flag_value
+            return None
+        return Parameter.get_default(self, ctx)
+
+    def prompt_for_value(self, ctx):
+        """This is an alternative flow that can be activated in the full
+        value processing if a value does not exist.  It will prompt the
+        user until a valid value exists and then returns the processed
+        value as result.
+        """
+        # Calculate the default before prompting anything to be stable.
+        default = self.get_default(ctx)
+
+        # If this is a prompt for a flag we need to handle this
+        # differently.
+        if self.is_bool_flag:
+            return confirm(self.prompt, default)
+
+        return prompt(self.prompt, default=default, type=self.type,
+                      hide_input=self.hide_input, show_choices=self.show_choices,
+                      confirmation_prompt=self.confirmation_prompt,
+                      value_proc=lambda x: self.process_value(ctx, x))
+
+    def resolve_envvar_value(self, ctx):
+        rv = Parameter.resolve_envvar_value(self, ctx)
+        if rv is not None:
+            return rv
+        if self.allow_from_autoenv and \
+           ctx.auto_envvar_prefix is not None:
+            envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper())
+            return os.environ.get(envvar)
+
+    def value_from_envvar(self, ctx):
+        rv = self.resolve_envvar_value(ctx)
+        if rv is None:
+            return None
+        value_depth = (self.nargs != 1) + bool(self.multiple)
+        if value_depth > 0:
+            rv = self.type.split_envvar_value(rv)
+            if self.multiple and self.nargs != 1:
+                rv = batch(rv, self.nargs)
+        return rv
+
+    def full_process_value(self, ctx, value):
+        if value is None and self.prompt is not None \
+           and not ctx.resilient_parsing:
+            return self.prompt_for_value(ctx)
+        return Parameter.full_process_value(self, ctx, value)
+
+
+class Argument(Parameter):
+    """Arguments are positional parameters to a command.  They generally
+    provide fewer features than options but can have infinite ``nargs``
+    and are required by default.
+
+    All parameters are passed onwards to the parameter constructor.
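+
+    Example (a minimal sketch; ``nargs=-1`` accepts an unlimited number
+    of values)::
+
+        @click.command()
+        @click.argument('src', nargs=-1)
+        @click.argument('dst')
+        def copy(src, dst):
+            click.echo('%s -> %s' % (', '.join(src), dst))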
+    """
+    param_type_name = 'argument'
+
+    def __init__(self, param_decls, required=None, **attrs):
+        if required is None:
+            if attrs.get('default') is not None:
+                required = False
+            else:
+                required = attrs.get('nargs', 1) > 0
+        Parameter.__init__(self, param_decls, required=required, **attrs)
+        if self.default is not None and self.nargs < 0:
+            raise TypeError('nargs=-1 in combination with a default value '
+                            'is not supported.')
+
+    @property
+    def human_readable_name(self):
+        if self.metavar is not None:
+            return self.metavar
+        return self.name.upper()
+
+    def make_metavar(self):
+        if self.metavar is not None:
+            return self.metavar
+        var = self.type.get_metavar(self)
+        if not var:
+            var = self.name.upper()
+        if not self.required:
+            var = '[%s]' % var
+        if self.nargs != 1:
+            var += '...'
+        return var
+
+    def _parse_decls(self, decls, expose_value):
+        if not decls:
+            if not expose_value:
+                return None, [], []
+            raise TypeError('Could not determine name for argument')
+        if len(decls) == 1:
+            name = arg = decls[0]
+            name = name.replace('-', '_').lower()
+        else:
+            raise TypeError('Arguments take exactly one '
+                            'parameter declaration, got %d' % len(decls))
+        return name, [arg], []
+
+    def get_usage_pieces(self, ctx):
+        return [self.make_metavar()]
+
+    def get_error_hint(self, ctx):
+        return '"%s"' % self.make_metavar()
+
+    def add_to_parser(self, parser, ctx):
+        parser.add_argument(dest=self.name, nargs=self.nargs,
+                            obj=self)
+
+
+# Circular dependency between decorators and core
+from .decorators import command, group
diff --git a/lib/python3.7/site-packages/click/decorators.py b/lib/python3.7/site-packages/click/decorators.py
new file mode 100644
index 0000000000000000000000000000000000000000..c57c5308613ec4798155e68088cc4644a42d2d52
--- /dev/null
+++ b/lib/python3.7/site-packages/click/decorators.py
@@ -0,0 +1,311 @@
+import sys
+import inspect
+
+from functools import update_wrapper
+
+from ._compat import iteritems
+from ._unicodefun import _check_for_unicode_literals
+from .utils import echo
+from .globals import get_current_context
+
+
+def pass_context(f):
+    """Marks a callback as wanting to receive the current context
+    object as first argument.
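+
+    Example (a minimal sketch)::
+
+        @click.command()
+        @click.pass_context
+        def cli(ctx):
+            # ctx is the click.Context for this invocation.
+            click.echo(ctx.info_name)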
+    """
+    def new_func(*args, **kwargs):
+        return f(get_current_context(), *args, **kwargs)
+    return update_wrapper(new_func, f)
+
+
+def pass_obj(f):
+    """Similar to :func:`pass_context`, but only pass the object on the
+    context onwards (:attr:`Context.obj`).  This is useful if that object
+    represents the state of a nested system.
+    """
+    def new_func(*args, **kwargs):
+        return f(get_current_context().obj, *args, **kwargs)
+    return update_wrapper(new_func, f)
+
+
+def make_pass_decorator(object_type, ensure=False):
+    """Given an object type this creates a decorator that will work
+    similar to :func:`pass_obj` but instead of passing the object of the
+    current context, it will find the innermost context of type
+    :func:`object_type`.
+
+    This generates a decorator that works roughly like this::
+
+        from functools import update_wrapper
+
+        def decorator(f):
+            @pass_context
+            def new_func(ctx, *args, **kwargs):
+                obj = ctx.find_object(object_type)
+                return ctx.invoke(f, obj, *args, **kwargs)
+            return update_wrapper(new_func, f)
+        return decorator
+
+    :param object_type: the type of the object to pass.
+    :param ensure: if set to `True`, a new object will be created and
+                   remembered on the context if it's not there yet.
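+
+    Usage sketch (``Repo`` is a hypothetical state class)::
+
+        class Repo(object):
+            def __init__(self):
+                self.home = '.'
+
+        pass_repo = make_pass_decorator(Repo, ensure=True)
+
+        @click.command()
+        @pass_repo
+        def cli(repo):
+            click.echo(repo.home)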
+    """
+    def decorator(f):
+        def new_func(*args, **kwargs):
+            ctx = get_current_context()
+            if ensure:
+                obj = ctx.ensure_object(object_type)
+            else:
+                obj = ctx.find_object(object_type)
+            if obj is None:
+                raise RuntimeError('Managed to invoke callback without a '
+                                   'context object of type %r existing'
+                                   % object_type.__name__)
+            return ctx.invoke(f, obj, *args, **kwargs)
+        return update_wrapper(new_func, f)
+    return decorator
+
+
+def _make_command(f, name, attrs, cls):
+    if isinstance(f, Command):
+        raise TypeError('Attempted to convert a callback into a '
+                        'command twice.')
+    try:
+        params = f.__click_params__
+        params.reverse()
+        del f.__click_params__
+    except AttributeError:
+        params = []
+    help = attrs.get('help')
+    if help is None:
+        help = inspect.getdoc(f)
+        if isinstance(help, bytes):
+            help = help.decode('utf-8')
+    else:
+        help = inspect.cleandoc(help)
+    attrs['help'] = help
+    _check_for_unicode_literals()
+    return cls(name=name or f.__name__.lower().replace('_', '-'),
+               callback=f, params=params, **attrs)
+
+
+def command(name=None, cls=None, **attrs):
+    r"""Creates a new :class:`Command` and uses the decorated function as
+    callback.  This will also automatically attach all decorated
+    :func:`option`\s and :func:`argument`\s as parameters to the command.
+
+    The name of the command defaults to the name of the function.  If you
+    want to change that, you can pass the intended name as the first
+    argument.
+
+    All keyword arguments are forwarded to the underlying command class.
+
+    Once decorated the function turns into a :class:`Command` instance
+    that can be invoked as a command line utility or be attached to a
+    command :class:`Group`.
+
+    :param name: the name of the command.  This defaults to the function
+                 name with underscores replaced by dashes.
+    :param cls: the command class to instantiate.  This defaults to
+                :class:`Command`.
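+
+    Example (a minimal sketch)::
+
+        @click.command('run')
+        def run_command():
+            click.echo('running')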
+    """
+    if cls is None:
+        cls = Command
+    def decorator(f):
+        cmd = _make_command(f, name, attrs, cls)
+        cmd.__doc__ = f.__doc__
+        return cmd
+    return decorator
+
+
+def group(name=None, **attrs):
+    """Creates a new :class:`Group` with a function as callback.  This
+    works otherwise the same as :func:`command` just that the `cls`
+    parameter is set to :class:`Group`.
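+
+    A minimal sketch of typical usage::
+
+        @click.group()
+        def cli():
+            pass
+
+        @cli.command()
+        def build():
+            click.echo('building')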
+    """
+    attrs.setdefault('cls', Group)
+    return command(name, **attrs)
+
+
+def _param_memo(f, param):
+    if isinstance(f, Command):
+        f.params.append(param)
+    else:
+        if not hasattr(f, '__click_params__'):
+            f.__click_params__ = []
+        f.__click_params__.append(param)
+
+
+def argument(*param_decls, **attrs):
+    """Attaches an argument to the command.  All positional arguments are
+    passed as parameter declarations to :class:`Argument`; all keyword
+    arguments are forwarded unchanged (except ``cls``).
+    This is equivalent to creating an :class:`Argument` instance manually
+    and attaching it to the :attr:`Command.params` list.
+
+    :param cls: the argument class to instantiate.  This defaults to
+                :class:`Argument`.
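+
+    For example (a minimal sketch)::
+
+        @click.command()
+        @click.argument('src', nargs=-1)
+        @click.argument('dst', nargs=1)
+        def copy(src, dst):
+            click.echo('%s -> %s' % (', '.join(src), dst))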
+    """
+    def decorator(f):
+        ArgumentClass = attrs.pop('cls', Argument)
+        _param_memo(f, ArgumentClass(param_decls, **attrs))
+        return f
+    return decorator
+
+
+def option(*param_decls, **attrs):
+    """Attaches an option to the command.  All positional arguments are
+    passed as parameter declarations to :class:`Option`; all keyword
+    arguments are forwarded unchanged (except ``cls``).
+    This is equivalent to creating an :class:`Option` instance manually
+    and attaching it to the :attr:`Command.params` list.
+
+    :param cls: the option class to instantiate.  This defaults to
+                :class:`Option`.
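+
+    For example (a minimal sketch)::
+
+        @click.command()
+        @click.option('--count', default=1, help='Number of greetings.')
+        def hello(count):
+            for _ in range(count):
+                click.echo('Hello!')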
+    """
+    def decorator(f):
+        # Issue 926, copy attrs, so pre-defined options can re-use the same cls=
+        option_attrs = attrs.copy()
+
+        if 'help' in option_attrs:
+            option_attrs['help'] = inspect.cleandoc(option_attrs['help'])
+        OptionClass = option_attrs.pop('cls', Option)
+        _param_memo(f, OptionClass(param_decls, **option_attrs))
+        return f
+    return decorator
+
+
+def confirmation_option(*param_decls, **attrs):
+    """Shortcut for confirmation prompts that can be ignored by passing
+    ``--yes`` as a parameter.
+
+    This is equivalent to decorating a function with :func:`option` with
+    the following parameters::
+
+        def callback(ctx, param, value):
+            if not value:
+                ctx.abort()
+
+        @click.command()
+        @click.option('--yes', is_flag=True, callback=callback,
+                      expose_value=False, prompt='Do you want to continue?')
+        def dropdb():
+            pass
+    """
+    def decorator(f):
+        def callback(ctx, param, value):
+            if not value:
+                ctx.abort()
+        attrs.setdefault('is_flag', True)
+        attrs.setdefault('callback', callback)
+        attrs.setdefault('expose_value', False)
+        attrs.setdefault('prompt', 'Do you want to continue?')
+        attrs.setdefault('help', 'Confirm the action without prompting.')
+        return option(*(param_decls or ('--yes',)), **attrs)(f)
+    return decorator
+
+
+def password_option(*param_decls, **attrs):
+    """Shortcut for password prompts.
+
+    This is equivalent to decorating a function with :func:`option` with
+    the following parameters::
+
+        @click.command()
+        @click.option('--password', prompt=True, confirmation_prompt=True,
+                      hide_input=True)
+        def changeadmin(password):
+            pass
+    """
+    def decorator(f):
+        attrs.setdefault('prompt', True)
+        attrs.setdefault('confirmation_prompt', True)
+        attrs.setdefault('hide_input', True)
+        return option(*(param_decls or ('--password',)), **attrs)(f)
+    return decorator
+
+
+def version_option(version=None, *param_decls, **attrs):
+    """Adds a ``--version`` option which immediately ends the program
+    printing out the version number.  This is implemented as an eager
+    option that prints the version and exits the program in the callback.
+
+    :param version: the version number to show.  If not provided Click
+                    attempts an auto discovery via setuptools.
+    :param prog_name: the name of the program (defaults to autodetection)
+    :param message: custom message to show instead of the default
+                    (``'%(prog)s, version %(version)s'``)
+    :param others: everything else is forwarded to :func:`option`.
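+
+    A minimal sketch of typical usage::
+
+        @click.command()
+        @click.version_option(version='1.0.0')
+        def cli():
+            pass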
+    """
+    if version is None:
+        if hasattr(sys, '_getframe'):
+            module = sys._getframe(1).f_globals.get('__name__')
+        else:
+            module = ''
+
+    def decorator(f):
+        prog_name = attrs.pop('prog_name', None)
+        message = attrs.pop('message', '%(prog)s, version %(version)s')
+
+        def callback(ctx, param, value):
+            if not value or ctx.resilient_parsing:
+                return
+            prog = prog_name
+            if prog is None:
+                prog = ctx.find_root().info_name
+            ver = version
+            if ver is None:
+                try:
+                    import pkg_resources
+                except ImportError:
+                    pass
+                else:
+                    for dist in pkg_resources.working_set:
+                        scripts = dist.get_entry_map().get('console_scripts') or {}
+                        for script_name, entry_point in iteritems(scripts):
+                            if entry_point.module_name == module:
+                                ver = dist.version
+                                break
+                if ver is None:
+                    raise RuntimeError('Could not determine version')
+            echo(message % {
+                'prog': prog,
+                'version': ver,
+            }, color=ctx.color)
+            ctx.exit()
+
+        attrs.setdefault('is_flag', True)
+        attrs.setdefault('expose_value', False)
+        attrs.setdefault('is_eager', True)
+        attrs.setdefault('help', 'Show the version and exit.')
+        attrs['callback'] = callback
+        return option(*(param_decls or ('--version',)), **attrs)(f)
+    return decorator
+
+
+def help_option(*param_decls, **attrs):
+    """Adds a ``--help`` option which immediately ends the program
+    printing out the help page.  This is usually unnecessary to add as
+    this is added by default to all commands unless suppressed.
+
+    Like :func:`version_option`, this is implemented as an eager option
+    that prints the help page in its callback and exits.
+
+    All arguments are forwarded to :func:`option`.
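+
+    For example, to expose help under different flags (a minimal sketch)::
+
+        @click.command(add_help_option=False)
+        @click.help_option('-h', '--help')
+        def cli():
+            pass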
+    """
+    def decorator(f):
+        def callback(ctx, param, value):
+            if value and not ctx.resilient_parsing:
+                echo(ctx.get_help(), color=ctx.color)
+                ctx.exit()
+        attrs.setdefault('is_flag', True)
+        attrs.setdefault('expose_value', False)
+        attrs.setdefault('help', 'Show this message and exit.')
+        attrs.setdefault('is_eager', True)
+        attrs['callback'] = callback
+        return option(*(param_decls or ('--help',)), **attrs)(f)
+    return decorator
+
+
+# Circular dependencies between core and decorators
+from .core import Command, Group, Argument, Option
diff --git a/lib/python3.7/site-packages/click/exceptions.py b/lib/python3.7/site-packages/click/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fa17658cb20c9c6b0b4843736196780257d5f40
--- /dev/null
+++ b/lib/python3.7/site-packages/click/exceptions.py
@@ -0,0 +1,235 @@
+from ._compat import PY2, filename_to_ui, get_text_stderr
+from .utils import echo
+
+
+def _join_param_hints(param_hint):
+    if isinstance(param_hint, (tuple, list)):
+        return ' / '.join('"%s"' % x for x in param_hint)
+    return param_hint
+
+
+class ClickException(Exception):
+    """An exception that Click can handle and show to the user."""
+
+    #: The exit code for this exception
+    exit_code = 1
+
+    def __init__(self, message):
+        ctor_msg = message
+        if PY2:
+            if ctor_msg is not None:
+                ctor_msg = ctor_msg.encode('utf-8')
+        Exception.__init__(self, ctor_msg)
+        self.message = message
+
+    def format_message(self):
+        return self.message
+
+    def __str__(self):
+        return self.message
+
+    if PY2:
+        __unicode__ = __str__
+
+        def __str__(self):
+            return self.message.encode('utf-8')
+
+    def show(self, file=None):
+        if file is None:
+            file = get_text_stderr()
+        echo('Error: %s' % self.format_message(), file=file)
+
+
+class UsageError(ClickException):
+    """An internal exception that signals a usage error.  This typically
+    aborts any further handling.
+
+    :param message: the error message to display.
+    :param ctx: optionally the context that caused this error.  Click will
+                fill in the context automatically in some situations.
+    """
+    exit_code = 2
+
+    def __init__(self, message, ctx=None):
+        ClickException.__init__(self, message)
+        self.ctx = ctx
+        self.cmd = self.ctx and self.ctx.command or None
+
+    def show(self, file=None):
+        if file is None:
+            file = get_text_stderr()
+        color = None
+        hint = ''
+        if (self.cmd is not None and
+                self.cmd.get_help_option(self.ctx) is not None):
+            hint = ('Try "%s %s" for help.\n'
+                    % (self.ctx.command_path, self.ctx.help_option_names[0]))
+        if self.ctx is not None:
+            color = self.ctx.color
+            echo(self.ctx.get_usage() + '\n%s' % hint, file=file, color=color)
+        echo('Error: %s' % self.format_message(), file=file, color=color)
+
+
+class BadParameter(UsageError):
+    """An exception that formats out a standardized error message for a
+    bad parameter.  This is useful when thrown from a callback or type as
+    Click will attach contextual information to it (for instance, which
+    parameter it is).
+
+    .. versionadded:: 2.0
+
+    :param param: the parameter object that caused this error.  This can
+                  be left out, and Click will attach this info itself
+                  if possible.
+    :param param_hint: a string that shows up as the parameter name.  This
+                       can be used as an alternative to `param` in cases
+                       where custom validation should happen.  If it is
+                       a string it's used as such, if it's a list then
+                       each item is quoted and separated.
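+
+    For example, from a parameter callback (a minimal sketch)::
+
+        def validate_count(ctx, param, value):
+            if value < 0:
+                raise BadParameter('count must be non-negative')
+            return value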
+    """
+
+    def __init__(self, message, ctx=None, param=None,
+                 param_hint=None):
+        UsageError.__init__(self, message, ctx)
+        self.param = param
+        self.param_hint = param_hint
+
+    def format_message(self):
+        if self.param_hint is not None:
+            param_hint = self.param_hint
+        elif self.param is not None:
+            param_hint = self.param.get_error_hint(self.ctx)
+        else:
+            return 'Invalid value: %s' % self.message
+        param_hint = _join_param_hints(param_hint)
+
+        return 'Invalid value for %s: %s' % (param_hint, self.message)
+
+
+class MissingParameter(BadParameter):
+    """Raised if click required an option or argument but it was not
+    provided when invoking the script.
+
+    .. versionadded:: 4.0
+
+    :param param_type: a string that indicates the type of the parameter.
+                       The default is to inherit the parameter type from
+                       the given `param`.  Valid values are ``'parameter'``,
+                       ``'option'`` or ``'argument'``.
+    """
+
+    def __init__(self, message=None, ctx=None, param=None,
+                 param_hint=None, param_type=None):
+        BadParameter.__init__(self, message, ctx, param, param_hint)
+        self.param_type = param_type
+
+    def format_message(self):
+        if self.param_hint is not None:
+            param_hint = self.param_hint
+        elif self.param is not None:
+            param_hint = self.param.get_error_hint(self.ctx)
+        else:
+            param_hint = None
+        param_hint = _join_param_hints(param_hint)
+
+        param_type = self.param_type
+        if param_type is None and self.param is not None:
+            param_type = self.param.param_type_name
+
+        msg = self.message
+        if self.param is not None:
+            msg_extra = self.param.type.get_missing_message(self.param)
+            if msg_extra:
+                if msg:
+                    msg += '.  ' + msg_extra
+                else:
+                    msg = msg_extra
+
+        return 'Missing %s%s%s%s' % (
+            param_type,
+            param_hint and ' %s' % param_hint or '',
+            msg and '.  ' or '.',
+            msg or '',
+        )
+
+
+class NoSuchOption(UsageError):
+    """Raised if click attempted to handle an option that does not
+    exist.
+
+    .. versionadded:: 4.0
+    """
+
+    def __init__(self, option_name, message=None, possibilities=None,
+                 ctx=None):
+        if message is None:
+            message = 'no such option: %s' % option_name
+        UsageError.__init__(self, message, ctx)
+        self.option_name = option_name
+        self.possibilities = possibilities
+
+    def format_message(self):
+        bits = [self.message]
+        if self.possibilities:
+            if len(self.possibilities) == 1:
+                bits.append('Did you mean %s?' % self.possibilities[0])
+            else:
+                possibilities = sorted(self.possibilities)
+                bits.append('(Possible options: %s)' % ', '.join(possibilities))
+        return '  '.join(bits)
+
+
+class BadOptionUsage(UsageError):
+    """Raised if an option is generally supplied but the use of the option
+    was incorrect.  This is for instance raised if the number of arguments
+    for an option is not correct.
+
+    .. versionadded:: 4.0
+
+    :param option_name: the name of the option being used incorrectly.
+    """
+
+    def __init__(self, option_name, message, ctx=None):
+        UsageError.__init__(self, message, ctx)
+        self.option_name = option_name
+
+
+class BadArgumentUsage(UsageError):
+    """Raised if an argument is generally supplied but the use of the argument
+    was incorrect.  This is for instance raised if the number of values
+    for an argument is not correct.
+
+    .. versionadded:: 6.0
+    """
+
+    def __init__(self, message, ctx=None):
+        UsageError.__init__(self, message, ctx)
+
+
+class FileError(ClickException):
+    """Raised if a file cannot be opened."""
+
+    def __init__(self, filename, hint=None):
+        ui_filename = filename_to_ui(filename)
+        if hint is None:
+            hint = 'unknown error'
+        ClickException.__init__(self, hint)
+        self.ui_filename = ui_filename
+        self.filename = filename
+
+    def format_message(self):
+        return 'Could not open file %s: %s' % (self.ui_filename, self.message)
+
+
+class Abort(RuntimeError):
+    """An internal signalling exception that signals Click to abort."""
+
+
+class Exit(RuntimeError):
+    """An exception that indicates that the application should exit with some
+    status code.
+
+    :param code: the status code to exit with.
+    """
+    def __init__(self, code=0):
+        self.exit_code = code
diff --git a/lib/python3.7/site-packages/click/formatting.py b/lib/python3.7/site-packages/click/formatting.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3d6a4d389019c3c5cf61a1ab9be8e6836bd15e6
--- /dev/null
+++ b/lib/python3.7/site-packages/click/formatting.py
@@ -0,0 +1,256 @@
+from contextlib import contextmanager
+from .termui import get_terminal_size
+from .parser import split_opt
+from ._compat import term_len
+
+
+# Can force a width.  This is used by the test system
+FORCED_WIDTH = None
+
+
+def measure_table(rows):
+    widths = {}
+    for row in rows:
+        for idx, col in enumerate(row):
+            widths[idx] = max(widths.get(idx, 0), term_len(col))
+    return tuple(y for x, y in sorted(widths.items()))
+
+
+def iter_rows(rows, col_count):
+    for row in rows:
+        row = tuple(row)
+        yield row + ('',) * (col_count - len(row))
+
+
+def wrap_text(text, width=78, initial_indent='', subsequent_indent='',
+              preserve_paragraphs=False):
+    """A helper function that intelligently wraps text.  By default, it
+    assumes that it operates on a single paragraph of text but if the
+    `preserve_paragraphs` parameter is provided it will intelligently
+    handle paragraphs (separated by empty lines).
+
+    If paragraphs are handled, a paragraph can be prefixed with a line
+    containing only the ``\\b`` character (``\\x08``) to indicate that
+    no rewrapping should happen in that block.
+
+    :param text: the text that should be rewrapped.
+    :param width: the maximum width for the text.
+    :param initial_indent: the initial indent that should be placed on the
+                           first line as a string.
+    :param subsequent_indent: the indent string that should be placed on
+                              each consecutive line.
+    :param preserve_paragraphs: if this flag is set then the wrapping will
+                                intelligently handle paragraphs.
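+
+    For example (a minimal sketch)::
+
+        wrap_text('a rather long help text ...', width=40,
+                  initial_indent='  ', subsequent_indent='  ')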
+    """
+    from ._textwrap import TextWrapper
+    text = text.expandtabs()
+    wrapper = TextWrapper(width, initial_indent=initial_indent,
+                          subsequent_indent=subsequent_indent,
+                          replace_whitespace=False)
+    if not preserve_paragraphs:
+        return wrapper.fill(text)
+
+    p = []
+    buf = []
+    indent = None
+
+    def _flush_par():
+        if not buf:
+            return
+        if buf[0].strip() == '\b':
+            p.append((indent or 0, True, '\n'.join(buf[1:])))
+        else:
+            p.append((indent or 0, False, ' '.join(buf)))
+        del buf[:]
+
+    for line in text.splitlines():
+        if not line:
+            _flush_par()
+            indent = None
+        else:
+            if indent is None:
+                orig_len = term_len(line)
+                line = line.lstrip()
+                indent = orig_len - term_len(line)
+            buf.append(line)
+    _flush_par()
+
+    rv = []
+    for indent, raw, text in p:
+        with wrapper.extra_indent(' ' * indent):
+            if raw:
+                rv.append(wrapper.indent_only(text))
+            else:
+                rv.append(wrapper.fill(text))
+
+    return '\n\n'.join(rv)
+
+
+class HelpFormatter(object):
+    """This class helps with formatting text-based help pages.  It's
+    usually just needed for very special internal cases, but it's also
+    exposed so that developers can write their own fancy outputs.
+
+    At present, it always writes into memory.
+
+    :param indent_increment: the additional increment for each level.
+    :param width: the width for the text.  This defaults to the terminal
+                  width clamped to a maximum of 78.
+    :param max_width: the upper bound for the auto-detected terminal
+                      width (defaults to 80).
+    """
+
+    def __init__(self, indent_increment=2, width=None, max_width=None):
+        self.indent_increment = indent_increment
+        if max_width is None:
+            max_width = 80
+        if width is None:
+            width = FORCED_WIDTH
+            if width is None:
+                width = max(min(get_terminal_size()[0], max_width) - 2, 50)
+        self.width = width
+        self.current_indent = 0
+        self.buffer = []
+
+    def write(self, string):
+        """Writes a unicode string into the internal buffer."""
+        self.buffer.append(string)
+
+    def indent(self):
+        """Increases the indentation."""
+        self.current_indent += self.indent_increment
+
+    def dedent(self):
+        """Decreases the indentation."""
+        self.current_indent -= self.indent_increment
+
+    def write_usage(self, prog, args='', prefix='Usage: '):
+        """Writes a usage line into the buffer.
+
+        :param prog: the program name.
+        :param args: whitespace separated list of arguments.
+        :param prefix: the prefix for the first line.
+        """
+        usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog)
+        text_width = self.width - self.current_indent
+
+        if text_width >= (term_len(usage_prefix) + 20):
+            # The arguments will fit to the right of the prefix.
+            indent = ' ' * term_len(usage_prefix)
+            self.write(wrap_text(args, text_width,
+                                 initial_indent=usage_prefix,
+                                 subsequent_indent=indent))
+        else:
+            # The prefix is too long, put the arguments on the next line.
+            self.write(usage_prefix)
+            self.write('\n')
+            indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4)
+            self.write(wrap_text(args, text_width,
+                                 initial_indent=indent,
+                                 subsequent_indent=indent))
+
+        self.write('\n')
+
+    def write_heading(self, heading):
+        """Writes a heading into the buffer."""
+        self.write('%*s%s:\n' % (self.current_indent, '', heading))
+
+    def write_paragraph(self):
+        """Writes a paragraph into the buffer."""
+        if self.buffer:
+            self.write('\n')
+
+    def write_text(self, text):
+        """Writes re-indented text into the buffer.  This rewraps and
+        preserves paragraphs.
+        """
+        text_width = max(self.width - self.current_indent, 11)
+        indent = ' ' * self.current_indent
+        self.write(wrap_text(text, text_width,
+                             initial_indent=indent,
+                             subsequent_indent=indent,
+                             preserve_paragraphs=True))
+        self.write('\n')
+
+    def write_dl(self, rows, col_max=30, col_spacing=2):
+        """Writes a definition list into the buffer.  This is how options
+        and commands are usually formatted.
+
+        :param rows: a list of two item tuples for the terms and values.
+        :param col_max: the maximum width of the first column.
+        :param col_spacing: the number of spaces between the first and
+                            second column.
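+
+        For example (a minimal sketch)::
+
+            formatter.write_dl([
+                ('--verbose', 'Enable verbose output.'),
+                ('--help', 'Show this message and exit.'),
+            ])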
+        """
+        rows = list(rows)
+        widths = measure_table(rows)
+        if len(widths) != 2:
+            raise TypeError('Expected two columns for definition list')
+
+        first_col = min(widths[0], col_max) + col_spacing
+
+        for first, second in iter_rows(rows, len(widths)):
+            self.write('%*s%s' % (self.current_indent, '', first))
+            if not second:
+                self.write('\n')
+                continue
+            if term_len(first) <= first_col - col_spacing:
+                self.write(' ' * (first_col - term_len(first)))
+            else:
+                self.write('\n')
+                self.write(' ' * (first_col + self.current_indent))
+
+            text_width = max(self.width - first_col - 2, 10)
+            # Use a list rather than an iterator: iterators are always
+            # truthy, which made the empty-text branch below unreachable.
+            lines = wrap_text(second, text_width).splitlines()
+            if lines:
+                self.write(lines[0] + '\n')
+                for line in lines[1:]:
+                    self.write('%*s%s\n' % (
+                        first_col + self.current_indent, '', line))
+            else:
+                self.write('\n')
+
+    @contextmanager
+    def section(self, name):
+        """Helpful context manager that writes a paragraph, a heading,
+        and the indents.
+
+        :param name: the section name that is written as heading.
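+
+        For example (a minimal sketch, with ``rows`` being a list of
+        ``(term, description)`` tuples)::
+
+            with formatter.section('Options'):
+                formatter.write_dl(rows)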
+        """
+        self.write_paragraph()
+        self.write_heading(name)
+        self.indent()
+        try:
+            yield
+        finally:
+            self.dedent()
+
+    @contextmanager
+    def indentation(self):
+        """A context manager that increases the indentation."""
+        self.indent()
+        try:
+            yield
+        finally:
+            self.dedent()
+
+    def getvalue(self):
+        """Returns the buffer contents."""
+        return ''.join(self.buffer)
+
+
+def join_options(options):
+    """Given a list of option strings this joins them in the most appropriate
+    way and returns them in the form ``(formatted_string,
+    any_prefix_is_slash)`` where the second item in the tuple is a flag that
+    indicates if any of the option prefixes was a slash.
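+
+    For example (a minimal sketch)::
+
+        join_options(['--help', '-h'])
+        # -> ('-h, --help', False); shorter prefixes sort first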
+    """
+    rv = []
+    any_prefix_is_slash = False
+    for opt in options:
+        prefix = split_opt(opt)[0]
+        if prefix == '/':
+            any_prefix_is_slash = True
+        rv.append((len(prefix), opt))
+
+    rv.sort(key=lambda x: x[0])
+
+    rv = ', '.join(x[1] for x in rv)
+    return rv, any_prefix_is_slash
diff --git a/lib/python3.7/site-packages/click/globals.py b/lib/python3.7/site-packages/click/globals.py
new file mode 100644
index 0000000000000000000000000000000000000000..843b594abe40b092a9b508ddc69b2d236e627363
--- /dev/null
+++ b/lib/python3.7/site-packages/click/globals.py
@@ -0,0 +1,48 @@
+from threading import local
+
+
+_local = local()
+
+
+def get_current_context(silent=False):
+    """Returns the current click context.  This can be used as a way to
+    access the current context object from anywhere.  This is a more implicit
+    alternative to the :func:`pass_context` decorator.  This function is
+    primarily useful for helpers such as :func:`echo` which might be
+    interested in changing its behavior based on the current context.
+
+    To push the current context, :meth:`Context.scope` can be used.
+
+    .. versionadded:: 5.0
+
+    :param silent: if set to `True` the return value is `None` if no context
+                   is available.  The default behavior is to raise a
+                   :exc:`RuntimeError`.
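+
+    For example (a minimal sketch)::
+
+        def current_command_name():
+            return get_current_context().info_name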
+    """
+    try:
+        return getattr(_local, 'stack')[-1]
+    except (AttributeError, IndexError):
+        if not silent:
+            raise RuntimeError('There is no active click context.')
+
+
+def push_context(ctx):
+    """Pushes a new context to the current stack."""
+    _local.__dict__.setdefault('stack', []).append(ctx)
+
+
+def pop_context():
+    """Removes the top level from the stack."""
+    _local.stack.pop()
+
+
+def resolve_color_default(color=None):
+    """"Internal helper to get the default value of the color flag.  If a
+    value is passed it's returned unchanged, otherwise it's looked up from
+    the current context.
+    """
+    if color is not None:
+        return color
+    ctx = get_current_context(silent=True)
+    if ctx is not None:
+        return ctx.color
diff --git a/lib/python3.7/site-packages/click/parser.py b/lib/python3.7/site-packages/click/parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c3ae9c8efd453aecda98026cf65208b789b948b
--- /dev/null
+++ b/lib/python3.7/site-packages/click/parser.py
@@ -0,0 +1,427 @@
+# -*- coding: utf-8 -*-
+"""
+click.parser
+~~~~~~~~~~~~
+
+This module started out largely as a copy-paste of the stdlib's
+optparse module, with the features we do not need removed because we
+implement them at a higher level in Click (for instance type handling,
+help formatting and a lot more).
+
+The plan is to remove more and more from here over time.
+
+The reason this is a separate module rather than the stdlib's optparse
+is that Python 2.x and 3.x generate different error messages, and
+optparse in the stdlib uses gettext for no good reason, which might
+cause us issues.
+"""
+
+import re
+from collections import deque
+from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \
+     BadArgumentUsage
+
+
+def _unpack_args(args, nargs_spec):
+    """Given an iterable of arguments and an iterable of nargs specifications,
+    it returns a tuple with all the unpacked arguments at the first index
+    and all remaining arguments as the second.
+
+    The nargs specification is the number of arguments that should be consumed
+    or `-1` to indicate that this position should eat up all the remainders.
+
+    Missing items are filled with `None`.
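+
+    For example (a minimal sketch)::
+
+        _unpack_args(['a', 'b', 'c'], [1, -1])
+        # -> (('a', ('b', 'c')), [])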
+    """
+    args = deque(args)
+    nargs_spec = deque(nargs_spec)
+    rv = []
+    spos = None
+
+    def _fetch(c):
+        try:
+            if spos is None:
+                return c.popleft()
+            else:
+                return c.pop()
+        except IndexError:
+            return None
+
+    while nargs_spec:
+        nargs = _fetch(nargs_spec)
+        if nargs == 1:
+            rv.append(_fetch(args))
+        elif nargs > 1:
+            x = [_fetch(args) for _ in range(nargs)]
+            # If we're reversed, we're pulling in the arguments in reverse,
+            # so we need to turn them around.
+            if spos is not None:
+                x.reverse()
+            rv.append(tuple(x))
+        elif nargs < 0:
+            if spos is not None:
+                raise TypeError('Cannot have two nargs < 0')
+            spos = len(rv)
+            rv.append(None)
+
+    # spos is the position of the wildcard (star).  If it's not `None`,
+    # we fill it with the remainder.
+    if spos is not None:
+        rv[spos] = tuple(args)
+        args = []
+        rv[spos + 1:] = reversed(rv[spos + 1:])
+
+    return tuple(rv), list(args)
+
+
+def _error_opt_args(nargs, opt):
+    if nargs == 1:
+        raise BadOptionUsage(opt, '%s option requires an argument' % opt)
+    raise BadOptionUsage(opt, '%s option requires %d arguments' % (opt, nargs))
+
+
+def split_opt(opt):
+    first = opt[:1]
+    if first.isalnum():
+        return '', opt
+    if opt[1:2] == first:
+        return opt[:2], opt[2:]
+    return first, opt[1:]
+
+
+def normalize_opt(opt, ctx):
+    if ctx is None or ctx.token_normalize_func is None:
+        return opt
+    prefix, opt = split_opt(opt)
+    return prefix + ctx.token_normalize_func(opt)
+
+
+def split_arg_string(string):
+    """Given an argument string this attempts to split it into small parts."""
+    rv = []
+    for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
+                             r'|"([^"\\]*(?:\\.[^"\\]*)*)"'
+                             r'|\S+)\s*', string, re.S):
+        arg = match.group().strip()
+        if arg[:1] == arg[-1:] and arg[:1] in '"\'':
+            arg = arg[1:-1].encode('ascii', 'backslashreplace') \
+                .decode('unicode-escape')
+        try:
+            arg = type(string)(arg)
+        except UnicodeError:
+            pass
+        rv.append(arg)
+    return rv
+
+
+class Option(object):
+
+    def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
+        self._short_opts = []
+        self._long_opts = []
+        self.prefixes = set()
+
+        for opt in opts:
+            prefix, value = split_opt(opt)
+            if not prefix:
+                raise ValueError('Invalid start character for option (%s)'
+                                 % opt)
+            self.prefixes.add(prefix[0])
+            if len(prefix) == 1 and len(value) == 1:
+                self._short_opts.append(opt)
+            else:
+                self._long_opts.append(opt)
+                self.prefixes.add(prefix)
+
+        if action is None:
+            action = 'store'
+
+        self.dest = dest
+        self.action = action
+        self.nargs = nargs
+        self.const = const
+        self.obj = obj
+
+    @property
+    def takes_value(self):
+        return self.action in ('store', 'append')
+
+    def process(self, value, state):
+        if self.action == 'store':
+            state.opts[self.dest] = value
+        elif self.action == 'store_const':
+            state.opts[self.dest] = self.const
+        elif self.action == 'append':
+            state.opts.setdefault(self.dest, []).append(value)
+        elif self.action == 'append_const':
+            state.opts.setdefault(self.dest, []).append(self.const)
+        elif self.action == 'count':
+            state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
+        else:
+            raise ValueError('unknown action %r' % self.action)
+        state.order.append(self.obj)
+
+
+class Argument(object):
+
+    def __init__(self, dest, nargs=1, obj=None):
+        self.dest = dest
+        self.nargs = nargs
+        self.obj = obj
+
+    def process(self, value, state):
+        if self.nargs > 1:
+            holes = sum(1 for x in value if x is None)
+            if holes == len(value):
+                value = None
+            elif holes != 0:
+                raise BadArgumentUsage('argument %s takes %d values'
+                                       % (self.dest, self.nargs))
+        state.opts[self.dest] = value
+        state.order.append(self.obj)
+
+
+class ParsingState(object):
+
+    def __init__(self, rargs):
+        self.opts = {}
+        self.largs = []
+        self.rargs = rargs
+        self.order = []
+
+
+class OptionParser(object):
+    """The option parser is an internal class that is ultimately used to
+    parse options and arguments.  It's modelled after optparse and brings
+    a similar but vastly simplified API.  It should generally not be used
+    directly as the high level Click classes wrap it for you.
+
+    It's not nearly as extensible as optparse or argparse as it does not
+    implement features that are implemented on a higher level (such as
+    types or defaults).
+
+    :param ctx: optionally the :class:`~click.Context` that this parser
+                belongs to.
+    """
+
+    def __init__(self, ctx=None):
+        #: The :class:`~click.Context` for this parser.  This might be
+        #: `None` for some advanced use cases.
+        self.ctx = ctx
+        #: This controls how the parser deals with interspersed arguments.
+        #: If this is set to `False`, the parser will stop on the first
+        #: non-option.  Click uses this to implement nested subcommands
+        #: safely.
+        self.allow_interspersed_args = True
+        #: This tells the parser how to deal with unknown options.  By
+        #: default it will error out (which is sensible), but there is a
+        #: second mode where it will ignore it and continue processing
+        #: after shifting all the unknown options into the resulting args.
+        self.ignore_unknown_options = False
+        if ctx is not None:
+            self.allow_interspersed_args = ctx.allow_interspersed_args
+            self.ignore_unknown_options = ctx.ignore_unknown_options
+        self._short_opt = {}
+        self._long_opt = {}
+        self._opt_prefixes = set(['-', '--'])
+        self._args = []
+
+    def add_option(self, opts, dest, action=None, nargs=1, const=None,
+                   obj=None):
+        """Adds a new option named `dest` to the parser.  The destination
+        is not inferred (unlike with optparse) and needs to be explicitly
+        provided.  Action can be any of ``store``, ``store_const``,
+        ``append``, ``append_const`` or ``count``.
+
+        The `obj` can be used to identify the option in the order list
+        that is returned from the parser.
+        """
+        if obj is None:
+            obj = dest
+        opts = [normalize_opt(opt, self.ctx) for opt in opts]
+        option = Option(opts, dest, action=action, nargs=nargs,
+                        const=const, obj=obj)
+        self._opt_prefixes.update(option.prefixes)
+        for opt in option._short_opts:
+            self._short_opt[opt] = option
+        for opt in option._long_opts:
+            self._long_opt[opt] = option
+
+    def add_argument(self, dest, nargs=1, obj=None):
+        """Adds a positional argument named `dest` to the parser.
+
+        The `obj` can be used to identify the option in the order list
+        that is returned from the parser.
+        """
+        if obj is None:
+            obj = dest
+        self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
+
+    def parse_args(self, args):
+        """Parses positional arguments and returns ``(values, args, order)``
+        for the parsed options and arguments as well as the leftover
+        arguments if there are any.  The order is a list of objects as they
+        appear on the command line.  If arguments appear multiple times they
+        will be memorized multiple times as well.
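+
+        For example (a minimal sketch)::
+
+            parser = OptionParser()
+            parser.add_option(['-n', '--name'], dest='name')
+            parser.add_argument('src', nargs=1)
+            parser.parse_args(['-n', 'x', 'hello.txt'])
+            # -> ({'name': 'x', 'src': 'hello.txt'}, [], ['name', 'src'])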
+        """
+        state = ParsingState(args)
+        try:
+            self._process_args_for_options(state)
+            self._process_args_for_args(state)
+        except UsageError:
+            if self.ctx is None or not self.ctx.resilient_parsing:
+                raise
+        return state.opts, state.largs, state.order
+
+    def _process_args_for_args(self, state):
+        pargs, args = _unpack_args(state.largs + state.rargs,
+                                   [x.nargs for x in self._args])
+
+        for idx, arg in enumerate(self._args):
+            arg.process(pargs[idx], state)
+
+        state.largs = args
+        state.rargs = []
+
+    def _process_args_for_options(self, state):
+        while state.rargs:
+            arg = state.rargs.pop(0)
+            arglen = len(arg)
+            # Double dashes are always handled explicitly regardless of
+            # what prefixes are valid.
+            if arg == '--':
+                return
+            elif arg[:1] in self._opt_prefixes and arglen > 1:
+                self._process_opts(arg, state)
+            elif self.allow_interspersed_args:
+                state.largs.append(arg)
+            else:
+                state.rargs.insert(0, arg)
+                return
+
+        # Say this is the original argument list:
+        # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
+        #                            ^
+        # (we are about to process arg(i)).
+        #
+        # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
+        # [arg0, ..., arg(i-1)] (any options and their arguments will have
+        # been removed from largs).
+        #
+        # The while loop will usually consume 1 or more arguments per pass.
+        # If it consumes 1 (e.g. arg is an option that takes no arguments),
+        # then after _process_opts() is done the situation is:
+        #
+        #   largs = subset of [arg0, ..., arg(i)]
+        #   rargs = [arg(i+1), ..., arg(N-1)]
+        #
+        # If allow_interspersed_args is false, largs will always be
+        # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
+        # not a very interesting subset!
+
+    def _match_long_opt(self, opt, explicit_value, state):
+        if opt not in self._long_opt:
+            possibilities = [word for word in self._long_opt
+                             if word.startswith(opt)]
+            raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
+
+        option = self._long_opt[opt]
+        if option.takes_value:
+            # At this point it's safe to modify rargs by injecting the
+            # explicit value, because no exception is raised in this
+            # branch.  This means that the inserted value will be fully
+            # consumed.
+            if explicit_value is not None:
+                state.rargs.insert(0, explicit_value)
+
+            nargs = option.nargs
+            if len(state.rargs) < nargs:
+                _error_opt_args(nargs, opt)
+            elif nargs == 1:
+                value = state.rargs.pop(0)
+            else:
+                value = tuple(state.rargs[:nargs])
+                del state.rargs[:nargs]
+
+        elif explicit_value is not None:
+            raise BadOptionUsage(opt, '%s option does not take a value' % opt)
+
+        else:
+            value = None
+
+        option.process(value, state)
+
+    def _match_short_opt(self, arg, state):
+        stop = False
+        i = 1
+        prefix = arg[0]
+        unknown_options = []
+
+        for ch in arg[1:]:
+            opt = normalize_opt(prefix + ch, self.ctx)
+            option = self._short_opt.get(opt)
+            i += 1
+
+            if not option:
+                if self.ignore_unknown_options:
+                    unknown_options.append(ch)
+                    continue
+                raise NoSuchOption(opt, ctx=self.ctx)
+            if option.takes_value:
+                # Any characters left in arg?  Pretend they're the
+                # next arg, and stop consuming characters of arg.
+                if i < len(arg):
+                    state.rargs.insert(0, arg[i:])
+                    stop = True
+
+                nargs = option.nargs
+                if len(state.rargs) < nargs:
+                    _error_opt_args(nargs, opt)
+                elif nargs == 1:
+                    value = state.rargs.pop(0)
+                else:
+                    value = tuple(state.rargs[:nargs])
+                    del state.rargs[:nargs]
+
+            else:
+                value = None
+
+            option.process(value, state)
+
+            if stop:
+                break
+
+        # If we got any unknown options we recombine the string of the
+        # remaining options and re-attach the prefix, then report that
+        # to the state as a new larg.  This way basic combining of short
+        # options still works while unknown arguments are ignored.
+        if self.ignore_unknown_options and unknown_options:
+            state.largs.append(prefix + ''.join(unknown_options))
+
+    def _process_opts(self, arg, state):
+        explicit_value = None
+        # Long option handling happens in two parts.  The first part is
+        # supporting explicitly attached values.  In any case, we will try
+        # to long match the option first.
+        if '=' in arg:
+            long_opt, explicit_value = arg.split('=', 1)
+        else:
+            long_opt = arg
+        norm_long_opt = normalize_opt(long_opt, self.ctx)
+
+        # At this point we will match the (assumed) long option through
+        # the long option matching code.  Note that this allows options
+        # like "-foo" to be matched as long options.
+        try:
+            self._match_long_opt(norm_long_opt, explicit_value, state)
+        except NoSuchOption:
+            # At this point the long option matching failed, and we need
+            # to try with short options.  However there is a special rule
+            # which says that if we have a two-character option prefix
+            # (applies to "--foo" for instance), we do not dispatch to the
+            # short option code and will instead raise the no option
+            # error.
+            if arg[:2] not in self._opt_prefixes:
+                return self._match_short_opt(arg, state)
+            if not self.ignore_unknown_options:
+                raise
+            state.largs.append(arg)
diff --git a/lib/python3.7/site-packages/click/termui.py b/lib/python3.7/site-packages/click/termui.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf9a3aa163f9221111f3136643ac04d056e57f12
--- /dev/null
+++ b/lib/python3.7/site-packages/click/termui.py
@@ -0,0 +1,606 @@
+import os
+import sys
+import struct
+import inspect
+import itertools
+
+from ._compat import raw_input, text_type, string_types, \
+     isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN
+from .utils import echo
+from .exceptions import Abort, UsageError
+from .types import convert_type, Choice, Path
+from .globals import resolve_color_default
+
+
+# The prompt functions to use.  The doc tools currently override these
+# functions to customize how they work.
+visible_prompt_func = raw_input
+
+_ansi_colors = {
+    'black': 30,
+    'red': 31,
+    'green': 32,
+    'yellow': 33,
+    'blue': 34,
+    'magenta': 35,
+    'cyan': 36,
+    'white': 37,
+    'reset': 39,
+    'bright_black': 90,
+    'bright_red': 91,
+    'bright_green': 92,
+    'bright_yellow': 93,
+    'bright_blue': 94,
+    'bright_magenta': 95,
+    'bright_cyan': 96,
+    'bright_white': 97,
+}
+_ansi_reset_all = '\033[0m'
+
+
+def hidden_prompt_func(prompt):
+    import getpass
+    return getpass.getpass(prompt)
+
+
+def _build_prompt(text, suffix, show_default=False, default=None, show_choices=True, type=None):
+    prompt = text
+    if type is not None and show_choices and isinstance(type, Choice):
+        prompt += ' (' + ", ".join(map(str, type.choices)) + ')'
+    if default is not None and show_default:
+        prompt = '%s [%s]' % (prompt, default)
+    return prompt + suffix
+
+
+def prompt(text, default=None, hide_input=False, confirmation_prompt=False,
+           type=None, value_proc=None, prompt_suffix=': ', show_default=True,
+           err=False, show_choices=True):
+    """Prompts a user for input.  This is a convenience function that can
+    be used to ask a user for input.
+
+    If the user aborts the input by sending an interrupt signal, this
+    function will catch it and raise an :exc:`Abort` exception.
+
+    .. versionadded:: 7.0
+       Added the show_choices parameter.
+
+    .. versionadded:: 6.0
+       Added unicode support for cmd.exe on Windows.
+
+    .. versionadded:: 4.0
+       Added the `err` parameter.
+
+    :param text: the text to show for the prompt.
+    :param default: the default value to use if no input happens.  If this
+                    is not given it will prompt until it's aborted.
+    :param hide_input: if this is set to true then the input value will
+                       be hidden.
+    :param confirmation_prompt: asks for confirmation for the value.
+    :param type: the type to use to check the value against.
+    :param value_proc: if this parameter is provided it's a function that
+                       is invoked instead of the type conversion to
+                       convert a value.
+    :param prompt_suffix: a suffix that should be added to the prompt.
+    :param show_default: shows or hides the default value in the prompt.
+    :param err: if set to true the file defaults to ``stderr`` instead of
+                ``stdout``, the same as with echo.
+    :param show_choices: Show or hide choices if the passed type is a Choice.
+                         For example if type is a Choice of either day or week,
+                         show_choices is true and text is "Group by" then the
+                         prompt will be "Group by (day, week): ".
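+
+    For example (a minimal sketch)::
+
+        value = prompt('Enter a number', type=int, default=42)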
+    """
+    result = None
+
+    def prompt_func(text):
+        f = hide_input and hidden_prompt_func or visible_prompt_func
+        try:
+            # Write the prompt separately so that we get nice
+            # coloring through colorama on Windows
+            echo(text, nl=False, err=err)
+            return f('')
+        except (KeyboardInterrupt, EOFError):
+            # getpass doesn't print a newline if the user aborts input with ^C.
+            # Allegedly this behavior is inherited from getpass(3).
+            # A doc bug has been filed at https://bugs.python.org/issue24711
+            if hide_input:
+                echo(None, err=err)
+            raise Abort()
+
+    if value_proc is None:
+        value_proc = convert_type(type, default)
+
+    prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type)
+
+    while 1:
+        while 1:
+            value = prompt_func(prompt)
+            if value:
+                break
+            elif default is not None:
+                if isinstance(value_proc, Path):
+                    # Validate the Path default value (exists, dir_okay, etc.)
+                    value = default
+                    break
+                return default
+        try:
+            result = value_proc(value)
+        except UsageError as e:
+            echo('Error: %s' % e.message, err=err)
+            continue
+        if not confirmation_prompt:
+            return result
+        while 1:
+            value2 = prompt_func('Repeat for confirmation: ')
+            if value2:
+                break
+        if value == value2:
+            return result
+        echo('Error: the two entered values do not match', err=err)
+
+
+def confirm(text, default=False, abort=False, prompt_suffix=': ',
+            show_default=True, err=False):
+    """Prompts for confirmation (yes/no question).
+
+    If the user aborts the input by sending an interrupt signal this
+    function will catch it and raise an :exc:`Abort` exception.
+
+    .. versionadded:: 4.0
+       Added the `err` parameter.
+
+    :param text: the question to ask.
+    :param default: the default for the prompt.
+    :param abort: if this is set to `True` a negative answer aborts the
+                  prompt by raising :exc:`Abort`.
+    :param prompt_suffix: a suffix that should be added to the prompt.
+    :param show_default: shows or hides the default value in the prompt.
+    :param err: if set to true the file defaults to ``stderr`` instead of
+                ``stdout``, the same as with echo.
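+
+    For example (a minimal sketch)::
+
+        confirm('Do you want to continue?', abort=True)
+        # reaching this line means the user answered yes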
+    """
+    prompt = _build_prompt(text, prompt_suffix, show_default,
+                           default and 'Y/n' or 'y/N')
+    while 1:
+        try:
+            # Write the prompt separately so that we get nice
+            # coloring through colorama on Windows
+            echo(prompt, nl=False, err=err)
+            value = visible_prompt_func('').lower().strip()
+        except (KeyboardInterrupt, EOFError):
+            raise Abort()
+        if value in ('y', 'yes'):
+            rv = True
+        elif value in ('n', 'no'):
+            rv = False
+        elif value == '':
+            rv = default
+        else:
+            echo('Error: invalid input', err=err)
+            continue
+        break
+    if abort and not rv:
+        raise Abort()
+    return rv
+
+
+def get_terminal_size():
+    """Returns the current size of the terminal as tuple in the form
+    ``(width, height)`` in columns and rows.
+    """
+    # If shutil has get_terminal_size() (Python 3.3 and later) use that
+    if sys.version_info >= (3, 3):
+        import shutil
+        shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None)
+        if shutil_get_terminal_size:
+            sz = shutil_get_terminal_size()
+            return sz.columns, sz.lines
+
+    # We provide a sensible default for get_winterm_size() when being invoked
+    # inside a subprocess. Without this, it would not provide a useful result.
+    if get_winterm_size is not None:
+        size = get_winterm_size()
+        if size == (0, 0):
+            return (79, 24)
+        else:
+            return size
+
+    def ioctl_gwinsz(fd):
+        try:
+            import fcntl
+            import termios
+            cr = struct.unpack(
+                'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
+        except Exception:
+            return
+        return cr
+
+    cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
+    if not cr:
+        try:
+            fd = os.open(os.ctermid(), os.O_RDONLY)
+            try:
+                cr = ioctl_gwinsz(fd)
+            finally:
+                os.close(fd)
+        except Exception:
+            pass
+    if not cr or not cr[0] or not cr[1]:
+        cr = (os.environ.get('LINES', 25),
+              os.environ.get('COLUMNS', DEFAULT_COLUMNS))
+    return int(cr[1]), int(cr[0])
+
+
+def echo_via_pager(text_or_generator, color=None):
+    """This function takes a text and shows it via an environment specific
+    pager on stdout.
+
+    .. versionchanged:: 3.0
+       Added the `color` flag.
+
+    :param text_or_generator: the text to page, or alternatively, a
+                              generator emitting the text to page.
+    :param color: controls if the pager supports ANSI colors or not.  The
+                  default is autodetection.
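+
+    For example (a minimal sketch)::
+
+        echo_via_pager('\\n'.join('Line %d' % n for n in range(200)))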
+    """
+    color = resolve_color_default(color)
+
+    if inspect.isgeneratorfunction(text_or_generator):
+        i = text_or_generator()
+    elif isinstance(text_or_generator, string_types):
+        i = [text_or_generator]
+    else:
+        i = iter(text_or_generator)
+
+    # convert every element of i to a text type if necessary
+    text_generator = (el if isinstance(el, string_types) else text_type(el)
+                      for el in i)
+
+    from ._termui_impl import pager
+    return pager(itertools.chain(text_generator, "\n"), color)
+
+
+def progressbar(iterable=None, length=None, label=None, show_eta=True,
+                show_percent=None, show_pos=False,
+                item_show_func=None, fill_char='#', empty_char='-',
+                bar_template='%(label)s  [%(bar)s]  %(info)s',
+                info_sep='  ', width=36, file=None, color=None):
+    """This function creates an iterable context manager that can be used
+    to iterate over something while showing a progress bar.  It will
+    either iterate over the `iterable` or `length` items (that are counted
+    up).  While iteration happens, this function will print a rendered
+    progress bar to the given `file` (defaults to stdout) and will attempt
+    to calculate remaining time and more.  By default, this progress bar
+    will not be rendered if the file is not a terminal.
+
+    The context manager creates the progress bar.  When the context
+    manager is entered the progress bar is already displayed.  With every
+    iteration over the progress bar, the iterable passed to the bar is
+    advanced and the bar is updated.  When the context manager exits,
+    a newline is printed and the progress bar is finalized on screen.
+
+    Nothing else must be printed while the bar is active, or the
+    progress bar will be unintentionally destroyed.
+
+    Example usage::
+
+        with progressbar(items) as bar:
+            for item in bar:
+                do_something_with(item)
+
+    Alternatively, if no iterable is specified, one can manually update the
+    progress bar through the `update()` method instead of directly
+    iterating over the progress bar.  The update method accepts the number
+    of steps to increment the bar with::
+
+        with progressbar(length=chunks.total_bytes) as bar:
+            for chunk in chunks:
+                process_chunk(chunk)
+                bar.update(chunks.bytes)
+
+    .. versionadded:: 2.0
+
+    .. versionadded:: 4.0
+       Added the `color` parameter.  Added an `update` method to the
+       progressbar object.
+
+    :param iterable: an iterable to iterate over.  If not provided the length
+                     is required.
+    :param length: the number of items to iterate over.  By default the
+                   progressbar will attempt to ask the iterator about its
+                   length, which might or might not work.  If an iterable is
+                   also provided this parameter can be used to override the
+                   length.  If an iterable is not provided the progress bar
+                   will iterate over a range of that length.
+    :param label: the label to show next to the progress bar.
+    :param show_eta: enables or disables the estimated time display.  This is
+                     automatically disabled if the length cannot be
+                     determined.
+    :param show_percent: enables or disables the percentage display.  The
+                         default is `True` if the iterable has a length or
+                         `False` if not.
+    :param show_pos: enables or disables the absolute position display.  The
+                     default is `False`.
+    :param item_show_func: a function called with the current item which
+                           can return a string to show the current item
+                           next to the progress bar.  Note that the current
+                           item can be `None`!
+    :param fill_char: the character to use to show the filled part of the
+                      progress bar.
+    :param empty_char: the character to use to show the non-filled part of
+                       the progress bar.
+    :param bar_template: the format string to use as template for the bar.
+                         The parameters in it are ``label`` for the label,
+                         ``bar`` for the progress bar and ``info`` for the
+                         info section.
+    :param info_sep: the separator between multiple info items (eta etc.)
+    :param width: the width of the progress bar in characters, 0 means full
+                  terminal width
+    :param file: the file to write to.  If this is not a terminal then
+                 only the label is printed.
+    :param color: controls if the terminal supports ANSI colors or not.  The
+                  default is autodetection.  This is only needed if ANSI
+                  codes are included anywhere in the progress bar output,
+                  which is not the case by default.
+    """
+    from ._termui_impl import ProgressBar
+    color = resolve_color_default(color)
+    return ProgressBar(iterable=iterable, length=length, show_eta=show_eta,
+                       show_percent=show_percent, show_pos=show_pos,
+                       item_show_func=item_show_func, fill_char=fill_char,
+                       empty_char=empty_char, bar_template=bar_template,
+                       info_sep=info_sep, file=file, label=label,
+                       width=width, color=color)
+
+
+def clear():
+    """Clears the terminal screen.  This will have the effect of clearing
+    the whole visible space of the terminal and moving the cursor to the
+    top left.  This does not do anything if not connected to a terminal.
+
+    .. versionadded:: 2.0
+    """
+    if not isatty(sys.stdout):
+        return
+    # On Windows we clear the screen by shelling out to ``cls``.
+    # Everywhere else an ANSI escape sequence clears the screen and
+    # moves the cursor to the top left.
+    if WIN:
+        os.system('cls')
+    else:
+        sys.stdout.write('\033[2J\033[1;1H')
+
+
+def style(text, fg=None, bg=None, bold=None, dim=None, underline=None,
+          blink=None, reverse=None, reset=True):
+    """Styles a text with ANSI styles and returns the new string.  By
+    default the styling is self contained which means that at the end
+    of the string a reset code is issued.  This can be prevented by
+    passing ``reset=False``.
+
+    Examples::
+
+        click.echo(click.style('Hello World!', fg='green'))
+        click.echo(click.style('ATTENTION!', blink=True))
+        click.echo(click.style('Some things', reverse=True, fg='cyan'))
+
+    Supported color names:
+
+    * ``black`` (might be a gray)
+    * ``red``
+    * ``green``
+    * ``yellow`` (might be an orange)
+    * ``blue``
+    * ``magenta``
+    * ``cyan``
+    * ``white`` (might be light gray)
+    * ``bright_black``
+    * ``bright_red``
+    * ``bright_green``
+    * ``bright_yellow``
+    * ``bright_blue``
+    * ``bright_magenta``
+    * ``bright_cyan``
+    * ``bright_white``
+    * ``reset`` (reset the color code only)
+
+    .. versionadded:: 2.0
+
+    .. versionadded:: 7.0
+       Added support for bright colors.
+
+    :param text: the string to style with ANSI codes.
+    :param fg: if provided this will become the foreground color.
+    :param bg: if provided this will become the background color.
+    :param bold: if provided this will enable or disable bold mode.
+    :param dim: if provided this will enable or disable dim mode.  This is
+                poorly supported by most terminals.
+    :param underline: if provided this will enable or disable underline.
+    :param blink: if provided this will enable or disable blinking.
+    :param reverse: if provided this will enable or disable inverse
+                    rendering (foreground becomes background and the
+                    other way round).
+    :param reset: by default a reset-all code is added at the end of the
+                  string which means that styles do not carry over.  This
+                  can be disabled to compose styles.
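+
+    A small sketch of composing styles with ``reset=False`` (the strings
+    here are illustrative only)::
+
+        prefix = click.style('==> ', fg='green', reset=False)
+        click.echo(prefix + 'this text inherits the green foreground')
+        click.echo(click.style('', reset=True), nl=False)  # restore defaults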
+    """
+    bits = []
+    if fg:
+        try:
+            bits.append('\033[%dm' % (_ansi_colors[fg]))
+        except KeyError:
+            raise TypeError('Unknown color %r' % fg)
+    if bg:
+        try:
+            bits.append('\033[%dm' % (_ansi_colors[bg] + 10))
+        except KeyError:
+            raise TypeError('Unknown color %r' % bg)
+    if bold is not None:
+        bits.append('\033[%dm' % (1 if bold else 22))
+    if dim is not None:
+        bits.append('\033[%dm' % (2 if dim else 22))
+    if underline is not None:
+        bits.append('\033[%dm' % (4 if underline else 24))
+    if blink is not None:
+        bits.append('\033[%dm' % (5 if blink else 25))
+    if reverse is not None:
+        bits.append('\033[%dm' % (7 if reverse else 27))
+    bits.append(text)
+    if reset:
+        bits.append(_ansi_reset_all)
+    return ''.join(bits)
+
+
+def unstyle(text):
+    """Removes ANSI styling information from a string.  Usually it's not
+    necessary to use this function as Click's echo function will
+    automatically remove styling if necessary.
+
+    .. versionadded:: 2.0
+
+    :param text: the text to remove style information from.
+    """
+    return strip_ansi(text)
+
+
+def secho(message=None, file=None, nl=True, err=False, color=None, **styles):
+    """This function combines :func:`echo` and :func:`style` into one
+    call.  As such the following two calls are the same::
+
+        click.secho('Hello World!', fg='green')
+        click.echo(click.style('Hello World!', fg='green'))
+
+    All keyword arguments are forwarded to the underlying functions
+    depending on which one they go with.
+
+    .. versionadded:: 2.0
+    """
+    if message is not None:
+        message = style(message, **styles)
+    return echo(message, file=file, nl=nl, err=err, color=color)
+
+
+def edit(text=None, editor=None, env=None, require_save=True,
+         extension='.txt', filename=None):
+    r"""Edits the given text in the defined editor.  If an editor is given
+    (should be the full path to the executable but the regular operating
+    system search path is used for finding the executable) it overrides
+    the detected editor.  Optionally, some environment variables can be
+    used.  If the editor is closed without changes, `None` is returned.  In
+    case a file is edited directly the return value is always `None` and
+    `require_save` and `extension` are ignored.
+
+    If the editor cannot be opened a :exc:`UsageError` is raised.
+
+    Note for Windows: to simplify cross-platform usage, the newlines are
+    automatically converted from POSIX to Windows and vice versa.  As such,
+    the text passed in and returned will use ``\n`` as newline markers.
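+
+    A minimal usage sketch (``process`` is an illustrative helper, not
+    part of Click)::
+
+        message = click.edit('\n# Please enter a message.\n')
+        if message is not None:
+            process(message)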
+
+    :param text: the text to edit.
+    :param editor: optionally the editor to use.  Defaults to automatic
+                   detection.
+    :param env: environment variables to forward to the editor.
+    :param require_save: if this is true, then not saving in the editor
+                         will make the return value become `None`.
+    :param extension: the extension to tell the editor about.  This defaults
+                      to `.txt` but changing this might change syntax
+                      highlighting.
+    :param filename: if provided it will edit this file instead of the
+                     provided text contents.  It will not use a temporary
+                     file as an indirection in that case.
+    """
+    from ._termui_impl import Editor
+    editor = Editor(editor=editor, env=env, require_save=require_save,
+                    extension=extension)
+    if filename is None:
+        return editor.edit(text)
+    editor.edit_file(filename)
+
+
+def launch(url, wait=False, locate=False):
+    """This function launches the given URL (or filename) in the default
+    viewer application for this file type.  If this is an executable, it
+    might launch the executable in a new session.  The return value is
+    the exit code of the launched application.  Usually, ``0`` indicates
+    success.
+
+    Examples::
+
+        click.launch('https://click.palletsprojects.com/')
+        click.launch('/my/downloaded/file', locate=True)
+
+    .. versionadded:: 2.0
+
+    :param url: URL or filename of the thing to launch.
+    :param wait: waits for the program to stop.
+    :param locate: if this is set to `True` then instead of launching the
+                   application associated with the URL it will attempt to
+                   launch a file manager with the file located.  This
+                   might have weird effects if the URL does not point to
+                   the filesystem.
+    """
+    from ._termui_impl import open_url
+    return open_url(url, wait=wait, locate=locate)
+
+
+# If this is provided, getchar() calls into this instead.  This is used
+# for unittesting purposes.
+_getchar = None
+
+
+def getchar(echo=False):
+    """Fetches a single character from the terminal and returns it.  This
+    will always return a unicode character and under certain rare
+    circumstances this might return more than one character.  The
+    situations which more than one character is returned is when for
+    whatever reason multiple characters end up in the terminal buffer or
+    standard input was not actually a terminal.
+
+    Note that this will always read from the terminal, even if something
+    is piped into the standard input.
+
+    Note for Windows: in rare cases when typing non-ASCII characters, this
+    function might wait for a second character and then return both at once.
+    This is because certain Unicode characters look like special-key markers.
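+
+    A small usage sketch (the prompt text is illustrative)::
+
+        click.echo('Continue? [y/n] ', nl=False)
+        c = click.getchar()
+        click.echo()
+        if c == 'y':
+            click.echo('Continuing')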
+
+    .. versionadded:: 2.0
+
+    :param echo: if set to `True`, the character read will also show up on
+                 the terminal.  The default is to not show it.
+    """
+    f = _getchar
+    if f is None:
+        from ._termui_impl import getchar as f
+    return f(echo)
+
+
+def raw_terminal():
+    from ._termui_impl import raw_terminal as f
+    return f()
+
+
+def pause(info='Press any key to continue ...', err=False):
+    """This command stops execution and waits for the user to press any
+    key to continue.  This is similar to the Windows batch "pause"
+    command.  If the program is not run through a terminal, this command
+    will instead do nothing.
+
+    .. versionadded:: 2.0
+
+    .. versionadded:: 4.0
+       Added the `err` parameter.
+
+    :param info: the info string to print before pausing.
+    :param err: if set to `True` the message goes to ``stderr`` instead of
+                ``stdout``, the same as with echo.
+    """
+    if not isatty(sys.stdin) or not isatty(sys.stdout):
+        return
+    try:
+        if info:
+            echo(info, nl=False, err=err)
+        try:
+            getchar()
+        except (KeyboardInterrupt, EOFError):
+            pass
+    finally:
+        if info:
+            echo(err=err)
diff --git a/lib/python3.7/site-packages/click/testing.py b/lib/python3.7/site-packages/click/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b2924e0b13996e83e8dc14001247886d534ee1c
--- /dev/null
+++ b/lib/python3.7/site-packages/click/testing.py
@@ -0,0 +1,374 @@
+import os
+import sys
+import shutil
+import tempfile
+import contextlib
+import shlex
+
+from ._compat import iteritems, PY2, string_types
+
+
+# If someone wants to vendor click, we want to ensure the
+# correct package is discovered.  Ideally we could use a
+# relative import here but unfortunately Python does not
+# support that.
+clickpkg = sys.modules[__name__.rsplit('.', 1)[0]]
+
+
+if PY2:
+    from cStringIO import StringIO
+else:
+    import io
+    from ._compat import _find_binary_reader
+
+
+class EchoingStdin(object):
+
+    def __init__(self, input, output):
+        self._input = input
+        self._output = output
+
+    def __getattr__(self, x):
+        return getattr(self._input, x)
+
+    def _echo(self, rv):
+        self._output.write(rv)
+        return rv
+
+    def read(self, n=-1):
+        return self._echo(self._input.read(n))
+
+    def readline(self, n=-1):
+        return self._echo(self._input.readline(n))
+
+    def readlines(self):
+        return [self._echo(x) for x in self._input.readlines()]
+
+    def __iter__(self):
+        return iter(self._echo(x) for x in self._input)
+
+    def __repr__(self):
+        return repr(self._input)
+
+
+def make_input_stream(input, charset):
+    # Is already an input stream.
+    if hasattr(input, 'read'):
+        if PY2:
+            return input
+        rv = _find_binary_reader(input)
+        if rv is not None:
+            return rv
+        raise TypeError('Could not find binary reader for input stream.')
+
+    if input is None:
+        input = b''
+    elif not isinstance(input, bytes):
+        input = input.encode(charset)
+    if PY2:
+        return StringIO(input)
+    return io.BytesIO(input)
+
+
+class Result(object):
+    """Holds the captured result of an invoked CLI script."""
+
+    def __init__(self, runner, stdout_bytes, stderr_bytes, exit_code,
+                 exception, exc_info=None):
+        #: The runner that created the result
+        self.runner = runner
+        #: The standard output as bytes.
+        self.stdout_bytes = stdout_bytes
+        #: The standard error as bytes, or a falsy value if not
+        #: separately captured.
+        self.stderr_bytes = stderr_bytes
+        #: The exit code as integer.
+        self.exit_code = exit_code
+        #: The exception that happened if one did.
+        self.exception = exception
+        #: The traceback
+        self.exc_info = exc_info
+
+    @property
+    def output(self):
+        """The (standard) output as unicode string."""
+        return self.stdout
+
+    @property
+    def stdout(self):
+        """The standard output as unicode string."""
+        return self.stdout_bytes.decode(self.runner.charset, 'replace') \
+            .replace('\r\n', '\n')
+
+    @property
+    def stderr(self):
+        """The standard error as unicode string."""
+        if not self.stderr_bytes:
+            raise ValueError("stderr not separately captured")
+        return self.stderr_bytes.decode(self.runner.charset, 'replace') \
+            .replace('\r\n', '\n')
+
+    def __repr__(self):
+        return '<%s %s>' % (
+            type(self).__name__,
+            self.exception and repr(self.exception) or 'okay',
+        )
+
+
+class CliRunner(object):
+    """The CLI runner provides functionality to invoke a Click command line
+    script for unittesting purposes in a isolated environment.  This only
+    works in single-threaded systems without any concurrency as it changes the
+    global interpreter state.
+
+    :param charset: the character set for the input and output data.  This is
+                    UTF-8 by default and should not be changed currently as
+                    the reporting to Click only works properly in Python 2.
+    :param env: a dictionary with environment variables for overriding.
+    :param echo_stdin: if this is set to `True`, then reading from stdin writes
+                       to stdout.  This is useful for showing examples in
+                       some circumstances.  Note that regular prompts
+                       will automatically echo the input.
+    :param mix_stderr: if this is set to `False`, then stdout and stderr are
+                       preserved as independent streams.  This is useful for
+                       Unix-philosophy apps that have predictable stdout and
+                       noisy stderr, such that each may be measured
+                       independently.
+    """
+
+    def __init__(self, charset=None, env=None, echo_stdin=False,
+                 mix_stderr=True):
+        if charset is None:
+            charset = 'utf-8'
+        self.charset = charset
+        self.env = env or {}
+        self.echo_stdin = echo_stdin
+        self.mix_stderr = mix_stderr
+
+    def get_default_prog_name(self, cli):
+        """Given a command object it will return the default program name
+        for it.  The default is the `name` attribute or ``"root"`` if not
+        set.
+        """
+        return cli.name or 'root'
+
+    def make_env(self, overrides=None):
+        """Returns the environment overrides for invoking a script."""
+        rv = dict(self.env)
+        if overrides:
+            rv.update(overrides)
+        return rv
+
+    @contextlib.contextmanager
+    def isolation(self, input=None, env=None, color=False):
+        """A context manager that sets up the isolation for invoking of a
+        command line tool.  This sets up stdin with the given input data
+        and `os.environ` with the overrides from the given dictionary.
+        This also rebinds some internals in Click to be mocked (like the
+        prompt functionality).
+
+        This is automatically done in the :meth:`invoke` method.
+
+        .. versionadded:: 4.0
+           The ``color`` parameter was added.
+
+        :param input: the input stream to put into sys.stdin.
+        :param env: the environment overrides as dictionary.
+        :param color: whether the output should contain color codes. The
+                      application can still override this explicitly.
+        """
+        input = make_input_stream(input, self.charset)
+
+        old_stdin = sys.stdin
+        old_stdout = sys.stdout
+        old_stderr = sys.stderr
+        old_forced_width = clickpkg.formatting.FORCED_WIDTH
+        clickpkg.formatting.FORCED_WIDTH = 80
+
+        env = self.make_env(env)
+
+        if PY2:
+            bytes_output = StringIO()
+            if self.echo_stdin:
+                input = EchoingStdin(input, bytes_output)
+            sys.stdout = bytes_output
+            if not self.mix_stderr:
+                bytes_error = StringIO()
+                sys.stderr = bytes_error
+        else:
+            bytes_output = io.BytesIO()
+            if self.echo_stdin:
+                input = EchoingStdin(input, bytes_output)
+            input = io.TextIOWrapper(input, encoding=self.charset)
+            sys.stdout = io.TextIOWrapper(
+                bytes_output, encoding=self.charset)
+            if not self.mix_stderr:
+                bytes_error = io.BytesIO()
+                sys.stderr = io.TextIOWrapper(
+                    bytes_error, encoding=self.charset)
+
+        if self.mix_stderr:
+            sys.stderr = sys.stdout
+
+        sys.stdin = input
+
+        def visible_input(prompt=None):
+            sys.stdout.write(prompt or '')
+            val = input.readline().rstrip('\r\n')
+            sys.stdout.write(val + '\n')
+            sys.stdout.flush()
+            return val
+
+        def hidden_input(prompt=None):
+            sys.stdout.write((prompt or '') + '\n')
+            sys.stdout.flush()
+            return input.readline().rstrip('\r\n')
+
+        def _getchar(echo):
+            char = sys.stdin.read(1)
+            if echo:
+                sys.stdout.write(char)
+                sys.stdout.flush()
+            return char
+
+        default_color = color
+
+        def should_strip_ansi(stream=None, color=None):
+            if color is None:
+                return not default_color
+            return not color
+
+        old_visible_prompt_func = clickpkg.termui.visible_prompt_func
+        old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func
+        old__getchar_func = clickpkg.termui._getchar
+        old_should_strip_ansi = clickpkg.utils.should_strip_ansi
+        clickpkg.termui.visible_prompt_func = visible_input
+        clickpkg.termui.hidden_prompt_func = hidden_input
+        clickpkg.termui._getchar = _getchar
+        clickpkg.utils.should_strip_ansi = should_strip_ansi
+
+        old_env = {}
+        try:
+            for key, value in iteritems(env):
+                old_env[key] = os.environ.get(key)
+                if value is None:
+                    try:
+                        del os.environ[key]
+                    except Exception:
+                        pass
+                else:
+                    os.environ[key] = value
+            yield (bytes_output, not self.mix_stderr and bytes_error)
+        finally:
+            for key, value in iteritems(old_env):
+                if value is None:
+                    try:
+                        del os.environ[key]
+                    except Exception:
+                        pass
+                else:
+                    os.environ[key] = value
+            sys.stdout = old_stdout
+            sys.stderr = old_stderr
+            sys.stdin = old_stdin
+            clickpkg.termui.visible_prompt_func = old_visible_prompt_func
+            clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func
+            clickpkg.termui._getchar = old__getchar_func
+            clickpkg.utils.should_strip_ansi = old_should_strip_ansi
+            clickpkg.formatting.FORCED_WIDTH = old_forced_width
+
+    def invoke(self, cli, args=None, input=None, env=None,
+               catch_exceptions=True, color=False, mix_stderr=False, **extra):
+        """Invokes a command in an isolated environment.  The arguments are
+        forwarded directly to the command line script, the `extra` keyword
+        arguments are passed to the :meth:`~clickpkg.Command.main` function of
+        the command.
+
+        This returns a :class:`Result` object.
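+
+        A minimal sketch (``hello`` stands in for any Click command and is
+        not defined here)::
+
+            runner = CliRunner()
+            result = runner.invoke(hello, ['--name', 'World'])
+            assert result.exit_code == 0
+            assert 'World' in result.output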
+
+        .. versionadded:: 3.0
+           The ``catch_exceptions`` parameter was added.
+
+        .. versionchanged:: 3.0
+           The result object now has an `exc_info` attribute with the
+           traceback if available.
+
+        .. versionadded:: 4.0
+           The ``color`` parameter was added.
+
+        :param cli: the command to invoke
+        :param args: the arguments to invoke. It may be given as an iterable
+                     or a string. When given as a string it will be
+                     interpreted as a Unix shell command. More details at
+                     as a Unix shell command. More details at
+                     :func:`shlex.split`.
+        :param input: the input data for `sys.stdin`.
+        :param env: the environment overrides.
+        :param catch_exceptions: Whether to catch exceptions other than
+                                 ``SystemExit``.
+        :param extra: the keyword arguments to pass to :meth:`main`.
+        :param color: whether the output should contain color codes. The
+                      application can still override this explicitly.
+        """
+        exc_info = None
+        with self.isolation(input=input, env=env, color=color) as outstreams:
+            exception = None
+            exit_code = 0
+
+            if isinstance(args, string_types):
+                args = shlex.split(args)
+
+            try:
+                prog_name = extra.pop("prog_name")
+            except KeyError:
+                prog_name = self.get_default_prog_name(cli)
+
+            try:
+                cli.main(args=args or (), prog_name=prog_name, **extra)
+            except SystemExit as e:
+                exc_info = sys.exc_info()
+                exit_code = e.code
+                if exit_code is None:
+                    exit_code = 0
+
+                if exit_code != 0:
+                    exception = e
+
+                if not isinstance(exit_code, int):
+                    sys.stdout.write(str(exit_code))
+                    sys.stdout.write('\n')
+                    exit_code = 1
+
+            except Exception as e:
+                if not catch_exceptions:
+                    raise
+                exception = e
+                exit_code = 1
+                exc_info = sys.exc_info()
+            finally:
+                sys.stdout.flush()
+                stdout = outstreams[0].getvalue()
+                stderr = outstreams[1] and outstreams[1].getvalue()
+
+        return Result(runner=self,
+                      stdout_bytes=stdout,
+                      stderr_bytes=stderr,
+                      exit_code=exit_code,
+                      exception=exception,
+                      exc_info=exc_info)
+
+    @contextlib.contextmanager
+    def isolated_filesystem(self):
+        """A context manager that creates a temporary folder and changes
+        the current working directory to it for isolated filesystem tests.
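+
+        A usage sketch (``cli`` is a placeholder command, not defined
+        here)::
+
+            runner = CliRunner()
+            with runner.isolated_filesystem():
+                with open('hello.txt', 'w') as f:
+                    f.write('Hello World!')
+                result = runner.invoke(cli, ['hello.txt'])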
+        """
+        cwd = os.getcwd()
+        t = tempfile.mkdtemp()
+        os.chdir(t)
+        try:
+            yield t
+        finally:
+            os.chdir(cwd)
+            try:
+                shutil.rmtree(t)
+            except (OSError, IOError):
+                pass
diff --git a/lib/python3.7/site-packages/click/types.py b/lib/python3.7/site-packages/click/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f88032f5485d3e215660a76fc8dd7eb9b7cebad
--- /dev/null
+++ b/lib/python3.7/site-packages/click/types.py
@@ -0,0 +1,668 @@
+import os
+import stat
+from datetime import datetime
+
+from ._compat import open_stream, text_type, filename_to_ui, \
+    get_filesystem_encoding, get_streerror, _get_argv_encoding, PY2
+from .exceptions import BadParameter
+from .utils import safecall, LazyFile
+
+
+class ParamType(object):
+    """Helper for converting values through types.  The following is
+    necessary for a valid type:
+
+    *   it needs a name
+    *   it needs to pass through None unchanged
+    *   it needs to convert from a string
+    *   it needs to pass through values of its result type unchanged
+        (i.e. it needs to be idempotent)
+    *   it needs to be able to deal with param and context being `None`.
+        This can be the case when the object is used with prompt
+        inputs.
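+
+    A minimal custom type satisfying these rules might look like the
+    following sketch (illustrative only, not part of Click)::
+
+        class CommaSeparated(ParamType):
+            name = 'comma-separated'
+
+            def convert(self, value, param, ctx):
+                if isinstance(value, list):
+                    # idempotent: pass already-converted values through
+                    return value
+                return [part.strip() for part in value.split(',')]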
+    """
+    is_composite = False
+
+    #: the descriptive name of this type
+    name = None
+
+    #: if a list of this type is expected and the value is pulled from a
+    #: string environment variable, this is what splits it up.  `None`
+    #: means any whitespace.  For all parameters the general rule is that
+    #: whitespace splits them up.  The exception are paths and files which
+    #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
+    #: Windows).
+    envvar_list_splitter = None
+
+    def __call__(self, value, param=None, ctx=None):
+        if value is not None:
+            return self.convert(value, param, ctx)
+
+    def get_metavar(self, param):
+        """Returns the metavar default for this param if it provides one."""
+
+    def get_missing_message(self, param):
+        """Optionally might return extra information about a missing
+        parameter.
+
+        .. versionadded:: 2.0
+        """
+
+    def convert(self, value, param, ctx):
+        """Converts the value.  This is not invoked for values that are
+        `None` (the missing value).
+        """
+        return value
+
+    def split_envvar_value(self, rv):
+        """Given a value from an environment variable this splits it up
+        into small chunks depending on the defined envvar list splitter.
+
+        If the splitter is set to `None`, which means that whitespace splits,
+        then leading and trailing whitespace is ignored.  Otherwise, leading
+        and trailing splitters usually lead to empty items being included.
+        """
+        return (rv or '').split(self.envvar_list_splitter)
+
+    def fail(self, message, param=None, ctx=None):
+        """Helper method to fail with an invalid value message."""
+        raise BadParameter(message, ctx=ctx, param=param)
+
+
+class CompositeParamType(ParamType):
+    is_composite = True
+
+    @property
+    def arity(self):
+        raise NotImplementedError()
+
+
+class FuncParamType(ParamType):
+
+    def __init__(self, func):
+        self.name = func.__name__
+        self.func = func
+
+    def convert(self, value, param, ctx):
+        try:
+            return self.func(value)
+        except ValueError:
+            try:
+                value = text_type(value)
+            except UnicodeError:
+                value = str(value).decode('utf-8', 'replace')
+            self.fail(value, param, ctx)
+
+
+class UnprocessedParamType(ParamType):
+    name = 'text'
+
+    def convert(self, value, param, ctx):
+        return value
+
+    def __repr__(self):
+        return 'UNPROCESSED'
+
+
+class StringParamType(ParamType):
+    name = 'text'
+
+    def convert(self, value, param, ctx):
+        if isinstance(value, bytes):
+            enc = _get_argv_encoding()
+            try:
+                value = value.decode(enc)
+            except UnicodeError:
+                fs_enc = get_filesystem_encoding()
+                if fs_enc != enc:
+                    try:
+                        value = value.decode(fs_enc)
+                    except UnicodeError:
+                        value = value.decode('utf-8', 'replace')
+            return value
+        return value
+
+    def __repr__(self):
+        return 'STRING'
+
+
+class Choice(ParamType):
+    """The choice type allows a value to be checked against a fixed set
+    of supported values. All of these values have to be strings.
+
+    You should only pass a list or tuple of choices. Other iterables
+    (like generators) may lead to surprising results.
+
+    See :ref:`choice-opts` for an example.
+
+    :param case_sensitive: Set to false to make choices case
+        insensitive. Defaults to true.
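+
+    A usage sketch (the option and command names are illustrative)::
+
+        @click.command()
+        @click.option('--format', 'fmt',
+                      type=click.Choice(['json', 'yaml'],
+                                        case_sensitive=False))
+        def export(fmt):
+            click.echo(fmt)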
+    """
+
+    name = 'choice'
+
+    def __init__(self, choices, case_sensitive=True):
+        self.choices = choices
+        self.case_sensitive = case_sensitive
+
+    def get_metavar(self, param):
+        return '[%s]' % '|'.join(self.choices)
+
+    def get_missing_message(self, param):
+        return 'Choose from:\n\t%s.' % ',\n\t'.join(self.choices)
+
+    def convert(self, value, param, ctx):
+        # Exact match
+        if value in self.choices:
+            return value
+
+        # Match through normalization and case sensitivity
+        # first do token_normalize_func, then lowercase
+        # preserve original `value` to produce an accurate message in
+        # `self.fail`
+        normed_value = value
+        normed_choices = self.choices
+
+        if ctx is not None and \
+           ctx.token_normalize_func is not None:
+            normed_value = ctx.token_normalize_func(value)
+            normed_choices = [ctx.token_normalize_func(choice) for choice in
+                              self.choices]
+
+        if not self.case_sensitive:
+            normed_value = normed_value.lower()
+            normed_choices = [choice.lower() for choice in normed_choices]
+
+        if normed_value in normed_choices:
+            return normed_value
+
+        self.fail('invalid choice: %s. (choose from %s)' %
+                  (value, ', '.join(self.choices)), param, ctx)
+
+    def __repr__(self):
+        return 'Choice(%r)' % list(self.choices)
+
+
+class DateTime(ParamType):
+    """The DateTime type converts date strings into `datetime` objects.
+
+    The format strings which are checked are configurable, but default to some
+    common (non-timezone aware) ISO 8601 formats.
+
+    When specifying *DateTime* formats, you should only pass a list or a tuple.
+    Other iterables, like generators, may lead to surprising results.
+
+    The format strings are processed using ``datetime.strptime``, and this
+    consequently defines the format strings which are allowed.
+
+    Parsing is tried using each format, in order, and the first format which
+    parses successfully is used.
+
+    :param formats: A list or tuple of date format strings, in the order in
+                    which they should be tried. Defaults to
+                    ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,
+                    ``'%Y-%m-%d %H:%M:%S'``.
+    """
+    name = 'datetime'
+
+    def __init__(self, formats=None):
+        self.formats = formats or [
+            '%Y-%m-%d',
+            '%Y-%m-%dT%H:%M:%S',
+            '%Y-%m-%d %H:%M:%S'
+        ]
+
+    def get_metavar(self, param):
+        return '[{}]'.format('|'.join(self.formats))
+
+    def _try_to_convert_date(self, value, format):
+        try:
+            return datetime.strptime(value, format)
+        except ValueError:
+            return None
+
+    def convert(self, value, param, ctx):
+        # Exact match
+        for format in self.formats:
+            dtime = self._try_to_convert_date(value, format)
+            if dtime:
+                return dtime
+
+        self.fail(
+            'invalid datetime format: {}. (choose from {})'.format(
+                value, ', '.join(self.formats)), param, ctx)
+
+    def __repr__(self):
+        return 'DateTime'
+
+
+class IntParamType(ParamType):
+    name = 'integer'
+
+    def convert(self, value, param, ctx):
+        try:
+            return int(value)
+        except (ValueError, UnicodeError):
+            self.fail('%s is not a valid integer' % value, param, ctx)
+
+    def __repr__(self):
+        return 'INT'
+
+
+class IntRange(IntParamType):
+    """A parameter that works similar to :data:`click.INT` but restricts
+    the value to fit into a range.  The default behavior is to fail if the
+    value falls outside the range, but it can also be silently clamped
+    between the two edges.
+
+    See :ref:`ranges` for an example.
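+
+    A short sketch of both behaviors (the values are illustrative)::
+
+        @click.command()
+        @click.option('--count', type=click.IntRange(0, 10))
+        @click.option('--level', type=click.IntRange(0, 10, clamp=True))
+        def repeat(count, level):
+            # --count 42 fails; --level 42 is clamped to 10
+            click.echo('%s %s' % (count, level))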
+    """
+    name = 'integer range'
+
+    def __init__(self, min=None, max=None, clamp=False):
+        self.min = min
+        self.max = max
+        self.clamp = clamp
+
+    def convert(self, value, param, ctx):
+        rv = IntParamType.convert(self, value, param, ctx)
+        if self.clamp:
+            if self.min is not None and rv < self.min:
+                return self.min
+            if self.max is not None and rv > self.max:
+                return self.max
+        if self.min is not None and rv < self.min or \
+           self.max is not None and rv > self.max:
+            if self.min is None:
+                self.fail('%s is bigger than the maximum valid value '
+                          '%s.' % (rv, self.max), param, ctx)
+            elif self.max is None:
+                self.fail('%s is smaller than the minimum valid value '
+                          '%s.' % (rv, self.min), param, ctx)
+            else:
+                self.fail('%s is not in the valid range of %s to %s.'
+                          % (rv, self.min, self.max), param, ctx)
+        return rv
+
+    def __repr__(self):
+        return 'IntRange(%r, %r)' % (self.min, self.max)
+
+
+class FloatParamType(ParamType):
+    name = 'float'
+
+    def convert(self, value, param, ctx):
+        try:
+            return float(value)
+        except (UnicodeError, ValueError):
+            self.fail('%s is not a valid floating point value' %
+                      value, param, ctx)
+
+    def __repr__(self):
+        return 'FLOAT'
+
+
+class FloatRange(FloatParamType):
+    """A parameter that works similar to :data:`click.FLOAT` but restricts
+    the value to fit into a range.  The default behavior is to fail if the
+    value falls outside the range, but it can also be silently clamped
+    between the two edges.
+
+    See :ref:`ranges` for an example.
+    """
+    name = 'float range'
+
+    def __init__(self, min=None, max=None, clamp=False):
+        self.min = min
+        self.max = max
+        self.clamp = clamp
+
+    def convert(self, value, param, ctx):
+        rv = FloatParamType.convert(self, value, param, ctx)
+        if self.clamp:
+            if self.min is not None and rv < self.min:
+                return self.min
+            if self.max is not None and rv > self.max:
+                return self.max
+        if self.min is not None and rv < self.min or \
+           self.max is not None and rv > self.max:
+            if self.min is None:
+                self.fail('%s is bigger than the maximum valid value '
+                          '%s.' % (rv, self.max), param, ctx)
+            elif self.max is None:
+                self.fail('%s is smaller than the minimum valid value '
+                          '%s.' % (rv, self.min), param, ctx)
+            else:
+                self.fail('%s is not in the valid range of %s to %s.'
+                          % (rv, self.min, self.max), param, ctx)
+        return rv
+
+    def __repr__(self):
+        return 'FloatRange(%r, %r)' % (self.min, self.max)
+
+
+class BoolParamType(ParamType):
+    name = 'boolean'
+
+    def convert(self, value, param, ctx):
+        if isinstance(value, bool):
+            return bool(value)
+        value = value.lower()
+        if value in ('true', 't', '1', 'yes', 'y'):
+            return True
+        elif value in ('false', 'f', '0', 'no', 'n'):
+            return False
+        self.fail('%s is not a valid boolean' % value, param, ctx)
+
+    def __repr__(self):
+        return 'BOOL'
+
+
+class UUIDParameterType(ParamType):
+    name = 'uuid'
+
+    def convert(self, value, param, ctx):
+        import uuid
+        try:
+            if PY2 and isinstance(value, text_type):
+                value = value.encode('ascii')
+            return uuid.UUID(value)
+        except (UnicodeError, ValueError):
+            self.fail('%s is not a valid UUID value' % value, param, ctx)
+
+    def __repr__(self):
+        return 'UUID'
+
+
+class File(ParamType):
+    """Declares a parameter to be a file for reading or writing.  The file
+    is automatically closed once the context tears down (after the command
+    finished working).
+
+    Files can be opened for reading or writing.  The special value ``-``
+    indicates stdin or stdout depending on the mode.
+
+    By default, the file is opened for reading text data, but it can also be
+    opened in binary mode or for writing.  The encoding parameter can be used
+    to force a specific encoding.
+
+    The `lazy` flag controls if the file should be opened immediately or upon
+    first IO.  The default is to be non-lazy for standard input and output
+    streams as well as files opened for reading, and lazy otherwise.  When
+    opening a file lazily for reading, it is still opened temporarily for
+    validation, but will not be held open until first IO.  `lazy` is mainly
+    useful when opening for writing, to avoid creating the file until it is
+    needed.
+
+    Starting with Click 2.0, files can also be opened atomically in which
+    case all writes go into a separate file in the same folder and upon
+    completion the file will be moved over to the original location.  This
+    is useful if a file regularly read by other users is modified.
+
+    See :ref:`file-args` for more information.
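+
+    A usage sketch (atomically copying one file to another; the command
+    name is illustrative)::
+
+        @click.command()
+        @click.argument('src', type=click.File('rb'))
+        @click.argument('dst', type=click.File('wb', atomic=True))
+        def copy(src, dst):
+            dst.write(src.read())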
+    """
+    name = 'filename'
+    envvar_list_splitter = os.path.pathsep
+
+    def __init__(self, mode='r', encoding=None, errors='strict', lazy=None,
+                 atomic=False):
+        self.mode = mode
+        self.encoding = encoding
+        self.errors = errors
+        self.lazy = lazy
+        self.atomic = atomic
+
+    def resolve_lazy_flag(self, value):
+        if self.lazy is not None:
+            return self.lazy
+        if value == '-':
+            return False
+        elif 'w' in self.mode:
+            return True
+        return False
+
+    def convert(self, value, param, ctx):
+        try:
+            if hasattr(value, 'read') or hasattr(value, 'write'):
+                return value
+
+            lazy = self.resolve_lazy_flag(value)
+
+            if lazy:
+                f = LazyFile(value, self.mode, self.encoding, self.errors,
+                             atomic=self.atomic)
+                if ctx is not None:
+                    ctx.call_on_close(f.close_intelligently)
+                return f
+
+            f, should_close = open_stream(value, self.mode,
+                                          self.encoding, self.errors,
+                                          atomic=self.atomic)
+            # If a context is provided, we automatically close the file
+            # at the end of the context execution (or flush out).  If a
+            # context does not exist, it's the caller's responsibility to
+            # properly close the file.  This for instance happens when the
+            # type is used with prompts.
+            if ctx is not None:
+                if should_close:
+                    ctx.call_on_close(safecall(f.close))
+                else:
+                    ctx.call_on_close(safecall(f.flush))
+            return f
+        except (IOError, OSError) as e:
+            self.fail('Could not open file: %s: %s' % (
+                filename_to_ui(value),
+                get_streerror(e),
+            ), param, ctx)
+
+
+class Path(ParamType):
+    """The path type is similar to the :class:`File` type but it performs
+    different checks.  First of all, instead of returning an open file
+    handle it returns just the filename.  Secondly, it can perform various
+    basic checks about what the file or directory should be.
+
+    .. versionchanged:: 6.0
+       `allow_dash` was added.
+
+    :param exists: if set to true, the file or directory needs to exist for
+                   this value to be valid.  If this is not required and the
+                   file does not exist, then all further checks are
+                   silently skipped.
+    :param file_okay: controls if a file is a possible value.
+    :param dir_okay: controls if a directory is a possible value.
+    :param writable: if true, a writable check is performed.
+    :param readable: if true, a readable check is performed.
+    :param resolve_path: if this is true, then the path is fully resolved
+                         before the value is passed onwards.  This means
+                         that it's absolute and symlinks are resolved.  It
+                         will not expand a tilde-prefix, as this is
+                         supposed to be done by the shell only.
+    :param allow_dash: If this is set to `True`, a single dash to indicate
+                       standard streams is permitted.
+    :param path_type: optionally a string type that should be used to
+                      represent the path.  The default is `None` which
+                      means the return value will be either bytes or
+                      unicode depending on what makes most sense given the
+                      input data Click deals with.
+    """
+    envvar_list_splitter = os.path.pathsep
+
+    def __init__(self, exists=False, file_okay=True, dir_okay=True,
+                 writable=False, readable=True, resolve_path=False,
+                 allow_dash=False, path_type=None):
+        self.exists = exists
+        self.file_okay = file_okay
+        self.dir_okay = dir_okay
+        self.writable = writable
+        self.readable = readable
+        self.resolve_path = resolve_path
+        self.allow_dash = allow_dash
+        self.type = path_type
+
+        if self.file_okay and not self.dir_okay:
+            self.name = 'file'
+            self.path_type = 'File'
+        elif self.dir_okay and not self.file_okay:
+            self.name = 'directory'
+            self.path_type = 'Directory'
+        else:
+            self.name = 'path'
+            self.path_type = 'Path'
+
+    def coerce_path_result(self, rv):
+        if self.type is not None and not isinstance(rv, self.type):
+            if self.type is text_type:
+                rv = rv.decode(get_filesystem_encoding())
+            else:
+                rv = rv.encode(get_filesystem_encoding())
+        return rv
+
+    def convert(self, value, param, ctx):
+        rv = value
+
+        is_dash = self.file_okay and self.allow_dash and rv in (b'-', '-')
+
+        if not is_dash:
+            if self.resolve_path:
+                rv = os.path.realpath(rv)
+
+            try:
+                st = os.stat(rv)
+            except OSError:
+                if not self.exists:
+                    return self.coerce_path_result(rv)
+                self.fail('%s "%s" does not exist.' % (
+                    self.path_type,
+                    filename_to_ui(value)
+                ), param, ctx)
+
+            if not self.file_okay and stat.S_ISREG(st.st_mode):
+                self.fail('%s "%s" is a file.' % (
+                    self.path_type,
+                    filename_to_ui(value)
+                ), param, ctx)
+            if not self.dir_okay and stat.S_ISDIR(st.st_mode):
+                self.fail('%s "%s" is a directory.' % (
+                    self.path_type,
+                    filename_to_ui(value)
+                ), param, ctx)
+            if self.writable and not os.access(value, os.W_OK):
+                self.fail('%s "%s" is not writable.' % (
+                    self.path_type,
+                    filename_to_ui(value)
+                ), param, ctx)
+            if self.readable and not os.access(value, os.R_OK):
+                self.fail('%s "%s" is not readable.' % (
+                    self.path_type,
+                    filename_to_ui(value)
+                ), param, ctx)
+
+        return self.coerce_path_result(rv)
+
+
+class Tuple(CompositeParamType):
+    """The default behavior of Click is to apply a type on a value directly.
+    This works well in most cases, except for when `nargs` is set to a fixed
+    count and different types should be used for different items.  In this
+    case the :class:`Tuple` type can be used.  This type can only be used
+    if `nargs` is set to a fixed number.
+
+    For more information see :ref:`tuple-type`.
+
+    This can be selected by using a Python tuple literal as a type.
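+
+    A usage sketch (a ``(str, int)`` tuple literal selects this type; the
+    option name is illustrative)::
+
+        @click.command()
+        @click.option('--item', type=(str, int))
+        def put(item):
+            name, value = item
+            click.echo('%s=%d' % (name, value))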
+
+    :param types: a list of types that should be used for the tuple items.
+    """
+
+    def __init__(self, types):
+        self.types = [convert_type(ty) for ty in types]
+
+    @property
+    def name(self):
+        return "<" + " ".join(ty.name for ty in self.types) + ">"
+
+    @property
+    def arity(self):
+        return len(self.types)
+
+    def convert(self, value, param, ctx):
+        if len(value) != len(self.types):
+            raise TypeError('It would appear that nargs is set to conflict '
+                            'with the composite type arity.')
+        return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
+
+
+def convert_type(ty, default=None):
+    """Converts a callable or python ty into the most appropriate param
+    ty.
+    """
+    guessed_type = False
+    if ty is None and default is not None:
+        if isinstance(default, tuple):
+            ty = tuple(map(type, default))
+        else:
+            ty = type(default)
+        guessed_type = True
+
+    if isinstance(ty, tuple):
+        return Tuple(ty)
+    if isinstance(ty, ParamType):
+        return ty
+    if ty is text_type or ty is str or ty is None:
+        return STRING
+    if ty is int:
+        return INT
+    # Booleans are only okay if not guessed.  This is done because for
+    # flags the default value is actually a bit of a lie in that it
+    # indicates which of the flags is the one we want.  See get_default()
+    # for more information.
+    if ty is bool and not guessed_type:
+        return BOOL
+    if ty is float:
+        return FLOAT
+    if guessed_type:
+        return STRING
+
+    # Catch a common mistake
+    if __debug__:
+        try:
+            if issubclass(ty, ParamType):
+                raise AssertionError('Attempted to use an uninstantiated '
+                                     'parameter type (%s).' % ty)
+        except TypeError:
+            pass
+    return FuncParamType(ty)
+
+
+#: A dummy parameter type that just does nothing.  From a user's
+#: perspective this appears to just be the same as `STRING` but internally
+#: no string conversion takes place.  This is necessary to achieve the
+#: same bytes/unicode behavior on Python 2/3 in situations where you want
+#: to not convert argument types.  This is usually useful when working
+#: with file paths as they can appear in bytes and unicode.
+#:
+#: For path related uses the :class:`Path` type is a better choice but
+#: there are situations where an unprocessed type is useful which is why
+#: it is provided.
+#:
+#: .. versionadded:: 4.0
+UNPROCESSED = UnprocessedParamType()
+
+#: A unicode string parameter type which is the implicit default.  This
+#: can also be selected by using ``str`` as type.
+STRING = StringParamType()
+
+#: An integer parameter.  This can also be selected by using ``int`` as
+#: type.
+INT = IntParamType()
+
+#: A floating point value parameter.  This can also be selected by using
+#: ``float`` as type.
+FLOAT = FloatParamType()
+
+#: A boolean parameter.  This is the default for boolean flags.  This can
+#: also be selected by using ``bool`` as a type.
+BOOL = BoolParamType()
+
+#: A UUID parameter.
+UUID = UUIDParameterType()
diff --git a/lib/python3.7/site-packages/click/utils.py b/lib/python3.7/site-packages/click/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc84369fc9540647892b87eeaff49ebc84c8872d
--- /dev/null
+++ b/lib/python3.7/site-packages/click/utils.py
@@ -0,0 +1,440 @@
+import os
+import sys
+
+from .globals import resolve_color_default
+
+from ._compat import text_type, open_stream, get_filesystem_encoding, \
+    get_streerror, string_types, PY2, binary_streams, text_streams, \
+    filename_to_ui, auto_wrap_for_ansi, strip_ansi, should_strip_ansi, \
+    _default_text_stdout, _default_text_stderr, is_bytes, WIN
+
+if not PY2:
+    from ._compat import _find_binary_writer
+elif WIN:
+    from ._winconsole import _get_windows_argv, \
+         _hash_py_argv, _initial_argv_hash
+
+
+echo_native_types = string_types + (bytes, bytearray)
+
+
+def _posixify(name):
+    return '-'.join(name.split()).lower()
+
+
+def safecall(func):
+    """Wraps a function so that it swallows exceptions."""
+    def wrapper(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except Exception:
+            pass
+    return wrapper
+
+
+def make_str(value):
+    """Converts a value into a valid string."""
+    if isinstance(value, bytes):
+        try:
+            return value.decode(get_filesystem_encoding())
+        except UnicodeError:
+            return value.decode('utf-8', 'replace')
+    return text_type(value)
+
+
+def make_default_short_help(help, max_length=45):
+    """Return a condensed version of help string."""
+    words = help.split()
+    total_length = 0
+    result = []
+    done = False
+
+    for word in words:
+        if word[-1:] == '.':
+            done = True
+        new_length = result and 1 + len(word) or len(word)
+        if total_length + new_length > max_length:
+            result.append('...')
+            done = True
+        else:
+            if result:
+                result.append(' ')
+            result.append(word)
+        if done:
+            break
+        total_length += new_length
+
+    return ''.join(result)
+
+
+class LazyFile(object):
+    """A lazy file works like a regular file but it does not fully open
+    the file but it does perform some basic checks early to see if the
+    filename parameter does make sense.  This is useful for safely opening
+    files for writing.
+    """
+
+    def __init__(self, filename, mode='r', encoding=None, errors='strict',
+                 atomic=False):
+        self.name = filename
+        self.mode = mode
+        self.encoding = encoding
+        self.errors = errors
+        self.atomic = atomic
+
+        if filename == '-':
+            self._f, self.should_close = open_stream(filename, mode,
+                                                     encoding, errors)
+        else:
+            if 'r' in mode:
+                # Open and close the file in case we're opening it for
+                # reading so that we can catch at least some errors in
+                # some cases early.
+                open(filename, mode).close()
+            self._f = None
+            self.should_close = True
+
+    def __getattr__(self, name):
+        return getattr(self.open(), name)
+
+    def __repr__(self):
+        if self._f is not None:
+            return repr(self._f)
+        return '<unopened file %r %s>' % (self.name, self.mode)
+
+    def open(self):
+        """Opens the file if it's not yet open.  This call might fail with
+        a :exc:`FileError`.  Not handling this error will produce an error
+        that Click shows.
+        """
+        if self._f is not None:
+            return self._f
+        try:
+            rv, self.should_close = open_stream(self.name, self.mode,
+                                                self.encoding,
+                                                self.errors,
+                                                atomic=self.atomic)
+        except (IOError, OSError) as e:
+            from .exceptions import FileError
+            raise FileError(self.name, hint=get_streerror(e))
+        self._f = rv
+        return rv
+
+    def close(self):
+        """Closes the underlying file, no matter what."""
+        if self._f is not None:
+            self._f.close()
+
+    def close_intelligently(self):
+        """This function only closes the file if it was opened by the lazy
+        file wrapper.  For instance this will never close stdin.
+        """
+        if self.should_close:
+            self.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self.close_intelligently()
+
+    def __iter__(self):
+        self.open()
+        return iter(self._f)
+
+
+class KeepOpenFile(object):
+
+    def __init__(self, file):
+        self._file = file
+
+    def __getattr__(self, name):
+        return getattr(self._file, name)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        pass
+
+    def __repr__(self):
+        return repr(self._file)
+
+    def __iter__(self):
+        return iter(self._file)
+
+
+def echo(message=None, file=None, nl=True, err=False, color=None):
+    """Prints a message plus a newline to the given file or stdout.  On
+    first sight, this looks like the print function, but it has improved
+    support for handling Unicode and binary data that does not fail no
+    matter how badly configured the system is.
+
+    Primarily it means that you can print binary data as well as Unicode
+    data on both 2.x and 3.x to the given file in the most appropriate way
+    possible.  This is a very carefree function in that it will try its
+    best to not fail.  As of Click 6.0 this includes support for unicode
+    output on the Windows console.
+
+    In addition to that, if `colorama`_ is installed, the echo function will
+    also support clever handling of ANSI codes.  Essentially it will then
+    do the following:
+
+    -   add transparent handling of ANSI color codes on Windows.
+    -   hide ANSI codes automatically if the destination file is not a
+        terminal.
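+
+    A small sketch of the behaviors described above (the strings are
+    illustrative)::
+
+        click.echo('Hello World!')                # unicode plus newline
+        click.echo(b'raw bytes', nl=False)        # bytes pass through as-is
+        click.echo('something failed', err=True)  # goes to stderr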
+
+    .. _colorama: https://pypi.org/project/colorama/
+
+    .. versionchanged:: 6.0
+       As of Click 6.0 the echo function will properly support unicode
+       output on the Windows console.  Note that Click does not modify
+       the interpreter in any way, which means that `sys.stdout` or the
+       print statement or function will still not provide unicode support.
+
+    .. versionchanged:: 2.0
+       Starting with version 2.0 of Click, the echo function will work
+       with colorama if it's installed.
+
+    .. versionadded:: 3.0
+       The `err` parameter was added.
+
+    .. versionchanged:: 4.0
+       Added the `color` flag.
+
+    :param message: the message to print
+    :param file: the file to write to (defaults to ``stdout``)
+    :param err: if set to true the file defaults to ``stderr`` instead of
+                ``stdout``.  This is faster and easier than calling
+                :func:`get_text_stderr` yourself.
+    :param nl: if set to `True` (the default) a newline is printed afterwards.
+    :param color: controls if the terminal supports ANSI colors or not.  The
+                  default is autodetection.
+    """
+    if file is None:
+        if err:
+            file = _default_text_stderr()
+        else:
+            file = _default_text_stdout()
+
+    # Convert non bytes/text into the native string type.
+    if message is not None and not isinstance(message, echo_native_types):
+        message = text_type(message)
+
+    if nl:
+        message = message or u''
+        if isinstance(message, text_type):
+            message += u'\n'
+        else:
+            message += b'\n'
+
+    # If there is a message, and we're in Python 3, and the value looks
+    # like bytes, we manually need to find the binary stream and write the
+    # message in there.  This is done separately so that most stream
+    # types will work as you would expect.  Eg: you can write to StringIO
+    # for other cases.
+    if message and not PY2 and is_bytes(message):
+        binary_file = _find_binary_writer(file)
+        if binary_file is not None:
+            file.flush()
+            binary_file.write(message)
+            binary_file.flush()
+            return
+
+    # ANSI-style support.  If there is no message or we are dealing with
+    # bytes, nothing happens.  If we are connected to a file we want to
+    # strip colors.  If we are on Windows we either wrap the stream to
+    # strip the color or we use the colorama support to translate the
+    # ANSI codes to API calls.
+    if message and not is_bytes(message):
+        color = resolve_color_default(color)
+        if should_strip_ansi(file, color):
+            message = strip_ansi(message)
+        elif WIN:
+            if auto_wrap_for_ansi is not None:
+                file = auto_wrap_for_ansi(file)
+            elif not color:
+                message = strip_ansi(message)
+
+    if message:
+        file.write(message)
+    file.flush()
+
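+
+# Hedged usage sketch (editorial addition, not part of upstream Click):
+# echo() handles text and bytes alike and routes each to the appropriate
+# stream.  The helper name _demo_echo is hypothetical and illustrative only.
+def _demo_echo():
+    echo(u'Hello World!')                    # text, newline appended
+    echo(b'\xe2\x98\x83', nl=False)          # bytes go to the binary stream
+    echo(u'something went wrong', err=True)  # defaults to stderr instead
+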
+
+def get_binary_stream(name):
+    """Returns a system stream for byte processing.  This essentially
+    returns the stream from the sys module with the given name but it
+    solves some compatibility issues between different Python versions.
+    Primarily this function is necessary for getting binary streams on
+    Python 3.
+
+    :param name: the name of the stream to open.  Valid names are ``'stdin'``,
+                 ``'stdout'`` and ``'stderr'``
+    """
+    opener = binary_streams.get(name)
+    if opener is None:
+        raise TypeError('Unknown standard stream %r' % name)
+    return opener()
+
+
+def get_text_stream(name, encoding=None, errors='strict'):
+    """Returns a system stream for text processing.  This usually returns
+    a wrapped stream around a binary stream returned from
+    :func:`get_binary_stream` but it also can take shortcuts on Python 3
+    for already correctly configured streams.
+
+    :param name: the name of the stream to open.  Valid names are ``'stdin'``,
+                 ``'stdout'`` and ``'stderr'``
+    :param encoding: overrides the detected default encoding.
+    :param errors: overrides the default error mode.
+    """
+    opener = text_streams.get(name)
+    if opener is None:
+        raise TypeError('Unknown standard stream %r' % name)
+    return opener(encoding, errors)
+
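+
+# Hedged sketch (editorial addition): both helpers resolve a named standard
+# stream; _demo_streams is a hypothetical illustration only.
+def _demo_streams():
+    data = get_binary_stream('stdin').read()           # raw bytes in
+    out = get_text_stream('stdout', encoding='utf-8')  # text out
+    out.write(u'read %d bytes\n' % len(data))
+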
+
+def open_file(filename, mode='r', encoding=None, errors='strict',
+              lazy=False, atomic=False):
+    """This is similar to how the :class:`File` works but for manual
+    usage.  Files are opened non lazy by default.  This can open regular
+    files as well as stdin/stdout if ``'-'`` is passed.
+
+    If stdin/stdout is returned the stream is wrapped so that the context
+    manager will not close the stream accidentally.  This makes it possible
+    to always use the function like this without having to worry to
+    accidentally close a standard stream::
+
+        with open_file(filename) as f:
+            ...
+
+    .. versionadded:: 3.0
+
+    :param filename: the name of the file to open (or ``'-'`` for stdin/stdout).
+    :param mode: the mode in which to open the file.
+    :param encoding: the encoding to use.
+    :param errors: the error handling for this file.
+    :param lazy: can be flipped to true to open the file lazily.
+    :param atomic: in atomic mode writes go into a temporary file and it's
+                   moved on close.
+    """
+    if lazy:
+        return LazyFile(filename, mode, encoding, errors, atomic=atomic)
+    f, should_close = open_stream(filename, mode, encoding, errors,
+                                  atomic=atomic)
+    if not should_close:
+        f = KeepOpenFile(f)
+    return f
+
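+
+# Hedged sketch (editorial addition): ``'-'`` selects stdin/stdout, and the
+# returned wrapper keeps standard streams open after the with-block;
+# _demo_open_file is hypothetical.
+def _demo_open_file(filename):
+    with open_file(filename, 'w', atomic=True) as f:  # temp file, renamed on close
+        f.write(u'hello\n')
+    with open_file('-') as f:                         # stdin, not closed afterwards
+        return f.read()
+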
+
+def get_os_args():
+    """This returns the argument part of sys.argv in the most appropriate
+    form for processing.  What this means is that this return value is in
+    a format that works for Click to process but does not necessarily
+    correspond well to what's actually standard for the interpreter.
+
+    In most environments the return value is ``sys.argv[1:]`` unchanged.
+    However if you are on Windows and running Python 2, the return value
+    will actually be a list of unicode strings instead, because the
+    default behavior on that platform otherwise would not be able to
+    carry all possible values that sys.argv can have.
+
+    .. versionadded:: 6.0
+    """
+    # We can only extract the unicode argv if sys.argv has not been
+    # changed since the startup of the application.
+    if PY2 and WIN and _initial_argv_hash == _hash_py_argv():
+        return _get_windows_argv()
+    return sys.argv[1:]
+
+
+def format_filename(filename, shorten=False):
+    """Formats a filename for user display.  The main purpose of this
+    function is to ensure that the filename can be displayed at all.  This
+    will decode the filename to unicode if necessary in a way that it will
+    not fail.  Optionally, it can shorten the filename to not include the
+    full path to the filename.
+
+    :param filename: formats a filename for UI display.  This will also convert
+                     the filename into unicode without failing.
+    :param shorten: this optionally shortens the filename to strip of the
+                    path that leads up to it.
+    """
+    if shorten:
+        filename = os.path.basename(filename)
+    return filename_to_ui(filename)
+
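+
+# Hedged sketch (editorial addition): ``shorten`` drops the directory part;
+# _demo_format_filename and the path used are illustrative only.
+def _demo_format_filename():
+    full = format_filename(b'/tmp/some/file.txt')                # decoded for UI
+    short = format_filename('/tmp/some/file.txt', shorten=True)  # 'file.txt'
+    return full, short
+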
+
+def get_app_dir(app_name, roaming=True, force_posix=False):
+    r"""Returns the config folder for the application.  The default behavior
+    is to return whatever is most appropriate for the operating system.
+
+    To give you an idea, for an app called ``"Foo Bar"``, something like
+    the following folders could be returned:
+
+    Mac OS X:
+      ``~/Library/Application Support/Foo Bar``
+    Mac OS X (POSIX):
+      ``~/.foo-bar``
+    Unix:
+      ``~/.config/foo-bar``
+    Unix (POSIX):
+      ``~/.foo-bar``
+    Win XP (roaming):
+      ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
+    Win XP (not roaming):
+      ``C:\Documents and Settings\<user>\Application Data\Foo Bar``
+    Win 7 (roaming):
+      ``C:\Users\<user>\AppData\Roaming\Foo Bar``
+    Win 7 (not roaming):
+      ``C:\Users\<user>\AppData\Local\Foo Bar``
+
+    .. versionadded:: 2.0
+
+    :param app_name: the application name.  This should be properly capitalized
+                     and can contain whitespace.
+    :param roaming: controls if the folder should be roaming or not on Windows.
+                    Has no effect otherwise.
+    :param force_posix: if this is set to `True` then on any POSIX system the
+                        folder will be stored in the home folder with a leading
+                        dot instead of the XDG config home or darwin's
+                        application support folder.
+    """
+    if WIN:
+        key = 'APPDATA' if roaming else 'LOCALAPPDATA'
+        folder = os.environ.get(key)
+        if folder is None:
+            folder = os.path.expanduser('~')
+        return os.path.join(folder, app_name)
+    if force_posix:
+        return os.path.expanduser('~/.' + _posixify(app_name))
+    if sys.platform == 'darwin':
+        return os.path.join(os.path.expanduser(
+            '~/Library/Application Support'), app_name)
+    return os.path.join(
+        os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')),
+        _posixify(app_name))
+
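+
+# Hedged sketch (editorial addition): a typical use is building a per-user
+# config path; the app name and file name below are illustrative only.
+def _demo_app_dir():
+    # e.g. ~/.config/foo-bar/config.ini on Unix
+    return os.path.join(get_app_dir('Foo Bar'), 'config.ini')
+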
+
+class PacifyFlushWrapper(object):
+    """This wrapper is used to catch and suppress BrokenPipeErrors resulting
+    from ``.flush()`` being called on broken pipe during the shutdown/final-GC
+    of the Python interpreter. Notably ``.flush()`` is always called on
+    ``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any
+    other cleanup code, and the case where the underlying file is not a broken
+    pipe, all calls and attributes are proxied.
+    """
+
+    def __init__(self, wrapped):
+        self.wrapped = wrapped
+
+    def flush(self):
+        try:
+            self.wrapped.flush()
+        except IOError as e:
+            import errno
+            if e.errno != errno.EPIPE:
+                raise
+
+    def __getattr__(self, attr):
+        return getattr(self.wrapped, attr)
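+
+
+# Hedged sketch (editorial addition, not upstream usage): wrap a standard
+# stream so that a broken pipe during interpreter shutdown is silenced.
+def _demo_pacify_stdout():
+    sys.stdout = PacifyFlushWrapper(sys.stdout)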
diff --git a/lib/python3.7/site-packages/easy_install.py b/lib/python3.7/site-packages/easy_install.py
new file mode 100644
index 0000000000000000000000000000000000000000..d87e984034b6e6e9eb456ebcb2b3f420c07a48bc
--- /dev/null
+++ b/lib/python3.7/site-packages/easy_install.py
@@ -0,0 +1,5 @@
+"""Run the EasyInstall command"""
+
+if __name__ == '__main__':
+    from setuptools.command.easy_install import main
+    main()
diff --git a/lib/python3.7/site-packages/flask/__init__.py b/lib/python3.7/site-packages/flask/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ded19822400ad7c661e5e3fa1de49b9f7e5e906e
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/__init__.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+"""
+    flask
+    ~~~~~
+
+    A microframework based on Werkzeug.  It's extensively documented
+    and follows best practice patterns.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+__version__ = '1.0.2'
+
+# utilities we import from Werkzeug and Jinja2 that are unused
+# in the module but are exported as public interface.
+from werkzeug.exceptions import abort
+from werkzeug.utils import redirect
+from jinja2 import Markup, escape
+
+from .app import Flask, Request, Response
+from .config import Config
+from .helpers import url_for, flash, send_file, send_from_directory, \
+     get_flashed_messages, get_template_attribute, make_response, safe_join, \
+     stream_with_context
+from .globals import current_app, g, request, session, _request_ctx_stack, \
+     _app_ctx_stack
+from .ctx import has_request_context, has_app_context, \
+     after_this_request, copy_current_request_context
+from .blueprints import Blueprint
+from .templating import render_template, render_template_string
+
+# the signals
+from .signals import signals_available, template_rendered, request_started, \
+     request_finished, got_request_exception, request_tearing_down, \
+     appcontext_tearing_down, appcontext_pushed, \
+     appcontext_popped, message_flashed, before_render_template
+
+# We're not exposing the actual json module but a convenient wrapper around
+# it.
+from . import json
+
+# This was the only thing that Flask used to export at one point and it had
+# a more generic name.
+jsonify = json.jsonify
+
+# backwards compat; slated for removal (still exported as of 1.0.x)
+from .sessions import SecureCookieSession as Session
+json_available = True
diff --git a/lib/python3.7/site-packages/flask/__main__.py b/lib/python3.7/site-packages/flask/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4aee654374633ddf7f5bf088843e23465adad075
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/__main__.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.__main__
+    ~~~~~~~~~~~~~~
+
+    Alias for flask.run for the command line.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+if __name__ == '__main__':
+    from .cli import main
+    main(as_module=True)
diff --git a/lib/python3.7/site-packages/flask/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d61f69d42648f39432e3567423adfd33957dbd7f
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/__main__.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/__main__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2527f2f3bbdc95af8cde1641f085cb9d2efa157d
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/__main__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/_compat.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/_compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..44ba590211abf74c0ec6dd956decb99bb35809b8
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/_compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/app.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/app.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..85793fb662aee64a4aa61241c503773765387b30
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/app.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/blueprints.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/blueprints.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ded313a0a28f5c041b478cd4cc2f6c6a14764b86
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/blueprints.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/cli.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/cli.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae84e64044eaa9e95f92225846d36d66dbc66237
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/cli.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/config.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/config.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4418ce0d853be15796f48769e047d8f82d3380b
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/config.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/ctx.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/ctx.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7913f7c8e9a4fd5d838677859d6594da3f48bba6
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/ctx.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/debughelpers.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/debughelpers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f32dc87d59ce1c7caa5b841358468ae29da91974
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/debughelpers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/globals.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/globals.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2615a3b5089d970398fc7b48e5a91a19076cdec7
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/globals.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/helpers.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/helpers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f394a8c74b89a577e9e22e28e12b2a8a57f3b109
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/helpers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/logging.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/logging.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..253dcfe66ec9147b1689c1916c03dbfdae8769e7
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/logging.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/sessions.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/sessions.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..faff24249e1746134c74f1f9316df98ac33af236
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/sessions.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/signals.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/signals.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..607ec29b33b93fa2f4b9117c0f14b07b347b6a2e
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/signals.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/templating.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/templating.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3bdbbeb3e9dc065734126eb99f3b6c7a55a0fea6
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/templating.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/testing.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/testing.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e56826ab1cb356224475564b4d73f80dd17275ff
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/testing.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/views.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/views.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a82b83e531642d565da25ca2f7fa99422dd8b6f9
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/views.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/__pycache__/wrappers.cpython-37.pyc b/lib/python3.7/site-packages/flask/__pycache__/wrappers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0d144e83fd8aa2dbbfe7a0473682505dd50bdb64
Binary files /dev/null and b/lib/python3.7/site-packages/flask/__pycache__/wrappers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/_compat.py b/lib/python3.7/site-packages/flask/_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3b5b9c1c9ade6bebc495798a3358a1b5d04e8be
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/_compat.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+"""
+    flask._compat
+    ~~~~~~~~~~~~~
+
+    Some py2/py3 compatibility support based on a stripped down
+    version of six so we don't have to depend on a specific version
+    of it.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import sys
+
+PY2 = sys.version_info[0] == 2
+_identity = lambda x: x
+
+
+if not PY2:
+    text_type = str
+    string_types = (str,)
+    integer_types = (int,)
+
+    iterkeys = lambda d: iter(d.keys())
+    itervalues = lambda d: iter(d.values())
+    iteritems = lambda d: iter(d.items())
+
+    from inspect import getfullargspec as getargspec
+    from io import StringIO
+
+    def reraise(tp, value, tb=None):
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+    implements_to_string = _identity
+
+else:
+    text_type = unicode
+    string_types = (str, unicode)
+    integer_types = (int, long)
+
+    iterkeys = lambda d: d.iterkeys()
+    itervalues = lambda d: d.itervalues()
+    iteritems = lambda d: d.iteritems()
+
+    from inspect import getargspec
+    from cStringIO import StringIO
+
+    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
+
+    def implements_to_string(cls):
+        cls.__unicode__ = cls.__str__
+        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
+        return cls
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    # This requires a bit of explanation: the basic idea is to make a
+    # dummy metaclass for one level of class instantiation that replaces
+    # itself with the actual metaclass.
+    class metaclass(type):
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+    return type.__new__(metaclass, 'temporary_class', (), {})
+
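+
+# Hedged sketch (editorial addition, not upstream): the dummy metaclass lets
+# a single class body work under both py2 and py3 metaclass syntax.
+# _DemoMeta and _DemoBase are hypothetical names.
+class _DemoMeta(type):
+    def __new__(cls, name, bases, d):
+        d.setdefault('tag', name.lower())
+        return type.__new__(cls, name, bases, d)
+
+
+class _DemoBase(with_metaclass(_DemoMeta)):
+    pass  # _DemoBase.tag == 'demobase'
+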
+
+# Certain versions of pypy have a bug where clearing the exception stack
+# breaks the __exit__ function in a very peculiar way.  The second level of
+# exception blocks is necessary because pypy seems to forget to check if an
+# exception happened until the next bytecode instruction?
+#
+# Relevant PyPy bugfix commit:
+# https://bitbucket.org/pypy/pypy/commits/77ecf91c635a287e88e60d8ddb0f4e9df4003301
+# According to ronan on #pypy IRC, it is released in PyPy2 2.3 and later
+# versions.
+#
+# Ubuntu 14.04 has PyPy 2.2.1, which does exhibit this bug.
+BROKEN_PYPY_CTXMGR_EXIT = False
+if hasattr(sys, 'pypy_version_info'):
+    class _Mgr(object):
+        def __enter__(self):
+            return self
+        def __exit__(self, *args):
+            if hasattr(sys, 'exc_clear'):
+                # Python 3 (PyPy3) doesn't have exc_clear
+                sys.exc_clear()
+    try:
+        try:
+            with _Mgr():
+                raise AssertionError()
+        except:
+            raise
+    except TypeError:
+        BROKEN_PYPY_CTXMGR_EXIT = True
+    except AssertionError:
+        pass
diff --git a/lib/python3.7/site-packages/flask/app.py b/lib/python3.7/site-packages/flask/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..87c5900348b5a2ad629f374bd7b03d791ed1a7e6
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/app.py
@@ -0,0 +1,2315 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.app
+    ~~~~~~~~~
+
+    This module implements the central WSGI application object.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import os
+import sys
+import warnings
+from datetime import timedelta
+from functools import update_wrapper
+from itertools import chain
+from threading import Lock
+
+from werkzeug.datastructures import Headers, ImmutableDict
+from werkzeug.exceptions import BadRequest, BadRequestKeyError, HTTPException, \
+    InternalServerError, MethodNotAllowed, default_exceptions
+from werkzeug.routing import BuildError, Map, RequestRedirect, Rule
+
+from . import cli, json
+from ._compat import integer_types, reraise, string_types, text_type
+from .config import Config, ConfigAttribute
+from .ctx import AppContext, RequestContext, _AppCtxGlobals
+from .globals import _request_ctx_stack, g, request, session
+from .helpers import (
+    _PackageBoundObject,
+    _endpoint_from_view_func, find_package, get_env, get_debug_flag,
+    get_flashed_messages, locked_cached_property, url_for, get_load_dotenv
+)
+from .logging import create_logger
+from .sessions import SecureCookieSessionInterface
+from .signals import appcontext_tearing_down, got_request_exception, \
+    request_finished, request_started, request_tearing_down
+from .templating import DispatchingJinjaLoader, Environment, \
+    _default_template_ctx_processor
+from .wrappers import Request, Response
+
+# a singleton sentinel value for parameter defaults
+_sentinel = object()
+
+
+def _make_timedelta(value):
+    if not isinstance(value, timedelta):
+        return timedelta(seconds=value)
+    return value
+
+
+def setupmethod(f):
+    """Wraps a method so that it performs a check in debug mode if the
+    first request was already handled.
+    """
+    def wrapper_func(self, *args, **kwargs):
+        if self.debug and self._got_first_request:
+            raise AssertionError('A setup function was called after the '
+                'first request was handled.  This usually indicates a bug '
+                'in the application where a module was not imported '
+                'and decorators or other functionality was called too late.\n'
+                'To fix this make sure to import all your view modules, '
+                'database models and everything related at a central place '
+                'before the application starts serving requests.')
+        return f(self, *args, **kwargs)
+    return update_wrapper(wrapper_func, f)
+
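+
+# Hedged sketch (editorial addition, not upstream): any object exposing
+# ``debug`` and ``_got_first_request`` can use the guard; _DemoRegistry is
+# a hypothetical illustration.
+class _DemoRegistry(object):
+    debug = True
+    _got_first_request = False
+
+    @setupmethod
+    def add_handler(self, name):
+        # raises AssertionError once _got_first_request is True in debug mode
+        return name
+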
+
+class Flask(_PackageBoundObject):
+    """The flask object implements a WSGI application and acts as the central
+    object.  It is passed the name of the module or package of the
+    application.  Once it is created it will act as a central registry for
+    the view functions, the URL rules, template configuration and much more.
+
+    The name of the package is used to resolve resources from inside the
+    package or the folder the module is contained in, depending on whether
+    the package parameter resolves to an actual python package (a folder
+    with an :file:`__init__.py` file inside) or a standard module (just a
+    ``.py`` file).
+
+    For more information about resource loading, see :func:`open_resource`.
+
+    Usually you create a :class:`Flask` instance in your main module or
+    in the :file:`__init__.py` file of your package like this::
+
+        from flask import Flask
+        app = Flask(__name__)
+
+    .. admonition:: About the First Parameter
+
+        The idea of the first parameter is to give Flask an idea of what
+        belongs to your application.  This name is used to find resources
+        on the filesystem, can be used by extensions to improve debugging
+        information and a lot more.
+
+        So it's important what you provide there.  If you are using a single
+        module, `__name__` is always the correct value.  If you however are
+        using a package, it's usually recommended to hardcode the name of
+        your package there.
+
+        For example if your application is defined in :file:`yourapplication/app.py`
+        you should create it with one of the two versions below::
+
+            app = Flask('yourapplication')
+            app = Flask(__name__.split('.')[0])
+
+        Why is that?  The application will work even with `__name__`, thanks
+        to how resources are looked up.  However it will make debugging more
+        painful.  Certain extensions can make assumptions based on the
+        import name of your application.  For example the Flask-SQLAlchemy
+        extension will look for the code in your application that triggered
+        an SQL query in debug mode.  If the import name is not properly set
+        up, that debugging information is lost.  (For example it would only
+        pick up SQL queries in `yourapplication.app` and not
+        `yourapplication.views.frontend`)
+
+    .. versionadded:: 0.7
+       The `static_url_path`, `static_folder`, and `template_folder`
+       parameters were added.
+
+    .. versionadded:: 0.8
+       The `instance_path` and `instance_relative_config` parameters were
+       added.
+
+    .. versionadded:: 0.11
+       The `root_path` parameter was added.
+
+    .. versionadded:: 1.0
+       The ``host_matching`` and ``static_host`` parameters were added.
+
+    .. versionadded:: 1.0
+       The ``subdomain_matching`` parameter was added. Subdomain
+       matching needs to be enabled manually now. Setting
+       :data:`SERVER_NAME` does not implicitly enable it.
+
+    :param import_name: the name of the application package
+    :param static_url_path: can be used to specify a different path for the
+                            static files on the web.  Defaults to the name
+                            of the `static_folder` folder.
+    :param static_folder: the folder with static files that should be served
+                          at `static_url_path`.  Defaults to the ``'static'``
+                          folder in the root path of the application.
+    :param static_host: the host to use when adding the static route.
+        Defaults to None. Required when using ``host_matching=True``
+        with a ``static_folder`` configured.
+    :param host_matching: set ``url_map.host_matching`` attribute.
+        Defaults to False.
+    :param subdomain_matching: consider the subdomain relative to
+        :data:`SERVER_NAME` when matching routes. Defaults to False.
+    :param template_folder: the folder that contains the templates that should
+                            be used by the application.  Defaults to
+                            ``'templates'`` folder in the root path of the
+                            application.
+    :param instance_path: An alternative instance path for the application.
+                          By default the folder ``'instance'`` next to the
+                          package or module is assumed to be the instance
+                          path.
+    :param instance_relative_config: if set to ``True`` relative filenames
+                                     for loading the config are assumed to
+                                     be relative to the instance path instead
+                                     of the application root.
+    :param root_path: Flask by default will automatically calculate the path
+                      to the root of the application.  In certain situations
+                      this cannot be achieved (for instance if the package
+                      is a Python 3 namespace package) and needs to be
+                      manually defined.
+    """
+
+    #: The class that is used for request objects.  See :class:`~flask.Request`
+    #: for more information.
+    request_class = Request
+
+    #: The class that is used for response objects.  See
+    #: :class:`~flask.Response` for more information.
+    response_class = Response
+
+    #: The class that is used for the Jinja environment.
+    #:
+    #: .. versionadded:: 0.11
+    jinja_environment = Environment
+
+    #: The class that is used for the :data:`~flask.g` instance.
+    #:
+    #: Example use cases for a custom class:
+    #:
+    #: 1. Store arbitrary attributes on flask.g.
+    #: 2. Add a property for lazy per-request database connectors.
+    #: 3. Return None instead of AttributeError on unexpected attributes.
+    #: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
+    #:
+    #: In Flask 0.9 this property was called `request_globals_class` but it
+    #: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
+    #: flask.g object is now application context scoped.
+    #:
+    #: .. versionadded:: 0.10
+    app_ctx_globals_class = _AppCtxGlobals
+
+    #: The class that is used for the ``config`` attribute of this app.
+    #: Defaults to :class:`~flask.Config`.
+    #:
+    #: Example use cases for a custom class:
+    #:
+    #: 1. Default values for certain config options.
+    #: 2. Access to config values through attributes in addition to keys.
+    #:
+    #: .. versionadded:: 0.11
+    config_class = Config
+
+    #: The testing flag.  Set this to ``True`` to enable the test mode of
+    #: Flask extensions (and in the future probably also Flask itself).
+    #: For example this might activate test helpers that have an
+    #: additional runtime cost which should not be enabled by default.
+    #:
+    #: If this is enabled and ``PROPAGATE_EXCEPTIONS`` is not changed from
+    #: its default, exception propagation is implicitly enabled.
+    #:
+    #: This attribute can also be configured from the config with the
+    #: ``TESTING`` configuration key.  Defaults to ``False``.
+    testing = ConfigAttribute('TESTING')
+
+    #: If a secret key is set, cryptographic components can use this to
+    #: sign cookies and other things. Set this to a complex random value
+    #: when you want to use the secure cookie for instance.
+    #:
+    #: This attribute can also be configured from the config with the
+    #: :data:`SECRET_KEY` configuration key. Defaults to ``None``.
+    secret_key = ConfigAttribute('SECRET_KEY')
+
+    #: The secure cookie uses this for the name of the session cookie.
+    #:
+    #: This attribute can also be configured from the config with the
+    #: ``SESSION_COOKIE_NAME`` configuration key.  Defaults to ``'session'``
+    session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
+
+    #: A :class:`~datetime.timedelta` which is used to set the expiration
+    #: date of a permanent session.  The default is 31 days which makes a
+    #: permanent session survive for roughly one month.
+    #:
+    #: This attribute can also be configured from the config with the
+    #: ``PERMANENT_SESSION_LIFETIME`` configuration key.  Defaults to
+    #: ``timedelta(days=31)``
+    permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
+        get_converter=_make_timedelta)
+
+    #: A :class:`~datetime.timedelta` which is used as default cache_timeout
+    #: for the :func:`send_file` functions. The default is 12 hours.
+    #:
+    #: This attribute can also be configured from the config with the
+    #: ``SEND_FILE_MAX_AGE_DEFAULT`` configuration key. This configuration
+    #: variable can also be set with an integer value used as seconds.
+    #: Defaults to ``timedelta(hours=12)``
+    send_file_max_age_default = ConfigAttribute('SEND_FILE_MAX_AGE_DEFAULT',
+        get_converter=_make_timedelta)
+
+    #: Enable this if you want to use the X-Sendfile feature.  Keep in
+    #: mind that the server has to support this.  This only affects files
+    #: sent with the :func:`send_file` method.
+    #:
+    #: .. versionadded:: 0.2
+    #:
+    #: This attribute can also be configured from the config with the
+    #: ``USE_X_SENDFILE`` configuration key.  Defaults to ``False``.
+    use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
+
+    #: The JSON encoder class to use.  Defaults to :class:`~flask.json.JSONEncoder`.
+    #:
+    #: .. versionadded:: 0.10
+    json_encoder = json.JSONEncoder
+
+    #: The JSON decoder class to use.  Defaults to :class:`~flask.json.JSONDecoder`.
+    #:
+    #: .. versionadded:: 0.10
+    json_decoder = json.JSONDecoder
+
+    #: Options that are passed directly to the Jinja2 environment.
+    jinja_options = ImmutableDict(
+        extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
+    )
+
+    #: Default configuration parameters.
+    default_config = ImmutableDict({
+        'ENV':                                  None,
+        'DEBUG':                                None,
+        'TESTING':                              False,
+        'PROPAGATE_EXCEPTIONS':                 None,
+        'PRESERVE_CONTEXT_ON_EXCEPTION':        None,
+        'SECRET_KEY':                           None,
+        'PERMANENT_SESSION_LIFETIME':           timedelta(days=31),
+        'USE_X_SENDFILE':                       False,
+        'SERVER_NAME':                          None,
+        'APPLICATION_ROOT':                     '/',
+        'SESSION_COOKIE_NAME':                  'session',
+        'SESSION_COOKIE_DOMAIN':                None,
+        'SESSION_COOKIE_PATH':                  None,
+        'SESSION_COOKIE_HTTPONLY':              True,
+        'SESSION_COOKIE_SECURE':                False,
+        'SESSION_COOKIE_SAMESITE':              None,
+        'SESSION_REFRESH_EACH_REQUEST':         True,
+        'MAX_CONTENT_LENGTH':                   None,
+        'SEND_FILE_MAX_AGE_DEFAULT':            timedelta(hours=12),
+        'TRAP_BAD_REQUEST_ERRORS':              None,
+        'TRAP_HTTP_EXCEPTIONS':                 False,
+        'EXPLAIN_TEMPLATE_LOADING':             False,
+        'PREFERRED_URL_SCHEME':                 'http',
+        'JSON_AS_ASCII':                        True,
+        'JSON_SORT_KEYS':                       True,
+        'JSONIFY_PRETTYPRINT_REGULAR':          False,
+        'JSONIFY_MIMETYPE':                     'application/json',
+        'TEMPLATES_AUTO_RELOAD':                None,
+        'MAX_COOKIE_SIZE': 4093,
+    })
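+
+    # Hedged editorial note: individual defaults above are typically
+    # overridden after the app is constructed, for example::
+    #
+    #     app = Flask(__name__)
+    #     app.config.update(
+    #         SECRET_KEY='change-me',       # illustrative value only
+    #         SESSION_COOKIE_SECURE=True,
+    #     )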
+
+    #: The rule object to use for URL rules created.  This is used by
+    #: :meth:`add_url_rule`.  Defaults to :class:`werkzeug.routing.Rule`.
+    #:
+    #: .. versionadded:: 0.7
+    url_rule_class = Rule
+
+    #: the test client class that is used when `test_client` is used.
+    #:
+    #: .. versionadded:: 0.7
+    test_client_class = None
+
+    #: The :class:`~click.testing.CliRunner` subclass, by default
+    #: :class:`~flask.testing.FlaskCliRunner` that is used by
+    #: :meth:`test_cli_runner`. Its ``__init__`` method should take a
+    #: Flask app object as the first argument.
+    #:
+    #: .. versionadded:: 1.0
+    test_cli_runner_class = None
+
+    #: the session interface to use.  By default an instance of
+    #: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
+    #:
+    #: .. versionadded:: 0.8
+    session_interface = SecureCookieSessionInterface()
+
+    # TODO remove the next three attrs when Sphinx :inherited-members: works
+    # https://github.com/sphinx-doc/sphinx/issues/741
+
+    #: The name of the package or module that this app belongs to. Do not
+    #: change this once it is set by the constructor.
+    import_name = None
+
+    #: Location of the template files to be added to the template lookup.
+    #: ``None`` if templates should not be added.
+    template_folder = None
+
+    #: Absolute path to the package on the filesystem. Used to look up
+    #: resources contained in the package.
+    root_path = None
+
+    def __init__(
+        self,
+        import_name,
+        static_url_path=None,
+        static_folder='static',
+        static_host=None,
+        host_matching=False,
+        subdomain_matching=False,
+        template_folder='templates',
+        instance_path=None,
+        instance_relative_config=False,
+        root_path=None
+    ):
+        _PackageBoundObject.__init__(
+            self,
+            import_name,
+            template_folder=template_folder,
+            root_path=root_path
+        )
+
+        if static_url_path is not None:
+            self.static_url_path = static_url_path
+
+        if static_folder is not None:
+            self.static_folder = static_folder
+
+        if instance_path is None:
+            instance_path = self.auto_find_instance_path()
+        elif not os.path.isabs(instance_path):
+            raise ValueError(
+                'If an instance path is provided it must be absolute.'
+                ' A relative path was given instead.'
+            )
+
+        #: Holds the path to the instance folder.
+        #:
+        #: .. versionadded:: 0.8
+        self.instance_path = instance_path
+
+        #: The configuration dictionary as :class:`Config`.  This behaves
+        #: exactly like a regular dictionary but supports additional methods
+        #: to load a config from files.
+        self.config = self.make_config(instance_relative_config)
+
+        #: A dictionary of all view functions registered.  The keys will
+        #: be function names which are also used to generate URLs and
+        #: the values are the function objects themselves.
+        #: To register a view function, use the :meth:`route` decorator.
+        self.view_functions = {}
+
+        #: A dictionary of all registered error handlers.  The key is ``None``
+        #: for error handlers active on the application, otherwise the key is
+        #: the name of the blueprint.  Each key points to another dictionary
+        #: where the key is the status code of the http exception.  The
+        #: special key ``None`` points to a list of tuples where the first item
+        #: is the class for the instance check and the second the error handler
+        #: function.
+        #:
+        #: To register an error handler, use the :meth:`errorhandler`
+        #: decorator.
+        self.error_handler_spec = {}
+
+        #: A list of functions that are called when :meth:`url_for` raises a
+        #: :exc:`~werkzeug.routing.BuildError`.  Each function registered here
+        #: is called with `error`, `endpoint` and `values`.  If a function
+        #: returns ``None`` or raises a :exc:`BuildError` the next function is
+        #: tried.
+        #:
+        #: .. versionadded:: 0.9
+        self.url_build_error_handlers = []
+
+        #: A dictionary with lists of functions that will be called at the
+        #: beginning of each request. The key of the dictionary is the name of
+        #: the blueprint this function is active for, or ``None`` for all
+        #: requests. To register a function, use the :meth:`before_request`
+        #: decorator.
+        self.before_request_funcs = {}
+
+        #: A list of functions that will be called at the beginning of the
+        #: first request to this instance. To register a function, use the
+        #: :meth:`before_first_request` decorator.
+        #:
+        #: .. versionadded:: 0.8
+        self.before_first_request_funcs = []
+
+        #: A dictionary with lists of functions that should be called after
+        #: each request.  The key of the dictionary is the name of the blueprint
+        #: this function is active for, ``None`` for all requests.  This can for
+        #: example be used to close database connections. To register a function
+        #: here, use the :meth:`after_request` decorator.
+        self.after_request_funcs = {}
+
+        #: A dictionary with lists of functions that are called after
+        #: each request, even if an exception has occurred. The key of the
+        #: dictionary is the name of the blueprint this function is active for,
+        #: ``None`` for all requests. These functions are not allowed to modify
+        #: the request, and their return values are ignored. If an exception
+        #: occurred while processing the request, it gets passed to each
+        #: teardown_request function. To register a function here, use the
+        #: :meth:`teardown_request` decorator.
+        #:
+        #: .. versionadded:: 0.7
+        self.teardown_request_funcs = {}
+
+        #: A list of functions that are called when the application context
+        #: is destroyed.  Since the application context is also torn down
+        #: when the request ends, this is the place for code that disconnects
+        #: from databases.
+        #:
+        #: .. versionadded:: 0.9
+        self.teardown_appcontext_funcs = []
+
+        #: A dictionary with lists of functions that are called before the
+        #: :attr:`before_request_funcs` functions. The key of the dictionary is
+        #: the name of the blueprint this function is active for, or ``None``
+        #: for all requests. To register a function, use
+        #: :meth:`url_value_preprocessor`.
+        #:
+        #: .. versionadded:: 0.7
+        self.url_value_preprocessors = {}
+
+        #: A dictionary with lists of functions that can be used as URL value
+        #: preprocessors.  The key ``None`` here is used for application wide
+        #: callbacks, otherwise the key is the name of the blueprint.
+        #: Each of these functions has the chance to modify the dictionary
+        #: of URL values before they are used as the keyword arguments of the
+        #: view function.  For each function registered here, one should also
+        #: provide a :meth:`url_defaults` function that automatically adds
+        #: back the parameters that were removed this way.
+        #:
+        #: .. versionadded:: 0.7
+        self.url_default_functions = {}
+
+        #: A dictionary with lists of functions that are called without argument
+        #: to populate the template context.  The key of the dictionary is the
+        #: name of the blueprint this function is active for, ``None`` for all
+        #: requests.  Each returns a dictionary that the template context is
+        #: updated with.  To register a function here, use the
+        #: :meth:`context_processor` decorator.
+        self.template_context_processors = {
+            None: [_default_template_ctx_processor]
+        }
+
+        #: A list of shell context processor functions that should be run
+        #: when a shell context is created.
+        #:
+        #: .. versionadded:: 0.11
+        self.shell_context_processors = []
+
+        #: all the attached blueprints in a dictionary by name.  Blueprints
+        #: can be attached multiple times so this dictionary does not tell
+        #: you how often they got attached.
+        #:
+        #: .. versionadded:: 0.7
+        self.blueprints = {}
+        self._blueprint_order = []
+
+        #: a place where extensions can store application specific state.  For
+        #: example this is where an extension could store database engines and
+        #: similar things.  For backwards compatibility extensions should register
+        #: themselves like this::
+        #:
+        #:      if not hasattr(app, 'extensions'):
+        #:          app.extensions = {}
+        #:      app.extensions['extensionname'] = SomeObject()
+        #:
+        #: The key must match the name of the extension module. For example in
+        #: case of a "Flask-Foo" extension in `flask_foo`, the key would be
+        #: ``'foo'``.
+        #:
+        #: .. versionadded:: 0.7
+        self.extensions = {}
+
+        #: The :class:`~werkzeug.routing.Map` for this instance.  You can use
+        #: this to change the routing converters after the class was created
+        #: but before any routes are connected.  Example::
+        #:
+        #:    from werkzeug.routing import BaseConverter
+        #:
+        #:    class ListConverter(BaseConverter):
+        #:        def to_python(self, value):
+        #:            return value.split(',')
+        #:        def to_url(self, values):
+        #:            return ','.join(super(ListConverter, self).to_url(value)
+        #:                            for value in values)
+        #:
+        #:    app = Flask(__name__)
+        #:    app.url_map.converters['list'] = ListConverter
+        self.url_map = Map()
+
+        self.url_map.host_matching = host_matching
+        self.subdomain_matching = subdomain_matching
+
+        # tracks internally if the application already handled at least one
+        # request.
+        self._got_first_request = False
+        self._before_request_lock = Lock()
+
+        # Add a static route using the provided static_url_path, static_host,
+        # and static_folder if there is a configured static_folder.
+        # Note we do this without checking if static_folder exists.
+        # For one, it might be created while the server is running (e.g. during
+        # development). Also, Google App Engine stores static files somewhere
+        if self.has_static_folder:
+            assert bool(static_host) == host_matching, 'Invalid static_host/host_matching combination'
+            self.add_url_rule(
+                self.static_url_path + '/<path:filename>',
+                endpoint='static',
+                host=static_host,
+                view_func=self.send_static_file
+            )
+
+        #: The click command line context for this application.  Commands
+        #: registered here show up in the :command:`flask` command once the
+        #: application has been discovered.  The default commands are
+        #: provided by Flask itself and can be overridden.
+        #:
+        #: This is an instance of a :class:`click.Group` object.
+        self.cli = cli.AppGroup(self.name)
+
+    @locked_cached_property
+    def name(self):
+        """The name of the application.  This is usually the import name
+        with the difference that it's guessed from the run file if the
+        import name is main.  This name is used as a display name when
+        Flask needs the name of the application.  It can be set and overridden
+        to change the value.
+
+        .. versionadded:: 0.8
+        """
+        if self.import_name == '__main__':
+            fn = getattr(sys.modules['__main__'], '__file__', None)
+            if fn is None:
+                return '__main__'
+            return os.path.splitext(os.path.basename(fn))[0]
+        return self.import_name
+
+    @property
+    def propagate_exceptions(self):
+        """Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration
+        value in case it's set, otherwise a sensible default is returned.
+
+        .. versionadded:: 0.7
+        """
+        rv = self.config['PROPAGATE_EXCEPTIONS']
+        if rv is not None:
+            return rv
+        return self.testing or self.debug
+
+    @property
+    def preserve_context_on_exception(self):
+        """Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION``
+        configuration value in case it's set, otherwise a sensible default
+        is returned.
+
+        .. versionadded:: 0.7
+        """
+        rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
+        if rv is not None:
+            return rv
+        return self.debug
+
+    @locked_cached_property
+    def logger(self):
+        """The ``'flask.app'`` logger, a standard Python
+        :class:`~logging.Logger`.
+
+        In debug mode, the logger's :attr:`~logging.Logger.level` will be set
+        to :data:`~logging.DEBUG`.
+
+        If there are no handlers configured, a default handler will be added.
+        See :ref:`logging` for more information.
+
+        .. versionchanged:: 1.0
+            Behavior was simplified. The logger is always named
+            ``flask.app``. The level is only set during configuration, it
+            doesn't check ``app.debug`` each time. Only one format is used,
+            not different ones depending on ``app.debug``. No handlers are
+            removed, and a handler is only added if no handlers are already
+            configured.
+
+        .. versionadded:: 0.3
+        """
+        return create_logger(self)
+
+    @locked_cached_property
+    def jinja_env(self):
+        """The Jinja2 environment used to load templates."""
+        return self.create_jinja_environment()
+
+    @property
+    def got_first_request(self):
+        """This attribute is set to ``True`` if the application started
+        handling the first request.
+
+        .. versionadded:: 0.8
+        """
+        return self._got_first_request
+
+    def make_config(self, instance_relative=False):
+        """Used to create the config attribute by the Flask constructor.
+        The `instance_relative` parameter is passed in from the constructor
+        of Flask (there named `instance_relative_config`) and indicates if
+        the config should be relative to the instance path or the root path
+        of the application.
+
+        .. versionadded:: 0.8
+        """
+        root_path = self.root_path
+        if instance_relative:
+            root_path = self.instance_path
+        defaults = dict(self.default_config)
+        defaults['ENV'] = get_env()
+        defaults['DEBUG'] = get_debug_flag()
+        return self.config_class(root_path, defaults)
+
+    def auto_find_instance_path(self):
+        """Tries to locate the instance path if it was not provided to the
+        constructor of the application class.  It will basically calculate
+        the path to a folder named ``instance`` next to your main file or
+        the package.
+
+        .. versionadded:: 0.8
+        """
+        prefix, package_path = find_package(self.import_name)
+        if prefix is None:
+            return os.path.join(package_path, 'instance')
+        return os.path.join(prefix, 'var', self.name + '-instance')
+
+    def open_instance_resource(self, resource, mode='rb'):
+        """Opens a resource from the application's instance folder
+        (:attr:`instance_path`).  Otherwise works like
+        :meth:`open_resource`.  Instance resources can also be opened for
+        writing.
+
+        :param resource: the name of the resource.  To access resources within
+                         subfolders use forward slashes as separator.
+        :param mode: resource file opening mode, default is 'rb'.
+        """
+        return open(os.path.join(self.instance_path, resource), mode)
+
+    def _get_templates_auto_reload(self):
+        """Reload templates when they are changed. Used by
+        :meth:`create_jinja_environment`.
+
+        This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If
+        not set, it will be enabled in debug mode.
+
+        .. versionadded:: 1.0
+            This property was added but the underlying config and behavior
+            already existed.
+        """
+        rv = self.config['TEMPLATES_AUTO_RELOAD']
+        return rv if rv is not None else self.debug
+
+    def _set_templates_auto_reload(self, value):
+        self.config['TEMPLATES_AUTO_RELOAD'] = value
+
+    templates_auto_reload = property(
+        _get_templates_auto_reload, _set_templates_auto_reload
+    )
+    del _get_templates_auto_reload, _set_templates_auto_reload
+
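+    # Hedged editorial note: the getter/setter pair above is exposed as a
+    # plain property and the helper names are deleted so they don't leak as
+    # public methods.  Usage is simply::
+    #
+    #     app.templates_auto_reload = True
+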
+    def create_jinja_environment(self):
+        """Creates the Jinja2 environment based on :attr:`jinja_options`
+        and :meth:`select_jinja_autoescape`.  Since 0.7 this also adds
+        the Jinja2 globals and filters after initialization.  Override
+        this function to customize the behavior.
+
+        .. versionadded:: 0.5
+        .. versionchanged:: 0.11
+           ``Environment.auto_reload`` set in accordance with
+           ``TEMPLATES_AUTO_RELOAD`` configuration option.
+        """
+        options = dict(self.jinja_options)
+
+        if 'autoescape' not in options:
+            options['autoescape'] = self.select_jinja_autoescape
+
+        if 'auto_reload' not in options:
+            options['auto_reload'] = self.templates_auto_reload
+
+        rv = self.jinja_environment(self, **options)
+        rv.globals.update(
+            url_for=url_for,
+            get_flashed_messages=get_flashed_messages,
+            config=self.config,
+            # request, session and g are normally added with the
+            # context processor for efficiency reasons but for imported
+            # templates we also want the proxies in there.
+            request=request,
+            session=session,
+            g=g
+        )
+        rv.filters['tojson'] = json.tojson_filter
+        return rv
+
+    def create_global_jinja_loader(self):
+        """Creates the loader for the Jinja2 environment.  Can be used to
+        override just the loader and keeping the rest unchanged.  It's
+        discouraged to override this function.  Instead one should override
+        the :meth:`jinja_loader` function instead.
+
+        The global loader dispatches between the loaders of the application
+        and the individual blueprints.
+
+        .. versionadded:: 0.7
+        """
+        return DispatchingJinjaLoader(self)
+
+    def select_jinja_autoescape(self, filename):
+        """Returns ``True`` if autoescaping should be active for the given
+        template name. If no template name is given, returns `True`.
+
+        .. versionadded:: 0.5
+        """
+        if filename is None:
+            return True
+        return filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
+
+    def update_template_context(self, context):
+        """Update the template context with some commonly used variables.
+        This injects request, session, config and g into the template
+        context as well as everything template context processors want
+        to inject.  Note that as of Flask 0.6, the original values
+        in the context will not be overridden if a context processor
+        decides to return a value with the same key.
+
+        :param context: the context as a dictionary that is updated in place
+                        to add extra variables.
+        """
+        funcs = self.template_context_processors[None]
+        reqctx = _request_ctx_stack.top
+        if reqctx is not None:
+            bp = reqctx.request.blueprint
+            if bp is not None and bp in self.template_context_processors:
+                funcs = chain(funcs, self.template_context_processors[bp])
+        orig_ctx = context.copy()
+        for func in funcs:
+            context.update(func())
+        # make sure the original values win.  This makes it possible to
+        # add new variables in context processors more easily without
+        # breaking existing views.
+        context.update(orig_ctx)
+
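+    # Hedged sketch (editorial, not upstream docs): a context processor
+    # returns a dict that is merged into every template context; the
+    # original values still win.  ``inject_user`` is hypothetical::
+    #
+    #     @app.context_processor
+    #     def inject_user():
+    #         return {'user': g.get('user')}
+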
+    def make_shell_context(self):
+        """Returns the shell context for an interactive shell for this
+        application.  This runs all the registered shell context
+        processors.
+
+        .. versionadded:: 0.11
+        """
+        rv = {'app': self, 'g': g}
+        for processor in self.shell_context_processors:
+            rv.update(processor())
+        return rv
+
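+    # Hedged sketch (editorial, not upstream docs): names for ``flask shell``
+    # are registered with the decorator; ``db`` is hypothetical::
+    #
+    #     @app.shell_context_processor
+    #     def make_ctx():
+    #         return {'db': db}
+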
+    #: What environment the app is running in. Flask and extensions may
+    #: enable behaviors based on the environment, such as enabling debug
+    #: mode. This maps to the :data:`ENV` config key. This is set by the
+    #: :envvar:`FLASK_ENV` environment variable and may not behave as
+    #: expected if set in code.
+    #:
+    #: **Do not enable development when deploying in production.**
+    #:
+    #: Default: ``'production'``
+    env = ConfigAttribute('ENV')
+
+    def _get_debug(self):
+        return self.config['DEBUG']
+
+    def _set_debug(self, value):
+        self.config['DEBUG'] = value
+        self.jinja_env.auto_reload = self.templates_auto_reload
+
+    #: Whether debug mode is enabled. When using ``flask run`` to start
+    #: the development server, an interactive debugger will be shown for
+    #: unhandled exceptions, and the server will be reloaded when code
+    #: changes. This maps to the :data:`DEBUG` config key. This is
+    #: enabled when :attr:`env` is ``'development'`` and is overridden
+    #: by the ``FLASK_DEBUG`` environment variable. It may not behave as
+    #: expected if set in code.
+    #:
+    #: **Do not enable debug mode when deploying in production.**
+    #:
+    #: Default: ``True`` if :attr:`env` is ``'development'``, or
+    #: ``False`` otherwise.
+    debug = property(_get_debug, _set_debug)
+    del _get_debug, _set_debug
+
+    def run(self, host=None, port=None, debug=None,
+            load_dotenv=True, **options):
+        """Runs the application on a local development server.
+
+        Do not use ``run()`` in a production setting. It is not intended to
+        meet security and performance requirements for a production server.
+        Instead, see :ref:`deployment` for WSGI server recommendations.
+
+        If the :attr:`debug` flag is set the server will automatically reload
+        for code changes and show a debugger when an exception occurs.
+
+        If you want to run the application in debug mode, but disable the
+        code execution on the interactive debugger, you can pass
+        ``use_evalex=False`` as a parameter.  This will keep the debugger's
+        traceback screen active, but disable code execution.
+
+        It is not recommended to use this function for development with
+        automatic reloading, as it is poorly supported.  Instead you should
+        use the :command:`flask` command line script's ``run`` support.
+
+        .. admonition:: Keep in Mind
+
+           Flask will suppress any server error with a generic error page
+           unless it is in debug mode.  As such, to enable just the
+           interactive debugger without code reloading, you have to
+           invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
+           Setting ``use_debugger`` to ``True`` without being in debug mode
+           won't catch any exceptions because there won't be any to
+           catch.
+
+        :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
+            have the server available externally as well. Defaults to
+            ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable
+            if present.
+        :param port: the port of the webserver. Defaults to ``5000`` or the
+            port defined in the ``SERVER_NAME`` config variable if present.
+        :param debug: if given, enable or disable debug mode. See
+            :attr:`debug`.
+        :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
+            files to set environment variables. Will also change the working
+            directory to the directory containing the first file found.
+        :param options: the options to be forwarded to the underlying Werkzeug
+            server. See :func:`werkzeug.serving.run_simple` for more
+            information.
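+
+        A minimal usage sketch (remember not to use the development
+        server in production)::
+
+            if __name__ == '__main__':
+                app.run(host='127.0.0.1', port=5000, debug=True)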
+
+        .. versionchanged:: 1.0
+            If installed, python-dotenv will be used to load environment
+            variables from :file:`.env` and :file:`.flaskenv` files.
+
+            If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`
+            environment variables will override :attr:`env` and
+            :attr:`debug`.
+
+            Threaded mode is enabled by default.
+
+        .. versionchanged:: 0.10
+            The default port is now picked from the ``SERVER_NAME``
+            variable.
+        """
+        # Change this into a no-op if the server is invoked from the
+        # command line. Have a look at cli.py for more information.
+        if os.environ.get('FLASK_RUN_FROM_CLI') == 'true':
+            from .debughelpers import explain_ignored_app_run
+            explain_ignored_app_run()
+            return
+
+        if get_load_dotenv(load_dotenv):
+            cli.load_dotenv()
+
+            # if set, let env vars override previous values
+            if 'FLASK_ENV' in os.environ:
+                self.env = get_env()
+                self.debug = get_debug_flag()
+            elif 'FLASK_DEBUG' in os.environ:
+                self.debug = get_debug_flag()
+
+        # debug passed to method overrides all other sources
+        if debug is not None:
+            self.debug = bool(debug)
+
+        _host = '127.0.0.1'
+        _port = 5000
+        server_name = self.config.get('SERVER_NAME')
+        sn_host, sn_port = None, None
+
+        if server_name:
+            sn_host, _, sn_port = server_name.partition(':')
+
+        host = host or sn_host or _host
+        port = int(port or sn_port or _port)
+
+        options.setdefault('use_reloader', self.debug)
+        options.setdefault('use_debugger', self.debug)
+        options.setdefault('threaded', True)
+
+        cli.show_server_banner(self.env, self.debug, self.name, False)
+
+        from werkzeug.serving import run_simple
+
+        try:
+            run_simple(host, port, self, **options)
+        finally:
+            # reset the first-request information when the development server
+            # stops normally.  This makes it possible to restart the server
+            # without the reloader from an interactive shell.
+            self._got_first_request = False
+
+    def test_client(self, use_cookies=True, **kwargs):
+        """Creates a test client for this application.  For information
+        about unit testing head over to :ref:`testing`.
+
+        Note that if you are testing for assertions or exceptions in your
+        application code, you must set ``app.testing = True`` in order for the
+        exceptions to propagate to the test client.  Otherwise, the exception
+        will be handled by the application (not visible to the test client) and
+        the only indication of an AssertionError or other exception will be a
+        500 status code response to the test client.  See the :attr:`testing`
+        attribute.  For example::
+
+            app.testing = True
+            client = app.test_client()
+
+        The test client can be used in a ``with`` block to defer the closing down
+        of the context until the end of the ``with`` block.  This is useful if
+        you want to access the context locals for testing::
+
+            with app.test_client() as c:
+                rv = c.get('/?vodka=42')
+                assert request.args['vodka'] == '42'
+
+        Additionally, you may pass optional keyword arguments that will then
+        be passed to the application's :attr:`test_client_class` constructor.
+        For example::
+
+            from flask.testing import FlaskClient
+
+            class CustomClient(FlaskClient):
+                def __init__(self, *args, **kwargs):
+                    self._authentication = kwargs.pop("authentication")
+                    super(CustomClient, self).__init__(*args, **kwargs)
+
+            app.test_client_class = CustomClient
+            client = app.test_client(authentication='Basic ....')
+
+        See :class:`~flask.testing.FlaskClient` for more information.
+
+        .. versionchanged:: 0.4
+           added support for ``with`` block usage for the client.
+
+        .. versionadded:: 0.7
+           The `use_cookies` parameter was added as well as the ability
+           to override the client to be used by setting the
+           :attr:`test_client_class` attribute.
+
+        .. versionchanged:: 0.11
+           Added `**kwargs` to support passing additional keyword arguments to
+           the constructor of :attr:`test_client_class`.
+        """
+        cls = self.test_client_class
+        if cls is None:
+            from flask.testing import FlaskClient as cls
+        return cls(self, self.response_class, use_cookies=use_cookies, **kwargs)
+
+    def test_cli_runner(self, **kwargs):
+        """Create a CLI runner for testing CLI commands.
+        See :ref:`testing-cli`.
+
+        Returns an instance of :attr:`test_cli_runner_class`, by default
+        :class:`~flask.testing.FlaskCliRunner`. The Flask app object is
+        passed as the first argument.
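+
+        A minimal sketch invoking the built-in ``routes`` command::
+
+            runner = app.test_cli_runner()
+            result = runner.invoke(args=['routes'])
+            assert result.exit_code == 0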
+
+        .. versionadded:: 1.0
+        """
+        cls = self.test_cli_runner_class
+
+        if cls is None:
+            from flask.testing import FlaskCliRunner as cls
+
+        return cls(self, **kwargs)
+
+    def open_session(self, request):
+        """Creates or opens a new session.  Default implementation stores all
+        session data in a signed cookie.  This requires that the
+        :attr:`secret_key` is set.  Instead of overriding this method
+        we recommend replacing the :class:`session_interface`.
+
+        .. deprecated: 1.0
+            Will be removed in 1.1. Use ``session_interface.open_session``
+            instead.
+
+        :param request: an instance of :attr:`request_class`.
+        """
+
+        warnings.warn(DeprecationWarning(
+            '"open_session" is deprecated and will be removed in 1.1. Use'
+            ' "session_interface.open_session" instead.'
+        ))
+        return self.session_interface.open_session(self, request)
+
+    def save_session(self, session, response):
+        """Saves the session if it needs updates.  For the default
+        implementation, check :meth:`open_session`.  Instead of overriding this
+        method we recommend replacing the :class:`session_interface`.
+
+        .. deprecated: 1.0
+            Will be removed in 1.1. Use ``session_interface.save_session``
+            instead.
+
+        :param session: the session to be saved (a
+                        :class:`~werkzeug.contrib.securecookie.SecureCookie`
+                        object)
+        :param response: an instance of :attr:`response_class`
+        """
+
+        warnings.warn(DeprecationWarning(
+            '"save_session" is deprecated and will be removed in 1.1. Use'
+            ' "session_interface.save_session" instead.'
+        ))
+        return self.session_interface.save_session(self, session, response)
+
+    def make_null_session(self):
+        """Creates a new instance of a missing session.  Instead of overriding
+        this method we recommend replacing the :class:`session_interface`.
+
+        .. deprecated: 1.0
+            Will be removed in 1.1. Use ``session_interface.make_null_session``
+            instead.
+
+        .. versionadded:: 0.7
+        """
+
+        warnings.warn(DeprecationWarning(
+            '"make_null_session" is deprecated and will be removed in 1.1. Use'
+            ' "session_interface.make_null_session" instead.'
+        ))
+        return self.session_interface.make_null_session(self)
+
+    @setupmethod
+    def register_blueprint(self, blueprint, **options):
+        """Register a :class:`~flask.Blueprint` on the application. Keyword
+        arguments passed to this method will override the defaults set on the
+        blueprint.
+
+        Calls the blueprint's :meth:`~flask.Blueprint.register` method after
+        recording the blueprint in the application's :attr:`blueprints`.
+
+        :param blueprint: The blueprint to register.
+        :param url_prefix: Blueprint routes will be prefixed with this.
+        :param subdomain: Blueprint routes will match on this subdomain.
+        :param url_defaults: Blueprint routes will use these default values for
+            view arguments.
+        :param options: Additional keyword arguments are passed to
+            :class:`~flask.blueprints.BlueprintSetupState`. They can be
+            accessed in :meth:`~flask.Blueprint.record` callbacks.
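+
+        A minimal usage sketch (``bp`` is a hypothetical blueprint)::
+
+            from flask import Blueprint
+
+            bp = Blueprint('admin', __name__)
+            app.register_blueprint(bp, url_prefix='/admin')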
+
+        .. versionadded:: 0.7
+        """
+        first_registration = False
+
+        if blueprint.name in self.blueprints:
+            assert self.blueprints[blueprint.name] is blueprint, (
+                'A name collision occurred between blueprints %r and %r. Both'
+                ' share the same name "%s". Blueprints that are created on the'
+                ' fly need unique names.' % (
+                    blueprint, self.blueprints[blueprint.name], blueprint.name
+                )
+            )
+        else:
+            self.blueprints[blueprint.name] = blueprint
+            self._blueprint_order.append(blueprint)
+            first_registration = True
+
+        blueprint.register(self, options, first_registration)
+
+    def iter_blueprints(self):
+        """Iterates over all blueprints by the order they were registered.
+
+        .. versionadded:: 0.11
+        """
+        return iter(self._blueprint_order)
+
+    @setupmethod
+    def add_url_rule(self, rule, endpoint=None, view_func=None,
+                     provide_automatic_options=None, **options):
+        """Connects a URL rule.  Works exactly like the :meth:`route`
+        decorator.  If a view_func is provided it will be registered with the
+        endpoint.
+
+        Basically this example::
+
+            @app.route('/')
+            def index():
+                pass
+
+        Is equivalent to the following::
+
+            def index():
+                pass
+            app.add_url_rule('/', 'index', index)
+
+        If the view_func is not provided you will need to connect the endpoint
+        to a view function like so::
+
+            app.view_functions['index'] = index
+
+        Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
+        to customize the behavior via subclassing you only need to change
+        this method.
+
+        For more information refer to :ref:`url-route-registrations`.
+
+        .. versionchanged:: 0.2
+           `view_func` parameter added.
+
+        .. versionchanged:: 0.6
+           ``OPTIONS`` is added automatically as method.
+
+        :param rule: the URL rule as string
+        :param endpoint: the endpoint for the registered URL rule.  Flask
+                         itself assumes the name of the view function as
+                         endpoint
+        :param view_func: the function to call when serving a request to the
+                          provided endpoint
+        :param provide_automatic_options: controls whether the ``OPTIONS``
+            method should be added automatically. This can also be controlled
+            by setting the ``view_func.provide_automatic_options = False``
+            before adding the rule.
+        :param options: the options to be forwarded to the underlying
+                        :class:`~werkzeug.routing.Rule` object.  A change
+                        to Werkzeug is handling of method options.  methods
+                        is a list of methods this rule should be limited
+                        to (``GET``, ``POST`` etc.).  By default a rule
+                        just listens for ``GET`` (and implicitly ``HEAD``).
+                        Starting with Flask 0.6, ``OPTIONS`` is implicitly
+                        added and handled by the standard request handling.
+        """
+        if endpoint is None:
+            endpoint = _endpoint_from_view_func(view_func)
+        options['endpoint'] = endpoint
+        methods = options.pop('methods', None)
+
+        # if the methods are not given and the view_func object knows its
+        # methods we can use that instead.  If neither exists, we go with
+        # a tuple of only ``GET`` as default.
+        if methods is None:
+            methods = getattr(view_func, 'methods', None) or ('GET',)
+        if isinstance(methods, string_types):
+            raise TypeError('Allowed methods have to be iterables of strings, '
+                            'for example: @app.route(..., methods=["POST"])')
+        methods = set(item.upper() for item in methods)
+
+        # Methods that should always be added
+        required_methods = set(getattr(view_func, 'required_methods', ()))
+
+        # starting with Flask 0.8 the view_func object can disable and
+        # force-enable the automatic options handling.
+        if provide_automatic_options is None:
+            provide_automatic_options = getattr(view_func,
+                'provide_automatic_options', None)
+
+        if provide_automatic_options is None:
+            if 'OPTIONS' not in methods:
+                provide_automatic_options = True
+                required_methods.add('OPTIONS')
+            else:
+                provide_automatic_options = False
+
+        # Add the required methods now.
+        methods |= required_methods
+
+        rule = self.url_rule_class(rule, methods=methods, **options)
+        rule.provide_automatic_options = provide_automatic_options
+
+        self.url_map.add(rule)
+        if view_func is not None:
+            old_func = self.view_functions.get(endpoint)
+            if old_func is not None and old_func != view_func:
+                raise AssertionError('View function mapping is overwriting an '
+                                     'existing endpoint function: %s' % endpoint)
+            self.view_functions[endpoint] = view_func
+
+    def route(self, rule, **options):
+        """A decorator that is used to register a view function for a
+        given URL rule.  This does the same thing as :meth:`add_url_rule`
+        but is intended for decorator usage::
+
+            @app.route('/')
+            def index():
+                return 'Hello World'
+
+        For more information refer to :ref:`url-route-registrations`.
+
+        :param rule: the URL rule as string
+        :param endpoint: the endpoint for the registered URL rule.  Flask
+                         itself assumes the name of the view function as
+                         endpoint
+        :param options: the options to be forwarded to the underlying
+                        :class:`~werkzeug.routing.Rule` object.  A change
+                        to Werkzeug is the handling of the ``methods``
+                        option: it is a list of methods this rule should be
+                        limited to (``GET``, ``POST`` etc.).  By default a rule
+                        just listens for ``GET`` (and implicitly ``HEAD``).
+                        Starting with Flask 0.6, ``OPTIONS`` is implicitly
+                        added and handled by the standard request handling.
+        """
+        def decorator(f):
+            endpoint = options.pop('endpoint', None)
+            self.add_url_rule(rule, endpoint, f, **options)
+            return f
+        return decorator
+
+    @setupmethod
+    def endpoint(self, endpoint):
+        """A decorator to register a function as an endpoint.
+        Example::
+
+            @app.endpoint('example.endpoint')
+            def example():
+                return "example"
+
+        :param endpoint: the name of the endpoint
+        """
+        def decorator(f):
+            self.view_functions[endpoint] = f
+            return f
+        return decorator
+
+    @staticmethod
+    def _get_exc_class_and_code(exc_class_or_code):
+        """Ensure that we register only exceptions as handler keys"""
+        if isinstance(exc_class_or_code, integer_types):
+            exc_class = default_exceptions[exc_class_or_code]
+        else:
+            exc_class = exc_class_or_code
+
+        assert issubclass(exc_class, Exception)
+
+        if issubclass(exc_class, HTTPException):
+            return exc_class, exc_class.code
+        else:
+            return exc_class, None
+
+    @setupmethod
+    def errorhandler(self, code_or_exception):
+        """Register a function to handle errors by code or exception class.
+
+        A decorator that is used to register a function given an
+        error code.  Example::
+
+            @app.errorhandler(404)
+            def page_not_found(error):
+                return 'This page does not exist', 404
+
+        You can also register handlers for arbitrary exceptions::
+
+            @app.errorhandler(DatabaseError)
+            def special_exception_handler(error):
+                return 'Database connection failed', 500
+
+        .. versionadded:: 0.7
+            Use :meth:`register_error_handler` instead of modifying
+            :attr:`error_handler_spec` directly, for application-wide error
+            handlers.
+
+        .. versionadded:: 0.7
+           One can now also register custom exception types that do not
+           have to be a subclass of the
+           :class:`~werkzeug.exceptions.HTTPException` class.
+
+        :param code_or_exception: the code as integer for the handler, or
+                                  an arbitrary exception
+        """
+        def decorator(f):
+            self._register_error_handler(None, code_or_exception, f)
+            return f
+        return decorator
+
+    @setupmethod
+    def register_error_handler(self, code_or_exception, f):
+        """Alternative error attach function to the :meth:`errorhandler`
+        decorator that is more straightforward to use for non decorator
+        usage.
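+
+        For example, this registers the same handler as the decorator
+        form shown in :meth:`errorhandler`::
+
+            def page_not_found(error):
+                return 'This page does not exist', 404
+
+            app.register_error_handler(404, page_not_found)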
+
+        .. versionadded:: 0.7
+        """
+        self._register_error_handler(None, code_or_exception, f)
+
+    @setupmethod
+    def _register_error_handler(self, key, code_or_exception, f):
+        """
+        :type key: None|str
+        :type code_or_exception: int|T<=Exception
+        :type f: callable
+        """
+        if isinstance(code_or_exception, HTTPException):  # old broken behavior
+            raise ValueError(
+                'Tried to register a handler for an exception instance {0!r}.'
+                ' Handlers can only be registered for exception classes or'
+                ' HTTP error codes.'.format(code_or_exception)
+            )
+
+        try:
+            exc_class, code = self._get_exc_class_and_code(code_or_exception)
+        except KeyError:
+            raise KeyError(
+                "'{0}' is not a recognized HTTP error code. Use a subclass of"
+                " HTTPException with that code instead.".format(code_or_exception)
+            )
+
+        handlers = self.error_handler_spec.setdefault(key, {}).setdefault(code, {})
+        handlers[exc_class] = f
+
+    @setupmethod
+    def template_filter(self, name=None):
+        """A decorator that is used to register custom template filter.
+        You can specify a name for the filter, otherwise the function
+        name will be used. Example::
+
+          @app.template_filter()
+          def reverse(s):
+              return s[::-1]
+
+        :param name: the optional name of the filter, otherwise the
+                     function name will be used.
+        """
+        def decorator(f):
+            self.add_template_filter(f, name=name)
+            return f
+        return decorator
+
+    @setupmethod
+    def add_template_filter(self, f, name=None):
+        """Register a custom template filter.  Works exactly like the
+        :meth:`template_filter` decorator.
+
+        :param name: the optional name of the filter, otherwise the
+                     function name will be used.
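+
+        For example, this is equivalent to the decorator form shown in
+        :meth:`template_filter`::
+
+            def reverse(s):
+                return s[::-1]
+
+            app.add_template_filter(reverse)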
+        """
+        self.jinja_env.filters[name or f.__name__] = f
+
+    @setupmethod
+    def template_test(self, name=None):
+        """A decorator that is used to register custom template test.
+        You can specify a name for the test, otherwise the function
+        name will be used. Example::
+
+          @app.template_test()
+          def is_prime(n):
+              if n == 2:
+                  return True
+              for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
+                  if n % i == 0:
+                      return False
+              return True
+
+        .. versionadded:: 0.10
+
+        :param name: the optional name of the test, otherwise the
+                     function name will be used.
+        """
+        def decorator(f):
+            self.add_template_test(f, name=name)
+            return f
+        return decorator
+
+    @setupmethod
+    def add_template_test(self, f, name=None):
+        """Register a custom template test.  Works exactly like the
+        :meth:`template_test` decorator.
+
+        .. versionadded:: 0.10
+
+        :param name: the optional name of the test, otherwise the
+                     function name will be used.
+        """
+        self.jinja_env.tests[name or f.__name__] = f
+
+    @setupmethod
+    def template_global(self, name=None):
+        """A decorator that is used to register a custom template global function.
+        You can specify a name for the global function, otherwise the function
+        name will be used. Example::
+
+            @app.template_global()
+            def double(n):
+                return 2 * n
+
+        .. versionadded:: 0.10
+
+        :param name: the optional name of the global function, otherwise the
+                     function name will be used.
+        """
+        def decorator(f):
+            self.add_template_global(f, name=name)
+            return f
+        return decorator
+
+    @setupmethod
+    def add_template_global(self, f, name=None):
+        """Register a custom template global function. Works exactly like the
+        :meth:`template_global` decorator.
+
+        .. versionadded:: 0.10
+
+        :param name: the optional name of the global function, otherwise the
+                     function name will be used.
+        """
+        self.jinja_env.globals[name or f.__name__] = f
+
+    @setupmethod
+    def before_request(self, f):
+        """Registers a function to run before each request.
+
+        For example, this can be used to open a database connection, or to load
+        the logged in user from the session.
+
+        The function will be called without any arguments. If it returns a
+        non-None value, the value is handled as if it was the return value from
+        the view, and further request handling is stopped.
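+
+        For example (a minimal sketch; ``load_user_from_session`` is a
+        hypothetical helper)::
+
+            @app.before_request
+            def load_user():
+                g.user = load_user_from_session(session)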
+        """
+        self.before_request_funcs.setdefault(None, []).append(f)
+        return f
+
+    @setupmethod
+    def before_first_request(self, f):
+        """Registers a function to be run before the first request to this
+        instance of the application.
+
+        The function will be called without any arguments and its return
+        value is ignored.
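+
+        For example (``init_db`` is a hypothetical function)::
+
+            @app.before_first_request
+            def initialize():
+                init_db()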
+
+        .. versionadded:: 0.8
+        """
+        self.before_first_request_funcs.append(f)
+        return f
+
+    @setupmethod
+    def after_request(self, f):
+        """Register a function to be run after each request.
+
+        Your function must take one parameter, an instance of
+        :attr:`response_class` and return a new response object or the
+        same (see :meth:`process_response`).
+
+        As of Flask 0.7 this function might not be executed at the end of the
+        request in case an unhandled exception occurred.
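+
+        For example, a minimal sketch that adds a header to every
+        response::
+
+            @app.after_request
+            def add_header(response):
+                response.headers['X-Frame-Options'] = 'SAMEORIGIN'
+                return response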
+        """
+        self.after_request_funcs.setdefault(None, []).append(f)
+        return f
+
+    @setupmethod
+    def teardown_request(self, f):
+        """Register a function to be run at the end of each request,
+        regardless of whether there was an exception or not.  These functions
+        are executed when the request context is popped, even if an
+        actual request was not performed.
+
+        Example::
+
+            ctx = app.test_request_context()
+            ctx.push()
+            ...
+            ctx.pop()
+
+        When ``ctx.pop()`` is executed in the above example, the teardown
+        functions are called just before the request context is removed from the
+        stack of active contexts.  This becomes relevant if you are using
+        such constructs in tests.
+
+        Generally, teardown functions must take every necessary precaution
+        to avoid failing.  If they execute code that might fail, they
+        have to surround that code with try/except statements and log
+        any errors that occur.
+
+        When a teardown function was called because of an exception it will
+        be passed an error object.
+
+        The return values of teardown functions are ignored.
+
+        .. admonition:: Debug Note
+
+           In debug mode Flask will not tear down a request on an exception
+           immediately.  Instead it will keep it alive so that the interactive
+           debugger can still access it.  This behavior can be controlled
+           by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable.
+        """
+        self.teardown_request_funcs.setdefault(None, []).append(f)
+        return f
+
+    @setupmethod
+    def teardown_appcontext(self, f):
+        """Registers a function to be called when the application context
+        ends.  These functions are typically also called when the request
+        context is popped.
+
+        Example::
+
+            ctx = app.app_context()
+            ctx.push()
+            ...
+            ctx.pop()
+
+        When ``ctx.pop()`` is executed in the above example, the teardown
+        functions are called just before the app context is removed from the
+        stack of active contexts.  This becomes relevant if you are using
+        such constructs in tests.
+
+        Since a request context typically also manages an application
+        context, these functions are also called when you pop a request
+        context.
+
+        When a teardown function was called because of an unhandled exception
+        it will be passed an error object. If an :meth:`errorhandler` is
+        registered, it will handle the exception and the teardown will not
+        receive it.
+
+        The return values of teardown functions are ignored.
+
+        .. versionadded:: 0.9
+        """
+        self.teardown_appcontext_funcs.append(f)
+        return f
+
+    @setupmethod
+    def context_processor(self, f):
+        """Registers a template context processor function."""
+        self.template_context_processors[None].append(f)
+        return f
+
+    @setupmethod
+    def shell_context_processor(self, f):
+        """Registers a shell context processor function.
+
+        .. versionadded:: 0.11
+        """
+        self.shell_context_processors.append(f)
+        return f
+
+    @setupmethod
+    def url_value_preprocessor(self, f):
+        """Register a URL value preprocessor function for all view
+        functions in the application. These functions will be called before the
+        :meth:`before_request` functions.
+
+        The function can modify the values captured from the matched url before
+        they are passed to the view. For example, this can be used to pop a
+        common language code value and place it in ``g`` rather than pass it to
+        every view.
+
+        The function is passed the endpoint name and values dict. The return
+        value is ignored.
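+
+        A minimal sketch of the language-code example described above::
+
+            @app.url_value_preprocessor
+            def pull_lang_code(endpoint, values):
+                if values is not None:
+                    g.lang_code = values.pop('lang_code', None)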
+        """
+        self.url_value_preprocessors.setdefault(None, []).append(f)
+        return f
+
+    @setupmethod
+    def url_defaults(self, f):
+        """Callback function for URL defaults for all view functions of the
+        application.  It's called with the endpoint and values and should
+        update the values passed in place.
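+
+        For example, the counterpart to the sketch shown in
+        :meth:`url_value_preprocessor` (assumes ``g.lang_code`` may have
+        been set earlier in the request)::
+
+            @app.url_defaults
+            def add_lang_code(endpoint, values):
+                if 'lang_code' not in values and hasattr(g, 'lang_code'):
+                    values['lang_code'] = g.lang_code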
+        """
+        self.url_default_functions.setdefault(None, []).append(f)
+        return f
+
+    def _find_error_handler(self, e):
+        """Return a registered error handler for an exception in this order:
+        blueprint handler for a specific code, app handler for a specific code,
+        blueprint handler for an exception class, app handler for an exception
+        class, or ``None`` if a suitable handler is not found.
+        """
+        exc_class, code = self._get_exc_class_and_code(type(e))
+
+        for name, c in (
+            (request.blueprint, code), (None, code),
+            (request.blueprint, None), (None, None)
+        ):
+            handler_map = self.error_handler_spec.setdefault(name, {}).get(c)
+
+            if not handler_map:
+                continue
+
+            for cls in exc_class.__mro__:
+                handler = handler_map.get(cls)
+
+                if handler is not None:
+                    return handler
+
+    def handle_http_exception(self, e):
+        """Handles an HTTP exception.  By default this will invoke the
+        registered error handlers and fall back to returning the
+        exception as response.
+
+        .. versionadded:: 0.3
+        """
+        # Proxy exceptions don't have error codes.  We want to always return
+        # those unchanged as errors
+        if e.code is None:
+            return e
+
+        handler = self._find_error_handler(e)
+        if handler is None:
+            return e
+        return handler(e)
+
+    def trap_http_exception(self, e):
+        """Checks if an HTTP exception should be trapped or not.  By default
+        this will return ``False`` for all exceptions except for a bad request
+        key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``.  It
+        also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
+
+        This is called for all HTTP exceptions raised by a view function.
+        If it returns ``True`` for any exception the error handler for this
+        exception is not called and it shows up as a regular exception in the
+        traceback.  This is helpful for debugging implicitly raised HTTP
+        exceptions.
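+
+        For example, to always surface HTTP exceptions in the traceback::
+
+            app.config['TRAP_HTTP_EXCEPTIONS'] = True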
+
+        .. versionchanged:: 1.0
+            Bad request errors are not trapped by default in debug mode.
+
+        .. versionadded:: 0.8
+        """
+        if self.config['TRAP_HTTP_EXCEPTIONS']:
+            return True
+
+        trap_bad_request = self.config['TRAP_BAD_REQUEST_ERRORS']
+
+        # if unset, trap key errors in debug mode
+        if (
+            trap_bad_request is None and self.debug
+            and isinstance(e, BadRequestKeyError)
+        ):
+            return True
+
+        if trap_bad_request:
+            return isinstance(e, BadRequest)
+
+        return False
+
+    def handle_user_exception(self, e):
+        """This method is called whenever an exception occurs that should be
+        handled.  A special case is
+        :class:`~werkzeug.exceptions.HTTPException`\s, which are forwarded by
+        this function to the :meth:`handle_http_exception` method.  This
+        function will either return a response value or reraise the
+        exception with the same traceback.
+
+        .. versionchanged:: 1.0
+            Key errors raised from request data like ``form`` show the bad
+            key in debug mode rather than a generic bad request message.
+
+        .. versionadded:: 0.7
+        """
+        exc_type, exc_value, tb = sys.exc_info()
+        assert exc_value is e
+        # ensure not to trash sys.exc_info() at that point in case someone
+        # wants the traceback preserved in handle_http_exception.  Of course
+        # we cannot prevent users from trashing it themselves in a custom
+        # trap_http_exception method, in which case it is their responsibility.
+
+        # MultiDict passes the key to the exception, but that's ignored
+        # when generating the response message. Set an informative
+        # description for key errors in debug mode or when trapping errors.
+        if (
+            (self.debug or self.config['TRAP_BAD_REQUEST_ERRORS'])
+            and isinstance(e, BadRequestKeyError)
+            # only set it if it's still the default description
+            and e.description is BadRequestKeyError.description
+        ):
+            e.description = "KeyError: '{0}'".format(*e.args)
+
+        if isinstance(e, HTTPException) and not self.trap_http_exception(e):
+            return self.handle_http_exception(e)
+
+        handler = self._find_error_handler(e)
+
+        if handler is None:
+            reraise(exc_type, exc_value, tb)
+        return handler(e)
+
+    def handle_exception(self, e):
+        """Default exception handling that kicks in when an exception
+        occurs that is not caught.  In debug mode the exception will
+        be re-raised immediately, otherwise it is logged and the handler
+        for a 500 internal server error is used.  If no such handler
+        exists, a default 500 internal server error message is displayed.
+
+        .. versionadded:: 0.3
+        """
+        exc_type, exc_value, tb = sys.exc_info()
+
+        got_request_exception.send(self, exception=e)
+        handler = self._find_error_handler(InternalServerError())
+
+        if self.propagate_exceptions:
+            # if we want to repropagate the exception, we can attempt to
+            # raise it with the whole traceback in case we can do that
+            # (the function was actually called from the except part)
+            # otherwise, we just raise the error again
+            if exc_value is e:
+                reraise(exc_type, exc_value, tb)
+            else:
+                raise e
+
+        self.log_exception((exc_type, exc_value, tb))
+        if handler is None:
+            return InternalServerError()
+        return self.finalize_request(handler(e), from_error_handler=True)
+
+    def log_exception(self, exc_info):
+        """Logs an exception.  This is called by :meth:`handle_exception`
+        if debugging is disabled and right before the handler is called.
+        The default implementation logs the exception as error on the
+        :attr:`logger`.
+
+        .. versionadded:: 0.8
+        """
+        self.logger.error('Exception on %s [%s]' % (
+            request.path,
+            request.method
+        ), exc_info=exc_info)
+
+    def raise_routing_exception(self, request):
+        """Exceptions that are recording during routing are reraised with
+        this method.  During debug we are not reraising redirect requests
+        for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
+        a different error instead to help debug situations.
+
+        :internal:
+        """
+        if not self.debug \
+           or not isinstance(request.routing_exception, RequestRedirect) \
+           or request.method in ('GET', 'HEAD', 'OPTIONS'):
+            raise request.routing_exception
+
+        from .debughelpers import FormDataRoutingRedirect
+        raise FormDataRoutingRedirect(request)
+
+    def dispatch_request(self):
+        """Does the request dispatching.  Matches the URL and returns the
+        return value of the view or error handler.  This does not have to
+        be a response object.  In order to convert the return value to a
+        proper response object, call :func:`make_response`.
+
+        .. versionchanged:: 0.7
+           This no longer does the exception handling, this code was
+           moved to the new :meth:`full_dispatch_request`.
+        """
+        req = _request_ctx_stack.top.request
+        if req.routing_exception is not None:
+            self.raise_routing_exception(req)
+        rule = req.url_rule
+        # if we provide automatic options for this URL and the
+        # request came with the OPTIONS method, reply automatically
+        if getattr(rule, 'provide_automatic_options', False) \
+           and req.method == 'OPTIONS':
+            return self.make_default_options_response()
+        # otherwise dispatch to the handler for that endpoint
+        return self.view_functions[rule.endpoint](**req.view_args)
+
+    def full_dispatch_request(self):
+        """Dispatches the request and on top of that performs request
+        pre and postprocessing as well as HTTP exception catching and
+        error handling.
+
+        .. versionadded:: 0.7
+        """
+        self.try_trigger_before_first_request_functions()
+        try:
+            request_started.send(self)
+            rv = self.preprocess_request()
+            if rv is None:
+                rv = self.dispatch_request()
+        except Exception as e:
+            rv = self.handle_user_exception(e)
+        return self.finalize_request(rv)
+
+    def finalize_request(self, rv, from_error_handler=False):
+        """Given the return value from a view function this finalizes
+        the request by converting it into a response and invoking the
+        postprocessing functions.  This is invoked for both normal
+        request dispatching as well as error handlers.
+
+        Because this means that it might be called as a result of a
+        failure, a special safe mode is available which can be enabled
+        with the `from_error_handler` flag.  If enabled, failures in
+        response processing will be logged and otherwise ignored.
+
+        :internal:
+        """
+        response = self.make_response(rv)
+        try:
+            response = self.process_response(response)
+            request_finished.send(self, response=response)
+        except Exception:
+            if not from_error_handler:
+                raise
+            self.logger.exception('Request finalizing failed with an '
+                                  'error while handling an error')
+        return response
+
+    def try_trigger_before_first_request_functions(self):
+        """Called before each request and will ensure that it triggers
+        the :attr:`before_first_request_funcs` and only exactly once per
+        application instance (which means process usually).
+
+        :internal:
+        """
+        if self._got_first_request:
+            return
+        with self._before_request_lock:
+            if self._got_first_request:
+                return
+            for func in self.before_first_request_funcs:
+                func()
+            self._got_first_request = True
+
+    def make_default_options_response(self):
+        """This method is called to create the default ``OPTIONS`` response.
+        This can be changed through subclassing to change the default
+        behavior of ``OPTIONS`` responses.
+
+        .. versionadded:: 0.7
+        """
+        adapter = _request_ctx_stack.top.url_adapter
+        if hasattr(adapter, 'allowed_methods'):
+            methods = adapter.allowed_methods()
+        else:
+            # fallback for Werkzeug < 0.7
+            methods = []
+            try:
+                adapter.match(method='--')
+            except MethodNotAllowed as e:
+                methods = e.valid_methods
+            except HTTPException:
+                pass
+        rv = self.response_class()
+        rv.allow.update(methods)
+        return rv
+
+    def should_ignore_error(self, error):
+        """This is called to figure out if an error should be ignored
+        or not as far as the teardown system is concerned.  If this
+        function returns ``True`` then the teardown handlers will not be
+        passed the error.
+
+        .. versionadded:: 0.10
+        """
+        return False
+
+    def make_response(self, rv):
+        """Convert the return value from a view function to an instance of
+        :attr:`response_class`.
+
+        :param rv: the return value from the view function. The view function
+            must return a response. Returning ``None``, or the view ending
+            without returning, is not allowed. The following types are allowed
+            for ``view_rv``:
+
+            ``str`` (``unicode`` in Python 2)
+                A response object is created with the string encoded to UTF-8
+                as the body.
+
+            ``bytes`` (``str`` in Python 2)
+                A response object is created with the bytes as the body.
+
+            ``tuple``
+                Either ``(body, status, headers)``, ``(body, status)``, or
+                ``(body, headers)``, where ``body`` is any of the other types
+                allowed here, ``status`` is a string or an integer, and
+                ``headers`` is a dictionary or a list of ``(key, value)``
+                tuples. If ``body`` is a :attr:`response_class` instance,
+                ``status`` overwrites the existing value and ``headers`` are
+                extended.
+
+            :attr:`response_class`
+                The object is returned unchanged.
+
+            other :class:`~werkzeug.wrappers.Response` class
+                The object is coerced to :attr:`response_class`.
+
+            :func:`callable`
+                The function is called as a WSGI application. The result is
+                used to create a response object.
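+
+        For example, each of these view return values is valid under the
+        rules above::
+
+            return 'Hello'                           # str body
+            return 'Not Found', 404                  # (body, status)
+            return 'Hi', {'X-Header': 'Value'}       # (body, headers)
+            return 'Hi', 200, {'X-Header': 'Value'}  # (body, status, headers)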
+
+        .. versionchanged:: 0.9
+           Previously a tuple was interpreted as the arguments for the
+           response object.
+        """
+
+        status = headers = None
+
+        # unpack tuple returns
+        if isinstance(rv, tuple):
+            len_rv = len(rv)
+
+            # a 3-tuple is unpacked directly
+            if len_rv == 3:
+                rv, status, headers = rv
+            # decide if a 2-tuple has status or headers
+            elif len_rv == 2:
+                if isinstance(rv[1], (Headers, dict, tuple, list)):
+                    rv, headers = rv
+                else:
+                    rv, status = rv
+            # other sized tuples are not allowed
+            else:
+                raise TypeError(
+                    'The view function did not return a valid response tuple.'
+                    ' The tuple must have the form (body, status, headers),'
+                    ' (body, status), or (body, headers).'
+                )
+
+        # the body must not be None
+        if rv is None:
+            raise TypeError(
+                'The view function did not return a valid response. The'
+                ' function either returned None or ended without a return'
+                ' statement.'
+            )
+
+        # make sure the body is an instance of the response class
+        if not isinstance(rv, self.response_class):
+            if isinstance(rv, (text_type, bytes, bytearray)):
+                # let the response class set the status and headers instead of
+                # waiting to do it manually, so that the class can handle any
+                # special logic
+                rv = self.response_class(rv, status=status, headers=headers)
+                status = headers = None
+            else:
+                # evaluate a WSGI callable, or coerce a different response
+                # class to the correct type
+                try:
+                    rv = self.response_class.force_type(rv, request.environ)
+                except TypeError as e:
+                    new_error = TypeError(
+                        '{e}\nThe view function did not return a valid'
+                        ' response. The return type must be a string, tuple,'
+                        ' Response instance, or WSGI callable, but it was a'
+                        ' {rv.__class__.__name__}.'.format(e=e, rv=rv)
+                    )
+                    reraise(TypeError, new_error, sys.exc_info()[2])
+
+        # prefer the status if it was provided
+        if status is not None:
+            if isinstance(status, (text_type, bytes, bytearray)):
+                rv.status = status
+            else:
+                rv.status_code = status
+
+        # extend existing headers with provided headers
+        if headers:
+            rv.headers.extend(headers)
+
+        return rv
+
+    def create_url_adapter(self, request):
+        """Creates a URL adapter for the given request. The URL adapter
+        is created at a point where the request context is not yet set
+        up so the request is passed explicitly.
+
+        .. versionadded:: 0.6
+
+        .. versionchanged:: 0.9
+           This can now also be called without a request object when the
+           URL adapter is created for the application context.
+
+        .. versionchanged:: 1.0
+            :data:`SERVER_NAME` no longer implicitly enables subdomain
+            matching. Use :attr:`subdomain_matching` instead.
+        """
+        if request is not None:
+            # If subdomain matching is disabled (the default), use the
+            # default subdomain in all cases. This should be the default
+            # in Werkzeug but it currently does not have that feature.
+            subdomain = ((self.url_map.default_subdomain or None)
+                         if not self.subdomain_matching else None)
+            return self.url_map.bind_to_environ(
+                request.environ,
+                server_name=self.config['SERVER_NAME'],
+                subdomain=subdomain)
+        # We need at the very least the server name to be set for this
+        # to work.
+        if self.config['SERVER_NAME'] is not None:
+            return self.url_map.bind(
+                self.config['SERVER_NAME'],
+                script_name=self.config['APPLICATION_ROOT'],
+                url_scheme=self.config['PREFERRED_URL_SCHEME'])
+
+    def inject_url_defaults(self, endpoint, values):
+        """Injects the URL defaults for the given endpoint directly into
+        the values dictionary passed.  This is used internally and
+        automatically called on URL building.
+
+        .. versionadded:: 0.7
+        """
+        funcs = self.url_default_functions.get(None, ())
+        if '.' in endpoint:
+            bp = endpoint.rsplit('.', 1)[0]
+            funcs = chain(funcs, self.url_default_functions.get(bp, ()))
+        for func in funcs:
+            func(endpoint, values)
+
+    def handle_url_build_error(self, error, endpoint, values):
+        """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
+        """
+        exc_type, exc_value, tb = sys.exc_info()
+        for handler in self.url_build_error_handlers:
+            try:
+                rv = handler(error, endpoint, values)
+                if rv is not None:
+                    return rv
+            except BuildError as e:
+                # make error available outside except block (py3)
+                error = e
+
+        # At this point we want to reraise the exception.  If the error is
+        # still the same one we can reraise it with the original traceback,
+        # otherwise we raise it from here.
+        if error is exc_value:
+            reraise(exc_type, exc_value, tb)
+        raise error
+
+    def preprocess_request(self):
+        """Called before the request is dispatched. Calls
+        :attr:`url_value_preprocessors` registered with the app and the
+        current blueprint (if any). Then calls :attr:`before_request_funcs`
+        registered with the app and the blueprint.
+
+        If any :meth:`before_request` handler returns a non-None value, the
+        value is handled as if it was the return value from the view, and
+        further request handling is stopped.
+        """
+
+        bp = _request_ctx_stack.top.request.blueprint
+
+        funcs = self.url_value_preprocessors.get(None, ())
+        if bp is not None and bp in self.url_value_preprocessors:
+            funcs = chain(funcs, self.url_value_preprocessors[bp])
+        for func in funcs:
+            func(request.endpoint, request.view_args)
+
+        funcs = self.before_request_funcs.get(None, ())
+        if bp is not None and bp in self.before_request_funcs:
+            funcs = chain(funcs, self.before_request_funcs[bp])
+        for func in funcs:
+            rv = func()
+            if rv is not None:
+                return rv
+
+    def process_response(self, response):
+        """Can be overridden in order to modify the response object
+        before it's sent to the WSGI server.  By default this will
+        call all the :meth:`after_request` decorated functions.
+
+        .. versionchanged:: 0.5
+           As of Flask 0.5 the functions registered for after request
+           execution are called in reverse order of registration.
+
+        :param response: a :attr:`response_class` object.
+        :return: a new response object or the same, has to be an
+                 instance of :attr:`response_class`.
+        """
+        ctx = _request_ctx_stack.top
+        bp = ctx.request.blueprint
+        funcs = ctx._after_request_functions
+        if bp is not None and bp in self.after_request_funcs:
+            funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
+        if None in self.after_request_funcs:
+            funcs = chain(funcs, reversed(self.after_request_funcs[None]))
+        for handler in funcs:
+            response = handler(response)
+        if not self.session_interface.is_null_session(ctx.session):
+            self.session_interface.save_session(self, ctx.session, response)
+        return response
+
+    def do_teardown_request(self, exc=_sentinel):
+        """Called after the request is dispatched and the response is
+        returned, right before the request context is popped.
+
+        This calls all functions decorated with
+        :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`
+        if a blueprint handled the request. Finally, the
+        :data:`request_tearing_down` signal is sent.
+
+        This is called by
+        :meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`,
+        which may be delayed during testing to maintain access to
+        resources.
+
+        :param exc: An unhandled exception raised while dispatching the
+            request. Detected from the current exception information if
+            not passed. Passed to each teardown function.
+
+        .. versionchanged:: 0.9
+            Added the ``exc`` argument.
+        """
+        if exc is _sentinel:
+            exc = sys.exc_info()[1]
+        funcs = reversed(self.teardown_request_funcs.get(None, ()))
+        bp = _request_ctx_stack.top.request.blueprint
+        if bp is not None and bp in self.teardown_request_funcs:
+            funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
+        for func in funcs:
+            func(exc)
+        request_tearing_down.send(self, exc=exc)
+
+    def do_teardown_appcontext(self, exc=_sentinel):
+        """Called right before the application context is popped.
+
+        When handling a request, the application context is popped
+        after the request context. See :meth:`do_teardown_request`.
+
+        This calls all functions decorated with
+        :meth:`teardown_appcontext`. Then the
+        :data:`appcontext_tearing_down` signal is sent.
+
+        This is called by
+        :meth:`AppContext.pop() <flask.ctx.AppContext.pop>`.
+
+        .. versionadded:: 0.9
+        """
+        if exc is _sentinel:
+            exc = sys.exc_info()[1]
+        for func in reversed(self.teardown_appcontext_funcs):
+            func(exc)
+        appcontext_tearing_down.send(self, exc=exc)
+
+    def app_context(self):
+        """Create an :class:`~flask.ctx.AppContext`. Use as a ``with``
+        block to push the context, which will make :data:`current_app`
+        point at this application.
+
+        An application context is automatically pushed by
+        :meth:`RequestContext.push() <flask.ctx.RequestContext.push>`
+        when handling a request, and when running a CLI command. Use
+        this to manually create a context outside of these situations.
+
+        ::
+
+            with app.app_context():
+                init_db()
+
+        See :doc:`/appcontext`.
+
+        .. versionadded:: 0.9
+        """
+        return AppContext(self)
+
+    def request_context(self, environ):
+        """Create a :class:`~flask.ctx.RequestContext` representing a
+        WSGI environment. Use a ``with`` block to push the context,
+        which will make :data:`request` point at this request.
+
+        See :doc:`/reqcontext`.
+
+        Typically you should not call this from your own code. A request
+        context is automatically pushed by the :meth:`wsgi_app` when
+        handling a request. Use :meth:`test_request_context` to create
+        an environment and context instead of this method.
+
+        :param environ: a WSGI environment
+        """
+        return RequestContext(self, environ)
+
+    def test_request_context(self, *args, **kwargs):
+        """Create a :class:`~flask.ctx.RequestContext` for a WSGI
+        environment created from the given values. This is mostly useful
+        during testing, where you may want to run a function that uses
+        request data without dispatching a full request.
+
+        See :doc:`/reqcontext`.
+
+        Use a ``with`` block to push the context, which will make
+        :data:`request` point at the request for the created
+        environment. ::
+
+            with test_request_context(...):
+                generate_report()
+
+        When using the shell, it may be easier to push and pop the
+        context manually to avoid indentation. ::
+
+            ctx = app.test_request_context(...)
+            ctx.push()
+            ...
+            ctx.pop()
+
+        Takes the same arguments as Werkzeug's
+        :class:`~werkzeug.test.EnvironBuilder`, with some defaults from
+        the application. See the linked Werkzeug docs for most of the
+        available arguments. Flask-specific behavior is listed here.
+
+        :param path: URL path being requested.
+        :param base_url: Base URL where the app is being served, which
+            ``path`` is relative to. If not given, built from
+            :data:`PREFERRED_URL_SCHEME`, ``subdomain``,
+            :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.
+        :param subdomain: Subdomain name to append to
+            :data:`SERVER_NAME`.
+        :param url_scheme: Scheme to use instead of
+            :data:`PREFERRED_URL_SCHEME`.
+        :param data: The request body, either as a string or a dict of
+            form keys and values.
+        :param json: If given, this is serialized as JSON and passed as
+            ``data``. Also defaults ``content_type`` to
+            ``application/json``.
+        :param args: other positional arguments passed to
+            :class:`~werkzeug.test.EnvironBuilder`.
+        :param kwargs: other keyword arguments passed to
+            :class:`~werkzeug.test.EnvironBuilder`.
+        """
+        from flask.testing import make_test_environ_builder
+
+        builder = make_test_environ_builder(self, *args, **kwargs)
+
+        try:
+            return self.request_context(builder.get_environ())
+        finally:
+            builder.close()
+
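+    # An illustrative sketch (not part of Flask itself): exercising code
+    # that reads ``request`` without dispatching a full request. ``app``
+    # and the route data below are assumed names for the example.
+    #
+    #   with app.test_request_context('/items', method='POST',
+    #                                 json={'name': 'x'}):
+    #       from flask import request
+    #       assert request.get_json()['name'] == 'x'
+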
+    def wsgi_app(self, environ, start_response):
+        """The actual WSGI application. This is not implemented in
+        :meth:`__call__` so that middlewares can be applied without
+        losing a reference to the app object. Instead of doing this::
+
+            app = MyMiddleware(app)
+
+        It's a better idea to do this instead::
+
+            app.wsgi_app = MyMiddleware(app.wsgi_app)
+
+        Then you still have the original application object around and
+        can continue to call methods on it.
+
+        .. versionchanged:: 0.7
+            Teardown events for the request and app contexts are called
+            even if an unhandled error occurs. Other events may not be
+            called depending on when an error occurs during dispatch.
+            See :ref:`callbacks-and-errors`.
+
+        :param environ: A WSGI environment.
+        :param start_response: A callable accepting a status code,
+            a list of headers, and an optional exception context to
+            start the response.
+        """
+        ctx = self.request_context(environ)
+        error = None
+        try:
+            try:
+                ctx.push()
+                response = self.full_dispatch_request()
+            except Exception as e:
+                error = e
+                response = self.handle_exception(e)
+            except:
+                error = sys.exc_info()[1]
+                raise
+            return response(environ, start_response)
+        finally:
+            if self.should_ignore_error(error):
+                error = None
+            ctx.auto_pop(error)
+
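+    # An illustrative sketch (not part of Flask itself): a minimal WSGI
+    # middleware wrapped around ``wsgi_app`` as the docstring above
+    # recommends. ``HeaderMiddleware`` is an assumed name.
+    #
+    #   class HeaderMiddleware(object):
+    #       def __init__(self, wsgi_app):
+    #           self.wsgi_app = wsgi_app
+    #
+    #       def __call__(self, environ, start_response):
+    #           def _start_response(status, headers, exc_info=None):
+    #               # append a response header before starting the response
+    #               headers.append(('X-Example', '1'))
+    #               return start_response(status, headers, exc_info)
+    #           return self.wsgi_app(environ, _start_response)
+    #
+    #   app.wsgi_app = HeaderMiddleware(app.wsgi_app)
+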
+    def __call__(self, environ, start_response):
+        """The WSGI server calls the Flask application object as the
+        WSGI application. This calls :meth:`wsgi_app` which can be
+        wrapped to applying middleware."""
+        return self.wsgi_app(environ, start_response)
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__,
+            self.name,
+        )
diff --git a/lib/python3.7/site-packages/flask/blueprints.py b/lib/python3.7/site-packages/flask/blueprints.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ce5561e4e1950ff89c90496244d1f4b974babc3
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/blueprints.py
@@ -0,0 +1,448 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.blueprints
+    ~~~~~~~~~~~~~~~~
+
+    Blueprints are the recommended way to implement larger or more
+    pluggable applications in Flask 0.7 and later.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+from functools import update_wrapper
+from werkzeug.urls import url_join
+
+from .helpers import _PackageBoundObject, _endpoint_from_view_func
+
+
+class BlueprintSetupState(object):
+    """Temporary holder object for registering a blueprint with the
+    application.  An instance of this class is created by the
+    :meth:`~flask.Blueprint.make_setup_state` method and later passed
+    to all register callback functions.
+    """
+
+    def __init__(self, blueprint, app, options, first_registration):
+        #: a reference to the current application
+        self.app = app
+
+        #: a reference to the blueprint that created this setup state.
+        self.blueprint = blueprint
+
+        #: a dictionary with all options that were passed to the
+        #: :meth:`~flask.Flask.register_blueprint` method.
+        self.options = options
+
+        #: as blueprints can be registered multiple times with the
+        #: application and not everything wants to be registered
+        #: multiple times on it, this attribute can be used to figure
+        #: out if the blueprint was registered in the past already.
+        self.first_registration = first_registration
+
+        subdomain = self.options.get('subdomain')
+        if subdomain is None:
+            subdomain = self.blueprint.subdomain
+
+        #: The subdomain that the blueprint should be active for, ``None``
+        #: otherwise.
+        self.subdomain = subdomain
+
+        url_prefix = self.options.get('url_prefix')
+        if url_prefix is None:
+            url_prefix = self.blueprint.url_prefix
+        #: The prefix that should be used for all URLs defined on the
+        #: blueprint.
+        self.url_prefix = url_prefix
+
+        #: A dictionary with URL defaults that is added to each and every
+        #: URL that was defined with the blueprint.
+        self.url_defaults = dict(self.blueprint.url_values_defaults)
+        self.url_defaults.update(self.options.get('url_defaults', ()))
+
+    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
+        """A helper method to register a rule (and optionally a view function)
+        to the application.  The endpoint is automatically prefixed with the
+        blueprint's name.
+        """
+        if self.url_prefix is not None:
+            if rule:
+                rule = '/'.join((
+                    self.url_prefix.rstrip('/'), rule.lstrip('/')))
+            else:
+                rule = self.url_prefix
+        options.setdefault('subdomain', self.subdomain)
+        if endpoint is None:
+            endpoint = _endpoint_from_view_func(view_func)
+        defaults = self.url_defaults
+        if 'defaults' in options:
+            defaults = dict(defaults, **options.pop('defaults'))
+        self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
+                              view_func, defaults=defaults, **options)
+
+
+class Blueprint(_PackageBoundObject):
+    """Represents a blueprint.  A blueprint is an object that records
+    functions that will be called with the
+    :class:`~flask.blueprints.BlueprintSetupState` later to register functions
+    or other things on the main application.  See :ref:`blueprints` for more
+    information.
+
+    .. versionadded:: 0.7
+    """
+
+    warn_on_modifications = False
+    _got_registered_once = False
+
+    #: Blueprint local JSON encoder class to use.
+    #: Set to ``None`` to use the app's :class:`~flask.app.Flask.json_encoder`.
+    json_encoder = None
+    #: Blueprint local JSON decoder class to use.
+    #: Set to ``None`` to use the app's :class:`~flask.app.Flask.json_decoder`.
+    json_decoder = None
+
+    # TODO remove the next three attrs when Sphinx :inherited-members: works
+    # https://github.com/sphinx-doc/sphinx/issues/741
+
+    #: The name of the package or module that this app belongs to. Do not
+    #: change this once it is set by the constructor.
+    import_name = None
+
+    #: Location of the template files to be added to the template lookup.
+    #: ``None`` if templates should not be added.
+    template_folder = None
+
+    #: Absolute path to the package on the filesystem. Used to look up
+    #: resources contained in the package.
+    root_path = None
+
+    def __init__(self, name, import_name, static_folder=None,
+                 static_url_path=None, template_folder=None,
+                 url_prefix=None, subdomain=None, url_defaults=None,
+                 root_path=None):
+        _PackageBoundObject.__init__(self, import_name, template_folder,
+                                     root_path=root_path)
+        self.name = name
+        self.url_prefix = url_prefix
+        self.subdomain = subdomain
+        self.static_folder = static_folder
+        self.static_url_path = static_url_path
+        self.deferred_functions = []
+        if url_defaults is None:
+            url_defaults = {}
+        self.url_values_defaults = url_defaults
+
+    def record(self, func):
+        """Registers a function that is called when the blueprint is
+        registered on the application.  This function is called with the
+        state as argument as returned by the :meth:`make_setup_state`
+        method.
+        """
+        if self._got_registered_once and self.warn_on_modifications:
+            from warnings import warn
+            warn(Warning('The blueprint was already registered once '
+                         'but is getting modified now.  These changes '
+                         'will not show up.'))
+        self.deferred_functions.append(func)
+
+    def record_once(self, func):
+        """Works like :meth:`record` but wraps the function in another
+        function that will ensure the function is only called once.  If the
+        blueprint is registered a second time on the application, the
+        function passed is not called.
+        """
+        def wrapper(state):
+            if state.first_registration:
+                func(state)
+        return self.record(update_wrapper(wrapper, func))
+
+    def make_setup_state(self, app, options, first_registration=False):
+        """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
+        object that is later passed to the register callback functions.
+        Subclasses can override this to return a subclass of the setup state.
+        """
+        return BlueprintSetupState(self, app, options, first_registration)
+
+    def register(self, app, options, first_registration=False):
+        """Called by :meth:`Flask.register_blueprint` to register all views
+        and callbacks registered on the blueprint with the application. Creates
+        a :class:`.BlueprintSetupState` and calls each :meth:`record` callback
+        with it.
+
+        :param app: The application this blueprint is being registered with.
+        :param options: Keyword arguments forwarded from
+            :meth:`~Flask.register_blueprint`.
+        :param first_registration: Whether this is the first time this
+            blueprint has been registered on the application.
+        """
+        self._got_registered_once = True
+        state = self.make_setup_state(app, options, first_registration)
+
+        if self.has_static_folder:
+            state.add_url_rule(
+                self.static_url_path + '/<path:filename>',
+                view_func=self.send_static_file, endpoint='static'
+            )
+
+        for deferred in self.deferred_functions:
+            deferred(state)
+
+    def route(self, rule, **options):
+        """Like :meth:`Flask.route` but for a blueprint.  The endpoint for the
+        :func:`url_for` function is prefixed with the name of the blueprint.
+        """
+        def decorator(f):
+            endpoint = options.pop("endpoint", f.__name__)
+            self.add_url_rule(rule, endpoint, f, **options)
+            return f
+        return decorator
+
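+    # An illustrative sketch (not part of Flask itself): routes defined on
+    # a blueprint get endpoints prefixed with the blueprint name. ``admin``
+    # and ``app`` are assumed names.
+    #
+    #   admin = Blueprint('admin', __name__, url_prefix='/admin')
+    #
+    #   @admin.route('/')
+    #   def index():
+    #       return 'admin index'
+    #
+    #   app.register_blueprint(admin)
+    #   # url_for('admin.index') builds '/admin/'
+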
+    def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
+        """Like :meth:`Flask.add_url_rule` but for a blueprint.  The endpoint for
+        the :func:`url_for` function is prefixed with the name of the blueprint.
+        """
+        if endpoint:
+            assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
+        if view_func and hasattr(view_func, '__name__'):
+            assert '.' not in view_func.__name__, "Blueprint view function name should not contain dots"
+        self.record(lambda s:
+            s.add_url_rule(rule, endpoint, view_func, **options))
+
+    def endpoint(self, endpoint):
+        """Like :meth:`Flask.endpoint` but for a blueprint.  This does not
+        prefix the endpoint with the blueprint name, this has to be done
+        explicitly by the user of this method.  If the endpoint is prefixed
+        with a `.` it will be registered to the current blueprint, otherwise
+        it's an application independent endpoint.
+        """
+        def decorator(f):
+            def register_endpoint(state):
+                state.app.view_functions[endpoint] = f
+            self.record_once(register_endpoint)
+            return f
+        return decorator
+
+    def app_template_filter(self, name=None):
+        """Register a custom template filter, available application wide.  Like
+        :meth:`Flask.template_filter` but for a blueprint.
+
+        :param name: the optional name of the filter, otherwise the
+                     function name will be used.
+        """
+        def decorator(f):
+            self.add_app_template_filter(f, name=name)
+            return f
+        return decorator
+
+    def add_app_template_filter(self, f, name=None):
+        """Register a custom template filter, available application wide.  Like
+        :meth:`Flask.add_template_filter` but for a blueprint.  Works exactly
+        like the :meth:`app_template_filter` decorator.
+
+        :param name: the optional name of the filter, otherwise the
+                     function name will be used.
+        """
+        def register_template(state):
+            state.app.jinja_env.filters[name or f.__name__] = f
+        self.record_once(register_template)
+
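+    # An illustrative sketch (not part of Flask itself): a filter
+    # registered through a blueprint but available in all templates.
+    # ``bp`` and ``reverse`` are assumed names.
+    #
+    #   @bp.app_template_filter('reverse')
+    #   def reverse_filter(s):
+    #       return s[::-1]
+    #
+    #   # in a template: {{ 'abc'|reverse }} renders 'cba'
+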
+    def app_template_test(self, name=None):
+        """Register a custom template test, available application wide.  Like
+        :meth:`Flask.template_test` but for a blueprint.
+
+        .. versionadded:: 0.10
+
+        :param name: the optional name of the test, otherwise the
+                     function name will be used.
+        """
+        def decorator(f):
+            self.add_app_template_test(f, name=name)
+            return f
+        return decorator
+
+    def add_app_template_test(self, f, name=None):
+        """Register a custom template test, available application wide.  Like
+        :meth:`Flask.add_template_test` but for a blueprint.  Works exactly
+        like the :meth:`app_template_test` decorator.
+
+        .. versionadded:: 0.10
+
+        :param name: the optional name of the test, otherwise the
+                     function name will be used.
+        """
+        def register_template(state):
+            state.app.jinja_env.tests[name or f.__name__] = f
+        self.record_once(register_template)
+
+    def app_template_global(self, name=None):
+        """Register a custom template global, available application wide.  Like
+        :meth:`Flask.template_global` but for a blueprint.
+
+        .. versionadded:: 0.10
+
+        :param name: the optional name of the global, otherwise the
+                     function name will be used.
+        """
+        def decorator(f):
+            self.add_app_template_global(f, name=name)
+            return f
+        return decorator
+
+    def add_app_template_global(self, f, name=None):
+        """Register a custom template global, available application wide.  Like
+        :meth:`Flask.add_template_global` but for a blueprint.  Works exactly
+        like the :meth:`app_template_global` decorator.
+
+        .. versionadded:: 0.10
+
+        :param name: the optional name of the global, otherwise the
+                     function name will be used.
+        """
+        def register_template(state):
+            state.app.jinja_env.globals[name or f.__name__] = f
+        self.record_once(register_template)
+
+    def before_request(self, f):
+        """Like :meth:`Flask.before_request` but for a blueprint.  This function
+        is only executed before each request that is handled by a function of
+        that blueprint.
+        """
+        self.record_once(lambda s: s.app.before_request_funcs
+            .setdefault(self.name, []).append(f))
+        return f
+
+    def before_app_request(self, f):
+        """Like :meth:`Flask.before_request`.  Such a function is executed
+        before each request, even if outside of a blueprint.
+        """
+        self.record_once(lambda s: s.app.before_request_funcs
+            .setdefault(None, []).append(f))
+        return f
+
+    def before_app_first_request(self, f):
+        """Like :meth:`Flask.before_first_request`.  Such a function is
+        executed before the first request to the application.
+        """
+        self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
+        return f
+
+    def after_request(self, f):
+        """Like :meth:`Flask.after_request` but for a blueprint.  This function
+        is only executed after each request that is handled by a function of
+        that blueprint.
+        """
+        self.record_once(lambda s: s.app.after_request_funcs
+            .setdefault(self.name, []).append(f))
+        return f
+
+    def after_app_request(self, f):
+        """Like :meth:`Flask.after_request` but for a blueprint.  Such a function
+        is executed after each request, even if outside of the blueprint.
+        """
+        self.record_once(lambda s: s.app.after_request_funcs
+            .setdefault(None, []).append(f))
+        return f
+
+    def teardown_request(self, f):
+        """Like :meth:`Flask.teardown_request` but for a blueprint.  This
+        function is only executed when tearing down requests handled by a
+        function of that blueprint.  Teardown request functions are executed
+        when the request context is popped, even when no actual request was
+        performed.
+        """
+        self.record_once(lambda s: s.app.teardown_request_funcs
+            .setdefault(self.name, []).append(f))
+        return f
+
+    def teardown_app_request(self, f):
+        """Like :meth:`Flask.teardown_request` but for a blueprint.  Such a
+        function is executed when tearing down each request, even if outside of
+        the blueprint.
+        """
+        self.record_once(lambda s: s.app.teardown_request_funcs
+            .setdefault(None, []).append(f))
+        return f
+
+    def context_processor(self, f):
+        """Like :meth:`Flask.context_processor` but for a blueprint.  This
+        function is only executed for requests handled by a blueprint.
+        """
+        self.record_once(lambda s: s.app.template_context_processors
+            .setdefault(self.name, []).append(f))
+        return f
+
+    def app_context_processor(self, f):
+        """Like :meth:`Flask.context_processor` but for a blueprint.  Such a
+        function is executed each request, even if outside of the blueprint.
+        """
+        self.record_once(lambda s: s.app.template_context_processors
+            .setdefault(None, []).append(f))
+        return f
+
+    def app_errorhandler(self, code):
+        """Like :meth:`Flask.errorhandler` but for a blueprint.  This
+        handler is used for all requests, even if outside of the blueprint.
+        """
+        def decorator(f):
+            self.record_once(lambda s: s.app.errorhandler(code)(f))
+            return f
+        return decorator
+
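+    # An illustrative sketch (not part of Flask itself): an application-wide
+    # 404 handler registered from a blueprint. ``bp`` is an assumed name.
+    #
+    #   @bp.app_errorhandler(404)
+    #   def page_not_found(e):
+    #       return 'Not found', 404
+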
+    def url_value_preprocessor(self, f):
+        """Registers a function as URL value preprocessor for this
+        blueprint.  It's called before the view functions are called and
+        can modify the url values provided.
+        """
+        self.record_once(lambda s: s.app.url_value_preprocessors
+            .setdefault(self.name, []).append(f))
+        return f
+
+    def url_defaults(self, f):
+        """Callback function for URL defaults for this blueprint.  It's called
+        with the endpoint and values and should update the values passed
+        in place.
+        """
+        self.record_once(lambda s: s.app.url_default_functions
+            .setdefault(self.name, []).append(f))
+        return f
+
+    def app_url_value_preprocessor(self, f):
+        """Same as :meth:`url_value_preprocessor` but application wide.
+        """
+        self.record_once(lambda s: s.app.url_value_preprocessors
+            .setdefault(None, []).append(f))
+        return f
+
+    def app_url_defaults(self, f):
+        """Same as :meth:`url_defaults` but application wide.
+        """
+        self.record_once(lambda s: s.app.url_default_functions
+            .setdefault(None, []).append(f))
+        return f
+
+    def errorhandler(self, code_or_exception):
+        """Registers an error handler that becomes active for this blueprint
+        only.  Please be aware that routing does not happen local to a
+        blueprint so an error handler for 404 usually is not handled by
+        a blueprint unless it is caused inside a view function.  Another
+        special case is the 500 internal server error which is always looked
+        up from the application.
+
+        Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
+        of the :class:`~flask.Flask` object.
+        """
+        def decorator(f):
+            self.record_once(lambda s: s.app._register_error_handler(
+                self.name, code_or_exception, f))
+            return f
+        return decorator
+
+    def register_error_handler(self, code_or_exception, f):
+        """Non-decorator version of the :meth:`errorhandler` error attach
+        function, akin to the :meth:`~flask.Flask.register_error_handler`
+        application-wide function of the :class:`~flask.Flask` object but
+        for error handlers limited to this blueprint.
+
+        .. versionadded:: 0.11
+        """
+        self.record_once(lambda s: s.app._register_error_handler(
+            self.name, code_or_exception, f))
diff --git a/lib/python3.7/site-packages/flask/cli.py b/lib/python3.7/site-packages/flask/cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..efc1733e29cb8669f36b9c1bb39799c4743787dd
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/cli.py
@@ -0,0 +1,898 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.cli
+    ~~~~~~~~~
+
+    A simple command line application to run flask apps.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from __future__ import print_function
+
+import ast
+import inspect
+import os
+import re
+import ssl
+import sys
+import traceback
+from functools import update_wrapper
+from operator import attrgetter
+from threading import Lock, Thread
+
+import click
+from werkzeug.utils import import_string
+
+from . import __version__
+from ._compat import getargspec, iteritems, reraise, text_type
+from .globals import current_app
+from .helpers import get_debug_flag, get_env, get_load_dotenv
+
+try:
+    import dotenv
+except ImportError:
+    dotenv = None
+
+
+class NoAppException(click.UsageError):
+    """Raised if an application cannot be found or loaded."""
+
+
+def find_best_app(script_info, module):
+    """Given a module instance this tries to find the best possible
+    application in the module or raises an exception.
+    """
+    from . import Flask
+
+    # Search for the most common names first.
+    for attr_name in ('app', 'application'):
+        app = getattr(module, attr_name, None)
+
+        if isinstance(app, Flask):
+            return app
+
+    # Otherwise find the only object that is a Flask instance.
+    matches = [
+        v for k, v in iteritems(module.__dict__) if isinstance(v, Flask)
+    ]
+
+    if len(matches) == 1:
+        return matches[0]
+    elif len(matches) > 1:
+        raise NoAppException(
+            'Detected multiple Flask applications in module "{module}". Use '
+            '"FLASK_APP={module}:name" to specify the correct '
+            'one.'.format(module=module.__name__)
+        )
+
+    # Search for app factory functions.
+    for attr_name in ('create_app', 'make_app'):
+        app_factory = getattr(module, attr_name, None)
+
+        if inspect.isfunction(app_factory):
+            try:
+                app = call_factory(script_info, app_factory)
+
+                if isinstance(app, Flask):
+                    return app
+            except TypeError:
+                if not _called_with_wrong_args(app_factory):
+                    raise
+                raise NoAppException(
+                    'Detected factory "{factory}" in module "{module}", but '
+                    'could not call it without arguments. Use '
+                    '"FLASK_APP=\'{module}:{factory}(args)\'" to specify '
+                    'arguments.'.format(
+                        factory=attr_name, module=module.__name__
+                    )
+                )
+
+    raise NoAppException(
+        'Failed to find Flask application or factory in module "{module}". '
+        'Use "FLASK_APP={module}:name to specify one.'.format(
+            module=module.__name__
+        )
+    )
+
+
+def call_factory(script_info, app_factory, arguments=()):
+    """Takes an app factory, a ``script_info` object and  optionally a tuple
+    of arguments. Checks for the existence of a script_info argument and calls
+    the app_factory depending on that and the arguments provided.
+    """
+    args_spec = getargspec(app_factory)
+    arg_names = args_spec.args
+    arg_defaults = args_spec.defaults
+
+    if 'script_info' in arg_names:
+        return app_factory(*arguments, script_info=script_info)
+    elif arguments:
+        return app_factory(*arguments)
+    elif not arguments and len(arg_names) == 1 and arg_defaults is None:
+        return app_factory(script_info)
+
+    return app_factory()
+
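+# An illustrative sketch (not part of Flask itself): a factory that opts
+# into receiving the script info, as detected above. ``create_app`` is the
+# conventional name; the body is assumed.
+#
+#   def create_app(script_info=None):
+#       app = Flask(__name__)
+#       return app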
+
+def _called_with_wrong_args(factory):
+    """Check whether calling a function raised a ``TypeError`` because
+    the call failed or because something in the factory raised the
+    error.
+
+    :param factory: the factory function that was called
+    :return: true if the call failed
+    """
+    tb = sys.exc_info()[2]
+
+    try:
+        while tb is not None:
+            if tb.tb_frame.f_code is factory.__code__:
+                # in the factory, it was called successfully
+                return False
+
+            tb = tb.tb_next
+
+        # didn't reach the factory
+        return True
+    finally:
+        del tb
+
+
+def find_app_by_string(script_info, module, app_name):
+    """Checks if the given string is a variable name or a function. If it is a
+    function, it checks for specified arguments and whether it takes a
+    ``script_info`` argument and calls the function with the appropriate
+    arguments.
+    """
+    from flask import Flask
+    match = re.match(r'^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$', app_name)
+
+    if not match:
+        raise NoAppException(
+            '"{name}" is not a valid variable name or function '
+            'expression.'.format(name=app_name)
+        )
+
+    name, args = match.groups()
+
+    try:
+        attr = getattr(module, name)
+    except AttributeError as e:
+        raise NoAppException(e.args[0])
+
+    if inspect.isfunction(attr):
+        if args:
+            try:
+                args = ast.literal_eval('({args},)'.format(args=args))
+            except (ValueError, SyntaxError):
+                raise NoAppException(
+                    'Could not parse the arguments in '
+                    '"{app_name}".'.format(app_name=app_name)
+                )
+        else:
+            args = ()
+
+        try:
+            app = call_factory(script_info, attr, args)
+        except TypeError as e:
+            if not _called_with_wrong_args(attr):
+                raise
+
+            raise NoAppException(
+                '{e}\nThe factory "{app_name}" in module "{module}" could not '
+                'be called with the specified arguments.'.format(
+                    e=e, app_name=app_name, module=module.__name__
+                )
+            )
+    else:
+        app = attr
+
+    if isinstance(app, Flask):
+        return app
+
+    raise NoAppException(
+        'A valid Flask application was not obtained from '
+        '"{module}:{app_name}".'.format(
+            module=module.__name__, app_name=app_name
+        )
+    )
+
+
+def prepare_import(path):
+    """Given a filename this will try to calculate the python path, add it
+    to the search path and return the actual module name that is expected.
+    """
+    path = os.path.realpath(path)
+
+    if os.path.splitext(path)[1] == '.py':
+        path = os.path.splitext(path)[0]
+
+    if os.path.basename(path) == '__init__':
+        path = os.path.dirname(path)
+
+    module_name = []
+
+    # move up until outside package structure (no __init__.py)
+    while True:
+        path, name = os.path.split(path)
+        module_name.append(name)
+
+        if not os.path.exists(os.path.join(path, '__init__.py')):
+            break
+
+    if sys.path[0] != path:
+        sys.path.insert(0, path)
+
+    return '.'.join(module_name[::-1])
+
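+# An illustrative sketch (not part of Flask itself): assuming a project at
+# '/srv/proj' containing 'pkg/__init__.py' and 'pkg/app.py'; the paths
+# below are hypothetical.
+#
+#   prepare_import('/srv/proj/pkg/app.py')
+#   # inserts '/srv/proj' at the front of sys.path and returns 'pkg.app'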
+
+def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
+    __traceback_hide__ = True
+
+    try:
+        __import__(module_name)
+    except ImportError:
+        # Reraise the ImportError if it occurred within the imported module.
+        # Determine this by checking whether the trace has a depth > 1.
+        if sys.exc_info()[-1].tb_next:
+            raise NoAppException(
+                'While importing "{name}", an ImportError was raised:'
+                '\n\n{tb}'.format(name=module_name, tb=traceback.format_exc())
+            )
+        elif raise_if_not_found:
+            raise NoAppException(
+                'Could not import "{name}".'.format(name=module_name)
+            )
+        else:
+            return
+
+    module = sys.modules[module_name]
+
+    if app_name is None:
+        return find_best_app(script_info, module)
+    else:
+        return find_app_by_string(script_info, module, app_name)
+
+
+def get_version(ctx, param, value):
+    if not value or ctx.resilient_parsing:
+        return
+    message = 'Flask %(version)s\nPython %(python_version)s'
+    click.echo(message % {
+        'version': __version__,
+        'python_version': sys.version,
+    }, color=ctx.color)
+    ctx.exit()
+
+
+version_option = click.Option(
+    ['--version'],
+    help='Show the flask version',
+    expose_value=False,
+    callback=get_version,
+    is_flag=True,
+    is_eager=True
+)
+
+
+class DispatchingApp(object):
+    """Special application that dispatches to a Flask application which
+    is imported by name in a background thread.  If an error happens
+    it is recorded and shown as part of the WSGI handling which in case
+    of the Werkzeug debugger means that it shows up in the browser.
+    """
+
+    def __init__(self, loader, use_eager_loading=False):
+        self.loader = loader
+        self._app = None
+        self._lock = Lock()
+        self._bg_loading_exc_info = None
+        if use_eager_loading:
+            self._load_unlocked()
+        else:
+            self._load_in_background()
+
+    def _load_in_background(self):
+        def _load_app():
+            __traceback_hide__ = True
+            with self._lock:
+                try:
+                    self._load_unlocked()
+                except Exception:
+                    self._bg_loading_exc_info = sys.exc_info()
+        t = Thread(target=_load_app, args=())
+        t.start()
+
+    def _flush_bg_loading_exception(self):
+        __traceback_hide__ = True
+        exc_info = self._bg_loading_exc_info
+        if exc_info is not None:
+            self._bg_loading_exc_info = None
+            reraise(*exc_info)
+
+    def _load_unlocked(self):
+        __traceback_hide__ = True
+        self._app = rv = self.loader()
+        self._bg_loading_exc_info = None
+        return rv
+
+    def __call__(self, environ, start_response):
+        __traceback_hide__ = True
+        if self._app is not None:
+            return self._app(environ, start_response)
+        self._flush_bg_loading_exception()
+        with self._lock:
+            if self._app is not None:
+                rv = self._app
+            else:
+                rv = self._load_unlocked()
+            return rv(environ, start_response)
+
+
+class ScriptInfo(object):
+    """Help object to deal with Flask applications.  This is usually not
+    necessary to interface with as it's used internally in the dispatching
+    to click.  In future versions of Flask this object will most likely play
+    a bigger role.  Typically it's created automatically by the
+    :class:`FlaskGroup` but you can also manually create it and pass it
+    onwards as click object.
+    """
+
+    def __init__(self, app_import_path=None, create_app=None):
+        #: Optionally the import path for the Flask application.
+        self.app_import_path = app_import_path or os.environ.get('FLASK_APP')
+        #: Optionally a function that is passed the script info to create
+        #: the instance of the application.
+        self.create_app = create_app
+        #: A dictionary with arbitrary data that can be associated with
+        #: this script info.
+        self.data = {}
+        self._loaded_app = None
+
+    def load_app(self):
+        """Loads the Flask app (if not yet loaded) and returns it.  Calling
+        this multiple times will just result in the already loaded app to
+        be returned.
+        """
+        __traceback_hide__ = True
+
+        if self._loaded_app is not None:
+            return self._loaded_app
+
+        app = None
+
+        if self.create_app is not None:
+            app = call_factory(self, self.create_app)
+        else:
+            if self.app_import_path:
+                path, name = (self.app_import_path.split(':', 1) + [None])[:2]
+                import_name = prepare_import(path)
+                app = locate_app(self, import_name, name)
+            else:
+                for path in ('wsgi.py', 'app.py'):
+                    import_name = prepare_import(path)
+                    app = locate_app(self, import_name, None,
+                                     raise_if_not_found=False)
+
+                    if app:
+                        break
+
+        if not app:
+            raise NoAppException(
+                'Could not locate a Flask application. You did not provide '
+                'the "FLASK_APP" environment variable, and a "wsgi.py" or '
+                '"app.py" module was not found in the current directory.'
+            )
+
+        debug = get_debug_flag()
+
+        # Update the app's debug flag through the descriptor so that other
+        # values repopulate as well.
+        if debug is not None:
+            app.debug = debug
+
+        self._loaded_app = app
+        return app
+
+
+pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
+
+
+def with_appcontext(f):
+    """Wraps a callback so that it's guaranteed to be executed with the
+    script's application context.  If callbacks are registered directly
+    to the ``app.cli`` object then they are wrapped with this function
+    by default unless it's disabled.
+    """
+    @click.pass_context
+    def decorator(__ctx, *args, **kwargs):
+        with __ctx.ensure_object(ScriptInfo).load_app().app_context():
+            return __ctx.invoke(f, *args, **kwargs)
+    return update_wrapper(decorator, f)
+
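+# An illustrative sketch (not part of Flask itself): commands registered on
+# ``app.cli`` are wrapped with ``with_appcontext`` by default, so
+# ``current_app`` is usable inside them. ``init_db`` is an assumed name.
+#
+#   @app.cli.command()
+#   def init_db():
+#       """Initialize the database."""
+#       click.echo('init-db for %s' % current_app.name)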
+
+class AppGroup(click.Group):
+    """This works similar to a regular click :class:`~click.Group` but it
+    changes the behavior of the :meth:`command` decorator so that it
+    automatically wraps the functions in :func:`with_appcontext`.
+
+    Not to be confused with :class:`FlaskGroup`.
+    """
+
+    def command(self, *args, **kwargs):
+        """This works exactly like the method of the same name on a regular
+        :class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
+        unless it's disabled by passing ``with_appcontext=False``.
+        """
+        wrap_for_ctx = kwargs.pop('with_appcontext', True)
+        def decorator(f):
+            if wrap_for_ctx:
+                f = with_appcontext(f)
+            return click.Group.command(self, *args, **kwargs)(f)
+        return decorator
+
+    def group(self, *args, **kwargs):
+        """This works exactly like the method of the same name on a regular
+        :class:`click.Group` but it defaults the group class to
+        :class:`AppGroup`.
+        """
+        kwargs.setdefault('cls', AppGroup)
+        return click.Group.group(self, *args, **kwargs)
+
+
+class FlaskGroup(AppGroup):
+    """Special subclass of the :class:`AppGroup` group that supports
+    loading more commands from the configured Flask app.  Normally a
+    developer does not have to interface with this class but there are
+    some very advanced use cases for which it makes sense to create an
+    instance of this.
+
+    For information on why this is useful, see :ref:`custom-scripts`.
+
+    :param add_default_commands: if this is True then the default run and
+        shell commands will be added.
+    :param add_version_option: adds the ``--version`` option.
+    :param create_app: an optional callback that is passed the script info and
+        returns the loaded app.
+    :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
+        files to set environment variables. Will also change the working
+        directory to the directory containing the first file found.
+
+    .. versionchanged:: 1.0
+        If installed, python-dotenv will be used to load environment variables
+        from :file:`.env` and :file:`.flaskenv` files.
+    """
+
+    def __init__(self, add_default_commands=True, create_app=None,
+                 add_version_option=True, load_dotenv=True, **extra):
+        params = list(extra.pop('params', None) or ())
+
+        if add_version_option:
+            params.append(version_option)
+
+        AppGroup.__init__(self, params=params, **extra)
+        self.create_app = create_app
+        self.load_dotenv = load_dotenv
+
+        if add_default_commands:
+            self.add_command(run_command)
+            self.add_command(shell_command)
+            self.add_command(routes_command)
+
+        self._loaded_plugin_commands = False
+
+    def _load_plugin_commands(self):
+        if self._loaded_plugin_commands:
+            return
+        try:
+            import pkg_resources
+        except ImportError:
+            self._loaded_plugin_commands = True
+            return
+
+        for ep in pkg_resources.iter_entry_points('flask.commands'):
+            self.add_command(ep.load(), ep.name)
+        self._loaded_plugin_commands = True
+
+    def get_command(self, ctx, name):
+        self._load_plugin_commands()
+
+        # We load built-in commands first as these should always be the
+        # same no matter what the app does.  If the app does want to
+        # override this it needs to make a custom instance of this group
+        # and not attach the default commands.
+        #
+        # This also means that the script stays functional in case the
+        # application completely fails.
+        rv = AppGroup.get_command(self, ctx, name)
+        if rv is not None:
+            return rv
+
+        info = ctx.ensure_object(ScriptInfo)
+        try:
+            rv = info.load_app().cli.get_command(ctx, name)
+            if rv is not None:
+                return rv
+        except NoAppException:
+            pass
+
+    def list_commands(self, ctx):
+        self._load_plugin_commands()
+
+        # The available commands are the built-in commands plus those
+        # provided by the application (if it can be loaded).
+        rv = set(click.Group.list_commands(self, ctx))
+        info = ctx.ensure_object(ScriptInfo)
+        try:
+            rv.update(info.load_app().cli.list_commands(ctx))
+        except Exception:
+            # Here we intentionally swallow all exceptions as we don't
+            # want the help page to break if the app does not exist.
+            # If someone attempts to use the command we try to create
+            # the app again and this will give us the error.
+            # However, we will not do so silently because that would confuse
+            # users.
+            traceback.print_exc()
+        return sorted(rv)
+
+    def main(self, *args, **kwargs):
+        # Set a global flag that indicates that we were invoked from the
+        # command line interface. This is detected by Flask.run to make the
+        # call into a no-op. This is necessary to avoid ugly errors when the
+        # script that is loaded here also attempts to start a server.
+        os.environ['FLASK_RUN_FROM_CLI'] = 'true'
+
+        if get_load_dotenv(self.load_dotenv):
+            load_dotenv()
+
+        obj = kwargs.get('obj')
+
+        if obj is None:
+            obj = ScriptInfo(create_app=self.create_app)
+
+        kwargs['obj'] = obj
+        kwargs.setdefault('auto_envvar_prefix', 'FLASK')
+        return super(FlaskGroup, self).main(*args, **kwargs)
+
+
+def _path_is_ancestor(path, other):
+    """Take ``other`` and remove the length of ``path`` from it. Then join it
+    to ``path``. If it is the original value, ``path`` is an ancestor of
+    ``other``."""
+    return os.path.join(path, other[len(path):].lstrip(os.sep)) == other
+
+
+def load_dotenv(path=None):
+    """Load "dotenv" files in order of precedence to set environment variables.
+
+    If an env var is already set it is not overwritten, so earlier files in the
+    list are preferred over later files.
+
+    Changes the current working directory to the location of the first file
+    found, with the assumption that it is in the top level project directory
+    and will be where the Python path should import local packages from.
+
+    This is a no-op if `python-dotenv`_ is not installed.
+
+    .. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
+
+    :param path: Load the file at this location instead of searching.
+    :return: ``True`` if a file was loaded.
+
+    .. versionadded:: 1.0
+    """
+    if dotenv is None:
+        if path or os.path.exists('.env') or os.path.exists('.flaskenv'):
+            click.secho(
+                ' * Tip: There are .env files present.'
+                ' Do "pip install python-dotenv" to use them.',
+                fg='yellow')
+        return
+
+    if path is not None:
+        return dotenv.load_dotenv(path)
+
+    new_dir = None
+
+    for name in ('.env', '.flaskenv'):
+        path = dotenv.find_dotenv(name, usecwd=True)
+
+        if not path:
+            continue
+
+        if new_dir is None:
+            new_dir = os.path.dirname(path)
+
+        dotenv.load_dotenv(path)
+
+    if new_dir and os.getcwd() != new_dir:
+        os.chdir(new_dir)
+
+    return new_dir is not None  # at least one file was located and loaded
+
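+# An illustrative sketch (not part of Flask itself): a ``.flaskenv`` file
+# that load_dotenv() would pick up; variables already set in the
+# environment are not overwritten.
+#
+#   FLASK_APP=hello.py
+#   FLASK_ENV=development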
+
+def show_server_banner(env, debug, app_import_path, eager_loading):
+    """Show extra startup messages the first time the server is run,
+    ignoring the reloader.
+    """
+    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
+        return
+
+    if app_import_path is not None:
+        message = ' * Serving Flask app "{0}"'.format(app_import_path)
+
+        if not eager_loading:
+            message += ' (lazy loading)'
+
+        click.echo(message)
+
+    click.echo(' * Environment: {0}'.format(env))
+
+    if env == 'production':
+        click.secho(
+            '   WARNING: Do not use the development server in a production'
+            ' environment.', fg='red')
+        click.secho('   Use a production WSGI server instead.', dim=True)
+
+    if debug is not None:
+        click.echo(' * Debug mode: {0}'.format('on' if debug else 'off'))
+
+
+class CertParamType(click.ParamType):
+    """Click option type for the ``--cert`` option. Allows either an
+    existing file, the string ``'adhoc'``, or an import for a
+    :class:`~ssl.SSLContext` object.
+    """
+
+    name = 'path'
+
+    def __init__(self):
+        self.path_type = click.Path(
+            exists=True, dir_okay=False, resolve_path=True)
+
+    def convert(self, value, param, ctx):
+        try:
+            return self.path_type(value, param, ctx)
+        except click.BadParameter:
+            value = click.STRING(value, param, ctx).lower()
+
+            if value == 'adhoc':
+                try:
+                    import OpenSSL
+                except ImportError:
+                    raise click.BadParameter(
+                        'Using ad-hoc certificates requires pyOpenSSL.',
+                        ctx, param)
+
+                return value
+
+            obj = import_string(value, silent=True)
+
+            if sys.version_info < (2, 7):
+                if obj:
+                    return obj
+            else:
+                if isinstance(obj, ssl.SSLContext):
+                    return obj
+
+            raise
+
+
+def _validate_key(ctx, param, value):
+    """The ``--key`` option must be specified when ``--cert`` is a file.
+    Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
+    """
+    cert = ctx.params.get('cert')
+    is_adhoc = cert == 'adhoc'
+
+    if sys.version_info < (2, 7):
+        is_context = cert and not isinstance(cert, (text_type, bytes))
+    else:
+        is_context = isinstance(cert, ssl.SSLContext)
+
+    if value is not None:
+        if is_adhoc:
+            raise click.BadParameter(
+                'When "--cert" is "adhoc", "--key" is not used.',
+                ctx, param)
+
+        if is_context:
+            raise click.BadParameter(
+                'When "--cert" is an SSLContext object, "--key is not used.',
+                ctx, param)
+
+        if not cert:
+            raise click.BadParameter(
+                '"--cert" must also be specified.',
+                ctx, param)
+
+        ctx.params['cert'] = cert, value
+
+    else:
+        if cert and not (is_adhoc or is_context):
+            raise click.BadParameter(
+                'Required when using "--cert".',
+                ctx, param)
+
+    return value
+
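+# An illustrative sketch (not part of Flask itself): the "--cert"/"--key"
+# combinations accepted by the validation above; file names are assumed.
+#
+#   flask run --cert cert.pem --key key.pem   # file cert requires a key
+#   flask run --cert adhoc                    # "--key" must be omitted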
+
+@click.command('run', short_help='Runs a development server.')
+@click.option('--host', '-h', default='127.0.0.1',
+              help='The interface to bind to.')
+@click.option('--port', '-p', default=5000,
+              help='The port to bind to.')
+@click.option('--cert', type=CertParamType(),
+              help='Specify a certificate file to use HTTPS.')
+@click.option('--key',
+              type=click.Path(exists=True, dir_okay=False, resolve_path=True),
+              callback=_validate_key, expose_value=False,
+              help='The key file to use when specifying a certificate.')
+@click.option('--reload/--no-reload', default=None,
+              help='Enable or disable the reloader. By default the reloader '
+              'is active if debug is enabled.')
+@click.option('--debugger/--no-debugger', default=None,
+              help='Enable or disable the debugger. By default the debugger '
+              'is active if debug is enabled.')
+@click.option('--eager-loading/--lazy-loader', default=None,
+              help='Enable or disable eager loading. By default eager '
+              'loading is enabled if the reloader is disabled.')
+@click.option('--with-threads/--without-threads', default=True,
+              help='Enable or disable multithreading.')
+@pass_script_info
+def run_command(info, host, port, reload, debugger, eager_loading,
+                with_threads, cert):
+    """Run a local development server.
+
+    This server is for development purposes only. It does not provide
+    the stability, security, or performance of production WSGI servers.
+
+    The reloader and debugger are enabled by default if
+    FLASK_ENV=development or FLASK_DEBUG=1.
+    """
+    debug = get_debug_flag()
+
+    if reload is None:
+        reload = debug
+
+    if debugger is None:
+        debugger = debug
+
+    if eager_loading is None:
+        eager_loading = not reload
+
+    show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
+    app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
+
+    from werkzeug.serving import run_simple
+    run_simple(host, port, app, use_reloader=reload, use_debugger=debugger,
+               threaded=with_threads, ssl_context=cert)
+
+
+@click.command('shell', short_help='Runs a shell in the app context.')
+@with_appcontext
+def shell_command():
+    """Runs an interactive Python shell in the context of a given
+    Flask application.  The application will populate the default
+    namespace of this shell according to its configuration.
+
+    This is useful for executing small snippets of management code
+    without having to manually configure the application.
+    """
+    import code
+    from flask.globals import _app_ctx_stack
+    app = _app_ctx_stack.top.app
+    banner = 'Python %s on %s\nApp: %s [%s]\nInstance: %s' % (
+        sys.version,
+        sys.platform,
+        app.import_name,
+        app.env,
+        app.instance_path,
+    )
+    ctx = {}
+
+    # Support the regular Python interpreter startup script if someone
+    # is using it.
+    startup = os.environ.get('PYTHONSTARTUP')
+    if startup and os.path.isfile(startup):
+        with open(startup, 'r') as f:
+            eval(compile(f.read(), startup, 'exec'), ctx)
+
+    ctx.update(app.make_shell_context())
+
+    code.interact(banner=banner, local=ctx)
+
+
+@click.command('routes', short_help='Show the routes for the app.')
+@click.option(
+    '--sort', '-s',
+    type=click.Choice(('endpoint', 'methods', 'rule', 'match')),
+    default='endpoint',
+    help=(
+        'Method to sort routes by. "match" is the order that Flask will match '
+        'routes when dispatching a request.'
+    )
+)
+@click.option(
+    '--all-methods',
+    is_flag=True,
+    help="Show HEAD and OPTIONS methods."
+)
+@with_appcontext
+def routes_command(sort, all_methods):
+    """Show all registered routes with endpoints and methods."""
+
+    rules = list(current_app.url_map.iter_rules())
+    if not rules:
+        click.echo('No routes were registered.')
+        return
+
+    ignored_methods = set(() if all_methods else ('HEAD', 'OPTIONS'))
+
+    if sort in ('endpoint', 'rule'):
+        rules = sorted(rules, key=attrgetter(sort))
+    elif sort == 'methods':
+        rules = sorted(rules, key=lambda rule: sorted(rule.methods))
+
+    rule_methods = [
+        ', '.join(sorted(rule.methods - ignored_methods)) for rule in rules
+    ]
+
+    headers = ('Endpoint', 'Methods', 'Rule')
+    widths = (
+        max(len(rule.endpoint) for rule in rules),
+        max(len(methods) for methods in rule_methods),
+        max(len(rule.rule) for rule in rules),
+    )
+    widths = [max(len(h), w) for h, w in zip(headers, widths)]
+    row = '{{0:<{0}}}  {{1:<{1}}}  {{2:<{2}}}'.format(*widths)
+
+    click.echo(row.format(*headers).strip())
+    click.echo(row.format(*('-' * width for width in widths)))
+
+    for rule, methods in zip(rules, rule_methods):
+        click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
+
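+# An illustrative sketch (not part of Flask itself): output of
+# ``flask routes`` for a small app; the endpoints shown are assumed.
+#
+#   Endpoint  Methods  Rule
+#   --------  -------  -----------------------
+#   index     GET      /
+#   static    GET      /static/<path:filename>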
+
+cli = FlaskGroup(help="""\
+A general utility script for Flask applications.
+
+Provides commands from Flask, extensions, and the application. Loads the
+application defined in the FLASK_APP environment variable, or from a wsgi.py
+file. Setting the FLASK_ENV environment variable to 'development' will enable
+debug mode.
+
+\b
+  {prefix}{cmd} FLASK_APP=hello.py
+  {prefix}{cmd} FLASK_ENV=development
+  {prefix}flask run
+""".format(
+    cmd='export' if os.name == 'posix' else 'set',
+    prefix='$ ' if os.name == 'posix' else '> '
+))
+
+
+def main(as_module=False):
+    args = sys.argv[1:]
+
+    if as_module:
+        this_module = 'flask'
+
+        if sys.version_info < (2, 7):
+            this_module += '.cli'
+
+        name = 'python -m ' + this_module
+
+        # Python rewrites "python -m flask" to the path to the file in argv.
+        # Restore the original command so that the reloader works.
+        sys.argv = ['-m', this_module] + args
+    else:
+        name = None
+
+    cli.main(args=args, prog_name=name)
+
+
+if __name__ == '__main__':
+    main(as_module=True)
diff --git a/lib/python3.7/site-packages/flask/config.py b/lib/python3.7/site-packages/flask/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6074baa8565e43df992f2c6d14b1902e218a819
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/config.py
@@ -0,0 +1,265 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.config
+    ~~~~~~~~~~~~
+
+    Implements the configuration related objects.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import os
+import types
+import errno
+
+from werkzeug.utils import import_string
+from ._compat import string_types, iteritems
+from . import json
+
+
+class ConfigAttribute(object):
+    """Makes an attribute forward to the config"""
+
+    def __init__(self, name, get_converter=None):
+        self.__name__ = name
+        self.get_converter = get_converter
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        rv = obj.config[self.__name__]
+        if self.get_converter is not None:
+            rv = self.get_converter(rv)
+        return rv
+
+    def __set__(self, obj, value):
+        obj.config[self.__name__] = value
+
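+# An illustrative sketch (not part of Flask's config module): the
+# application class can expose a config key as a plain attribute.
+#
+#   class Flask(...):
+#       testing = ConfigAttribute('TESTING')
+#
+#   # app.testing reads and writes app.config['TESTING']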
+
+class Config(dict):
+    """Works exactly like a dict but provides ways to fill it from files
+    or special dictionaries.  There are two common patterns to populate the
+    config.
+
+    Either you can fill the config from a config file::
+
+        app.config.from_pyfile('yourconfig.cfg')
+
+    Or alternatively you can define the configuration options in the
+    module that calls :meth:`from_object` or provide an import path to
+    a module that should be loaded.  It is also possible to tell it to
+    use the same module and with that provide the configuration values
+    just before the call::
+
+        DEBUG = True
+        SECRET_KEY = 'development key'
+        app.config.from_object(__name__)
+
+    In both cases (loading from any Python file or loading from modules),
+    only uppercase keys are added to the config.  This makes it possible to use
+    lowercase values in the config file for temporary values that are not added
+    to the config or to define the config keys in the same file that implements
+    the application.
+
+    Probably the most interesting way to load configurations is from an
+    environment variable pointing to a file::
+
+        app.config.from_envvar('YOURAPPLICATION_SETTINGS')
+
+    In this case before launching the application you have to set this
+    environment variable to the file you want to use.  On Linux and OS X
+    use the export statement::
+
+        export YOURAPPLICATION_SETTINGS='/path/to/config/file'
+
+    On Windows use ``set`` instead.
+
+    :param root_path: path that files are read relative to.  When the
+                      config object is created by the application, this is
+                      the application's :attr:`~flask.Flask.root_path`.
+    :param defaults: an optional dictionary of default values
+    """
+
+    def __init__(self, root_path, defaults=None):
+        dict.__init__(self, defaults or {})
+        self.root_path = root_path
+
+    def from_envvar(self, variable_name, silent=False):
+        """Loads a configuration from an environment variable pointing to
+        a configuration file.  This is basically just a shortcut with nicer
+        error messages for this line of code::
+
+            app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
+
+        :param variable_name: name of the environment variable
+        :param silent: set to ``True`` if you want silent failure for missing
+                       files.
+        :return: ``True`` if the file could be loaded, ``False`` otherwise.
+        """
+        rv = os.environ.get(variable_name)
+        if not rv:
+            if silent:
+                return False
+            raise RuntimeError('The environment variable %r is not set '
+                               'and as such configuration could not be '
+                               'loaded.  Set this variable and make it '
+                               'point to a configuration file' %
+                               variable_name)
+        return self.from_pyfile(rv, silent=silent)
+
+    def from_pyfile(self, filename, silent=False):
+        """Updates the values in the config from a Python file.  This function
+        behaves as if the file was imported as module with the
+        :meth:`from_object` function.
+
+        :param filename: the filename of the config.  This can either be an
+                         absolute filename or a filename relative to the
+                         root path.
+        :param silent: set to ``True`` if you want silent failure for missing
+                       files.
+
+        .. versionadded:: 0.7
+           `silent` parameter.
+        """
+        filename = os.path.join(self.root_path, filename)
+        d = types.ModuleType('config')
+        d.__file__ = filename
+        try:
+            with open(filename, mode='rb') as config_file:
+                exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
+        except IOError as e:
+            if silent and e.errno in (
+                errno.ENOENT, errno.EISDIR, errno.ENOTDIR
+            ):
+                return False
+            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
+            raise
+        self.from_object(d)
+        return True
+
+    def from_object(self, obj):
+        """Updates the values from the given object.  An object can be of one
+        of the following two types:
+
+        -   a string: in this case the object with that name will be imported
+        -   an actual object reference: that object is used directly
+
+        Objects are usually either modules or classes. :meth:`from_object`
+        loads only the uppercase attributes of the module/class. A ``dict``
+        object will not work with :meth:`from_object` because the keys of a
+        ``dict`` are not attributes of the ``dict`` class.
+
+        Example of module-based configuration::
+
+            app.config.from_object('yourapplication.default_config')
+            from yourapplication import default_config
+            app.config.from_object(default_config)
+
+        You should not use this function to load the actual configuration but
+        rather configuration defaults.  The actual config should be loaded
+        with :meth:`from_pyfile` and ideally from a location not within the
+        package because the package might be installed system wide.
+
+        See :ref:`config-dev-prod` for an example of class-based configuration
+        using :meth:`from_object`.
+
+        :param obj: an import name or object
+        """
+        if isinstance(obj, string_types):
+            obj = import_string(obj)
+        for key in dir(obj):
+            if key.isupper():
+                self[key] = getattr(obj, key)
+
+    def from_json(self, filename, silent=False):
+        """Updates the values in the config from a JSON file. This function
+        behaves as if the JSON object was a dictionary and passed to the
+        :meth:`from_mapping` function.
+
+        :param filename: the filename of the JSON file.  This can either be an
+                         absolute filename or a filename relative to the
+                         root path.
+        :param silent: set to ``True`` if you want silent failure for missing
+                       files.
+
+        .. versionadded:: 0.11
+        """
+        filename = os.path.join(self.root_path, filename)
+
+        try:
+            with open(filename) as json_file:
+                obj = json.loads(json_file.read())
+        except IOError as e:
+            if silent and e.errno in (errno.ENOENT, errno.EISDIR):
+                return False
+            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
+            raise
+        return self.from_mapping(obj)
+
+    def from_mapping(self, *mapping, **kwargs):
+        """Updates the config like :meth:`update` ignoring items with non-upper
+        keys.
+
+        .. versionadded:: 0.11
+        """
+        mappings = []
+        if len(mapping) == 1:
+            if hasattr(mapping[0], 'items'):
+                mappings.append(mapping[0].items())
+            else:
+                mappings.append(mapping[0])
+        elif len(mapping) > 1:
+            raise TypeError(
+                'expected at most 1 positional argument, got %d' % len(mapping)
+            )
+        mappings.append(kwargs.items())
+        for mapping in mappings:
+            for (key, value) in mapping:
+                if key.isupper():
+                    self[key] = value
+        return True
+
+    def get_namespace(self, namespace, lowercase=True, trim_namespace=True):
+        """Returns a dictionary containing a subset of configuration options
+        that match the specified namespace/prefix. Example usage::
+
+            app.config['IMAGE_STORE_TYPE'] = 'fs'
+            app.config['IMAGE_STORE_PATH'] = '/var/app/images'
+            app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
+            image_store_config = app.config.get_namespace('IMAGE_STORE_')
+
+        The resulting dictionary `image_store_config` would look like::
+
+            {
+                'type': 'fs',
+                'path': '/var/app/images',
+                'base_url': 'http://img.website.com'
+            }
+
+        This is often useful when configuration options map directly to
+        keyword arguments in functions or class constructors.
+
+        :param namespace: a configuration namespace
+        :param lowercase: a flag indicating if the keys of the resulting
+                          dictionary should be lowercase
+        :param trim_namespace: a flag indicating if the keys of the resulting
+                          dictionary should not include the namespace
+
+        .. versionadded:: 0.11
+        """
+        rv = {}
+        for k, v in iteritems(self):
+            if not k.startswith(namespace):
+                continue
+            if trim_namespace:
+                key = k[len(namespace):]
+            else:
+                key = k
+            if lowercase:
+                key = key.lower()
+            rv[key] = v
+        return rv
+
+    def __repr__(self):
+        return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
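+
+
+# An illustrative sketch of the loading patterns documented above (not
+# upstream Flask code; the DefaultConfig class and YOURAPPLICATION_SETTINGS
+# variable are hypothetical).
+def _example_config_usage():
+    from flask import Flask
+
+    class DefaultConfig(object):
+        DEBUG = False
+        IMAGE_STORE_TYPE = 'fs'
+        IMAGE_STORE_PATH = '/var/app/images'
+
+    app = Flask(__name__)
+    app.config.from_object(DefaultConfig)  # copies uppercase attributes only
+    # Optional override file; silent=True tolerates an unset variable.
+    app.config.from_envvar('YOURAPPLICATION_SETTINGS', silent=True)
+    # Grouped keys, trimmed and lowercased by get_namespace:
+    # {'type': 'fs', 'path': '/var/app/images'} (absent an override file).
+    return app.config.get_namespace('IMAGE_STORE_')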
diff --git a/lib/python3.7/site-packages/flask/ctx.py b/lib/python3.7/site-packages/flask/ctx.py
new file mode 100644
index 0000000000000000000000000000000000000000..8472c920c2d6ef8673fa894b15b9c94405e1f0c4
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/ctx.py
@@ -0,0 +1,457 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.ctx
+    ~~~~~~~~~
+
+    Implements the objects required to keep the context.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import sys
+from functools import update_wrapper
+
+from werkzeug.exceptions import HTTPException
+
+from .globals import _request_ctx_stack, _app_ctx_stack
+from .signals import appcontext_pushed, appcontext_popped
+from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise
+
+
+# a singleton sentinel value for parameter defaults
+_sentinel = object()
+
+
+class _AppCtxGlobals(object):
+    """A plain object. Used as a namespace for storing data during an
+    application context.
+
+    Creating an app context automatically creates this object, which is
+    made available as the :data:`g` proxy.
+
+    .. describe:: 'key' in g
+
+        Check whether an attribute is present.
+
+        .. versionadded:: 0.10
+
+    .. describe:: iter(g)
+
+        Return an iterator over the attribute names.
+
+        .. versionadded:: 0.10
+    """
+
+    def get(self, name, default=None):
+        """Get an attribute by name, or a default value. Like
+        :meth:`dict.get`.
+
+        :param name: Name of attribute to get.
+        :param default: Value to return if the attribute is not present.
+
+        .. versionadded:: 0.10
+        """
+        return self.__dict__.get(name, default)
+
+    def pop(self, name, default=_sentinel):
+        """Get and remove an attribute by name. Like :meth:`dict.pop`.
+
+        :param name: Name of attribute to pop.
+        :param default: Value to return if the attribute is not present,
+            instead of raising a ``KeyError``.
+
+        .. versionadded:: 0.11
+        """
+        if default is _sentinel:
+            return self.__dict__.pop(name)
+        else:
+            return self.__dict__.pop(name, default)
+
+    def setdefault(self, name, default=None):
+        """Get the value of an attribute if it is present, otherwise
+        set and return a default value. Like :meth:`dict.setdefault`.
+
+        :param name: Name of attribute to get.
+        :param default: Value to set and return if the attribute is not
+            present.
+
+        .. versionadded:: 0.11
+        """
+        return self.__dict__.setdefault(name, default)
+
+    def __contains__(self, item):
+        return item in self.__dict__
+
+    def __iter__(self):
+        return iter(self.__dict__)
+
+    def __repr__(self):
+        top = _app_ctx_stack.top
+        if top is not None:
+            return '<flask.g of %r>' % top.app.name
+        return object.__repr__(self)
+
+
+def after_this_request(f):
+    """Executes a function after this request.  This is useful to modify
+    response objects.  The function is passed the response object and has
+    to return the same or a new one.
+
+    Example::
+
+        @app.route('/')
+        def index():
+            @after_this_request
+            def add_header(response):
+                response.headers['X-Foo'] = 'Parachute'
+                return response
+            return 'Hello World!'
+
+    This is more useful if a function other than the view function wants to
+    modify a response.  For instance think of a decorator that wants to add
+    some headers without converting the return value into a response object.
+
+    .. versionadded:: 0.9
+    """
+    _request_ctx_stack.top._after_request_functions.append(f)
+    return f
+
+
+def copy_current_request_context(f):
+    """A helper function that decorates a function to retain the current
+    request context.  This is useful when working with greenlets.  The moment
+    the function is decorated a copy of the request context is created and
+    then pushed when the function is called.
+
+    Example::
+
+        import gevent
+        from flask import copy_current_request_context
+
+        @app.route('/')
+        def index():
+            @copy_current_request_context
+            def do_some_work():
+                # do some work here, it can access flask.request like you
+                # would otherwise in the view function.
+                ...
+            gevent.spawn(do_some_work)
+            return 'Regular response'
+
+    .. versionadded:: 0.10
+    """
+    top = _request_ctx_stack.top
+    if top is None:
+        raise RuntimeError('This decorator can only be used at local scopes '
+            'when a request context is on the stack.  For instance within '
+            'view functions.')
+    reqctx = top.copy()
+    def wrapper(*args, **kwargs):
+        with reqctx:
+            return f(*args, **kwargs)
+    return update_wrapper(wrapper, f)
+
+
+def has_request_context():
+    """If you have code that wants to test if a request context is there or
+    not this function can be used.  For instance, you may want to take advantage
+    of request information if the request object is available, but fail
+    silently if it is unavailable.
+
+    ::
+
+        class User(db.Model):
+
+            def __init__(self, username, remote_addr=None):
+                self.username = username
+                if remote_addr is None and has_request_context():
+                    remote_addr = request.remote_addr
+                self.remote_addr = remote_addr
+
+    Alternatively you can also just test any of the context-bound objects
+    (such as :class:`request` or :class:`g`) for truthiness::
+
+        class User(db.Model):
+
+            def __init__(self, username, remote_addr=None):
+                self.username = username
+                if remote_addr is None and request:
+                    remote_addr = request.remote_addr
+                self.remote_addr = remote_addr
+
+    .. versionadded:: 0.7
+    """
+    return _request_ctx_stack.top is not None
+
+
+def has_app_context():
+    """Works like :func:`has_request_context` but for the application
+    context.  You can also just do a boolean check on the
+    :data:`current_app` object instead.
+
+    .. versionadded:: 0.9
+    """
+    return _app_ctx_stack.top is not None
+
+
+class AppContext(object):
+    """The application context binds an application object implicitly
+    to the current thread or greenlet, similar to how the
+    :class:`RequestContext` binds request information.  The application
+    context is also implicitly created if a request context is created
+    but the application is not on top of the individual application
+    context.
+    """
+
+    def __init__(self, app):
+        self.app = app
+        self.url_adapter = app.create_url_adapter(None)
+        self.g = app.app_ctx_globals_class()
+
+        # Like request contexts, app contexts can be pushed multiple
+        # times, but for them a basic "refcount" is enough to track them.
+        self._refcnt = 0
+
+    def push(self):
+        """Binds the app context to the current context."""
+        self._refcnt += 1
+        if hasattr(sys, 'exc_clear'):
+            sys.exc_clear()
+        _app_ctx_stack.push(self)
+        appcontext_pushed.send(self.app)
+
+    def pop(self, exc=_sentinel):
+        """Pops the app context."""
+        try:
+            self._refcnt -= 1
+            if self._refcnt <= 0:
+                if exc is _sentinel:
+                    exc = sys.exc_info()[1]
+                self.app.do_teardown_appcontext(exc)
+        finally:
+            rv = _app_ctx_stack.pop()
+        assert rv is self, 'Popped wrong app context.  (%r instead of %r)' \
+            % (rv, self)
+        appcontext_popped.send(self.app)
+
+    def __enter__(self):
+        self.push()
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self.pop(exc_value)
+
+        if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
+            reraise(exc_type, exc_value, tb)
+
+
+class RequestContext(object):
+    """The request context contains all request relevant information.  It is
+    created at the beginning of the request and pushed to the
+    `_request_ctx_stack` and removed at the end of it.  It will create the
+    URL adapter and request object for the WSGI environment provided.
+
+    Do not attempt to use this class directly, instead use
+    :meth:`~flask.Flask.test_request_context` and
+    :meth:`~flask.Flask.request_context` to create this object.
+
+    When the request context is popped, it will evaluate all the
+    functions registered on the application for teardown execution
+    (:meth:`~flask.Flask.teardown_request`).
+
+    The request context is automatically popped at the end of the request
+    for you.  In debug mode the request context is kept around if
+    exceptions happen so that interactive debuggers have a chance to
+    introspect the data.  With 0.4 this can also be forced for requests
+    that did not fail and outside of ``DEBUG`` mode.  By setting
+    ``'flask._preserve_context'`` to ``True`` on the WSGI environment the
+    context will not pop itself at the end of the request.  This is used by
+    the :meth:`~flask.Flask.test_client` for example to implement the
+    deferred cleanup functionality.
+
+    You might find this helpful for unittests where you need the
+    information from the context local around for a little longer.  Make
+    sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in
+    that situation, otherwise your unittests will leak memory.
+    """
+
+    def __init__(self, app, environ, request=None):
+        self.app = app
+        if request is None:
+            request = app.request_class(environ)
+        self.request = request
+        self.url_adapter = app.create_url_adapter(self.request)
+        self.flashes = None
+        self.session = None
+
+        # Request contexts can be pushed multiple times and interleaved
+        # with other request contexts.  Only when the last level is popped
+        # do we get rid of them.  Additionally, if an application context
+        # is missing, one is created implicitly, so for each level we
+        # record whether that happened.
+        self._implicit_app_ctx_stack = []
+
+        # indicator if the context was preserved.  Next time another context
+        # is pushed the preserved context is popped.
+        self.preserved = False
+
+        # remembers the exception for pop if there is one in case the context
+        # preservation kicks in.
+        self._preserved_exc = None
+
+        # Functions that should be executed after the request on the response
+        # object.  These will be called before the regular "after_request"
+        # functions.
+        self._after_request_functions = []
+
+        self.match_request()
+
+    def _get_g(self):
+        return _app_ctx_stack.top.g
+    def _set_g(self, value):
+        _app_ctx_stack.top.g = value
+    g = property(_get_g, _set_g)
+    del _get_g, _set_g
+
+    def copy(self):
+        """Creates a copy of this request context with the same request object.
+        This can be used to move a request context to a different greenlet.
+        Because the actual request object is the same this cannot be used to
+        move a request context to a different thread unless access to the
+        request object is locked.
+
+        .. versionadded:: 0.10
+        """
+        return self.__class__(self.app,
+            environ=self.request.environ,
+            request=self.request
+        )
+
+    def match_request(self):
+        """Can be overridden by a subclass to hook into the matching
+        of the request.
+        """
+        try:
+            url_rule, self.request.view_args = \
+                self.url_adapter.match(return_rule=True)
+            self.request.url_rule = url_rule
+        except HTTPException as e:
+            self.request.routing_exception = e
+
+    def push(self):
+        """Binds the request context to the current context."""
+        # If an exception occurs in debug mode or if context preservation is
+        # activated under exception situations exactly one context stays
+        # on the stack.  The rationale is that you want to access that
+        # information under debug situations.  However if someone forgets to
+        # pop that context again we want to make sure that on the next push
+        # it's invalidated, otherwise we run at risk that something leaks
+        # memory.  This is usually only a problem in test suite since this
+        # functionality is not active in production environments.
+        top = _request_ctx_stack.top
+        if top is not None and top.preserved:
+            top.pop(top._preserved_exc)
+
+        # Before we push the request context we have to ensure that there
+        # is an application context.
+        app_ctx = _app_ctx_stack.top
+        if app_ctx is None or app_ctx.app != self.app:
+            app_ctx = self.app.app_context()
+            app_ctx.push()
+            self._implicit_app_ctx_stack.append(app_ctx)
+        else:
+            self._implicit_app_ctx_stack.append(None)
+
+        if hasattr(sys, 'exc_clear'):
+            sys.exc_clear()
+
+        _request_ctx_stack.push(self)
+
+        # Open the session at the moment that the request context is available.
+        # This allows a custom open_session method to use the request context.
+        # Only open a new session if this is the first time the request was
+        # pushed, otherwise stream_with_context loses the session.
+        if self.session is None:
+            session_interface = self.app.session_interface
+            self.session = session_interface.open_session(
+                self.app, self.request
+            )
+
+            if self.session is None:
+                self.session = session_interface.make_null_session(self.app)
+
+    def pop(self, exc=_sentinel):
+        """Pops the request context and unbinds it by doing that.  This will
+        also trigger the execution of functions registered by the
+        :meth:`~flask.Flask.teardown_request` decorator.
+
+        .. versionchanged:: 0.9
+           Added the `exc` argument.
+        """
+        app_ctx = self._implicit_app_ctx_stack.pop()
+
+        try:
+            clear_request = False
+            if not self._implicit_app_ctx_stack:
+                self.preserved = False
+                self._preserved_exc = None
+                if exc is _sentinel:
+                    exc = sys.exc_info()[1]
+                self.app.do_teardown_request(exc)
+
+                # If this interpreter supports clearing the exception
+                # information we do that now.  This only takes effect on
+                # Python 2.x; on 3.x the exception disappears automatically
+                # at the end of exception handling.
+                if hasattr(sys, 'exc_clear'):
+                    sys.exc_clear()
+
+                request_close = getattr(self.request, 'close', None)
+                if request_close is not None:
+                    request_close()
+                clear_request = True
+        finally:
+            rv = _request_ctx_stack.pop()
+
+            # get rid of circular dependencies at the end of the request
+            # so that we don't require the GC to be active.
+            if clear_request:
+                rv.request.environ['werkzeug.request'] = None
+
+            # Get rid of the app as well if necessary.
+            if app_ctx is not None:
+                app_ctx.pop(exc)
+
+            assert rv is self, 'Popped wrong request context.  ' \
+                '(%r instead of %r)' % (rv, self)
+
+    def auto_pop(self, exc):
+        if self.request.environ.get('flask._preserve_context') or \
+           (exc is not None and self.app.preserve_context_on_exception):
+            self.preserved = True
+            self._preserved_exc = exc
+        else:
+            self.pop(exc)
+
+    def __enter__(self):
+        self.push()
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        # do not pop the request stack if we are in debug mode and an
+        # exception happened.  This will allow the debugger to still
+        # access the request object in the interactive shell.  Furthermore
+        # the context can be force kept alive for the test client.
+        # See flask.testing for how this works.
+        self.auto_pop(exc_value)
+
+        if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
+            reraise(exc_type, exc_value, tb)
+
+    def __repr__(self):
+        return '<%s \'%s\' [%s] of %s>' % (
+            self.__class__.__name__,
+            self.request.url,
+            self.request.method,
+            self.app.name,
+        )
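+
+
+# A minimal sketch of pushing the two context types defined above (an
+# illustrative example, not upstream Flask code).
+def _example_context_usage():
+    from flask import Flask, g, request
+
+    app = Flask(__name__)
+
+    with app.app_context():  # pushes an AppContext
+        g.answer = 42        # g lives on the application context
+
+    with app.test_request_context('/hello?name=World'):
+        # Pushing a RequestContext also pushes an implicit AppContext,
+        # as RequestContext.push above shows.
+        assert request.args['name'] == 'World'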
diff --git a/lib/python3.7/site-packages/flask/debughelpers.py b/lib/python3.7/site-packages/flask/debughelpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9765f20d0a2e43a6ce9b694c85499bcdbb1b222
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/debughelpers.py
@@ -0,0 +1,168 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.debughelpers
+    ~~~~~~~~~~~~~~~~~~
+
+    Various helpers to make the development experience better.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import os
+from warnings import warn
+
+from ._compat import implements_to_string, text_type
+from .app import Flask
+from .blueprints import Blueprint
+from .globals import _request_ctx_stack
+
+
+class UnexpectedUnicodeError(AssertionError, UnicodeError):
+    """Raised in places where we want some better error reporting for
+    unexpected unicode or binary data.
+    """
+
+
+@implements_to_string
+class DebugFilesKeyError(KeyError, AssertionError):
+    """Raised from request.files during debugging.  The idea is that it can
+    provide a better error message than just a generic KeyError/BadRequest.
+    """
+
+    def __init__(self, request, key):
+        form_matches = request.form.getlist(key)
+        buf = ['You tried to access the file "%s" in the request.files '
+               'dictionary but it does not exist.  The mimetype for the request '
+               'is "%s" instead of "multipart/form-data" which means that no '
+               'file contents were transmitted.  To fix this error you should '
+               'provide enctype="multipart/form-data" in your form.' %
+               (key, request.mimetype)]
+        if form_matches:
+            buf.append('\n\nThe browser instead transmitted some file names. '
+                       'This was submitted: %s' % ', '.join('"%s"' % x
+                            for x in form_matches))
+        self.msg = ''.join(buf)
+
+    def __str__(self):
+        return self.msg
+
+
+class FormDataRoutingRedirect(AssertionError):
+    """This exception is raised by Flask in debug mode if it detects a
+    redirect caused by the routing system when the request method is not
+    GET, HEAD or OPTIONS.  Reasoning: form data will be dropped.
+    """
+
+    def __init__(self, request):
+        exc = request.routing_exception
+        buf = ['A request was sent to this URL (%s) but a redirect was '
+               'issued automatically by the routing system to "%s".'
+               % (request.url, exc.new_url)]
+
+        # In case just a slash was appended we can be extra helpful
+        if request.base_url + '/' == exc.new_url.split('?')[0]:
+            buf.append('  The URL was defined with a trailing slash so '
+                       'Flask will automatically redirect to the URL '
+                       'with the trailing slash if it was accessed '
+                       'without one.')
+
+        buf.append('  Make sure to directly send your %s-request to this URL '
+                   'since we can\'t make browsers or HTTP clients redirect '
+                   'with form data reliably or without user interaction.' %
+                   request.method)
+        buf.append('\n\nNote: this exception is only raised in debug mode')
+        AssertionError.__init__(self, ''.join(buf).encode('utf-8'))
+
+
+def attach_enctype_error_multidict(request):
+    """Since Flask 0.8 we're monkeypatching the files object in case a
+    request is detected that does not use multipart form data but the files
+    object is accessed.
+    """
+    oldcls = request.files.__class__
+    class newcls(oldcls):
+        def __getitem__(self, key):
+            try:
+                return oldcls.__getitem__(self, key)
+            except KeyError:
+                if key not in request.form:
+                    raise
+                raise DebugFilesKeyError(request, key)
+    newcls.__name__ = oldcls.__name__
+    newcls.__module__ = oldcls.__module__
+    request.files.__class__ = newcls
+
+
+def _dump_loader_info(loader):
+    yield 'class: %s.%s' % (type(loader).__module__, type(loader).__name__)
+    for key, value in sorted(loader.__dict__.items()):
+        if key.startswith('_'):
+            continue
+        if isinstance(value, (tuple, list)):
+            if not all(isinstance(x, (str, text_type)) for x in value):
+                continue
+            yield '%s:' % key
+            for item in value:
+                yield '  - %s' % item
+            continue
+        elif not isinstance(value, (str, text_type, int, float, bool)):
+            continue
+        yield '%s: %r' % (key, value)
+
+
+def explain_template_loading_attempts(app, template, attempts):
+    """This should help developers understand what failed"""
+    info = ['Locating template "%s":' % template]
+    total_found = 0
+    blueprint = None
+    reqctx = _request_ctx_stack.top
+    if reqctx is not None and reqctx.request.blueprint is not None:
+        blueprint = reqctx.request.blueprint
+
+    for idx, (loader, srcobj, triple) in enumerate(attempts):
+        if isinstance(srcobj, Flask):
+            src_info = 'application "%s"' % srcobj.import_name
+        elif isinstance(srcobj, Blueprint):
+            src_info = 'blueprint "%s" (%s)' % (srcobj.name,
+                                                srcobj.import_name)
+        else:
+            src_info = repr(srcobj)
+
+        info.append('% 5d: trying loader of %s' % (
+            idx + 1, src_info))
+
+        for line in _dump_loader_info(loader):
+            info.append('       %s' % line)
+
+        if triple is None:
+            detail = 'no match'
+        else:
+            detail = 'found (%r)' % (triple[1] or '<string>')
+            total_found += 1
+        info.append('       -> %s' % detail)
+
+    seems_fishy = False
+    if total_found == 0:
+        info.append('Error: the template could not be found.')
+        seems_fishy = True
+    elif total_found > 1:
+        info.append('Warning: multiple loaders returned a match for the template.')
+        seems_fishy = True
+
+    if blueprint is not None and seems_fishy:
+        info.append('  The template was looked up from an endpoint that '
+                    'belongs to the blueprint "%s".' % blueprint)
+        info.append('  Maybe you did not place a template in the right folder?')
+        info.append('  See http://flask.pocoo.org/docs/blueprints/#templates')
+
+    app.logger.info('\n'.join(info))
+
+
+def explain_ignored_app_run():
+    if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
+        warn(Warning('Silently ignoring app.run() because the '
+                     'application is run from the flask command line '
+                     'executable.  Consider putting app.run() behind an '
+                     'if __name__ == "__main__" guard to silence this '
+                     'warning.'), stacklevel=3)
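+
+
+# An illustrative sketch (the route and field names are hypothetical, not
+# upstream Flask code): with debug enabled, the monkeypatched files object
+# above turns a missing-enctype mistake into the richer DebugFilesKeyError.
+def _example_debug_files_error():
+    from flask import Flask, request
+
+    app = Flask(__name__)
+    app.debug = True
+
+    @app.route('/upload', methods=['POST'])
+    def upload():
+        # If the HTML form lacked enctype="multipart/form-data", this
+        # lookup raises DebugFilesKeyError instead of a bare KeyError.
+        f = request.files['document']
+        return f.filename
+
+    return app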
diff --git a/lib/python3.7/site-packages/flask/globals.py b/lib/python3.7/site-packages/flask/globals.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d50a6f6d4052f1af090954131c519aaf3b95b5e
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/globals.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.globals
+    ~~~~~~~~~~~~~
+
+    Defines all the global objects that are proxies to the current
+    active context.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from functools import partial
+from werkzeug.local import LocalStack, LocalProxy
+
+
+_request_ctx_err_msg = '''\
+Working outside of request context.
+
+This typically means that you attempted to use functionality that needed
+an active HTTP request.  Consult the documentation on testing for
+information about how to avoid this problem.\
+'''
+_app_ctx_err_msg = '''\
+Working outside of application context.
+
+This typically means that you attempted to use functionality that needed
+to interface with the current application object in some way. To solve
+this, set up an application context with app.app_context().  See the
+documentation for more information.\
+'''
+
+
+def _lookup_req_object(name):
+    top = _request_ctx_stack.top
+    if top is None:
+        raise RuntimeError(_request_ctx_err_msg)
+    return getattr(top, name)
+
+
+def _lookup_app_object(name):
+    top = _app_ctx_stack.top
+    if top is None:
+        raise RuntimeError(_app_ctx_err_msg)
+    return getattr(top, name)
+
+
+def _find_app():
+    top = _app_ctx_stack.top
+    if top is None:
+        raise RuntimeError(_app_ctx_err_msg)
+    return top.app
+
+
+# context locals
+_request_ctx_stack = LocalStack()
+_app_ctx_stack = LocalStack()
+current_app = LocalProxy(_find_app)
+request = LocalProxy(partial(_lookup_req_object, 'request'))
+session = LocalProxy(partial(_lookup_req_object, 'session'))
+g = LocalProxy(partial(_lookup_app_object, 'g'))
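+
+
+# A small sketch of the proxies defined above (illustrative, not upstream
+# Flask code); outside of any context they raise RuntimeError with the
+# messages shown.
+def _example_proxy_usage():
+    from flask import Flask, current_app, g
+
+    app = Flask(__name__)
+
+    with app.app_context():
+        assert current_app.name == app.name  # resolved via _find_app
+        g.cache = {}                         # via _lookup_app_object('g')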
diff --git a/lib/python3.7/site-packages/flask/helpers.py b/lib/python3.7/site-packages/flask/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..df0b91fc55ee0e08a73071c26712351d913b3ba0
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/helpers.py
@@ -0,0 +1,1044 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.helpers
+    ~~~~~~~~~~~~~
+
+    Implements various helpers.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import os
+import socket
+import sys
+import pkgutil
+import posixpath
+import mimetypes
+from time import time
+from zlib import adler32
+from threading import RLock
+import unicodedata
+from werkzeug.routing import BuildError
+from functools import update_wrapper
+
+from werkzeug.urls import url_quote
+from werkzeug.datastructures import Headers, Range
+from werkzeug.exceptions import BadRequest, NotFound, \
+    RequestedRangeNotSatisfiable
+
+from werkzeug.wsgi import wrap_file
+from jinja2 import FileSystemLoader
+
+from .signals import message_flashed
+from .globals import session, _request_ctx_stack, _app_ctx_stack, \
+     current_app, request
+from ._compat import string_types, text_type, PY2
+
+# sentinel
+_missing = object()
+
+
+# what separators does this operating system provide that are not a slash?
+# this is used by the send_from_directory function to ensure that nobody is
+# able to access files from outside the filesystem.
+_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
+                    if sep not in (None, '/'))
+
+
+def get_env():
+    """Get the environment the app is running in, indicated by the
+    :envvar:`FLASK_ENV` environment variable. The default is
+    ``'production'``.
+    """
+    return os.environ.get('FLASK_ENV') or 'production'
+
+
+def get_debug_flag():
+    """Get whether debug mode should be enabled for the app, indicated
+    by the :envvar:`FLASK_DEBUG` environment variable. The default is
+    ``True`` if :func:`.get_env` returns ``'development'``, or ``False``
+    otherwise.
+    """
+    val = os.environ.get('FLASK_DEBUG')
+
+    if not val:
+        return get_env() == 'development'
+
+    return val.lower() not in ('0', 'false', 'no')
+
+
+def get_load_dotenv(default=True):
+    """Get whether the user has disabled loading dotenv files by setting
+    :envvar:`FLASK_SKIP_DOTENV`. The default is ``True``, load the
+    files.
+
+    :param default: What to return if the env var isn't set.
+    """
+    val = os.environ.get('FLASK_SKIP_DOTENV')
+
+    if not val:
+        return default
+
+    return val.lower() in ('0', 'false', 'no')
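+
+
+# How the three environment helpers above interact (an illustrative
+# sketch; the environment values are hypothetical).
+def _example_env_helpers():
+    os.environ['FLASK_ENV'] = 'development'
+    os.environ.pop('FLASK_DEBUG', None)
+    os.environ.pop('FLASK_SKIP_DOTENV', None)
+
+    assert get_env() == 'development'
+    assert get_debug_flag() is True   # unset FLASK_DEBUG defers to FLASK_ENV
+    assert get_load_dotenv() is True  # FLASK_SKIP_DOTENV is not set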
+
+
+def _endpoint_from_view_func(view_func):
+    """Internal helper that returns the default endpoint for a given
+    function.  This always is the function name.
+    """
+    assert view_func is not None, 'expected view func if endpoint ' \
+                                  'is not provided.'
+    return view_func.__name__
+
+
+def stream_with_context(generator_or_function):
+    """Request contexts disappear when the response is started on the server.
+    This is done for efficiency reasons and to make it less likely to encounter
+    memory leaks with badly written WSGI middlewares.  The downside is that if
+    you are using streamed responses, the generator cannot access request bound
+    information any more.
+
+    This function however can help you keep the context around for longer::
+
+        from flask import stream_with_context, request, Response
+
+        @app.route('/stream')
+        def streamed_response():
+            @stream_with_context
+            def generate():
+                yield 'Hello '
+                yield request.args['name']
+                yield '!'
+            return Response(generate())
+
+    Alternatively it can also be used around a specific generator::
+
+        from flask import stream_with_context, request, Response
+
+        @app.route('/stream')
+        def streamed_response():
+            def generate():
+                yield 'Hello '
+                yield request.args['name']
+                yield '!'
+            return Response(stream_with_context(generate()))
+
+    .. versionadded:: 0.9
+    """
+    try:
+        gen = iter(generator_or_function)
+    except TypeError:
+        def decorator(*args, **kwargs):
+            gen = generator_or_function(*args, **kwargs)
+            return stream_with_context(gen)
+        return update_wrapper(decorator, generator_or_function)
+
+    def generator():
+        ctx = _request_ctx_stack.top
+        if ctx is None:
+            raise RuntimeError('Attempted to stream with context but '
+                'there was no context in the first place to keep around.')
+        with ctx:
+            # Dummy sentinel.  Has to be inside the context block or we're
+            # not actually keeping the context around.
+            yield None
+
+            # The try/finally is here so that if someone passes a WSGI level
+            # iterator in we're still running the cleanup logic.  Generators
+            # don't need that because they are closed on their destruction
+            # automatically.
+            try:
+                for item in gen:
+                    yield item
+            finally:
+                if hasattr(gen, 'close'):
+                    gen.close()
+
+    # The trick is to start the generator.  Then the code execution runs until
+    # the first dummy None is yielded at which point the context was already
+    # pushed.  This item is discarded.  Then when the iteration continues the
+    # real generator is executed.
+    wrapped_g = generator()
+    next(wrapped_g)
+    return wrapped_g
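+
+
+# The priming trick described in the comment above is a general generator
+# pattern; here it is in isolation (a generic, illustrative sketch, not
+# part of upstream Flask).  The decorated generator's first ``yield None``
+# marks the end of its eager setup; iteration then begins at the first
+# real item.
+def _example_primed(gen_func):
+    def wrapper(*args, **kwargs):
+        gen = gen_func(*args, **kwargs)
+        next(gen)  # run setup code up to the discarded sentinel yield
+        return gen
+    return update_wrapper(wrapper, gen_func)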
+
+
+def make_response(*args):
+    """Sometimes it is necessary to set additional headers in a view.  Because
+    views do not have to return response objects but can return a value that
+    is converted into a response object by Flask itself, it becomes tricky to
+    add headers to it.  This function can be called instead of using a return
+    and you will get a response object which you can use to attach headers.
+
+    If your view looked like this and you want to add a new header::
+
+        def index():
+            return render_template('index.html', foo=42)
+
+    You can now do something like this::
+
+        def index():
+            response = make_response(render_template('index.html', foo=42))
+            response.headers['X-Parachutes'] = 'parachutes are cool'
+            return response
+
+    This function accepts the very same arguments you can return from a
+    view function.  This for example creates a response with a 404 error
+    code::
+
+        response = make_response(render_template('not_found.html'), 404)
+
+    The other use case of this function is to force the return value of a
+    view function into a response which is helpful with view
+    decorators::
+
+        response = make_response(view_function())
+        response.headers['X-Parachutes'] = 'parachutes are cool'
+
+    Internally this function does the following things:
+
+    -   if no arguments are passed, it creates a new response object
+    -   if one argument is passed, :meth:`flask.Flask.make_response`
+        is invoked with it.
+    -   if more than one argument is passed, the arguments are passed
+        to the :meth:`flask.Flask.make_response` function as tuple.
+
+    .. versionadded:: 0.6
+    """
+    if not args:
+        return current_app.response_class()
+    if len(args) == 1:
+        args = args[0]
+    return current_app.make_response(args)
+
+
+def url_for(endpoint, **values):
+    """Generates a URL to the given endpoint with the method provided.
+
+    Variable arguments that are unknown to the target endpoint are appended
+    to the generated URL as query arguments.  If the value of a query argument
+    is ``None``, the whole pair is skipped.  In case blueprints are active
+    you can shortcut references to the same blueprint by prefixing the
+    local endpoint with a dot (``.``).
+
+    This will reference the index function local to the current blueprint::
+
+        url_for('.index')
+
+    For more information, head over to the :ref:`Quickstart <url-building>`.
+
+    To integrate applications, :class:`Flask` has a hook to intercept URL build
+    errors through :attr:`Flask.url_build_error_handlers`.  The `url_for`
+    function results in a :exc:`~werkzeug.routing.BuildError` when the current
+    app does not have a URL for the given endpoint and values.  When that
+    happens, :data:`~flask.current_app` calls its
+    :attr:`~Flask.url_build_error_handlers`; a handler can return a string to
+    use as the result of `url_for` (instead of `url_for`'s default of raising
+    the :exc:`~werkzeug.routing.BuildError` exception) or re-raise the
+    exception.
+    An example::
+
+        def external_url_handler(error, endpoint, values):
+            "Looks up an external URL when `url_for` cannot build a URL."
+            # This is an example of hooking the build_error_handler.
+            # Here, lookup_url is some utility function you've built
+            # which looks up the endpoint in some external URL registry.
+            url = lookup_url(endpoint, **values)
+            if url is None:
+                # External lookup did not have a URL.
+                # Re-raise the BuildError, in context of original traceback.
+                exc_type, exc_value, tb = sys.exc_info()
+                if exc_value is error:
+                    raise exc_value.with_traceback(tb)
+                else:
+                    raise error
+            # url_for will use this result, instead of raising BuildError.
+            return url
+
+        app.url_build_error_handlers.append(external_url_handler)
+
+    Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
+    `endpoint` and `values` are the arguments passed into `url_for`.  Note
+    that this is for building URLs outside the current application, and not for
+    handling 404 NotFound errors.
+
+    .. versionadded:: 0.10
+       The `_scheme` parameter was added.
+
+    .. versionadded:: 0.9
+       The `_anchor` and `_method` parameters were added.
+
+    .. versionadded:: 0.9
+       Calls :meth:`Flask.handle_build_error` on
+       :exc:`~werkzeug.routing.BuildError`.
+
+    :param endpoint: the endpoint of the URL (name of the function)
+    :param values: the variable arguments of the URL rule
+    :param _external: if set to ``True``, an absolute URL is generated. Server
+      address can be changed via ``SERVER_NAME`` configuration variable which
+      defaults to `localhost`.
+    :param _scheme: a string specifying the desired URL scheme. The `_external`
+      parameter must be set to ``True`` or a :exc:`ValueError` is raised. The default
+      behavior uses the same scheme as the current request, or
+      ``PREFERRED_URL_SCHEME`` from the :ref:`app configuration <config>` if no
+      request context is available. As of Werkzeug 0.10, this also can be set
+      to an empty string to build protocol-relative URLs.
+    :param _anchor: if provided this is added as anchor to the URL.
+    :param _method: if provided this explicitly specifies an HTTP method.
+    """
+    appctx = _app_ctx_stack.top
+    reqctx = _request_ctx_stack.top
+
+    if appctx is None:
+        raise RuntimeError(
+            'Attempted to generate a URL without the application context being'
+            ' pushed. This has to be executed when application context is'
+            ' available.'
+        )
+
+    # If request specific information is available we have some extra
+    # features that support "relative" URLs.
+    if reqctx is not None:
+        url_adapter = reqctx.url_adapter
+        blueprint_name = request.blueprint
+
+        if endpoint[:1] == '.':
+            if blueprint_name is not None:
+                endpoint = blueprint_name + endpoint
+            else:
+                endpoint = endpoint[1:]
+
+        external = values.pop('_external', False)
+
+    # Otherwise go with the url adapter from the appctx and make
+    # the URLs external by default.
+    else:
+        url_adapter = appctx.url_adapter
+
+        if url_adapter is None:
+            raise RuntimeError(
+                'Application was not able to create a URL adapter for request'
+                ' independent URL generation. You might be able to fix this by'
+                ' setting the SERVER_NAME config variable.'
+            )
+
+        external = values.pop('_external', True)
+
+    anchor = values.pop('_anchor', None)
+    method = values.pop('_method', None)
+    scheme = values.pop('_scheme', None)
+    appctx.app.inject_url_defaults(endpoint, values)
+
+    # This is not the best way to deal with this but currently the
+    # underlying Werkzeug router does not support overriding the scheme on
+    # a per build call basis.
+    old_scheme = None
+    if scheme is not None:
+        if not external:
+            raise ValueError('When specifying _scheme, _external must be True')
+        old_scheme = url_adapter.url_scheme
+        url_adapter.url_scheme = scheme
+
+    try:
+        try:
+            rv = url_adapter.build(endpoint, values, method=method,
+                                   force_external=external)
+        finally:
+            if old_scheme is not None:
+                url_adapter.url_scheme = old_scheme
+    except BuildError as error:
+        # We need to inject the values again so that the app callback can
+        # deal with that sort of stuff.
+        values['_external'] = external
+        values['_anchor'] = anchor
+        values['_method'] = method
+        values['_scheme'] = scheme
+        return appctx.app.handle_url_build_error(error, endpoint, values)
+
+    if anchor is not None:
+        rv += '#' + url_quote(anchor)
+    return rv
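+
+
+# Putting the parameters documented above together (an illustrative
+# sketch; the 'profile' endpoint is hypothetical).
+def _example_url_for():
+    from flask import Flask
+
+    app = Flask(__name__)
+
+    @app.route('/user/<name>')
+    def profile(name):
+        return name
+
+    with app.test_request_context():
+        assert url_for('profile', name='ada') == '/user/ada'
+        # Unknown variable arguments become query arguments.
+        assert url_for('profile', name='ada', page=2) == '/user/ada?page=2'
+        url_for('profile', name='ada', _external=True)  # absolute URL
+        url_for('profile', name='ada', _anchor='top')   # appends '#top'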
+
+
+def get_template_attribute(template_name, attribute):
+    """Loads a macro (or variable) a template exports.  This can be used to
+    invoke a macro from within Python code.  If you for example have a
+    template named :file:`_cider.html` with the following contents:
+
+    .. sourcecode:: html+jinja
+
+       {% macro hello(name) %}Hello {{ name }}!{% endmacro %}
+
+    You can access this from Python code like this::
+
+        hello = get_template_attribute('_cider.html', 'hello')
+        return hello('World')
+
+    .. versionadded:: 0.2
+
+    :param template_name: the name of the template
+    :param attribute: the name of the variable or macro to access
+    """
+    return getattr(current_app.jinja_env.get_template(template_name).module,
+                   attribute)
+
+
+def flash(message, category='message'):
+    """Flashes a message to the next request.  In order to remove the
+    flashed message from the session and to display it to the user,
+    the template has to call :func:`get_flashed_messages`.
+
+    .. versionchanged:: 0.3
+       `category` parameter added.
+
+    :param message: the message to be flashed.
+    :param category: the category for the message.  The following values
+                     are recommended: ``'message'`` for any kind of message,
+                     ``'error'`` for errors, ``'info'`` for information
+                     messages and ``'warning'`` for warnings.  However any
+                     kind of string can be used as category.
+    """
+    # Original implementation:
+    #
+    #     session.setdefault('_flashes', []).append((category, message))
+    #
+    # This assumed that changes made to mutable structures in the session are
+    # always in sync with the session object, which is not true for session
+    # implementations that use external storage for keeping their keys/values.
+    flashes = session.get('_flashes', [])
+    flashes.append((category, message))
+    session['_flashes'] = flashes
+    message_flashed.send(current_app._get_current_object(),
+                         message=message, category=category)
+
+
+def get_flashed_messages(with_categories=False, category_filter=()):
+    """Pulls all flashed messages from the session and returns them.
+    Further calls in the same request to the function will return
+    the same messages.  By default just the messages are returned,
+    but when `with_categories` is set to ``True``, the return value will
+    be a list of tuples in the form ``(category, message)`` instead.
+
+    Filter the flashed messages to one or more categories by providing those
+    categories in `category_filter`.  This allows rendering categories in
+    separate html blocks.  The `with_categories` and `category_filter`
+    arguments are distinct:
+
+    * `with_categories` controls whether categories are returned with message
+      text (``True`` gives a tuple, whereas ``False`` gives just the message text).
+    * `category_filter` filters the messages down to only those matching the
+      provided categories.
+
+    See :ref:`message-flashing-pattern` for examples.
+
+    .. versionchanged:: 0.3
+       `with_categories` parameter added.
+
+    .. versionchanged:: 0.9
+        `category_filter` parameter added.
+
+    :param with_categories: set to ``True`` to also receive categories.
+    :param category_filter: whitelist of categories to limit return values
+    """
+    flashes = _request_ctx_stack.top.flashes
+    if flashes is None:
+        _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
+            if '_flashes' in session else []
+    if category_filter:
+        flashes = list(filter(lambda f: f[0] in category_filter, flashes))
+    if not with_categories:
+        return [x[1] for x in flashes]
+    return flashes
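+
+
+# A usage sketch for the two flashing helpers above (illustrative; the
+# secret key and messages are placeholders).
+def _example_flashing():
+    from flask import Flask
+
+    app = Flask(__name__)
+    app.secret_key = 'dev'  # flashed messages are stored in the session
+
+    with app.test_request_context():
+        flash('Saved.', 'info')
+        flash('Disk almost full.', 'warning')
+        assert get_flashed_messages(with_categories=True) == [
+            ('info', 'Saved.'), ('warning', 'Disk almost full.')]
+        # Repeated calls in the same request return the same messages.
+        assert get_flashed_messages(category_filter=['warning']) == [
+            'Disk almost full.']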
+
+
+def send_file(filename_or_fp, mimetype=None, as_attachment=False,
+              attachment_filename=None, add_etags=True,
+              cache_timeout=None, conditional=False, last_modified=None):
+    """Sends the contents of a file to the client.  This will use the
+    most efficient method available and configured.  By default it will
+    try to use the WSGI server's file_wrapper support.  Alternatively
+    you can set the application's :attr:`~Flask.use_x_sendfile` attribute
+    to ``True`` to directly emit an ``X-Sendfile`` header.  This however
+    requires support of the underlying webserver for ``X-Sendfile``.
+
+    By default it will try to guess the mimetype for you, but you can
+    also explicitly provide one.  For extra security you probably want
+    to send certain files as attachment (HTML for instance).  The mimetype
+    guessing requires a `filename` or an `attachment_filename` to be
+    provided.
+
+    ETags will also be attached automatically if a `filename` is provided. You
+    can turn this off by setting `add_etags=False`.
+
+    If `conditional=True` and `filename` is provided, this method will try to
+    upgrade the response stream to support range requests.  This will allow
+    the request to be answered with partial content response.
+
+    Please never pass filenames to this function from user sources;
+    you should use :func:`send_from_directory` instead.
+
+    .. versionadded:: 0.2
+
+    .. versionadded:: 0.5
+       The `add_etags`, `cache_timeout` and `conditional` parameters were
+       added.  The default behavior is now to attach etags.
+
+    .. versionchanged:: 0.7
+       mimetype guessing and etag support for file objects was
+       deprecated because it was unreliable.  Pass a filename if you are
+       able to, otherwise attach an etag yourself.  This functionality
+       will be removed in Flask 1.0
+
+    .. versionchanged:: 0.9
+       cache_timeout pulls its default from application config, when None.
+
+    .. versionchanged:: 0.12
+       The filename is no longer automatically inferred from file objects. If
+       you want to use automatic mimetype and etag support, pass a filepath via
+       `filename_or_fp` or `attachment_filename`.
+
+    .. versionchanged:: 0.12
+       The `attachment_filename` is preferred over `filename` for MIME-type
+       detection.
+
+    .. versionchanged:: 1.0
+        UTF-8 filenames, as specified in `RFC 2231`_, are supported.
+
+    .. _RFC 2231: https://tools.ietf.org/html/rfc2231#section-4
+
+    :param filename_or_fp: the filename of the file to send.
+                           This is relative to the :attr:`~Flask.root_path`
+                           if a relative path is specified.
+                           Alternatively a file object might be provided in
+                           which case ``X-Sendfile`` might not work and fall
+                           back to the traditional method.  Make sure that the
+                           file pointer is positioned at the start of data to
+                           send before calling :func:`send_file`.
+    :param mimetype: the mimetype of the file if provided. If a file path is
+                     given, auto detection happens as fallback, otherwise an
+                     error will be raised.
+    :param as_attachment: set to ``True`` if you want to send this file with
+                          a ``Content-Disposition: attachment`` header.
+    :param attachment_filename: the filename for the attachment if it
+                                differs from the file's filename.
+    :param add_etags: set to ``False`` to disable attaching of etags.
+    :param conditional: set to ``True`` to enable conditional responses.
+
+    :param cache_timeout: the timeout in seconds for the headers. When ``None``
+                          (default), this value is set by
+                          :meth:`~Flask.get_send_file_max_age` of
+                          :data:`~flask.current_app`.
+    :param last_modified: set the ``Last-Modified`` header to this value,
+        a :class:`~datetime.datetime` or timestamp.
+        If a file was passed, this overrides its mtime.
+    """
+    mtime = None
+    fsize = None
+    if isinstance(filename_or_fp, string_types):
+        filename = filename_or_fp
+        if not os.path.isabs(filename):
+            filename = os.path.join(current_app.root_path, filename)
+        file = None
+        if attachment_filename is None:
+            attachment_filename = os.path.basename(filename)
+    else:
+        file = filename_or_fp
+        filename = None
+
+    if mimetype is None:
+        if attachment_filename is not None:
+            mimetype = mimetypes.guess_type(attachment_filename)[0] \
+                or 'application/octet-stream'
+
+        if mimetype is None:
+            raise ValueError(
+                'Unable to infer MIME-type because no filename is available. '
+                'Please set either `attachment_filename`, pass a filepath to '
+                '`filename_or_fp` or set your own MIME-type via `mimetype`.'
+            )
+
+    headers = Headers()
+    if as_attachment:
+        if attachment_filename is None:
+            raise TypeError('filename unavailable, required for '
+                            'sending as attachment')
+
+        try:
+            attachment_filename = attachment_filename.encode('latin-1')
+        except UnicodeEncodeError:
+            filenames = {
+                'filename': unicodedata.normalize(
+                    'NFKD', attachment_filename).encode('latin-1', 'ignore'),
+                'filename*': "UTF-8''%s" % url_quote(attachment_filename),
+            }
+        else:
+            filenames = {'filename': attachment_filename}
+
+        headers.add('Content-Disposition', 'attachment', **filenames)
+
+    if current_app.use_x_sendfile and filename:
+        if file is not None:
+            file.close()
+        headers['X-Sendfile'] = filename
+        fsize = os.path.getsize(filename)
+        headers['Content-Length'] = fsize
+        data = None
+    else:
+        if file is None:
+            file = open(filename, 'rb')
+            mtime = os.path.getmtime(filename)
+            fsize = os.path.getsize(filename)
+            headers['Content-Length'] = fsize
+        data = wrap_file(request.environ, file)
+
+    rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
+                                    direct_passthrough=True)
+
+    if last_modified is not None:
+        rv.last_modified = last_modified
+    elif mtime is not None:
+        rv.last_modified = mtime
+
+    rv.cache_control.public = True
+    if cache_timeout is None:
+        cache_timeout = current_app.get_send_file_max_age(filename)
+    if cache_timeout is not None:
+        rv.cache_control.max_age = cache_timeout
+        rv.expires = int(time() + cache_timeout)
+
+    if add_etags and filename is not None:
+        from warnings import warn
+
+        try:
+            rv.set_etag('%s-%s-%s' % (
+                os.path.getmtime(filename),
+                os.path.getsize(filename),
+                adler32(
+                    filename.encode('utf-8') if isinstance(filename, text_type)
+                    else filename
+                ) & 0xffffffff
+            ))
+        except OSError:
+            warn('Access to %s failed, maybe it does not exist, so etags are '
+                 'omitted from the headers' % filename, stacklevel=2)
+
+    if conditional:
+        try:
+            rv = rv.make_conditional(request, accept_ranges=True,
+                                     complete_length=fsize)
+        except RequestedRangeNotSatisfiable:
+            if file is not None:
+                file.close()
+            raise
+        # make sure we don't send x-sendfile for servers that
+        # ignore the 304 status code for x-sendfile.
+        if rv.status_code == 304:
+            rv.headers.pop('x-sendfile', None)
+    return rv
+
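+# A minimal usage sketch (hypothetical route and file name, not part of this
+# module): send a file as a download with conditional range support:
+#
+#     @app.route('/report')
+#     def report():
+#         return send_file('static/report.pdf', as_attachment=True,
+#                          attachment_filename='monthly-report.pdf',
+#                          conditional=True)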
+
+def safe_join(directory, *pathnames):
+    """Safely join `directory` and zero or more untrusted `pathnames`
+    components.
+
+    Example usage::
+
+        @app.route('/wiki/<path:filename>')
+        def wiki_page(filename):
+            filename = safe_join(app.config['WIKI_FOLDER'], filename)
+            with open(filename, 'rb') as fd:
+                content = fd.read()  # Read and process the file content...
+
+    :param directory: the trusted base directory.
+    :param pathnames: the untrusted pathnames relative to that directory.
+    :raises: :class:`~werkzeug.exceptions.NotFound` if one or more passed
+            paths fall outside of the directory's boundaries.
+    """
+
+    parts = [directory]
+
+    for filename in pathnames:
+        if filename != '':
+            filename = posixpath.normpath(filename)
+
+        if (
+            any(sep in filename for sep in _os_alt_seps)
+            or os.path.isabs(filename)
+            or filename == '..'
+            or filename.startswith('../')
+        ):
+            raise NotFound()
+
+        parts.append(filename)
+
+    return posixpath.join(*parts)
+
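+# A hedged illustration of the checks above: a normal relative path joins as
+# expected, while a traversal attempt is rejected:
+#
+#     safe_join('static', 'css/app.css')  # -> 'static/css/app.css'
+#     safe_join('static', '../secrets')   # raises NotFound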
+
+def send_from_directory(directory, filename, **options):
+    """Send a file from a given directory with :func:`send_file`.  This
+    is a secure way to quickly expose static files from an upload folder
+    or something similar.
+
+    Example usage::
+
+        @app.route('/uploads/<path:filename>')
+        def download_file(filename):
+            return send_from_directory(app.config['UPLOAD_FOLDER'],
+                                       filename, as_attachment=True)
+
+    .. admonition:: Sending files and Performance
+
+       It is strongly recommended to activate either ``X-Sendfile`` support in
+       your webserver or (if no authentication happens) to tell the webserver
+       to serve files for the given path on its own without calling into the
+       web application for improved performance.
+
+    .. versionadded:: 0.5
+
+    :param directory: the directory where all the files are stored.
+    :param filename: the filename relative to that directory to
+                     download.
+    :param options: optional keyword arguments that are directly
+                    forwarded to :func:`send_file`.
+    """
+    filename = safe_join(directory, filename)
+    if not os.path.isabs(filename):
+        filename = os.path.join(current_app.root_path, filename)
+    try:
+        if not os.path.isfile(filename):
+            raise NotFound()
+    except (TypeError, ValueError):
+        raise BadRequest()
+    options.setdefault('conditional', True)
+    return send_file(filename, **options)
+
+
+def get_root_path(import_name):
+    """Returns the path to a package or cwd if that cannot be found.  This
+    returns the path of a package or the folder that contains a module.
+
+    Not to be confused with the package path returned by :func:`find_package`.
+    """
+    # Module already imported and has a file attribute.  Use that first.
+    mod = sys.modules.get(import_name)
+    if mod is not None and hasattr(mod, '__file__'):
+        return os.path.dirname(os.path.abspath(mod.__file__))
+
+    # Next attempt: check the loader.
+    loader = pkgutil.get_loader(import_name)
+
+    # Loader does not exist or we're referring to an unloaded main module
+    # or a main module without path (interactive sessions), go with the
+    # current working directory.
+    if loader is None or import_name == '__main__':
+        return os.getcwd()
+
+    # For .egg, zipimporter does not have get_filename until Python 2.7.
+    # Some other loaders might exhibit the same behavior.
+    if hasattr(loader, 'get_filename'):
+        filepath = loader.get_filename(import_name)
+    else:
+        # Fall back to imports.
+        __import__(import_name)
+        mod = sys.modules[import_name]
+        filepath = getattr(mod, '__file__', None)
+
+        # If we don't have a filepath it might be because we are a
+        # namespace package.  In this case we pick the root path from the
+        # first module that is contained in our package.
+        if filepath is None:
+            raise RuntimeError('No root path can be found for the provided '
+                               'module "%s".  This can happen because the '
+                               'module came from an import hook that does '
+                               'not provide file name information or because '
+                               'it\'s a namespace package.  In this case '
+                               'the root path needs to be explicitly '
+                               'provided.' % import_name)
+
+    # filepath is import_name.py for a module, or __init__.py for a package.
+    return os.path.dirname(os.path.abspath(filepath))
+
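+# Hedged examples of the lookup order above:
+#
+#     get_root_path('flask')     # directory containing flask/__init__.py
+#     get_root_path('__main__')  # os.getcwd() in an interactive session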
+
+def _matching_loader_thinks_module_is_package(loader, mod_name):
+    """Given the loader that loaded a module and the module this function
+    attempts to figure out if the given module is actually a package.
+    """
+    # If the loader can tell us if something is a package, we can
+    # directly ask the loader.
+    if hasattr(loader, 'is_package'):
+        return loader.is_package(mod_name)
+    # importlib's namespace loaders do not have this functionality but
+    # all the modules it loads are packages, so we can take advantage of
+    # this information.
+    elif (loader.__class__.__module__ == '_frozen_importlib' and
+          loader.__class__.__name__ == 'NamespaceLoader'):
+        return True
+    # Otherwise we need to fail with an error that explains what went
+    # wrong.
+    raise AttributeError(
+        ('%s.is_package() method is missing but is required by Flask for '
+         'PEP 302 import hooks.  If you do not use import hooks and '
+         'you encounter this error please file a bug against Flask.') %
+        loader.__class__.__name__)
+
+
+def find_package(import_name):
+    """Finds a package and returns the prefix (or None if the package is
+    not installed) as well as the folder that contains the package or
+    module as a tuple.  The package path returned is the module that would
+    have to be added to the pythonpath in order to make it possible to
+    import the module.  The prefix is the path below which a UNIX like
+    folder structure exists (lib, share etc.).
+    """
+    root_mod_name = import_name.split('.')[0]
+    loader = pkgutil.get_loader(root_mod_name)
+    if loader is None or import_name == '__main__':
+        # import name is not found, or interactive/main module
+        package_path = os.getcwd()
+    else:
+        # For .egg, zipimporter does not have get_filename until Python 2.7.
+        if hasattr(loader, 'get_filename'):
+            filename = loader.get_filename(root_mod_name)
+        elif hasattr(loader, 'archive'):
+            # zipimporter's loader.archive points to the .egg or .zip file;
+            # the archive filename is dropped in the call to dirname below.
+            filename = loader.archive
+        else:
+            # At least one loader is missing both get_filename and archive:
+            # Google App Engine's HardenedModulesHook
+            #
+            # Fall back to imports.
+            __import__(import_name)
+            filename = sys.modules[import_name].__file__
+        package_path = os.path.abspath(os.path.dirname(filename))
+
+        # In case the root module is a package we need to chop off the
+        # rightmost part.  This needs to go through a helper function
+        # because of Python 3.3 namespace packages.
+        if _matching_loader_thinks_module_is_package(
+                loader, root_mod_name):
+            package_path = os.path.dirname(package_path)
+
+    site_parent, site_folder = os.path.split(package_path)
+    py_prefix = os.path.abspath(sys.prefix)
+    if package_path.startswith(py_prefix):
+        return py_prefix, package_path
+    elif site_folder.lower() == 'site-packages':
+        parent, folder = os.path.split(site_parent)
+        # Windows like installations
+        if folder.lower() == 'lib':
+            base_dir = parent
+        # UNIX like installations
+        elif os.path.basename(parent).lower() == 'lib':
+            base_dir = os.path.dirname(parent)
+        else:
+            base_dir = site_parent
+        return base_dir, package_path
+    return None, package_path
+
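+# A hedged sketch of the return value: inside a virtualenv, a call such as
+# ``find_package('flask')`` would return ``(prefix, package_path)`` along the
+# lines of ('/path/to/venv', '/path/to/venv/lib/python3.7/site-packages').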
+
+class locked_cached_property(object):
+    """A decorator that converts a function into a lazy property.  The
+    function wrapped is called the first time to retrieve the result
+    and then that calculated result is used the next time you access
+    the value.  Works like the one in Werkzeug but has a lock for
+    thread safety.
+    """
+
+    def __init__(self, func, name=None, doc=None):
+        self.__name__ = name or func.__name__
+        self.__module__ = func.__module__
+        self.__doc__ = doc or func.__doc__
+        self.func = func
+        self.lock = RLock()
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        with self.lock:
+            value = obj.__dict__.get(self.__name__, _missing)
+            if value is _missing:
+                value = self.func(obj)
+                obj.__dict__[self.__name__] = value
+            return value
+
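+# A minimal usage sketch (hypothetical class and helper, not part of this
+# module); the wrapped function runs once per instance, later reads hit the
+# cached value under the lock:
+#
+#     class Service(object):
+#         @locked_cached_property
+#         def connection(self):
+#             return open_connection()  # hypothetical expensive setup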
+
+class _PackageBoundObject(object):
+    #: The name of the package or module that this app belongs to. Do not
+    #: change this once it is set by the constructor.
+    import_name = None
+
+    #: Location of the template files to be added to the template lookup.
+    #: ``None`` if templates should not be added.
+    template_folder = None
+
+    #: Absolute path to the package on the filesystem. Used to look up
+    #: resources contained in the package.
+    root_path = None
+
+    def __init__(self, import_name, template_folder=None, root_path=None):
+        self.import_name = import_name
+        self.template_folder = template_folder
+
+        if root_path is None:
+            root_path = get_root_path(self.import_name)
+
+        self.root_path = root_path
+        self._static_folder = None
+        self._static_url_path = None
+
+    def _get_static_folder(self):
+        if self._static_folder is not None:
+            return os.path.join(self.root_path, self._static_folder)
+
+    def _set_static_folder(self, value):
+        self._static_folder = value
+
+    static_folder = property(
+        _get_static_folder, _set_static_folder,
+        doc='The absolute path to the configured static folder.'
+    )
+    del _get_static_folder, _set_static_folder
+
+    def _get_static_url_path(self):
+        if self._static_url_path is not None:
+            return self._static_url_path
+
+        if self.static_folder is not None:
+            return '/' + os.path.basename(self.static_folder)
+
+    def _set_static_url_path(self, value):
+        self._static_url_path = value
+
+    static_url_path = property(
+        _get_static_url_path, _set_static_url_path,
+        doc='The URL prefix that the static route will be registered for.'
+    )
+    del _get_static_url_path, _set_static_url_path
+
+    @property
+    def has_static_folder(self):
+        """This is ``True`` if the package bound object's container has a
+        folder for static files.
+
+        .. versionadded:: 0.5
+        """
+        return self.static_folder is not None
+
+    @locked_cached_property
+    def jinja_loader(self):
+        """The Jinja loader for this package bound object.
+
+        .. versionadded:: 0.5
+        """
+        if self.template_folder is not None:
+            return FileSystemLoader(os.path.join(self.root_path,
+                                                 self.template_folder))
+
+    def get_send_file_max_age(self, filename):
+        """Provides default cache_timeout for the :func:`send_file` functions.
+
+        By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from
+        the configuration of :data:`~flask.current_app`.
+
+        Static file functions such as :func:`send_from_directory` use this
+        function, and :func:`send_file` calls this function on
+        :data:`~flask.current_app` when the given cache_timeout is ``None``. If a
+        cache_timeout is given in :func:`send_file`, that timeout is used;
+        otherwise, this method is called.
+
+        This allows subclasses to change the behavior when sending files based
+        on the filename.  For example, to set the cache timeout for .js files
+        to 60 seconds::
+
+            class MyFlask(flask.Flask):
+                def get_send_file_max_age(self, name):
+                    if name.lower().endswith('.js'):
+                        return 60
+                    return flask.Flask.get_send_file_max_age(self, name)
+
+        .. versionadded:: 0.9
+        """
+        return total_seconds(current_app.send_file_max_age_default)
+
+    def send_static_file(self, filename):
+        """Function used internally to send static files from the static
+        folder to the browser.
+
+        .. versionadded:: 0.5
+        """
+        if not self.has_static_folder:
+            raise RuntimeError('No static folder for this object')
+        # Ensure get_send_file_max_age is called in all cases, including
+        # when this object is a Blueprint rather than the app itself.
+        cache_timeout = self.get_send_file_max_age(filename)
+        return send_from_directory(self.static_folder, filename,
+                                   cache_timeout=cache_timeout)
+
+    def open_resource(self, resource, mode='rb'):
+        """Opens a resource from the application's resource folder.  To see
+        how this works, consider the following folder structure::
+
+            /myapplication.py
+            /schema.sql
+            /static
+                /style.css
+            /templates
+                /layout.html
+                /index.html
+
+        If you want to open the :file:`schema.sql` file you would do the
+        following::
+
+            with app.open_resource('schema.sql') as f:
+                contents = f.read()
+                do_something_with(contents)
+
+        :param resource: the name of the resource.  To access resources within
+                         subfolders use forward slashes as separator.
+        :param mode: resource file opening mode, default is 'rb'.
+        """
+        if mode not in ('r', 'rb'):
+            raise ValueError('Resources can only be opened for reading')
+        return open(os.path.join(self.root_path, resource), mode)
+
+
+def total_seconds(td):
+    """Returns the total seconds from a timedelta object.
+
+    :param timedelta td: the timedelta to be converted to seconds
+
+    :returns: number of seconds
+    :rtype: int
+    """
+    return td.days * 60 * 60 * 24 + td.seconds
+
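+# For example: ``total_seconds(timedelta(days=1, seconds=30))`` returns
+# ``86430`` (1 * 24 * 60 * 60 + 30).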
+
+def is_ip(value):
+    """Determine if the given string is an IP address.
+
+    Python 2 on Windows doesn't provide ``inet_pton``, so this only
+    checks IPv4 addresses in that environment.
+
+    :param value: value to check
+    :type value: str
+
+    :return: True if string is an IP address
+    :rtype: bool
+    """
+    if PY2 and os.name == 'nt':
+        try:
+            socket.inet_aton(value)
+            return True
+        except socket.error:
+            return False
+
+    for family in (socket.AF_INET, socket.AF_INET6):
+        try:
+            socket.inet_pton(family, value)
+        except socket.error:
+            pass
+        else:
+            return True
+
+    return False
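+
+
+# For example: ``is_ip('127.0.0.1')`` returns ``True``, ``is_ip('::1')``
+# returns ``True`` where ``inet_pton`` is available, and
+# ``is_ip('example.com')`` returns ``False``.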
diff --git a/lib/python3.7/site-packages/flask/json/__init__.py b/lib/python3.7/site-packages/flask/json/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbe6b92f0a3712616a271a8cebefdcbfb2c4f7e8
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/json/__init__.py
@@ -0,0 +1,327 @@
+# -*- coding: utf-8 -*-
+"""
+flask.json
+~~~~~~~~~~
+
+:copyright: © 2010 by the Pallets team.
+:license: BSD, see LICENSE for more details.
+"""
+import codecs
+import io
+import uuid
+from datetime import date, datetime
+from flask.globals import current_app, request
+from flask._compat import text_type, PY2
+
+from werkzeug.http import http_date
+from jinja2 import Markup
+
+# Use the same json implementation as itsdangerous on which we
+# depend anyway.
+from itsdangerous import json as _json
+
+
+# Figure out if simplejson escapes slashes.  This behavior changed from one
+# version to another without explanation.
+_slash_escape = '\\/' not in _json.dumps('/')
+
+
+__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
+           'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder',
+           'jsonify']
+
+
+def _wrap_reader_for_text(fp, encoding):
+    if isinstance(fp.read(0), bytes):
+        fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
+    return fp
+
+
+def _wrap_writer_for_text(fp, encoding):
+    try:
+        fp.write('')
+    except TypeError:
+        fp = io.TextIOWrapper(fp, encoding)
+    return fp
+
+
+class JSONEncoder(_json.JSONEncoder):
+    """The default Flask JSON encoder.  This one extends the default simplejson
+    encoder by also supporting ``datetime`` objects, ``UUID`` as well as
+    ``Markup`` objects which are serialized as RFC 822 datetime strings (same
+    as the HTTP date format).  In order to support more data types override the
+    :meth:`default` method.
+    """
+
+    def default(self, o):
+        """Implement this method in a subclass such that it returns a
+        serializable object for ``o``, or calls the base implementation (to
+        raise a :exc:`TypeError`).
+
+        For example, to support arbitrary iterators, you could implement
+        default like this::
+
+            def default(self, o):
+                try:
+                    iterable = iter(o)
+                except TypeError:
+                    pass
+                else:
+                    return list(iterable)
+                return JSONEncoder.default(self, o)
+        """
+        if isinstance(o, datetime):
+            return http_date(o.utctimetuple())
+        if isinstance(o, date):
+            return http_date(o.timetuple())
+        if isinstance(o, uuid.UUID):
+            return str(o)
+        if hasattr(o, '__html__'):
+            return text_type(o.__html__())
+        return _json.JSONEncoder.default(self, o)
+
+
+class JSONDecoder(_json.JSONDecoder):
+    """The default JSON decoder.  This one does not change the behavior from
+    the default simplejson decoder.  Consult the :mod:`json` documentation
+    for more information.  This decoder is not only used for the load
+    functions of this module but also for :attr:`~flask.Request`.
+    """
+
+
+def _dump_arg_defaults(kwargs):
+    """Inject default arguments for dump functions."""
+    if current_app:
+        bp = current_app.blueprints.get(request.blueprint) if request else None
+        kwargs.setdefault(
+            'cls',
+            bp.json_encoder if bp and bp.json_encoder
+                else current_app.json_encoder
+        )
+
+        if not current_app.config['JSON_AS_ASCII']:
+            kwargs.setdefault('ensure_ascii', False)
+
+        kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
+    else:
+        kwargs.setdefault('sort_keys', True)
+        kwargs.setdefault('cls', JSONEncoder)
+
+
+def _load_arg_defaults(kwargs):
+    """Inject default arguments for load functions."""
+    if current_app:
+        bp = current_app.blueprints.get(request.blueprint) if request else None
+        kwargs.setdefault(
+            'cls',
+            bp.json_decoder if bp and bp.json_decoder
+                else current_app.json_decoder
+        )
+    else:
+        kwargs.setdefault('cls', JSONDecoder)
+
+
+def detect_encoding(data):
+    """Detect which UTF codec was used to encode the given bytes.
+
+    The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is
+    accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big
+    or little endian. Some editors or libraries may prepend a BOM.
+
+    :param data: Bytes in unknown UTF encoding.
+    :return: UTF encoding name
+    """
+    head = data[:4]
+
+    if head[:3] == codecs.BOM_UTF8:
+        return 'utf-8-sig'
+
+    if b'\x00' not in head:
+        return 'utf-8'
+
+    if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE):
+        return 'utf-32'
+
+    if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE):
+        return 'utf-16'
+
+    if len(head) == 4:
+        if head[:3] == b'\x00\x00\x00':
+            return 'utf-32-be'
+
+        if head[::2] == b'\x00\x00':
+            return 'utf-16-be'
+
+        if head[1:] == b'\x00\x00\x00':
+            return 'utf-32-le'
+
+        if head[1::2] == b'\x00\x00':
+            return 'utf-16-le'
+
+    if len(head) == 2:
+        return 'utf-16-be' if head.startswith(b'\x00') else 'utf-16-le'
+
+    return 'utf-8'
+
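+# Hedged examples of the detection above:
+#
+#     detect_encoding(b'{"a": 1}')                # -> 'utf-8'
+#     detect_encoding(u'{}'.encode('utf-16-le'))  # -> 'utf-16-le'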
+
+def dumps(obj, **kwargs):
+    """Serialize ``obj`` to a JSON formatted ``str`` by using the application's
+    configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an
+    application on the stack.
+
+    This function can return ``unicode`` strings or ASCII-only bytestrings,
+    which coerce into unicode strings automatically.  The default behavior
+    is controlled by the ``JSON_AS_ASCII`` configuration variable and can be
+    overridden by the simplejson ``ensure_ascii`` parameter.
+    """
+    _dump_arg_defaults(kwargs)
+    encoding = kwargs.pop('encoding', None)
+    rv = _json.dumps(obj, **kwargs)
+    if encoding is not None and isinstance(rv, text_type):
+        rv = rv.encode(encoding)
+    return rv
+
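+# For example, outside of an application context the defaults injected by
+# ``_dump_arg_defaults`` make key order deterministic:
+#
+#     dumps({'b': 2, 'a': 1})  # -> '{"a": 1, "b": 2}' (sort_keys=True)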
+
+def dump(obj, fp, **kwargs):
+    """Like :func:`dumps` but writes into a file object."""
+    _dump_arg_defaults(kwargs)
+    encoding = kwargs.pop('encoding', None)
+    if encoding is not None:
+        fp = _wrap_writer_for_text(fp, encoding)
+    _json.dump(obj, fp, **kwargs)
+
+
+def loads(s, **kwargs):
+    """Unserialize a JSON object from a string ``s`` by using the application's
+    configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an
+    application on the stack.
+    """
+    _load_arg_defaults(kwargs)
+    if isinstance(s, bytes):
+        encoding = kwargs.pop('encoding', None)
+        if encoding is None:
+            encoding = detect_encoding(s)
+        s = s.decode(encoding)
+    return _json.loads(s, **kwargs)
+
+
+def load(fp, **kwargs):
+    """Like :func:`loads` but reads from a file object.
+    """
+    _load_arg_defaults(kwargs)
+    if not PY2:
+        fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8')
+    return _json.load(fp, **kwargs)
+
+
+def htmlsafe_dumps(obj, **kwargs):
+    """Works exactly like :func:`dumps` but is safe for use in ``<script>``
+    tags.  It accepts the same arguments and returns a JSON string.  Note that
+    this is available in templates through the ``|tojson`` filter which will
+    also mark the result as safe.  Due to how this function escapes certain
+    characters this is safe even if used outside of ``<script>`` tags.
+
+    The following characters are escaped in strings:
+
+    -   ``<``
+    -   ``>``
+    -   ``&``
+    -   ``'``
+
+    This makes it safe to embed such strings in any place in HTML with the
+    notable exception of double quoted attributes.  In that case single
+    quote your attributes or HTML escape the value in addition.
+
+    .. versionchanged:: 0.10
+       This function's return value is now always safe for HTML usage, even
+       if outside of script tags or if used in XHTML.  This rule does not
+       hold true when using this function in HTML attributes that are double
+       quoted.  Always single quote attributes if you use the ``|tojson``
+       filter.  Alternatively use ``|tojson|forceescape``.
+    """
+    rv = dumps(obj, **kwargs) \
+        .replace(u'<', u'\\u003c') \
+        .replace(u'>', u'\\u003e') \
+        .replace(u'&', u'\\u0026') \
+        .replace(u"'", u'\\u0027')
+    if not _slash_escape:
+        rv = rv.replace('\\/', '/')
+    return rv
+
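+# A hedged example with the stdlib ``json`` backend:
+#
+#     htmlsafe_dumps('</script>')  # -> '"\\u003c/script\\u003e"'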
+
+def htmlsafe_dump(obj, fp, **kwargs):
+    """Like :func:`htmlsafe_dumps` but writes into a file object."""
+    fp.write(text_type(htmlsafe_dumps(obj, **kwargs)))
+
+
+def jsonify(*args, **kwargs):
+    """This function wraps :func:`dumps` to add a few enhancements that make
+    life easier.  It turns the JSON output into a :class:`~flask.Response`
+    object with the :mimetype:`application/json` mimetype.  For convenience, it
+    also converts multiple arguments into an array or multiple keyword arguments
+    into a dict.  This means that both ``jsonify(1,2,3)`` and
+    ``jsonify([1,2,3])`` serialize to ``[1,2,3]``.
+
+    For clarity, the JSON serialization behavior has the following differences
+    from :func:`dumps`:
+
+    1. Single argument: Passed straight through to :func:`dumps`.
+    2. Multiple arguments: Converted to an array before being passed to
+       :func:`dumps`.
+    3. Multiple keyword arguments: Converted to a dict before being passed to
+       :func:`dumps`.
+    4. Both args and kwargs: Behavior is undefined and a ``TypeError`` is
+       raised.
+
+    Example usage::
+
+        from flask import jsonify
+
+        @app.route('/_get_current_user')
+        def get_current_user():
+            return jsonify(username=g.user.username,
+                           email=g.user.email,
+                           id=g.user.id)
+
+    This will send a JSON response like this to the browser::
+
+        {
+            "username": "admin",
+            "email": "admin@localhost",
+            "id": 42
+        }
+
+
+    .. versionchanged:: 0.11
+       Added support for serializing top-level arrays. This introduces a
+       security risk in ancient browsers. See :ref:`json-security` for details.
+
+    This function's response will be pretty printed if the
+    ``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is set to True or the
+    Flask app is running in debug mode. Compressed (not pretty) formatting
+    currently means no indents and no spaces after separators.
+
+    .. versionadded:: 0.2
+    """
+
+    indent = None
+    separators = (',', ':')
+
+    if current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] or current_app.debug:
+        indent = 2
+        separators = (', ', ': ')
+
+    if args and kwargs:
+        raise TypeError('jsonify() behavior undefined when passed both args and kwargs')
+    elif len(args) == 1:  # single args are passed directly to dumps()
+        data = args[0]
+    else:
+        data = args or kwargs
+
+    return current_app.response_class(
+        dumps(data, indent=indent, separators=separators) + '\n',
+        mimetype=current_app.config['JSONIFY_MIMETYPE']
+    )
+
+
+def tojson_filter(obj, **kwargs):
+    return Markup(htmlsafe_dumps(obj, **kwargs))
diff --git a/lib/python3.7/site-packages/flask/json/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/flask/json/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9be720e8757e8d0b2b9cb2c977ecf736de2c20c5
Binary files /dev/null and b/lib/python3.7/site-packages/flask/json/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/json/__pycache__/tag.cpython-37.pyc b/lib/python3.7/site-packages/flask/json/__pycache__/tag.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66b2af90a2d1e3a50b648b01ebf5ce0a86b8a8ea
Binary files /dev/null and b/lib/python3.7/site-packages/flask/json/__pycache__/tag.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/flask/json/tag.py b/lib/python3.7/site-packages/flask/json/tag.py
new file mode 100644
index 0000000000000000000000000000000000000000..11c966c58276b093b04f09d60dc3be68475f87fc
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/json/tag.py
@@ -0,0 +1,300 @@
+# -*- coding: utf-8 -*-
+"""
+Tagged JSON
+~~~~~~~~~~~
+
+A compact representation for lossless serialization of non-standard JSON types.
+:class:`~flask.sessions.SecureCookieSessionInterface` uses this to serialize
+the session data, but it may be useful in other places. It can be extended to
+support other types.
+
+.. autoclass:: TaggedJSONSerializer
+    :members:
+
+.. autoclass:: JSONTag
+    :members:
+
+Let's see an example that adds support for :class:`~collections.OrderedDict`.
+Dicts don't have an order in Python or JSON, so to handle this we will dump
+the items as a list of ``[key, value]`` pairs. Subclass :class:`JSONTag` and
+give it the new key ``' od'`` to identify the type. The session serializer
+processes dicts first, so insert the new tag at the front of the order since
+``OrderedDict`` must be processed before ``dict``. ::
+
+    from collections import OrderedDict
+
+    from flask._compat import iteritems
+    from flask.json.tag import JSONTag
+
+    class TagOrderedDict(JSONTag):
+        __slots__ = ('serializer',)
+        key = ' od'
+
+        def check(self, value):
+            return isinstance(value, OrderedDict)
+
+        def to_json(self, value):
+            return [[k, self.serializer.tag(v)] for k, v in iteritems(value)]
+
+        def to_python(self, value):
+            return OrderedDict(value)
+
+    app.session_interface.serializer.register(TagOrderedDict, index=0)
+
+:copyright: © 2010 by the Pallets team.
+:license: BSD, see LICENSE for more details.
+"""
+
+from base64 import b64decode, b64encode
+from datetime import datetime
+from uuid import UUID
+
+from jinja2 import Markup
+from werkzeug.http import http_date, parse_date
+
+from flask._compat import iteritems, text_type
+from flask.json import dumps, loads
+
+
+class JSONTag(object):
+    """Base class for defining type tags for :class:`TaggedJSONSerializer`."""
+
+    __slots__ = ('serializer',)
+
+    #: The tag to mark the serialized object with. If ``None``, this tag is
+    #: only used as an intermediate step during tagging.
+    key = None
+
+    def __init__(self, serializer):
+        """Create a tagger for the given serializer."""
+        self.serializer = serializer
+
+    def check(self, value):
+        """Check if the given value should be tagged by this tag."""
+        raise NotImplementedError
+
+    def to_json(self, value):
+        """Convert the Python object to an object that is a valid JSON type.
+        The tag will be added later."""
+        raise NotImplementedError
+
+    def to_python(self, value):
+        """Convert the JSON representation back to the correct type. The tag
+        will already be removed."""
+        raise NotImplementedError
+
+    def tag(self, value):
+        """Convert the value to a valid JSON type and add the tag structure
+        around it."""
+        return {self.key: self.to_json(value)}
+
+
+class TagDict(JSONTag):
+    """Tag for 1-item dicts whose only key matches a registered tag.
+
+    Internally, the dict key is suffixed with `__`, and the suffix is removed
+    when deserializing.
+    """
+
+    __slots__ = ()
+    key = ' di'
+
+    def check(self, value):
+        return (
+            isinstance(value, dict)
+            and len(value) == 1
+            and next(iter(value)) in self.serializer.tags
+        )
+
+    def to_json(self, value):
+        key = next(iter(value))
+        return {key + '__': self.serializer.tag(value[key])}
+
+    def to_python(self, value):
+        key = next(iter(value))
+        return {key[:-2]: value[key]}
+
+
+class PassDict(JSONTag):
+    __slots__ = ()
+
+    def check(self, value):
+        return isinstance(value, dict)
+
+    def to_json(self, value):
+        # JSON objects may only have string keys, so don't bother tagging the
+        # key here.
+        return dict((k, self.serializer.tag(v)) for k, v in iteritems(value))
+
+    tag = to_json
+
+
+class TagTuple(JSONTag):
+    __slots__ = ()
+    key = ' t'
+
+    def check(self, value):
+        return isinstance(value, tuple)
+
+    def to_json(self, value):
+        return [self.serializer.tag(item) for item in value]
+
+    def to_python(self, value):
+        return tuple(value)
+
+
+class PassList(JSONTag):
+    __slots__ = ()
+
+    def check(self, value):
+        return isinstance(value, list)
+
+    def to_json(self, value):
+        return [self.serializer.tag(item) for item in value]
+
+    tag = to_json
+
+
+class TagBytes(JSONTag):
+    __slots__ = ()
+    key = ' b'
+
+    def check(self, value):
+        return isinstance(value, bytes)
+
+    def to_json(self, value):
+        return b64encode(value).decode('ascii')
+
+    def to_python(self, value):
+        return b64decode(value)
+
+
+class TagMarkup(JSONTag):
+    """Serialize anything matching the :class:`~flask.Markup` API by
+    having a ``__html__`` method to the result of that method. Always
+    deserializes to an instance of :class:`~flask.Markup`."""
+
+    __slots__ = ()
+    key = ' m'
+
+    def check(self, value):
+        return callable(getattr(value, '__html__', None))
+
+    def to_json(self, value):
+        return text_type(value.__html__())
+
+    def to_python(self, value):
+        return Markup(value)
+
+
+class TagUUID(JSONTag):
+    __slots__ = ()
+    key = ' u'
+
+    def check(self, value):
+        return isinstance(value, UUID)
+
+    def to_json(self, value):
+        return value.hex
+
+    def to_python(self, value):
+        return UUID(value)
+
+
+class TagDateTime(JSONTag):
+    __slots__ = ()
+    key = ' d'
+
+    def check(self, value):
+        return isinstance(value, datetime)
+
+    def to_json(self, value):
+        return http_date(value)
+
+    def to_python(self, value):
+        return parse_date(value)
+
+
+class TaggedJSONSerializer(object):
+    """Serializer that uses a tag system to compactly represent objects that
+    are not JSON types. Passed as the intermediate serializer to
+    :class:`itsdangerous.Serializer`.
+
+    The following extra types are supported:
+
+    * :class:`dict`
+    * :class:`tuple`
+    * :class:`bytes`
+    * :class:`~flask.Markup`
+    * :class:`~uuid.UUID`
+    * :class:`~datetime.datetime`
+    """
+
+    __slots__ = ('tags', 'order')
+
+    #: Tag classes to bind when creating the serializer. Other tags can be
+    #: added later using :meth:`~register`.
+    default_tags = [
+        TagDict, PassDict, TagTuple, PassList, TagBytes, TagMarkup, TagUUID,
+        TagDateTime,
+    ]
+
+    def __init__(self):
+        self.tags = {}
+        self.order = []
+
+        for cls in self.default_tags:
+            self.register(cls)
+
+    def register(self, tag_class, force=False, index=None):
+        """Register a new tag with this serializer.
+
+        :param tag_class: tag class to register. Will be instantiated with this
+            serializer instance.
+        :param force: overwrite an existing tag. If false (default), a
+            :exc:`KeyError` is raised.
+        :param index: index to insert the new tag in the tag order. Useful when
+            the new tag is a special case of an existing tag. If ``None``
+            (default), the tag is appended to the end of the order.
+
+        :raise KeyError: if the tag key is already registered and ``force`` is
+            not true.
+        """
+        tag = tag_class(self)
+        key = tag.key
+
+        if key is not None:
+            if not force and key in self.tags:
+                raise KeyError("Tag '{0}' is already registered.".format(key))
+
+            self.tags[key] = tag
+
+        if index is None:
+            self.order.append(tag)
+        else:
+            self.order.insert(index, tag)
+
+    def tag(self, value):
+        """Convert a value to a tagged representation if necessary."""
+        for tag in self.order:
+            if tag.check(value):
+                return tag.tag(value)
+
+        return value
+
+    def untag(self, value):
+        """Convert a tagged representation back to the original type."""
+        if len(value) != 1:
+            return value
+
+        key = next(iter(value))
+
+        if key not in self.tags:
+            return value
+
+        return self.tags[key].to_python(value[key])
+
+    def dumps(self, value):
+        """Tag the value and dump it to a compact JSON string."""
+        return dumps(self.tag(value), separators=(',', ':'))
+
+    def loads(self, value):
+        """Load data from a JSON string and deserialized any tagged objects."""
+        return loads(value, object_hook=self.untag)
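+
+
+# A hedged round-trip sketch; the tuple survives serialization:
+#
+#     s = TaggedJSONSerializer()
+#     s.dumps({'t': (1, 2)})         # -> '{"t":{" t":[1,2]}}'
+#     s.loads('{"t":{" t":[1,2]}}')  # -> {'t': (1, 2)}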
diff --git a/lib/python3.7/site-packages/flask/logging.py b/lib/python3.7/site-packages/flask/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..389c2c22e7747c4061a957f4ae686cb5758a2aec
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/logging.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+"""
+flask.logging
+~~~~~~~~~~~~~
+
+:copyright: © 2010 by the Pallets team.
+:license: BSD, see LICENSE for more details.
+"""
+
+from __future__ import absolute_import
+
+import logging
+import sys
+
+from werkzeug.local import LocalProxy
+
+from .globals import request
+
+
+@LocalProxy
+def wsgi_errors_stream():
+    """Find the most appropriate error stream for the application. If a request
+    is active, log to ``wsgi.errors``, otherwise use ``sys.stderr``.
+
+    If you configure your own :class:`logging.StreamHandler`, you may want to
+    use this for the stream. If you are using file or dict configuration and
+    can't import this directly, you can refer to it as
+    ``ext://flask.logging.wsgi_errors_stream``.
+    """
+    return request.environ['wsgi.errors'] if request else sys.stderr
+
+
+def has_level_handler(logger):
+    """Check if there is a handler in the logging chain that will handle the
+    given logger's :meth:`effective level <logging.Logger.getEffectiveLevel>`.
+    """
+    level = logger.getEffectiveLevel()
+    current = logger
+
+    while current:
+        if any(handler.level <= level for handler in current.handlers):
+            return True
+
+        if not current.propagate:
+            break
+
+        current = current.parent
+
+    return False
+
+
+#: Log messages to :func:`~flask.logging.wsgi_errors_stream` with the format
+#: ``[%(asctime)s] %(levelname)s in %(module)s: %(message)s``.
+default_handler = logging.StreamHandler(wsgi_errors_stream)
+default_handler.setFormatter(logging.Formatter(
+    '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'
+))
+
+
+def create_logger(app):
+    """Get the ``'flask.app'`` logger and configure it if needed.
+
+    When :attr:`~flask.Flask.debug` is enabled, set the logger level to
+    :data:`logging.DEBUG` if it is not set.
+
+    If there is no handler for the logger's effective level, add a
+    :class:`~logging.StreamHandler` for
+    :func:`~flask.logging.wsgi_errors_stream` with a basic format.
+    """
+    logger = logging.getLogger('flask.app')
+
+    if app.debug and logger.level == logging.NOTSET:
+        logger.setLevel(logging.DEBUG)
+
+    if not has_level_handler(logger):
+        logger.addHandler(default_handler)
+
+    return logger
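+
+
+# A hedged configuration sketch: with :func:`logging.config.dictConfig`, the
+# stream above can be referenced by its import string:
+#
+#     {'handlers': {'wsgi': {
+#         'class': 'logging.StreamHandler',
+#         'stream': 'ext://flask.logging.wsgi_errors_stream',
+#     }}}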
diff --git a/lib/python3.7/site-packages/flask/sessions.py b/lib/python3.7/site-packages/flask/sessions.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec4253d55017f62a4fb1a984b80f8c1f8f5dc09e
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/sessions.py
@@ -0,0 +1,385 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.sessions
+    ~~~~~~~~~~~~~~
+
+    Implements cookie based sessions based on itsdangerous.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import hashlib
+import warnings
+try:
+    from collections.abc import MutableMapping  # Python 3.3+
+except ImportError:  # Python 2
+    from collections import MutableMapping
+from datetime import datetime
+
+from itsdangerous import BadSignature, URLSafeTimedSerializer
+from werkzeug.datastructures import CallbackDict
+
+from flask.helpers import is_ip, total_seconds
+from flask.json.tag import TaggedJSONSerializer
+
+
+class SessionMixin(MutableMapping):
+    """Expands a basic dictionary with session attributes."""
+
+    @property
+    def permanent(self):
+        """This reflects the ``'_permanent'`` key in the dict."""
+        return self.get('_permanent', False)
+
+    @permanent.setter
+    def permanent(self, value):
+        self['_permanent'] = bool(value)
+
+    #: Some implementations can detect whether a session is newly
+    #: created, but that is not guaranteed. Use with caution. The mixin
+    #: default is hard coded to ``False``.
+    new = False
+
+    #: Some implementations can detect changes to the session and set
+    #: this when that happens. The mixin default is hard coded to
+    #: ``True``.
+    modified = True
+
+    #: Some implementations can detect when session data is read or
+    #: written and set this when that happens. The mixin default is hard
+    #: coded to ``True``.
+    accessed = True
+
+
+class SecureCookieSession(CallbackDict, SessionMixin):
+    """Base class for sessions based on signed cookies.
+
+    This session backend will set the :attr:`modified` and
+    :attr:`accessed` attributes. It cannot reliably track whether a
+    session is new (vs. empty), so :attr:`new` remains hard coded to
+    ``False``.
+    """
+
+    #: When data is changed, this is set to ``True``. Only the session
+    #: dictionary itself is tracked; if the session contains mutable
+    #: data (for example a nested dict) then this must be set to
+    #: ``True`` manually when modifying that data. The session cookie
+    #: will only be written to the response if this is ``True``.
+    modified = False
+
+    #: When data is read or written, this is set to ``True``. Used by
+    #: :class:`.SecureCookieSessionInterface` to add a ``Vary: Cookie``
+    #: header, which allows caching proxies to cache different pages for
+    #: different users.
+    accessed = False
+
+    def __init__(self, initial=None):
+        def on_update(self):
+            self.modified = True
+            self.accessed = True
+
+        super(SecureCookieSession, self).__init__(initial, on_update)
+
+    def __getitem__(self, key):
+        self.accessed = True
+        return super(SecureCookieSession, self).__getitem__(key)
+
+    def get(self, key, default=None):
+        self.accessed = True
+        return super(SecureCookieSession, self).get(key, default)
+
+    def setdefault(self, key, default=None):
+        self.accessed = True
+        return super(SecureCookieSession, self).setdefault(key, default)
+
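+# A hedged usage note in code form: the ``on_update`` callback above only
+# fires for changes to the session dict itself, so mutating nested data
+# must be flagged by hand:
+#
+#     session['cart'].append(item)  # change inside a mutable value
+#     session.modified = True       # otherwise the cookie is not rewritten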
+
+class NullSession(SecureCookieSession):
+    """Class used to generate nicer error messages if sessions are not
+    available.  Will still allow read-only access to the empty session
+    but fail on setting.
+    """
+
+    def _fail(self, *args, **kwargs):
+        raise RuntimeError('The session is unavailable because no secret '
+                           'key was set.  Set the secret_key on the '
+                           'application to something unique and secret.')
+    __setitem__ = __delitem__ = clear = pop = popitem = \
+        update = setdefault = _fail
+    del _fail
+
+
+class SessionInterface(object):
+    """The basic interface you have to implement in order to replace the
+    default session interface which uses werkzeug's securecookie
+    implementation.  The only methods you have to implement are
+    :meth:`open_session` and :meth:`save_session`, the others have
+    useful defaults which you don't need to change.
+
+    The session object returned by the :meth:`open_session` method has to
+    provide a dictionary like interface plus the properties and methods
+    from the :class:`SessionMixin`.  We recommend just subclassing a dict
+    and adding that mixin::
+
+        class Session(dict, SessionMixin):
+            pass
+
+    If :meth:`open_session` returns ``None`` Flask will call into
+    :meth:`make_null_session` to create a session that acts as replacement
+    if the session support cannot work because some requirement is not
+    fulfilled.  The default :class:`NullSession` class that is created
+    will complain that the secret key was not set.
+
+    To replace the session interface on an application all you have to do
+    is to assign :attr:`flask.Flask.session_interface`::
+
+        app = Flask(__name__)
+        app.session_interface = MySessionInterface()
+
+    .. versionadded:: 0.8
+    """
+
+    #: :meth:`make_null_session` will look here for the class that should
+    #: be created when a null session is requested.  Likewise the
+    #: :meth:`is_null_session` method will perform a typecheck against
+    #: this type.
+    null_session_class = NullSession
+
+    #: A flag that indicates if the session interface is pickle based.
+    #: This can be used by Flask extensions to make a decision in regards
+    #: to how to deal with the session object.
+    #:
+    #: .. versionadded:: 0.10
+    pickle_based = False
+
+    def make_null_session(self, app):
+        """Creates a null session which acts as a replacement object if the
+        real session support could not be loaded due to a configuration
+        error.  This mainly aids the user experience: the null session still
+        supports lookups without complaining, while modifications raise a
+        helpful error message explaining what failed.
+
+        This creates an instance of :attr:`null_session_class` by default.
+        """
+        return self.null_session_class()
+
+    def is_null_session(self, obj):
+        """Checks if a given object is a null session.  Null sessions are
+        not asked to be saved.
+
+        This checks if the object is an instance of :attr:`null_session_class`
+        by default.
+        """
+        return isinstance(obj, self.null_session_class)
+
+    def get_cookie_domain(self, app):
+        """Returns the domain that should be set for the session cookie.
+
+        Uses ``SESSION_COOKIE_DOMAIN`` if it is configured, otherwise
+        falls back to detecting the domain based on ``SERVER_NAME``.
+
+        Once detected (or if not set at all), ``SESSION_COOKIE_DOMAIN`` is
+        updated to avoid re-running the logic.
+        """
+
+        rv = app.config['SESSION_COOKIE_DOMAIN']
+
+        # set explicitly, or cached from SERVER_NAME detection
+        # if False, return None
+        if rv is not None:
+            return rv if rv else None
+
+        rv = app.config['SERVER_NAME']
+
+        # server name not set, cache False to return None next time
+        if not rv:
+            app.config['SESSION_COOKIE_DOMAIN'] = False
+            return None
+
+        # chop off the port which is usually not supported by browsers
+        # remove any leading '.' since we'll add that later
+        rv = rv.rsplit(':', 1)[0].lstrip('.')
+
+        if '.' not in rv:
+            # Chrome doesn't allow names without a '.'
+            # this should only come up with localhost
+            # hack around this by not setting the name, and show a warning
+            warnings.warn(
+                '"{rv}" is not a valid cookie domain, it must contain a ".".'
+                ' Add an entry to your hosts file, for example'
+                ' "{rv}.localdomain", and use that instead.'.format(rv=rv)
+            )
+            app.config['SESSION_COOKIE_DOMAIN'] = False
+            return None
+
+        ip = is_ip(rv)
+
+        if ip:
+            warnings.warn(
+                'The session cookie domain is an IP address. This may not work'
+                ' as intended in some browsers. Add an entry to your hosts'
+                ' file, for example "localhost.localdomain", and use that'
+                ' instead.'
+            )
+
+        # if this is not an ip and app is mounted at the root, allow subdomain
+        # matching by adding a '.' prefix
+        if self.get_cookie_path(app) == '/' and not ip:
+            rv = '.' + rv
+
+        app.config['SESSION_COOKIE_DOMAIN'] = rv
+        return rv
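+
+    # A hedged example of the detection above: with
+    # ``SERVER_NAME = 'example.com:5000'``, the default ``APPLICATION_ROOT``
+    # of ``'/'`` and no explicit ``SESSION_COOKIE_DOMAIN``, the cookie domain
+    # becomes '.example.com' (port stripped, leading dot added for
+    # subdomain matching).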
+
+    def get_cookie_path(self, app):
+        """Returns the path for which the cookie should be valid.  The
+        default implementation uses the value from the ``SESSION_COOKIE_PATH``
+        config var if it's set, and falls back to ``APPLICATION_ROOT`` or
+        uses ``/`` if it's ``None``.
+        """
+        return app.config['SESSION_COOKIE_PATH'] \
+               or app.config['APPLICATION_ROOT']
+
+    def get_cookie_httponly(self, app):
+        """Returns True if the session cookie should be httponly.  This
+        currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
+        config var.
+        """
+        return app.config['SESSION_COOKIE_HTTPONLY']
+
+    def get_cookie_secure(self, app):
+        """Returns True if the cookie should be secure.  This currently
+        just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
+        """
+        return app.config['SESSION_COOKIE_SECURE']
+
+    def get_cookie_samesite(self, app):
+        """Return ``'Strict'`` or ``'Lax'`` if the cookie should use the
+        ``SameSite`` attribute. This currently just returns the value of
+        the :data:`SESSION_COOKIE_SAMESITE` setting.
+        """
+        return app.config['SESSION_COOKIE_SAMESITE']
+
+    def get_expiration_time(self, app, session):
+        """A helper method that returns an expiration date for the session
+        or ``None`` if the session is linked to the browser session.  The
+        default implementation returns now + the permanent session
+        lifetime configured on the application.
+        """
+        if session.permanent:
+            return datetime.utcnow() + app.permanent_session_lifetime
+
+    def should_set_cookie(self, app, session):
+        """Used by session backends to determine if a ``Set-Cookie`` header
+        should be set for this session cookie for this response. If the session
+        has been modified, the cookie is set. If the session is permanent and
+        the ``SESSION_REFRESH_EACH_REQUEST`` config is true, the cookie is
+        always set.
+
+        This check is usually skipped if the session was deleted.
+
+        .. versionadded:: 0.11
+        """
+
+        return session.modified or (
+            session.permanent and app.config['SESSION_REFRESH_EACH_REQUEST']
+        )
+
+    def open_session(self, app, request):
+        """This method has to be implemented and must either return ``None``
+        in case the loading failed because of a configuration error or an
+        instance of a session object which implements a dictionary like
+        interface + the methods and attributes on :class:`SessionMixin`.
+        """
+        raise NotImplementedError()
+
+    def save_session(self, app, session, response):
+        """This is called for actual sessions returned by :meth:`open_session`
+        at the end of the request.  This is still called during a request
+        context so if you absolutely need access to the request you can do
+        that.
+        """
+        raise NotImplementedError()
+
+
+session_json_serializer = TaggedJSONSerializer()
+
+
+class SecureCookieSessionInterface(SessionInterface):
+    """The default session interface that stores sessions in signed cookies
+    through the :mod:`itsdangerous` module.
+    """
+    #: the salt that should be applied on top of the secret key for the
+    #: signing of cookie based sessions.
+    salt = 'cookie-session'
+    #: the hash function to use for the signature.  The default is sha1
+    digest_method = staticmethod(hashlib.sha1)
+    #: the name of the itsdangerous supported key derivation.  The default
+    #: is hmac.
+    key_derivation = 'hmac'
+    #: A Python serializer for the payload.  The default is a compact
+    #: JSON derived serializer with support for some extra Python types
+    #: such as datetime objects or tuples.
+    serializer = session_json_serializer
+    session_class = SecureCookieSession
+
+    def get_signing_serializer(self, app):
+        if not app.secret_key:
+            return None
+        signer_kwargs = dict(
+            key_derivation=self.key_derivation,
+            digest_method=self.digest_method
+        )
+        return URLSafeTimedSerializer(app.secret_key, salt=self.salt,
+                                      serializer=self.serializer,
+                                      signer_kwargs=signer_kwargs)
+
+    def open_session(self, app, request):
+        s = self.get_signing_serializer(app)
+        if s is None:
+            return None
+        val = request.cookies.get(app.session_cookie_name)
+        if not val:
+            return self.session_class()
+        max_age = total_seconds(app.permanent_session_lifetime)
+        try:
+            data = s.loads(val, max_age=max_age)
+            return self.session_class(data)
+        except BadSignature:
+            return self.session_class()
+
+    def save_session(self, app, session, response):
+        domain = self.get_cookie_domain(app)
+        path = self.get_cookie_path(app)
+
+        # If the session is modified to be empty, remove the cookie.
+        # If the session is empty, return without setting the cookie.
+        if not session:
+            if session.modified:
+                response.delete_cookie(
+                    app.session_cookie_name,
+                    domain=domain,
+                    path=path
+                )
+
+            return
+
+        # Add a "Vary: Cookie" header if the session was accessed at all.
+        if session.accessed:
+            response.vary.add('Cookie')
+
+        if not self.should_set_cookie(app, session):
+            return
+
+        httponly = self.get_cookie_httponly(app)
+        secure = self.get_cookie_secure(app)
+        samesite = self.get_cookie_samesite(app)
+        expires = self.get_expiration_time(app, session)
+        val = self.get_signing_serializer(app).dumps(dict(session))
+        response.set_cookie(
+            app.session_cookie_name,
+            val,
+            expires=expires,
+            httponly=httponly,
+            domain=domain,
+            path=path,
+            secure=secure,
+            samesite=samesite
+        )
diff --git a/lib/python3.7/site-packages/flask/signals.py b/lib/python3.7/site-packages/flask/signals.py
new file mode 100644
index 0000000000000000000000000000000000000000..18f263072706303850baa9af764cf0c22159c772
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/signals.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.signals
+    ~~~~~~~~~~~~~
+
+    Implements signals based on blinker if available, otherwise
+    falls silently back to a noop.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+signals_available = False
+try:
+    from blinker import Namespace
+    signals_available = True
+except ImportError:
+    class Namespace(object):
+        def signal(self, name, doc=None):
+            return _FakeSignal(name, doc)
+
+    class _FakeSignal(object):
+        """If blinker is unavailable, create a fake class with the same
+        interface that allows sending of signals but will fail with an
+        error on anything else.  Instead of doing anything on send, it
+        will just ignore the arguments and do nothing instead.
+        """
+
+        def __init__(self, name, doc=None):
+            self.name = name
+            self.__doc__ = doc
+        def _fail(self, *args, **kwargs):
+            raise RuntimeError('signalling support is unavailable '
+                               'because the blinker library is '
+                               'not installed.')
+        send = lambda *a, **kw: None
+        connect = disconnect = has_receivers_for = receivers_for = \
+            temporarily_connected_to = connected_to = _fail
+        del _fail
+
+# The namespace for code signals.  If you are not Flask code, do
+# not put signals in here.  Create your own namespace instead.
+_signals = Namespace()
+
+
+# Core signals.  For usage examples grep the source code or consult
+# the API documentation in docs/api.rst as well as docs/signals.rst
+template_rendered = _signals.signal('template-rendered')
+before_render_template = _signals.signal('before-render-template')
+request_started = _signals.signal('request-started')
+request_finished = _signals.signal('request-finished')
+request_tearing_down = _signals.signal('request-tearing-down')
+got_request_exception = _signals.signal('got-request-exception')
+appcontext_tearing_down = _signals.signal('appcontext-tearing-down')
+appcontext_pushed = _signals.signal('appcontext-pushed')
+appcontext_popped = _signals.signal('appcontext-popped')
+message_flashed = _signals.signal('message-flashed')
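+
+
+# Editor's sketch (not upstream code): receivers get the sender (the Flask
+# app) plus signal-specific keyword arguments; ``app`` is assumed to exist.
+#
+#     def log_template(sender, template, context, **extra):
+#         sender.logger.debug('rendered %s', template.name)
+#
+#     template_rendered.connect(log_template, app)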
diff --git a/lib/python3.7/site-packages/flask/templating.py b/lib/python3.7/site-packages/flask/templating.py
new file mode 100644
index 0000000000000000000000000000000000000000..0240200869a3dab748ad1f4949e3455684c96334
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/templating.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.templating
+    ~~~~~~~~~~~~~~~~
+
+    Implements the bridge to Jinja2.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from jinja2 import BaseLoader, Environment as BaseEnvironment, \
+     TemplateNotFound
+
+from .globals import _request_ctx_stack, _app_ctx_stack
+from .signals import template_rendered, before_render_template
+
+
+def _default_template_ctx_processor():
+    """Default template context processor.  Injects `request`,
+    `session` and `g`.
+    """
+    reqctx = _request_ctx_stack.top
+    appctx = _app_ctx_stack.top
+    rv = {}
+    if appctx is not None:
+        rv['g'] = appctx.g
+    if reqctx is not None:
+        rv['request'] = reqctx.request
+        rv['session'] = reqctx.session
+    return rv
+
+
+class Environment(BaseEnvironment):
+    """Works like a regular Jinja2 environment but has some additional
+    knowledge of how Flask's blueprint works so that it can prepend the
+    name of the blueprint to referenced templates if necessary.
+    """
+
+    def __init__(self, app, **options):
+        if 'loader' not in options:
+            options['loader'] = app.create_global_jinja_loader()
+        BaseEnvironment.__init__(self, **options)
+        self.app = app
+
+
+class DispatchingJinjaLoader(BaseLoader):
+    """A loader that looks for templates in the application and all
+    the blueprint folders.
+    """
+
+    def __init__(self, app):
+        self.app = app
+
+    def get_source(self, environment, template):
+        if self.app.config['EXPLAIN_TEMPLATE_LOADING']:
+            return self._get_source_explained(environment, template)
+        return self._get_source_fast(environment, template)
+
+    def _get_source_explained(self, environment, template):
+        attempts = []
+        trv = None
+
+        for srcobj, loader in self._iter_loaders(template):
+            try:
+                rv = loader.get_source(environment, template)
+                if trv is None:
+                    trv = rv
+            except TemplateNotFound:
+                rv = None
+            attempts.append((loader, srcobj, rv))
+
+        from .debughelpers import explain_template_loading_attempts
+        explain_template_loading_attempts(self.app, template, attempts)
+
+        if trv is not None:
+            return trv
+        raise TemplateNotFound(template)
+
+    def _get_source_fast(self, environment, template):
+        for srcobj, loader in self._iter_loaders(template):
+            try:
+                return loader.get_source(environment, template)
+            except TemplateNotFound:
+                continue
+        raise TemplateNotFound(template)
+
+    def _iter_loaders(self, template):
+        loader = self.app.jinja_loader
+        if loader is not None:
+            yield self.app, loader
+
+        for blueprint in self.app.iter_blueprints():
+            loader = blueprint.jinja_loader
+            if loader is not None:
+                yield blueprint, loader
+
+    def list_templates(self):
+        result = set()
+        loader = self.app.jinja_loader
+        if loader is not None:
+            result.update(loader.list_templates())
+
+        for blueprint in self.app.iter_blueprints():
+            loader = blueprint.jinja_loader
+            if loader is not None:
+                for template in loader.list_templates():
+                    result.add(template)
+
+        return list(result)
+
+
+def _render(template, context, app):
+    """Renders the template and fires the signal"""
+
+    before_render_template.send(app, template=template, context=context)
+    rv = template.render(context)
+    template_rendered.send(app, template=template, context=context)
+    return rv
+
+
+def render_template(template_name_or_list, **context):
+    """Renders a template from the template folder with the given
+    context.
+
+    :param template_name_or_list: the name of the template to be
+                                  rendered, or an iterable of template names;
+                                  the first one that exists will be rendered
+    :param context: the variables that should be available in the
+                    context of the template.
+    """
+    ctx = _app_ctx_stack.top
+    ctx.app.update_template_context(context)
+    return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
+                   context, ctx.app)
+
+
+def render_template_string(source, **context):
+    """Renders a template from the given template source string
+    with the given context. Template variables will be autoescaped.
+
+    :param source: the source code of the template to be
+                   rendered
+    :param context: the variables that should be available in the
+                    context of the template.
+    """
+    ctx = _app_ctx_stack.top
+    ctx.app.update_template_context(context)
+    return _render(ctx.app.jinja_env.from_string(source),
+                   context, ctx.app)
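+
+
+# Editor's sketch (not upstream code): both helpers need an application
+# context because they read ``_app_ctx_stack.top``; ``app`` is assumed.
+#
+#     with app.app_context():
+#         html = render_template_string('Hello {{ name }}!', name='World')
+#         # html == 'Hello World!'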
diff --git a/lib/python3.7/site-packages/flask/testing.py b/lib/python3.7/site-packages/flask/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bf0ebc12a32d1f810cbfcc50c09d1f2c7a82241
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/testing.py
@@ -0,0 +1,250 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.testing
+    ~~~~~~~~~~~~~
+
+    Implements test support helpers.  This module is lazily imported
+    and usually not used in production environments.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+import werkzeug
+from contextlib import contextmanager
+
+from click.testing import CliRunner
+from flask.cli import ScriptInfo
+from werkzeug.test import Client, EnvironBuilder
+from flask import _request_ctx_stack
+from flask.json import dumps as json_dumps
+from werkzeug.urls import url_parse
+
+
+def make_test_environ_builder(
+    app, path='/', base_url=None, subdomain=None, url_scheme=None,
+    *args, **kwargs
+):
+    """Create a :class:`~werkzeug.test.EnvironBuilder`, taking some
+    defaults from the application.
+
+    :param app: The Flask application to configure the environment from.
+    :param path: URL path being requested.
+    :param base_url: Base URL where the app is being served, which
+        ``path`` is relative to. If not given, built from
+        :data:`PREFERRED_URL_SCHEME`, ``subdomain``,
+        :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.
+    :param subdomain: Subdomain name to append to :data:`SERVER_NAME`.
+    :param url_scheme: Scheme to use instead of
+        :data:`PREFERRED_URL_SCHEME`.
+    :param json: If given, this is serialized as JSON and passed as
+        ``data``. Also defaults ``content_type`` to
+        ``application/json``.
+    :param args: other positional arguments passed to
+        :class:`~werkzeug.test.EnvironBuilder`.
+    :param kwargs: other keyword arguments passed to
+        :class:`~werkzeug.test.EnvironBuilder`.
+    """
+
+    assert (
+        not (base_url or subdomain or url_scheme)
+        or (base_url is not None) != bool(subdomain or url_scheme)
+    ), 'Cannot pass "subdomain" or "url_scheme" with "base_url".'
+
+    if base_url is None:
+        http_host = app.config.get('SERVER_NAME') or 'localhost'
+        app_root = app.config['APPLICATION_ROOT']
+
+        if subdomain:
+            http_host = '{0}.{1}'.format(subdomain, http_host)
+
+        if url_scheme is None:
+            url_scheme = app.config['PREFERRED_URL_SCHEME']
+
+        url = url_parse(path)
+        base_url = '{scheme}://{netloc}/{path}'.format(
+            scheme=url.scheme or url_scheme,
+            netloc=url.netloc or http_host,
+            path=app_root.lstrip('/')
+        )
+        path = url.path
+
+        if url.query:
+            sep = b'?' if isinstance(url.query, bytes) else '?'
+            path += sep + url.query
+
+    if 'json' in kwargs:
+        assert 'data' not in kwargs, (
+            "Client cannot provide both 'json' and 'data'."
+        )
+
+        # push a context so flask.json can use app's json attributes
+        with app.app_context():
+            kwargs['data'] = json_dumps(kwargs.pop('json'))
+
+        if 'content_type' not in kwargs:
+            kwargs['content_type'] = 'application/json'
+
+    return EnvironBuilder(path, base_url, *args, **kwargs)
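+
+
+# Editor's sketch (not upstream code): the ``json`` kwarg handled above is
+# what backs test-client calls such as the following; ``app`` is assumed.
+#
+#     client = app.test_client()
+#     rv = client.post('/api', json={'id': 5})
+#     assert rv.get_json() is not None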
+
+
+class FlaskClient(Client):
+    """Works like a regular Werkzeug test client but has some knowledge about
+    how Flask works to defer the cleanup of the request context stack to the
+    end of a ``with`` body when used in a ``with`` statement.  For general
+    information about how to use this class refer to
+    :class:`werkzeug.test.Client`.
+
+    .. versionchanged:: 0.12
+       `app.test_client()` includes a preset default environment, which can
+       be set after instantiation of the `app.test_client()` object via
+       `client.environ_base`.
+
+    Basic usage is outlined in the :ref:`testing` chapter.
+    """
+
+    preserve_context = False
+
+    def __init__(self, *args, **kwargs):
+        super(FlaskClient, self).__init__(*args, **kwargs)
+        self.environ_base = {
+            "REMOTE_ADDR": "127.0.0.1",
+            "HTTP_USER_AGENT": "werkzeug/" + werkzeug.__version__
+        }
+
+    @contextmanager
+    def session_transaction(self, *args, **kwargs):
+        """When used in combination with a ``with`` statement this opens a
+        session transaction.  This can be used to modify the session that
+        the test client uses.  Once the ``with`` block is left the session is
+        stored back.
+
+        ::
+
+            with client.session_transaction() as session:
+                session['value'] = 42
+
+        Internally this is implemented by going through a temporary test
+        request context, and since session handling could depend on
+        request variables, this function accepts the same arguments as
+        :meth:`~flask.Flask.test_request_context`, which are directly
+        passed through.
+        """
+        if self.cookie_jar is None:
+            raise RuntimeError('Session transactions only make sense '
+                               'with cookies enabled.')
+        app = self.application
+        environ_overrides = kwargs.setdefault('environ_overrides', {})
+        self.cookie_jar.inject_wsgi(environ_overrides)
+        outer_reqctx = _request_ctx_stack.top
+        with app.test_request_context(*args, **kwargs) as c:
+            session_interface = app.session_interface
+            sess = session_interface.open_session(app, c.request)
+            if sess is None:
+                raise RuntimeError('Session backend did not open a session. '
+                                   'Check the configuration')
+
+            # Since we have to open a new request context for the session
+            # handling we want to make sure that we hide our own context
+            # from the caller.  By pushing the original request context
+            # (or None) on top of this and popping it we get exactly that
+            # behavior.  It's important to not use the push and pop
+            # methods of the actual request context object since that would
+            # mean that cleanup handlers are called
+            _request_ctx_stack.push(outer_reqctx)
+            try:
+                yield sess
+            finally:
+                _request_ctx_stack.pop()
+
+            resp = app.response_class()
+            if not session_interface.is_null_session(sess):
+                session_interface.save_session(app, sess, resp)
+            headers = resp.get_wsgi_headers(c.request.environ)
+            self.cookie_jar.extract_wsgi(c.request.environ, headers)
+
+    def open(self, *args, **kwargs):
+        as_tuple = kwargs.pop('as_tuple', False)
+        buffered = kwargs.pop('buffered', False)
+        follow_redirects = kwargs.pop('follow_redirects', False)
+
+        if (
+            not kwargs and len(args) == 1
+            and isinstance(args[0], (EnvironBuilder, dict))
+        ):
+            environ = self.environ_base.copy()
+
+            if isinstance(args[0], EnvironBuilder):
+                environ.update(args[0].get_environ())
+            else:
+                environ.update(args[0])
+
+            environ['flask._preserve_context'] = self.preserve_context
+        else:
+            kwargs.setdefault('environ_overrides', {}) \
+                ['flask._preserve_context'] = self.preserve_context
+            kwargs.setdefault('environ_base', self.environ_base)
+            builder = make_test_environ_builder(
+                self.application, *args, **kwargs
+            )
+
+            try:
+                environ = builder.get_environ()
+            finally:
+                builder.close()
+
+        return Client.open(
+            self, environ,
+            as_tuple=as_tuple,
+            buffered=buffered,
+            follow_redirects=follow_redirects
+        )
+
+    def __enter__(self):
+        if self.preserve_context:
+            raise RuntimeError('Cannot nest client invocations')
+        self.preserve_context = True
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self.preserve_context = False
+
+        # on exit we want to clean up earlier.  Normally the request context
+        # stays preserved until the next request in the same thread comes
+        # in.  See RequestContext.push() for the general behavior.
+        top = _request_ctx_stack.top
+        if top is not None and top.preserved:
+            top.pop()
+
+
+class FlaskCliRunner(CliRunner):
+    """A :class:`~click.testing.CliRunner` for testing a Flask app's
+    CLI commands. Typically created using
+    :meth:`~flask.Flask.test_cli_runner`. See :ref:`testing-cli`.
+    """
+    def __init__(self, app, **kwargs):
+        self.app = app
+        super(FlaskCliRunner, self).__init__(**kwargs)
+
+    def invoke(self, cli=None, args=None, **kwargs):
+        """Invokes a CLI command in an isolated environment. See
+        :meth:`CliRunner.invoke <click.testing.CliRunner.invoke>` for
+        full method documentation. See :ref:`testing-cli` for examples.
+
+        If the ``obj`` argument is not given, passes an instance of
+        :class:`~flask.cli.ScriptInfo` that knows how to load the Flask
+        app being tested.
+
+        :param cli: Command object to invoke. Default is the app's
+            :attr:`~flask.app.Flask.cli` group.
+        :param args: List of strings to invoke the command with.
+
+        :return: a :class:`~click.testing.Result` object.
+        """
+        if cli is None:
+            cli = self.app.cli
+
+        if 'obj' not in kwargs:
+            kwargs['obj'] = ScriptInfo(create_app=lambda: self.app)
+
+        return super(FlaskCliRunner, self).invoke(cli, args, **kwargs)
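+
+
+# Editor's sketch (not upstream code): exercising a CLI command with the
+# runner; ``app`` and its registered ``hello`` command are assumptions.
+#
+#     runner = app.test_cli_runner()
+#     result = runner.invoke(args=['hello'])
+#     assert result.exit_code == 0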
diff --git a/lib/python3.7/site-packages/flask/views.py b/lib/python3.7/site-packages/flask/views.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f2c997b4b2242542e33709b177462c2d3924261
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/views.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.views
+    ~~~~~~~~~~~
+
+    This module provides class-based views inspired by the ones in Django.
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from .globals import request
+from ._compat import with_metaclass
+
+
+http_method_funcs = frozenset(['get', 'post', 'head', 'options',
+                               'delete', 'put', 'trace', 'patch'])
+
+
+class View(object):
+    """Alternative way to use view functions.  A subclass has to implement
+    :meth:`dispatch_request` which is called with the view arguments from
+    the URL routing system.  If :attr:`methods` is provided the methods
+    do not have to be passed to the :meth:`~flask.Flask.add_url_rule`
+    method explicitly::
+
+        class MyView(View):
+            methods = ['GET']
+
+            def dispatch_request(self, name):
+                return 'Hello %s!' % name
+
+        app.add_url_rule('/hello/<name>', view_func=MyView.as_view('myview'))
+
+    When you want to decorate a pluggable view you will have to either do that
+    when the view function is created (by wrapping the return value of
+    :meth:`as_view`) or you can use the :attr:`decorators` attribute::
+
+        class SecretView(View):
+            methods = ['GET']
+            decorators = [superuser_required]
+
+            def dispatch_request(self):
+                ...
+
+    The decorators stored in the decorators list are applied one after another
+    when the view function is created.  Note that you can *not* use the class
+    based decorators since those would decorate the view class and not the
+    generated view function!
+    """
+
+    #: A list of methods this view can handle.
+    methods = None
+
+    #: Setting this disables or force-enables the automatic options handling.
+    provide_automatic_options = None
+
+    #: The canonical way to decorate class-based views is to decorate the
+    #: return value of as_view().  However, this moves parts of the
+    #: logic from the class declaration to the place where it's hooked
+    #: into the routing system.
+    #:
+    #: You can place one or more decorators in this list and whenever the
+    #: view function is created the result is automatically decorated.
+    #:
+    #: .. versionadded:: 0.8
+    decorators = ()
+
+    def dispatch_request(self):
+        """Subclasses have to override this method to implement the
+        actual view function code.  This method is called with all
+        the arguments from the URL rule.
+        """
+        raise NotImplementedError()
+
+    @classmethod
+    def as_view(cls, name, *class_args, **class_kwargs):
+        """Converts the class into an actual view function that can be used
+        with the routing system.  Internally this generates a function on the
+        fly which will instantiate the :class:`View` on each request and call
+        the :meth:`dispatch_request` method on it.
+
+        The arguments passed to :meth:`as_view` are forwarded to the
+        constructor of the class.
+        """
+        def view(*args, **kwargs):
+            self = view.view_class(*class_args, **class_kwargs)
+            return self.dispatch_request(*args, **kwargs)
+
+        if cls.decorators:
+            view.__name__ = name
+            view.__module__ = cls.__module__
+            for decorator in cls.decorators:
+                view = decorator(view)
+
+        # We attach the view class to the view function for two reasons:
+        # first of all it allows us to easily figure out what class-based
+        # view this thing came from, secondly it's also used for instantiating
+        # the view class so you can actually replace it with something else
+        # for testing purposes and debugging.
+        view.view_class = cls
+        view.__name__ = name
+        view.__doc__ = cls.__doc__
+        view.__module__ = cls.__module__
+        view.methods = cls.methods
+        view.provide_automatic_options = cls.provide_automatic_options
+        return view
+
+
+class MethodViewType(type):
+    """Metaclass for :class:`MethodView` that determines what methods the view
+    defines.
+    """
+
+    def __init__(cls, name, bases, d):
+        super(MethodViewType, cls).__init__(name, bases, d)
+
+        if 'methods' not in d:
+            methods = set()
+
+            for key in http_method_funcs:
+                if hasattr(cls, key):
+                    methods.add(key.upper())
+
+            # If we have no methods at all we don't want to add a
+            # method list. This is for instance the case for the base class
+            # or another subclass of a base method view that does not
+            # introduce new methods.
+            if methods:
+                cls.methods = methods
+
+
+class MethodView(with_metaclass(MethodViewType, View)):
+    """A class-based view that dispatches request methods to the corresponding
+    class methods. For example, if you implement a ``get`` method, it will be
+    used to handle ``GET`` requests. ::
+
+        class CounterAPI(MethodView):
+            def get(self):
+                return session.get('counter', 0)
+
+            def post(self):
+                session['counter'] = session.get('counter', 0) + 1
+                return 'OK'
+
+        app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter'))
+    """
+
+    def dispatch_request(self, *args, **kwargs):
+        meth = getattr(self, request.method.lower(), None)
+
+        # If the request method is HEAD and we don't have a handler for it
+        # retry with GET.
+        if meth is None and request.method == 'HEAD':
+            meth = getattr(self, 'get', None)
+
+        assert meth is not None, 'Unimplemented method %r' % request.method
+        return meth(*args, **kwargs)
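+
+
+# Editor's sketch (not upstream code): combining ``decorators`` with
+# ``as_view``; ``login_required`` and ``app`` are assumed to exist.
+#
+#     class UserAPI(MethodView):
+#         decorators = [login_required]
+#
+#         def get(self, user_id):
+#             return 'user %d' % user_id
+#
+#     app.add_url_rule('/users/<int:user_id>',
+#                      view_func=UserAPI.as_view('user_api'))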
diff --git a/lib/python3.7/site-packages/flask/wrappers.py b/lib/python3.7/site-packages/flask/wrappers.py
new file mode 100644
index 0000000000000000000000000000000000000000..12eff2c7e92f7dd46945d4c0f2a996dcda1fcde5
--- /dev/null
+++ b/lib/python3.7/site-packages/flask/wrappers.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+"""
+    flask.wrappers
+    ~~~~~~~~~~~~~~
+
+    Implements the WSGI wrappers (request and response).
+
+    :copyright: © 2010 by the Pallets team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+from werkzeug.exceptions import BadRequest
+from werkzeug.wrappers import Request as RequestBase, Response as ResponseBase
+
+from flask import json
+from flask.globals import current_app
+
+
+class JSONMixin(object):
+    """Common mixin for both request and response objects to provide JSON
+    parsing capabilities.
+
+    .. versionadded:: 1.0
+    """
+
+    _cached_json = (Ellipsis, Ellipsis)
+
+    @property
+    def is_json(self):
+        """Check if the mimetype indicates JSON data, either
+        :mimetype:`application/json` or :mimetype:`application/*+json`.
+
+        .. versionadded:: 0.11
+        """
+        mt = self.mimetype
+        return (
+            mt == 'application/json'
+            or (mt.startswith('application/') and mt.endswith('+json'))
+        )
+
+    @property
+    def json(self):
+        """This will contain the parsed JSON data if the mimetype indicates
+        JSON (:mimetype:`application/json`, see :meth:`is_json`), otherwise it
+        will be ``None``.
+        """
+        return self.get_json()
+
+    def _get_data_for_json(self, cache):
+        return self.get_data(cache=cache)
+
+    def get_json(self, force=False, silent=False, cache=True):
+        """Parse and return the data as JSON. If the mimetype does not
+        indicate JSON (:mimetype:`application/json`, see
+        :meth:`is_json`), this returns ``None`` unless ``force`` is
+        true. If parsing fails, :meth:`on_json_loading_failed` is called
+        and its return value is used as the return value.
+
+        :param force: Ignore the mimetype and always try to parse JSON.
+        :param silent: Silence parsing errors and return ``None``
+            instead.
+        :param cache: Store the parsed JSON to return for subsequent
+            calls.
+        """
+        if cache and self._cached_json[silent] is not Ellipsis:
+            return self._cached_json[silent]
+
+        if not (force or self.is_json):
+            return None
+
+        data = self._get_data_for_json(cache=cache)
+
+        try:
+            rv = json.loads(data)
+        except ValueError as e:
+            if silent:
+                rv = None
+                if cache:
+                    normal_rv, _ = self._cached_json
+                    self._cached_json = (normal_rv, rv)
+            else:
+                rv = self.on_json_loading_failed(e)
+                if cache:
+                    _, silent_rv = self._cached_json
+                    self._cached_json = (rv, silent_rv)
+        else:
+            if cache:
+                self._cached_json = (rv, rv)
+
+        return rv
+
+    def on_json_loading_failed(self, e):
+        """Called if :meth:`get_json` parsing fails and isn't silenced. If
+        this method returns a value, it is used as the return value for
+        :meth:`get_json`. The default implementation raises a
+        :class:`BadRequest` exception.
+
+        .. versionchanged:: 0.10
+           Raise a :exc:`BadRequest` error instead of returning an error
+           message as JSON. If you want that behavior you can add it by
+           subclassing.
+
+        .. versionadded:: 0.8
+        """
+        if current_app is not None and current_app.debug:
+            raise BadRequest('Failed to decode JSON object: {0}'.format(e))
+
+        raise BadRequest()
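+
+    # Editor's sketch (not upstream code): tolerant parsing inside a view,
+    # where ``request`` is the usual Flask request proxy.
+    #
+    #     data = request.get_json(silent=True)
+    #     if data is None:
+    #         return 'expected JSON', 400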
+
+
+class Request(RequestBase, JSONMixin):
+    """The request object used by default in Flask.  Remembers the
+    matched endpoint and view arguments.
+
+    It is what ends up as :class:`~flask.request`.  If you want to replace
+    the request object used you can subclass this and set
+    :attr:`~flask.Flask.request_class` to your subclass.
+
+    The request object is a :class:`~werkzeug.wrappers.Request` subclass and
+    provides all of the attributes Werkzeug defines plus a few Flask
+    specific ones.
+    """
+
+    #: The internal URL rule that matched the request.  This can be
+    #: useful to inspect which methods are allowed for the URL from
+    #: a before/after handler (``request.url_rule.methods``) etc.
+    #: Though if the request's method was invalid for the URL rule,
+    #: the valid list is available in ``routing_exception.valid_methods``
+    #: instead (an attribute of the Werkzeug exception :exc:`~werkzeug.exceptions.MethodNotAllowed`)
+    #: because the request was never internally bound.
+    #:
+    #: .. versionadded:: 0.6
+    url_rule = None
+
+    #: A dict of view arguments that matched the request.  If an exception
+    #: happened when matching, this will be ``None``.
+    view_args = None
+
+    #: If matching the URL failed, this is the exception that will be
+    #: raised / was raised as part of the request handling.  This is
+    #: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
+    #: something similar.
+    routing_exception = None
+
+    @property
+    def max_content_length(self):
+        """Read-only view of the ``MAX_CONTENT_LENGTH`` config key."""
+        if current_app:
+            return current_app.config['MAX_CONTENT_LENGTH']
+
+    @property
+    def endpoint(self):
+        """The endpoint that matched the request.  This in combination with
+        :attr:`view_args` can be used to reconstruct the same or a
+        modified URL.  If an exception happened when matching, this will
+        be ``None``.
+        """
+        if self.url_rule is not None:
+            return self.url_rule.endpoint
+
+    @property
+    def blueprint(self):
+        """The name of the current blueprint"""
+        if self.url_rule and '.' in self.url_rule.endpoint:
+            return self.url_rule.endpoint.rsplit('.', 1)[0]
+
+    def _load_form_data(self):
+        RequestBase._load_form_data(self)
+
+        # In debug mode we're replacing the files multidict with an ad-hoc
+        # subclass that raises a different error for key errors.
+        if (
+            current_app
+            and current_app.debug
+            and self.mimetype != 'multipart/form-data'
+            and not self.files
+        ):
+            from .debughelpers import attach_enctype_error_multidict
+            attach_enctype_error_multidict(self)
+
+
+class Response(ResponseBase, JSONMixin):
+    """The response object that is used by default in Flask.  Works like the
+    response object from Werkzeug but is set to have an HTML mimetype by
+    default.  Quite often you don't have to create this object yourself because
+    :meth:`~flask.Flask.make_response` will take care of that for you.
+
+    If you want to replace the response object used you can subclass this and
+    set :attr:`~flask.Flask.response_class` to your subclass.
+
+    .. versionchanged:: 1.0
+        JSON support is added to the response, like the request. This is useful
+        when testing to get the test client response data as JSON.
+
+    .. versionchanged:: 1.0
+
+        Added :attr:`max_cookie_size`.
+    """
+
+    default_mimetype = 'text/html'
+
+    def _get_data_for_json(self, cache):
+        return self.get_data()
+
+    @property
+    def max_cookie_size(self):
+        """Read-only view of the :data:`MAX_COOKIE_SIZE` config key.
+
+        See :attr:`~werkzeug.wrappers.BaseResponse.max_cookie_size` in
+        Werkzeug's docs.
+        """
+        if current_app:
+            return current_app.config['MAX_COOKIE_SIZE']
+
+        # return Werkzeug's default when not in an app context
+        return super(Response, self).max_cookie_size
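+
+
+# Editor's sketch (not upstream code): the JSON support added to responses is
+# handy in tests; ``app`` is an assumed Flask instance.
+#
+#     rv = app.test_client().get('/api')
+#     payload = rv.get_json()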
diff --git a/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/INSTALLER b/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/LICENSE.rst b/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/LICENSE.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ef9c194749551e859e650f557a8ad7253b81827e
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/LICENSE.rst
@@ -0,0 +1,47 @@
+`BSD 3-Clause <https://opensource.org/licenses/BSD-3-Clause>`_
+
+Copyright © 2011 by the Pallets team.
+
+Some rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+-   Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+-   Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+-   Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+We kindly ask you to use these themes in an unmodified manner only with
+Pallets and Pallets-related projects, not for unrelated projects. If you
+like the visual style and want to use it for your own projects, please
+consider making some larger changes to the themes (such as changing font
+faces, sizes, colors or margins).
+
+THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
+BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+----
+
+The initial implementation of itsdangerous was inspired by Django's
+signing module.
+
+Copyright © Django Software Foundation and individual contributors.
+All rights reserved.
diff --git a/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/METADATA b/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..7389a4d2309adce6feb823780096528422da5d20
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/METADATA
@@ -0,0 +1,98 @@
+Metadata-Version: 2.1
+Name: itsdangerous
+Version: 1.1.0
+Summary: Various helpers to pass data to untrusted environments and back.
+Home-page: https://palletsprojects.com/p/itsdangerous/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+Maintainer: Pallets Team
+Maintainer-email: contact@palletsprojects.com
+License: BSD
+Project-URL: Documentation, https://itsdangerous.palletsprojects.com/
+Project-URL: Code, https://github.com/pallets/itsdangerous
+Project-URL: Issue tracker, https://github.com/pallets/itsdangerous/issues
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+
+itsdangerous
+============
+
+... so better sign this
+
+Various helpers to pass data to untrusted environments and to get it
+back safe and sound. Data is cryptographically signed to ensure that a
+token has not been tampered with.
+
+It's possible to customize how data is serialized. Data is compressed as
+needed. A timestamp can be added and verified automatically while
+loading a token.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+    pip install -U itsdangerous
+
+.. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
+A Simple Example
+----------------
+
+Here's how you could generate a token for transmitting a user's id and
+name between web requests.
+
+.. code-block:: python
+
+    from itsdangerous import URLSafeSerializer
+    auth_s = URLSafeSerializer("secret key", "auth")
+    token = auth_s.dumps({"id": 5, "name": "itsdangerous"})
+
+    print(token)
+    # eyJpZCI6NSwibmFtZSI6Iml0c2Rhbmdlcm91cyJ9.6YP6T0BaO67XP--9UzTrmurXSmg
+
+    data = auth_s.loads(token)
+    print(data["name"])
+    # itsdangerous
+
+
+Donate
+------
+
+The Pallets organization develops and supports itsdangerous and other
+popular packages. In order to grow the community of contributors and
+users, and allow the maintainers to devote more time to the projects,
+`please donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+*   Website: https://palletsprojects.com/p/itsdangerous/
+*   Documentation: https://itsdangerous.palletsprojects.com/
+*   License: `BSD <https://github.com/pallets/itsdangerous/blob/master/LICENSE.rst>`_
+*   Releases: https://pypi.org/project/itsdangerous/
+*   Code: https://github.com/pallets/itsdangerous
+*   Issue tracker: https://github.com/pallets/itsdangerous/issues
+*   Test status: https://travis-ci.org/pallets/itsdangerous
+*   Test coverage: https://codecov.io/gh/pallets/itsdangerous
+
+
diff --git a/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/RECORD b/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..84a94127f66abbd44df3155d1f72874c0b7fdf75
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/RECORD
@@ -0,0 +1,26 @@
+itsdangerous-1.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+itsdangerous-1.1.0.dist-info/LICENSE.rst,sha256=_rKL-jSNgWsOfbrt3xhJnufoAHxngT241qs3xl4EbNQ,2120
+itsdangerous-1.1.0.dist-info/METADATA,sha256=yyKjL2WOg_WybH2Yt-7NIvGpV3B93IsMc2HbToWc7Sk,3062
+itsdangerous-1.1.0.dist-info/RECORD,,
+itsdangerous-1.1.0.dist-info/WHEEL,sha256=CihQvCnsGZQBGAHLEUMf0IdA4fRduS_NBUTMgCTtvPM,110
+itsdangerous-1.1.0.dist-info/top_level.txt,sha256=gKN1OKLk81i7fbWWildJA88EQ9NhnGMSvZqhfz9ICjk,13
+itsdangerous/__init__.py,sha256=Dr-SkfFdOyiR_WjiqIXnlFpYRMW0XvPBNV5muzE5N_A,708
+itsdangerous/__pycache__/__init__.cpython-37.pyc,,
+itsdangerous/__pycache__/_compat.cpython-37.pyc,,
+itsdangerous/__pycache__/_json.cpython-37.pyc,,
+itsdangerous/__pycache__/encoding.cpython-37.pyc,,
+itsdangerous/__pycache__/exc.cpython-37.pyc,,
+itsdangerous/__pycache__/jws.cpython-37.pyc,,
+itsdangerous/__pycache__/serializer.cpython-37.pyc,,
+itsdangerous/__pycache__/signer.cpython-37.pyc,,
+itsdangerous/__pycache__/timed.cpython-37.pyc,,
+itsdangerous/__pycache__/url_safe.cpython-37.pyc,,
+itsdangerous/_compat.py,sha256=oAAMcQAjwQXQpIbuHT3o-aL56ztm_7Fe-4lD7IteF6A,1133
+itsdangerous/_json.py,sha256=W7BLL4RPnSOjNdo2gfKT3BeARMCIikY6O75rwWV0XoE,431
+itsdangerous/encoding.py,sha256=KhY85PsH3bGHe5JANN4LMZ_3b0IwUWRRnnw1wvLlaIg,1224
+itsdangerous/exc.py,sha256=KFxg7K2XMliMQAxL4jkRNgE8e73z2jcRaLrzwqVObnI,2959
+itsdangerous/jws.py,sha256=6Lh9W-Lu8D9s7bRazs0Zb35eyAZm3pzLeZqHmRELeII,7470
+itsdangerous/serializer.py,sha256=bT-dfjKec9zcKa8Qo8n7mHW_8M-XCTPMOFq1TQI_Fv4,8653
+itsdangerous/signer.py,sha256=OOZbK8XomBjQfOFEul8osesn7fc80MXB0L1r7E86_GQ,6345
+itsdangerous/timed.py,sha256=on5Q5lX7LT_LaETOhzF1ZmrRbia8P98263R8FiRyM6Y,5635
+itsdangerous/url_safe.py,sha256=xnFTaukIPmW6Qwn6uNQLgzdau8RuAKnp5N7ukuXykj0,2275
diff --git a/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/WHEEL b/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..dea0e20ccdfea12acf80a5957c9212eace3c39de
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.32.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/top_level.txt b/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e163955e80928012f0c46f3d2eca000966af4f93
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous-1.1.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+itsdangerous
diff --git a/lib/python3.7/site-packages/itsdangerous/__init__.py b/lib/python3.7/site-packages/itsdangerous/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fcd8c1b195069aa2d8c2837c3bdf5f56f21c019
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous/__init__.py
@@ -0,0 +1,22 @@
+from ._json import json
+from .encoding import base64_decode
+from .encoding import base64_encode
+from .encoding import want_bytes
+from .exc import BadData
+from .exc import BadHeader
+from .exc import BadPayload
+from .exc import BadSignature
+from .exc import BadTimeSignature
+from .exc import SignatureExpired
+from .jws import JSONWebSignatureSerializer
+from .jws import TimedJSONWebSignatureSerializer
+from .serializer import Serializer
+from .signer import HMACAlgorithm
+from .signer import NoneAlgorithm
+from .signer import Signer
+from .timed import TimedSerializer
+from .timed import TimestampSigner
+from .url_safe import URLSafeSerializer
+from .url_safe import URLSafeTimedSerializer
+
+__version__ = "1.1.0"
diff --git a/lib/python3.7/site-packages/itsdangerous/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/itsdangerous/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2710a7d96f83020dd571bf760a0291e3f91d1a77
Binary files /dev/null and b/lib/python3.7/site-packages/itsdangerous/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/itsdangerous/__pycache__/_compat.cpython-37.pyc b/lib/python3.7/site-packages/itsdangerous/__pycache__/_compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..00deb7bb1166f94f791bef9171e9e0f438806345
Binary files /dev/null and b/lib/python3.7/site-packages/itsdangerous/__pycache__/_compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/itsdangerous/__pycache__/_json.cpython-37.pyc b/lib/python3.7/site-packages/itsdangerous/__pycache__/_json.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e2b2131efb1360a702ff9dd16399f6279190289
Binary files /dev/null and b/lib/python3.7/site-packages/itsdangerous/__pycache__/_json.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/itsdangerous/__pycache__/encoding.cpython-37.pyc b/lib/python3.7/site-packages/itsdangerous/__pycache__/encoding.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..180d80de6e4fd96043a0f563f22557cb94749ddc
Binary files /dev/null and b/lib/python3.7/site-packages/itsdangerous/__pycache__/encoding.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/itsdangerous/__pycache__/exc.cpython-37.pyc b/lib/python3.7/site-packages/itsdangerous/__pycache__/exc.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc4907e83b45a5a52a05cd10bae4cf15324877d5
Binary files /dev/null and b/lib/python3.7/site-packages/itsdangerous/__pycache__/exc.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/itsdangerous/__pycache__/jws.cpython-37.pyc b/lib/python3.7/site-packages/itsdangerous/__pycache__/jws.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d68b88471a2d35805e5b88162d79f2b3d0a255e4
Binary files /dev/null and b/lib/python3.7/site-packages/itsdangerous/__pycache__/jws.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/itsdangerous/__pycache__/serializer.cpython-37.pyc b/lib/python3.7/site-packages/itsdangerous/__pycache__/serializer.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c37e366a402aa944fda6190dc126ac5be7f65ee6
Binary files /dev/null and b/lib/python3.7/site-packages/itsdangerous/__pycache__/serializer.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/itsdangerous/__pycache__/signer.cpython-37.pyc b/lib/python3.7/site-packages/itsdangerous/__pycache__/signer.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32cedb28b125d2a2e6cf83e002065ee6bfa763b2
Binary files /dev/null and b/lib/python3.7/site-packages/itsdangerous/__pycache__/signer.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/itsdangerous/__pycache__/timed.cpython-37.pyc b/lib/python3.7/site-packages/itsdangerous/__pycache__/timed.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f651b05cd1b86b482206dd986f88080f5c31e399
Binary files /dev/null and b/lib/python3.7/site-packages/itsdangerous/__pycache__/timed.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/itsdangerous/__pycache__/url_safe.cpython-37.pyc b/lib/python3.7/site-packages/itsdangerous/__pycache__/url_safe.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c102956b34cee04515f2747c0606d96fb271d58
Binary files /dev/null and b/lib/python3.7/site-packages/itsdangerous/__pycache__/url_safe.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/itsdangerous/_compat.py b/lib/python3.7/site-packages/itsdangerous/_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..2291bce24015b9488792616d9fb35a60a712c10a
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous/_compat.py
@@ -0,0 +1,46 @@
+import decimal
+import hmac
+import numbers
+import sys
+
+PY2 = sys.version_info[0] == 2
+
+if PY2:
+    from itertools import izip
+
+    text_type = unicode  # noqa: 821
+else:
+    izip = zip
+    text_type = str
+
+number_types = (numbers.Real, decimal.Decimal)
+
+
+def _constant_time_compare(val1, val2):
+    """Return ``True`` if the two strings are equal, ``False``
+    otherwise.
+
+    The time taken is independent of the number of characters that
+    match. Do not use this function for anything other than comparison
+    with known-length targets.
+
+    This should be implemented in C in order to get it completely
+    right.
+
+    This is an alias of :func:`hmac.compare_digest` on Python>=2.7,3.3.
+    """
+    len_eq = len(val1) == len(val2)
+    if len_eq:
+        result = 0
+        left = val1
+    else:
+        result = 1
+        left = val2
+    for x, y in izip(bytearray(left), bytearray(val2)):
+        result |= x ^ y
+    return result == 0
+
+
+# Starting with 2.7/3.3 the standard library has a C implementation of
+# constant-time string comparison.
+constant_time_compare = getattr(hmac, "compare_digest", _constant_time_compare)
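+
+
+# Editor's sketch (not upstream code): the comparison result leaks no timing
+# information about *where* two equal-length values differ.
+#
+#     constant_time_compare(b"sig-a", b"sig-a")  # True
+#     constant_time_compare(b"sig-a", b"sig-b")  # False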
diff --git a/lib/python3.7/site-packages/itsdangerous/_json.py b/lib/python3.7/site-packages/itsdangerous/_json.py
new file mode 100644
index 0000000000000000000000000000000000000000..426b36e9a635c2b511ffb564b4ef2203505bf562
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous/_json.py
@@ -0,0 +1,18 @@
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+
+class _CompactJSON(object):
+    """Wrapper around json module that strips whitespace."""
+
+    @staticmethod
+    def loads(payload):
+        return json.loads(payload)
+
+    @staticmethod
+    def dumps(obj, **kwargs):
+        kwargs.setdefault("ensure_ascii", False)
+        kwargs.setdefault("separators", (",", ":"))
+        return json.dumps(obj, **kwargs)
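+
+
+# Editor's sketch (not upstream code): the compact separators drop all
+# whitespace from the output.
+#
+#     _CompactJSON.dumps({"id": 5, "name": "x"})  # '{"id":5,"name":"x"}'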
diff --git a/lib/python3.7/site-packages/itsdangerous/encoding.py b/lib/python3.7/site-packages/itsdangerous/encoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e28969d2d6819b7dcc289a2c501705f96d113f2
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous/encoding.py
@@ -0,0 +1,49 @@
+import base64
+import string
+import struct
+
+from ._compat import text_type
+from .exc import BadData
+
+
+def want_bytes(s, encoding="utf-8", errors="strict"):
+    if isinstance(s, text_type):
+        s = s.encode(encoding, errors)
+    return s
+
+
+def base64_encode(string):
+    """Base64 encode a string of bytes or text. The resulting bytes are
+    safe to use in URLs.
+    """
+    string = want_bytes(string)
+    return base64.urlsafe_b64encode(string).rstrip(b"=")
+
+
+def base64_decode(string):
+    """Base64 decode a URL-safe string of bytes or text. The result is
+    bytes.
+    """
+    string = want_bytes(string, encoding="ascii", errors="ignore")
+    string += b"=" * (-len(string) % 4)
+
+    try:
+        return base64.urlsafe_b64decode(string)
+    except (TypeError, ValueError):
+        raise BadData("Invalid base64-encoded data")
+
+
+# The alphabet used by base64.urlsafe_*
+_base64_alphabet = (string.ascii_letters + string.digits + "-_=").encode("ascii")
+
+_int64_struct = struct.Struct(">Q")
+_int_to_bytes = _int64_struct.pack
+_bytes_to_int = _int64_struct.unpack
+
+
+def int_to_bytes(num):
+    return _int_to_bytes(num).lstrip(b"\x00")
+
+
+def bytes_to_int(bytestr):
+    return _bytes_to_int(bytestr.rjust(8, b"\x00"))[0]
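+
+
+# Editor's sketch (not upstream code): the helpers above round-trip cleanly.
+#
+#     base64_encode(b"hello")           # b'aGVsbG8' (padding stripped)
+#     base64_decode(b"aGVsbG8")         # b'hello'
+#     bytes_to_int(int_to_bytes(1000))  # 1000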
diff --git a/lib/python3.7/site-packages/itsdangerous/exc.py b/lib/python3.7/site-packages/itsdangerous/exc.py
new file mode 100644
index 0000000000000000000000000000000000000000..287d6917a2e103bba87813b65524253dfaa612d2
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous/exc.py
@@ -0,0 +1,98 @@
+from ._compat import PY2
+from ._compat import text_type
+
+
+class BadData(Exception):
+    """Raised if bad data of any sort was encountered. This is the base
+    for all exceptions that itsdangerous defines.
+
+    .. versionadded:: 0.15
+    """
+
+    message = None
+
+    def __init__(self, message):
+        super(BadData, self).__init__(self, message)
+        self.message = message
+
+    def __str__(self):
+        return text_type(self.message)
+
+    if PY2:
+        __unicode__ = __str__
+
+        def __str__(self):
+            return self.__unicode__().encode("utf-8")
+
+
+class BadSignature(BadData):
+    """Raised if a signature does not match."""
+
+    def __init__(self, message, payload=None):
+        BadData.__init__(self, message)
+
+        #: The payload that failed the signature test. In some
+        #: situations you might still want to inspect this, even if
+        #: you know it was tampered with.
+        #:
+        #: .. versionadded:: 0.14
+        self.payload = payload
+
+
+class BadTimeSignature(BadSignature):
+    """Raised if a time-based signature is invalid. This is a subclass
+    of :class:`BadSignature`.
+    """
+
+    def __init__(self, message, payload=None, date_signed=None):
+        BadSignature.__init__(self, message, payload)
+
+        #: If the signature expired this exposes the date of when the
+        #: signature was created. This can be helpful in order to
+        #: tell the user how long ago the link went stale.
+        #:
+        #: .. versionadded:: 0.14
+        self.date_signed = date_signed
+
+
+class SignatureExpired(BadTimeSignature):
+    """Raised if a signature timestamp is older than ``max_age``. This
+    is a subclass of :exc:`BadTimeSignature`.
+    """
+
+
+class BadHeader(BadSignature):
+    """Raised if a signed header is invalid in some form. This only
+    happens for serializers that have a header that goes with the
+    signature.
+
+    .. versionadded:: 0.24
+    """
+
+    def __init__(self, message, payload=None, header=None, original_error=None):
+        BadSignature.__init__(self, message, payload)
+
+        #: If the header is actually available but just malformed it
+        #: might be stored here.
+        self.header = header
+
+        #: If available, the error that indicates why the payload was
+        #: not valid. This might be ``None``.
+        self.original_error = original_error
+
+
+class BadPayload(BadData):
+    """Raised if a payload is invalid. This could happen if the payload
+    is loaded despite an invalid signature, or if there is a mismatch
+    between the serializer and deserializer. The original exception
+    that occurred during loading is stored as :attr:`original_error`.
+
+    .. versionadded:: 0.15
+    """
+
+    def __init__(self, message, original_error=None):
+        BadData.__init__(self, message)
+
+        #: If available, the error that indicates why the payload was
+        #: not valid. This might be ``None``.
+        self.original_error = original_error
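+
+
+# Editor's sketch (not upstream code): inspecting a failure; ``s``, ``token``
+# and ``log`` are all assumed to exist in the calling code.
+#
+#     try:
+#         data = s.loads(token)
+#     except SignatureExpired as e:
+#         log.info('signed at %s', e.date_signed)
+#     except BadSignature as e:
+#         log.warning('tampered payload: %r', e.payload)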
diff --git a/lib/python3.7/site-packages/itsdangerous/jws.py b/lib/python3.7/site-packages/itsdangerous/jws.py
new file mode 100644
index 0000000000000000000000000000000000000000..92e9ec8b49f1cb987ba9ad13e44ec5c32683cfa9
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous/jws.py
@@ -0,0 +1,218 @@
+import hashlib
+import time
+from datetime import datetime
+
+from ._compat import number_types
+from ._json import _CompactJSON
+from ._json import json
+from .encoding import base64_decode
+from .encoding import base64_encode
+from .encoding import want_bytes
+from .exc import BadData
+from .exc import BadHeader
+from .exc import BadPayload
+from .exc import BadSignature
+from .exc import SignatureExpired
+from .serializer import Serializer
+from .signer import HMACAlgorithm
+from .signer import NoneAlgorithm
+
+
+class JSONWebSignatureSerializer(Serializer):
+    """This serializer implements JSON Web Signature (JWS) support. Only
+    supports the JWS Compact Serialization.
+    """
+
+    jws_algorithms = {
+        "HS256": HMACAlgorithm(hashlib.sha256),
+        "HS384": HMACAlgorithm(hashlib.sha384),
+        "HS512": HMACAlgorithm(hashlib.sha512),
+        "none": NoneAlgorithm(),
+    }
+
+    #: The default algorithm to use for signature generation
+    default_algorithm = "HS512"
+
+    default_serializer = _CompactJSON
+
+    def __init__(
+        self,
+        secret_key,
+        salt=None,
+        serializer=None,
+        serializer_kwargs=None,
+        signer=None,
+        signer_kwargs=None,
+        algorithm_name=None,
+    ):
+        Serializer.__init__(
+            self,
+            secret_key=secret_key,
+            salt=salt,
+            serializer=serializer,
+            serializer_kwargs=serializer_kwargs,
+            signer=signer,
+            signer_kwargs=signer_kwargs,
+        )
+        if algorithm_name is None:
+            algorithm_name = self.default_algorithm
+        self.algorithm_name = algorithm_name
+        self.algorithm = self.make_algorithm(algorithm_name)
+
+    def load_payload(self, payload, serializer=None, return_header=False):
+        payload = want_bytes(payload)
+        if b"." not in payload:
+            raise BadPayload('No "." found in value')
+        base64d_header, base64d_payload = payload.split(b".", 1)
+        try:
+            json_header = base64_decode(base64d_header)
+        except Exception as e:
+            raise BadHeader(
+                "Could not base64 decode the header because of an exception",
+                original_error=e,
+            )
+        try:
+            json_payload = base64_decode(base64d_payload)
+        except Exception as e:
+            raise BadPayload(
+                "Could not base64 decode the payload because of an exception",
+                original_error=e,
+            )
+        try:
+            header = Serializer.load_payload(self, json_header, serializer=json)
+        except BadData as e:
+            raise BadHeader(
+                "Could not unserialize header because it was malformed",
+                original_error=e,
+            )
+        if not isinstance(header, dict):
+            raise BadHeader("Header payload is not a JSON object", header=header)
+        payload = Serializer.load_payload(self, json_payload, serializer=serializer)
+        if return_header:
+            return payload, header
+        return payload
+
+    def dump_payload(self, header, obj):
+        base64d_header = base64_encode(
+            self.serializer.dumps(header, **self.serializer_kwargs)
+        )
+        base64d_payload = base64_encode(
+            self.serializer.dumps(obj, **self.serializer_kwargs)
+        )
+        return base64d_header + b"." + base64d_payload
+
+    def make_algorithm(self, algorithm_name):
+        try:
+            return self.jws_algorithms[algorithm_name]
+        except KeyError:
+            raise NotImplementedError("Algorithm not supported")
+
+    def make_signer(self, salt=None, algorithm=None):
+        if salt is None:
+            salt = self.salt
+        key_derivation = "none" if salt is None else None
+        if algorithm is None:
+            algorithm = self.algorithm
+        return self.signer(
+            self.secret_key,
+            salt=salt,
+            sep=".",
+            key_derivation=key_derivation,
+            algorithm=algorithm,
+        )
+
+    def make_header(self, header_fields):
+        header = header_fields.copy() if header_fields else {}
+        header["alg"] = self.algorithm_name
+        return header
+
+    def dumps(self, obj, salt=None, header_fields=None):
+        """Like :meth:`.Serializer.dumps` but creates a JSON Web
+        Signature. It also allows for specifying additional fields to be
+        included in the JWS header.
+        """
+        header = self.make_header(header_fields)
+        signer = self.make_signer(salt, self.algorithm)
+        return signer.sign(self.dump_payload(header, obj))
+
+    def loads(self, s, salt=None, return_header=False):
+        """Reverse of :meth:`dumps`. If requested via ``return_header``
+        it will return a tuple of payload and header.
+        """
+        payload, header = self.load_payload(
+            self.make_signer(salt, self.algorithm).unsign(want_bytes(s)),
+            return_header=True,
+        )
+        if header.get("alg") != self.algorithm_name:
+            raise BadHeader("Algorithm mismatch", header=header, payload=payload)
+        if return_header:
+            return payload, header
+        return payload
+
+    def loads_unsafe(self, s, salt=None, return_header=False):
+        kwargs = {"return_header": return_header}
+        return self._loads_unsafe_impl(s, salt, kwargs, kwargs)
+
+
+class TimedJSONWebSignatureSerializer(JSONWebSignatureSerializer):
+    """Works like the regular :class:`JSONWebSignatureSerializer` but
+    also records the time of the signing and can be used to expire
+    signatures.
+
+    JWS currently does not specify this behavior, but the spec
+    mentions a possible extension like it. The expiry date is encoded
+    into the header, similar to what `draft-ietf-oauth-json-web-token
+    <http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#expDef>`_
+    specifies.
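+
+    For example, with a short expiry (illustrative values; the key is
+    a placeholder):
+
+    .. code-block:: python
+
+        s = TimedJSONWebSignatureSerializer("secret-key", expires_in=60)
+        token = s.dumps({"id": 42})
+        s.loads(token)  # {'id': 42} while fresh, SignatureExpired later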
+    """
+
+    DEFAULT_EXPIRES_IN = 3600
+
+    def __init__(self, secret_key, expires_in=None, **kwargs):
+        JSONWebSignatureSerializer.__init__(self, secret_key, **kwargs)
+        if expires_in is None:
+            expires_in = self.DEFAULT_EXPIRES_IN
+        self.expires_in = expires_in
+
+    def make_header(self, header_fields):
+        header = JSONWebSignatureSerializer.make_header(self, header_fields)
+        iat = self.now()
+        exp = iat + self.expires_in
+        header["iat"] = iat
+        header["exp"] = exp
+        return header
+
+    def loads(self, s, salt=None, return_header=False):
+        payload, header = JSONWebSignatureSerializer.loads(
+            self, s, salt, return_header=True
+        )
+
+        if "exp" not in header:
+            raise BadSignature("Missing expiry date", payload=payload)
+
+        int_date_error = BadHeader("Expiry date is not an IntDate", payload=payload)
+        try:
+            header["exp"] = int(header["exp"])
+        except ValueError:
+            raise int_date_error
+        if header["exp"] < 0:
+            raise int_date_error
+
+        if header["exp"] < self.now():
+            raise SignatureExpired(
+                "Signature expired",
+                payload=payload,
+                date_signed=self.get_issue_date(header),
+            )
+
+        if return_header:
+            return payload, header
+        return payload
+
+    def get_issue_date(self, header):
+        rv = header.get("iat")
+        if isinstance(rv, number_types):
+            return datetime.utcfromtimestamp(int(rv))
+
+    def now(self):
+        return int(time.time())
diff --git a/lib/python3.7/site-packages/itsdangerous/serializer.py b/lib/python3.7/site-packages/itsdangerous/serializer.py
new file mode 100644
index 0000000000000000000000000000000000000000..12c20f46d2e2d1e8332b8c1be3a43966d340f528
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous/serializer.py
@@ -0,0 +1,233 @@
+import hashlib
+
+from ._compat import text_type
+from ._json import json
+from .encoding import want_bytes
+from .exc import BadPayload
+from .exc import BadSignature
+from .signer import Signer
+
+
+def is_text_serializer(serializer):
+    """Checks whether a serializer generates text or binary."""
+    return isinstance(serializer.dumps({}), text_type)
+
+
+class Serializer(object):
+    """This class provides a serialization interface on top of the
+    signer. It provides a similar API to json/pickle and other modules
+    but is structured differently internally. If you want to change the
+    underlying implementation for parsing and loading you have to
+    override the :meth:`load_payload` and :meth:`dump_payload`
+    functions.
+
+    This implementation uses simplejson if available for dumping and
+    loading and will fall back to the standard library's json module if
+    it's not available.
+
+    You do not need to subclass this class in order to switch out or
+    customize the :class:`.Signer`. You can instead pass a different
+    class to the constructor, as well as a dict of keyword arguments
+    that will be forwarded to it.
+
+    .. code-block:: python
+
+        s = Serializer(signer_kwargs={'key_derivation': 'hmac'})
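+
+    A basic round trip, for illustration (the key is a placeholder):
+
+    .. code-block:: python
+
+        s = Serializer("secret-key")
+        s.loads(s.dumps({"id": 42}))  # {'id': 42}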
+
+    You may want to upgrade the signing parameters without invalidating
+    existing signatures that are in use. Fallback signers can be
+    given that will be tried if unsigning with the current signer fails.
+
+    Fallback signers can be defined by providing a list of
+    ``fallback_signers``. Each item can be one of the following: a
+    signer class (which is instantiated with ``signer_kwargs``,
+    ``salt``, and ``secret_key``), a tuple
+    ``(signer_class, signer_kwargs)``, or a dict of ``signer_kwargs``.
+
+    For example, this is a serializer that signs using SHA-512, but will
+    unsign using either SHA-512 or SHA1:
+
+    .. code-block:: python
+
+        s = Serializer(
+            signer_kwargs={"digest_method": hashlib.sha512},
+            fallback_signers=[{"digest_method": hashlib.sha1}]
+        )
+
+    .. versionchanged:: 0.14
+        The ``signer`` and ``signer_kwargs`` parameters were added to
+        the constructor.
+
+    .. versionchanged:: 1.1.0
+        Added support for ``fallback_signers`` and configured a default
+        SHA-512 fallback. This fallback is for users who used the yanked
+        1.0.0 release which defaulted to SHA-512.
+    """
+
+    #: If a serializer module or class is not passed to the constructor
+    #: this one is picked up. This currently defaults to :mod:`json`.
+    default_serializer = json
+
+    #: The default :class:`Signer` class that is being used by this
+    #: serializer.
+    #:
+    #: .. versionadded:: 0.14
+    default_signer = Signer
+
+    #: The default fallback signers.
+    default_fallback_signers = [{"digest_method": hashlib.sha512}]
+
+    def __init__(
+        self,
+        secret_key,
+        salt=b"itsdangerous",
+        serializer=None,
+        serializer_kwargs=None,
+        signer=None,
+        signer_kwargs=None,
+        fallback_signers=None,
+    ):
+        self.secret_key = want_bytes(secret_key)
+        self.salt = want_bytes(salt)
+        if serializer is None:
+            serializer = self.default_serializer
+        self.serializer = serializer
+        self.is_text_serializer = is_text_serializer(serializer)
+        if signer is None:
+            signer = self.default_signer
+        self.signer = signer
+        self.signer_kwargs = signer_kwargs or {}
+        if fallback_signers is None:
+            fallback_signers = list(self.default_fallback_signers or ())
+        self.fallback_signers = fallback_signers
+        self.serializer_kwargs = serializer_kwargs or {}
+
+    def load_payload(self, payload, serializer=None):
+        """Loads the encoded object. This function raises
+        :class:`.BadPayload` if the payload is not valid. The
+        ``serializer`` parameter can be used to override the serializer
+        stored on the class. The encoded ``payload`` should always be
+        bytes.
+        """
+        if serializer is None:
+            serializer = self.serializer
+            is_text = self.is_text_serializer
+        else:
+            is_text = is_text_serializer(serializer)
+        try:
+            if is_text:
+                payload = payload.decode("utf-8")
+            return serializer.loads(payload)
+        except Exception as e:
+            raise BadPayload(
+                "Could not load the payload because an exception"
+                " occurred on unserializing the data.",
+                original_error=e,
+            )
+
+    def dump_payload(self, obj):
+        """Dumps the encoded object. The return value is always bytes.
+        If the internal serializer returns text, the value will be
+        encoded as UTF-8.
+        """
+        return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))
+
+    def make_signer(self, salt=None):
+        """Creates a new instance of the signer to be used. The default
+        implementation uses the :class:`.Signer` base class.
+        """
+        if salt is None:
+            salt = self.salt
+        return self.signer(self.secret_key, salt=salt, **self.signer_kwargs)
+
+    def iter_unsigners(self, salt=None):
+        """Iterates over all signers to be tried for unsigning. Starts
+        with the configured signer, then constructs each signer
+        specified in ``fallback_signers``.
+        """
+        if salt is None:
+            salt = self.salt
+        yield self.make_signer(salt)
+        for fallback in self.fallback_signers:
+            if type(fallback) is dict:
+                kwargs = fallback
+                fallback = self.signer
+            elif type(fallback) is tuple:
+                fallback, kwargs = fallback
+            else:
+                kwargs = self.signer_kwargs
+            yield fallback(self.secret_key, salt=salt, **kwargs)
+
+    def dumps(self, obj, salt=None):
+        """Returns a signed string serialized with the internal
+        serializer. The return value can be either a byte or unicode
+        string depending on the format of the internal serializer.
+        """
+        payload = want_bytes(self.dump_payload(obj))
+        rv = self.make_signer(salt).sign(payload)
+        if self.is_text_serializer:
+            rv = rv.decode("utf-8")
+        return rv
+
+    def dump(self, obj, f, salt=None):
+        """Like :meth:`dumps` but dumps into a file. The file handle has
+        to be compatible with what the internal serializer expects.
+        """
+        f.write(self.dumps(obj, salt))
+
+    def loads(self, s, salt=None):
+        """Reverse of :meth:`dumps`. Raises :exc:`.BadSignature` if the
+        signature validation fails.
+        """
+        s = want_bytes(s)
+        last_exception = None
+        for signer in self.iter_unsigners(salt):
+            try:
+                return self.load_payload(signer.unsign(s))
+            except BadSignature as err:
+                last_exception = err
+        raise last_exception
+
+    def load(self, f, salt=None):
+        """Like :meth:`loads` but loads from a file."""
+        return self.loads(f.read(), salt)
+
+    def loads_unsafe(self, s, salt=None):
+        """Like :meth:`loads` but without verifying the signature. This
+        is potentially very dangerous to use depending on how your
+        serializer works. The return value is ``(signature_valid,
+        payload)`` instead of just the payload. The first item will be a
+        boolean that indicates if the signature is valid. This function
+        never fails.
+
+        Use it for debugging only and if you know that your serializer
+        module is not exploitable (for example, do not use it with a
+        pickle serializer).
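+
+        A sketch of the contract with placeholder values (a tampered
+        value may still carry a recoverable payload in the second slot):
+
+        .. code-block:: python
+
+            s.loads_unsafe(valid_value)     # (True, {'id': 42})
+            s.loads_unsafe(tampered_value)  # (False, None)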
+
+        .. versionadded:: 0.15
+        """
+        return self._loads_unsafe_impl(s, salt)
+
+    def _loads_unsafe_impl(self, s, salt, load_kwargs=None, load_payload_kwargs=None):
+        """Low level helper function to implement :meth:`loads_unsafe`
+        in serializer subclasses.
+        """
+        try:
+            return True, self.loads(s, salt=salt, **(load_kwargs or {}))
+        except BadSignature as e:
+            if e.payload is None:
+                return False, None
+            try:
+                return (
+                    False,
+                    self.load_payload(e.payload, **(load_payload_kwargs or {})),
+                )
+            except BadPayload:
+                return False, None
+
+    def load_unsafe(self, f, *args, **kwargs):
+        """Like :meth:`loads_unsafe` but loads from a file.
+
+        .. versionadded:: 0.15
+        """
+        return self.loads_unsafe(f.read(), *args, **kwargs)
diff --git a/lib/python3.7/site-packages/itsdangerous/signer.py b/lib/python3.7/site-packages/itsdangerous/signer.py
new file mode 100644
index 0000000000000000000000000000000000000000..6bddc0396e28c4898f720080e249850ade3492ab
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous/signer.py
@@ -0,0 +1,179 @@
+import hashlib
+import hmac
+
+from ._compat import constant_time_compare
+from .encoding import _base64_alphabet
+from .encoding import base64_decode
+from .encoding import base64_encode
+from .encoding import want_bytes
+from .exc import BadSignature
+
+
+class SigningAlgorithm(object):
+    """Subclasses must implement :meth:`get_signature` to provide
+    signature generation functionality.
+    """
+
+    def get_signature(self, key, value):
+        """Returns the signature for the given key and value."""
+        raise NotImplementedError()
+
+    def verify_signature(self, key, value, sig):
+        """Verifies the given signature matches the expected
+        signature.
+        """
+        return constant_time_compare(sig, self.get_signature(key, value))
+
+
+class NoneAlgorithm(SigningAlgorithm):
+    """Provides an algorithm that does not perform any signing and
+    returns an empty signature.
+    """
+
+    def get_signature(self, key, value):
+        return b""
+
+
+class HMACAlgorithm(SigningAlgorithm):
+    """Provides signature generation using HMACs."""
+
+    #: The digest method to use with the MAC algorithm. This defaults to
+    #: SHA1, but can be changed to any other function in the hashlib
+    #: module.
+    default_digest_method = staticmethod(hashlib.sha1)
+
+    def __init__(self, digest_method=None):
+        if digest_method is None:
+            digest_method = self.default_digest_method
+        self.digest_method = digest_method
+
+    def get_signature(self, key, value):
+        mac = hmac.new(key, msg=value, digestmod=self.digest_method)
+        return mac.digest()
+
+
+class Signer(object):
+    """This class can sign and unsign bytes, validating the signature
+    provided.
+
+    Salt can be used to namespace the hash, so that a signed string is
+    only valid for a given namespace. Leaving this at the default
+    value, or re-using a salt value across different parts of your
+    application where the same signed value in one part can mean
+    something different in another part, is a security risk.
+
+    See :ref:`the-salt` for an example of what the salt is doing and how
+    you can utilize it.
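+
+    A rough sketch of the namespacing effect (key and salts are
+    placeholders):
+
+    .. code-block:: python
+
+        s1 = Signer("secret-key", salt="activate")
+        s2 = Signer("secret-key", salt="upgrade")
+        s2.unsign(s1.sign("user-42"))  # raises BadSignature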
+
+    .. versionadded:: 0.14
+        ``key_derivation`` and ``digest_method`` were added as arguments
+        to the class constructor.
+
+    .. versionadded:: 0.18
+        ``algorithm`` was added as an argument to the class constructor.
+    """
+
+    #: The digest method to use for the signer.  This defaults to
+    #: SHA1 but can be changed to any other function in the hashlib
+    #: module.
+    #:
+    #: .. versionadded:: 0.14
+    default_digest_method = staticmethod(hashlib.sha1)
+
+    #: Controls how the key is derived. The default is Django-style
+    #: concatenation. Possible values are ``concat``, ``django-concat``
+    #: and ``hmac``. This is used for deriving a key from the secret key
+    #: with an added salt.
+    #:
+    #: .. versionadded:: 0.14
+    default_key_derivation = "django-concat"
+
+    def __init__(
+        self,
+        secret_key,
+        salt=None,
+        sep=".",
+        key_derivation=None,
+        digest_method=None,
+        algorithm=None,
+    ):
+        self.secret_key = want_bytes(secret_key)
+        self.sep = want_bytes(sep)
+        if self.sep in _base64_alphabet:
+            raise ValueError(
+                "The given separator cannot be used because it may be"
+                " contained in the signature itself. Alphanumeric"
+                " characters and `-_=` must not be used."
+            )
+        self.salt = "itsdangerous.Signer" if salt is None else salt
+        if key_derivation is None:
+            key_derivation = self.default_key_derivation
+        self.key_derivation = key_derivation
+        if digest_method is None:
+            digest_method = self.default_digest_method
+        self.digest_method = digest_method
+        if algorithm is None:
+            algorithm = HMACAlgorithm(self.digest_method)
+        self.algorithm = algorithm
+
+    def derive_key(self):
+        """This method is called to derive the key. The default key
+        derivation choices can be overridden here. Key derivation is not
+        intended to be used as a security method to make a complex key
+        out of a short password. Instead you should use large random
+        secret keys.
+        """
+        salt = want_bytes(self.salt)
+        if self.key_derivation == "concat":
+            return self.digest_method(salt + self.secret_key).digest()
+        elif self.key_derivation == "django-concat":
+            return self.digest_method(salt + b"signer" + self.secret_key).digest()
+        elif self.key_derivation == "hmac":
+            mac = hmac.new(self.secret_key, digestmod=self.digest_method)
+            mac.update(salt)
+            return mac.digest()
+        elif self.key_derivation == "none":
+            return self.secret_key
+        else:
+            raise TypeError("Unknown key derivation method")
+
+    def get_signature(self, value):
+        """Returns the signature for the given value."""
+        value = want_bytes(value)
+        key = self.derive_key()
+        sig = self.algorithm.get_signature(key, value)
+        return base64_encode(sig)
+
+    def sign(self, value):
+        """Signs the given string."""
+        return want_bytes(value) + want_bytes(self.sep) + self.get_signature(value)
+
+    def verify_signature(self, value, sig):
+        """Verifies the signature for the given value."""
+        key = self.derive_key()
+        try:
+            sig = base64_decode(sig)
+        except Exception:
+            return False
+        return self.algorithm.verify_signature(key, value, sig)
+
+    def unsign(self, signed_value):
+        """Unsigns the given string."""
+        signed_value = want_bytes(signed_value)
+        sep = want_bytes(self.sep)
+        if sep not in signed_value:
+            raise BadSignature("No %r found in value" % self.sep)
+        value, sig = signed_value.rsplit(sep, 1)
+        if self.verify_signature(value, sig):
+            return value
+        raise BadSignature("Signature %r does not match" % sig, payload=value)
+
+    def validate(self, signed_value):
+        """Only validates the given signed value. Returns ``True`` if
+        the signature exists and is valid.
+        """
+        try:
+            self.unsign(signed_value)
+            return True
+        except BadSignature:
+            return False
diff --git a/lib/python3.7/site-packages/itsdangerous/timed.py b/lib/python3.7/site-packages/itsdangerous/timed.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c117e419d2fe1778832cc692060a83945ef7a1d
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous/timed.py
@@ -0,0 +1,147 @@
+import time
+from datetime import datetime
+
+from ._compat import text_type
+from .encoding import base64_decode
+from .encoding import base64_encode
+from .encoding import bytes_to_int
+from .encoding import int_to_bytes
+from .encoding import want_bytes
+from .exc import BadSignature
+from .exc import BadTimeSignature
+from .exc import SignatureExpired
+from .serializer import Serializer
+from .signer import Signer
+
+
+class TimestampSigner(Signer):
+    """Works like the regular :class:`.Signer` but also records the time
+    of the signing and can be used to expire signatures. The
+    :meth:`unsign` method can raise :exc:`.SignatureExpired` if the
+    unsigning failed because the signature is expired.
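+
+    A rough usage sketch (the key is a placeholder):
+
+    .. code-block:: python
+
+        signer = TimestampSigner("secret-key")
+        signed = signer.sign("hello")     # b'hello.<timestamp>.<sig>'
+        signer.unsign(signed, max_age=5)  # b'hello' while fresh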
+    """
+
+    def get_timestamp(self):
+        """Returns the current timestamp. The function must return an
+        integer.
+        """
+        return int(time.time())
+
+    def timestamp_to_datetime(self, ts):
+        """Used to convert the timestamp from :meth:`get_timestamp` into
+        a datetime object.
+        """
+        return datetime.utcfromtimestamp(ts)
+
+    def sign(self, value):
+        """Signs the given string and also attaches time information."""
+        value = want_bytes(value)
+        timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
+        sep = want_bytes(self.sep)
+        value = value + sep + timestamp
+        return value + sep + self.get_signature(value)
+
+    def unsign(self, value, max_age=None, return_timestamp=False):
+        """Works like the regular :meth:`.Signer.unsign` but can also
+        validate the time. See the base docstring of the class for
+        the general behavior. If ``return_timestamp`` is ``True`` the
+        timestamp of the signature will be returned as a naive
+        :class:`datetime.datetime` object in UTC.
+        """
+        try:
+            result = Signer.unsign(self, value)
+            sig_error = None
+        except BadSignature as e:
+            sig_error = e
+            result = e.payload or b""
+        sep = want_bytes(self.sep)
+
+        # If there is no timestamp in the result there is something
+        # seriously wrong. If there was a signature error, raise that
+        # error directly; otherwise we are in a situation that should
+        # not occur unless someone used a time-based serializer on
+        # non-timestamp data, so catch that here.
+        if sep not in result:
+            if sig_error:
+                raise sig_error
+            raise BadTimeSignature("timestamp missing", payload=result)
+
+        value, timestamp = result.rsplit(sep, 1)
+        try:
+            timestamp = bytes_to_int(base64_decode(timestamp))
+        except Exception:
+            timestamp = None
+
+        # Signature is *not* okay. Raise a proper error now that we have
+        # split the value and the timestamp.
+        if sig_error is not None:
+            raise BadTimeSignature(
+                text_type(sig_error), payload=value, date_signed=timestamp
+            )
+
+        # Signature was okay but the timestamp is actually not there or
+        # malformed. Should not happen, but we handle it anyway.
+        if timestamp is None:
+            raise BadTimeSignature("Malformed timestamp", payload=value)
+
+        # Check timestamp is not older than max_age
+        if max_age is not None:
+            age = self.get_timestamp() - timestamp
+            if age > max_age:
+                raise SignatureExpired(
+                    "Signature age %s > %s seconds" % (age, max_age),
+                    payload=value,
+                    date_signed=self.timestamp_to_datetime(timestamp),
+                )
+
+        if return_timestamp:
+            return value, self.timestamp_to_datetime(timestamp)
+        return value
+
+    def validate(self, signed_value, max_age=None):
+        """Only validates the given signed value. Returns ``True`` if
+        the signature exists and is valid."""
+        try:
+            self.unsign(signed_value, max_age=max_age)
+            return True
+        except BadSignature:
+            return False
+
+
+class TimedSerializer(Serializer):
+    """Uses :class:`TimestampSigner` instead of the default
+    :class:`.Signer`.
+    """
+
+    default_signer = TimestampSigner
+
+    def loads(self, s, max_age=None, return_timestamp=False, salt=None):
+        """Reverse of :meth:`dumps`, raises :exc:`.BadSignature` if the
+        signature validation fails. If a ``max_age`` is provided it will
+        ensure the signature is not older than that time in seconds. In
+        case the signature is outdated, :exc:`.SignatureExpired` is
+        raised. All arguments are forwarded to the signer's
+        :meth:`~TimestampSigner.unsign` method.
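+
+        For example (key and payload are placeholders):
+
+        .. code-block:: python
+
+            s = TimedSerializer("secret-key")
+            s.loads(s.dumps("payload"), max_age=30)  # 'payload'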
+        """
+        s = want_bytes(s)
+        last_exception = None
+        for signer in self.iter_unsigners(salt):
+            try:
+                base64d, timestamp = signer.unsign(s, max_age, return_timestamp=True)
+                payload = self.load_payload(base64d)
+                if return_timestamp:
+                    return payload, timestamp
+                return payload
+            # If we get a signature expired it means we could read the
+            # signature but it's invalid.  In that case we do not want to
+            # try the next signer.
+            except SignatureExpired:
+                raise
+            except BadSignature as err:
+                last_exception = err
+        raise last_exception
+
+    def loads_unsafe(self, s, max_age=None, salt=None):
+        load_kwargs = {"max_age": max_age}
+        load_payload_kwargs = {}
+        return self._loads_unsafe_impl(s, salt, load_kwargs, load_payload_kwargs)
diff --git a/lib/python3.7/site-packages/itsdangerous/url_safe.py b/lib/python3.7/site-packages/itsdangerous/url_safe.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcaa0112339645800256f1b89b299e7657dbc11b
--- /dev/null
+++ b/lib/python3.7/site-packages/itsdangerous/url_safe.py
@@ -0,0 +1,65 @@
+import zlib
+
+from ._json import _CompactJSON
+from .encoding import base64_decode
+from .encoding import base64_encode
+from .exc import BadPayload
+from .serializer import Serializer
+from .timed import TimedSerializer
+
+
+class URLSafeSerializerMixin(object):
+    """Mixed in with a regular serializer it will attempt to zlib
+    compress the string to make it shorter if necessary. It will also
+    base64 encode the string so that it can safely be placed in a URL.
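+
+    For example (the key is a placeholder; the signature part varies):
+
+    .. code-block:: python
+
+        s = URLSafeSerializer("secret-key")
+        s.dumps([1, 2, 3])  # 'WzEsMiwzXQ.<signature>'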
+    """
+
+    default_serializer = _CompactJSON
+
+    def load_payload(self, payload, *args, **kwargs):
+        decompress = False
+        if payload.startswith(b"."):
+            payload = payload[1:]
+            decompress = True
+        try:
+            json = base64_decode(payload)
+        except Exception as e:
+            raise BadPayload(
+                "Could not base64 decode the payload because of an exception",
+                original_error=e,
+            )
+        if decompress:
+            try:
+                json = zlib.decompress(json)
+            except Exception as e:
+                raise BadPayload(
+                    "Could not zlib decompress the payload before decoding the payload",
+                    original_error=e,
+                )
+        return super(URLSafeSerializerMixin, self).load_payload(json, *args, **kwargs)
+
+    def dump_payload(self, obj):
+        json = super(URLSafeSerializerMixin, self).dump_payload(obj)
+        is_compressed = False
+        compressed = zlib.compress(json)
+        if len(compressed) < (len(json) - 1):
+            json = compressed
+            is_compressed = True
+        base64d = base64_encode(json)
+        if is_compressed:
+            base64d = b"." + base64d
+        return base64d
+
+
+class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
+    """Works like :class:`.Serializer` but dumps and loads into a URL
+    safe string consisting of the upper and lowercase characters of the
+    alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
+    """
+
+
+class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
+    """Works like :class:`.TimedSerializer` but dumps and loads into a
+    URL safe string consisting of the upper and lowercase characters of
+    the alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
+    """
diff --git a/lib/python3.7/site-packages/jinja2/__init__.py b/lib/python3.7/site-packages/jinja2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..15e13b6f2e6acf30ac115d942d4c377868d3ffbe
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/__init__.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2
+    ~~~~~~
+
+    Jinja2 is a template engine written in pure Python.  It provides a
+    Django-inspired non-XML syntax but supports inline expressions and
+    an optional sandboxed environment.
+
+    Nutshell
+    --------
+
+    Here is a small example of a Jinja2 template::
+
+        {% extends 'base.html' %}
+        {% block title %}Memberlist{% endblock %}
+        {% block content %}
+          <ul>
+          {% for user in users %}
+            <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+          {% endfor %}
+          </ul>
+        {% endblock %}
+
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+__docformat__ = 'restructuredtext en'
+__version__ = '2.10.1'
+
+# high level interface
+from jinja2.environment import Environment, Template
+
+# loaders
+from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
+     DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
+     ModuleLoader
+
+# bytecode caches
+from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
+     MemcachedBytecodeCache
+
+# undefined types
+from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
+     make_logging_undefined
+
+# exceptions
+from jinja2.exceptions import TemplateError, UndefinedError, \
+     TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
+     TemplateAssertionError, TemplateRuntimeError
+
+# decorators and public utilities
+from jinja2.filters import environmentfilter, contextfilter, \
+     evalcontextfilter
+from jinja2.utils import Markup, escape, clear_caches, \
+     environmentfunction, evalcontextfunction, contextfunction, \
+     is_undefined, select_autoescape
+
+__all__ = [
+    'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
+    'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
+    'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
+    'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
+    'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
+    'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
+    'TemplateRuntimeError',
+    'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
+    'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
+    'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
+    'select_autoescape',
+]
+
+
+def _patch_async():
+    from jinja2.utils import have_async_gen
+    if have_async_gen:
+        from jinja2.asyncsupport import patch_all
+        patch_all()
+
+
+_patch_async()
+del _patch_async
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..77717a46d4680730b86cdf32da2c3c9dba3fb637
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/_compat.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/_compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e0384235e1ba668af1c7a6b61a2d83bbb7ba243
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/_compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/_identifier.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/_identifier.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a90ab92f9ab027ff3eeed75f67b7dc01c5f6d0c3
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/_identifier.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/asyncfilters.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/asyncfilters.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5235fd946df37f36d67b6ad4db67d2e3de10f0eb
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/asyncfilters.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/asyncsupport.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/asyncsupport.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1aceb309eb5d7a45992d69616921a3ee74c21a5c
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/asyncsupport.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/bccache.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/bccache.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7459ff8df1c3d9e80d912ffe7bb19279dea377df
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/bccache.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/compiler.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/compiler.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c71780faeadd01c826fc137a05a638e7d4cfad53
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/compiler.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/constants.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/constants.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08442fe41701c8d86a6b12ce90ed8c8f8c178504
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/constants.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/debug.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/debug.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b1becd2b71415769df6da1cf1e31da9ab591eb8
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/debug.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/defaults.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/defaults.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..877a4780a5a948a829d4609a0bb2e98805ed4ec8
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/defaults.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/environment.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/environment.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ac129d0e6ebe224f9f7c6b29ecadc6c31e47d90
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/environment.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/exceptions.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/exceptions.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd74820385ef5514cc49ca794edf134520019644
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/exceptions.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/ext.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/ext.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93a459e4d160c79840f4639386b726a7de867310
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/ext.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/filters.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/filters.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fcbc2ce4b3ecc6e65558c2eee4f93dfa7001d276
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/filters.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/idtracking.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/idtracking.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5782db612e59058d4411e9d9c677307632847bfd
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/idtracking.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/lexer.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/lexer.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fa3385ebaaac4dc0dd9e09ff5a763ad41baf2a95
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/lexer.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/loaders.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/loaders.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..497f33db176962c955deab4cc94d86c9e29cb1d4
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/loaders.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/meta.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/meta.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..15b3e4990112b6f1a633dae8f99cafd148867879
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/meta.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/nativetypes.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/nativetypes.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac83a3b902f283cf15c8868ca1c8c5efec289ccb
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/nativetypes.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/nodes.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/nodes.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..65903bfb557105a009c662f0587ef1cdb2533fd5
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/nodes.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/optimizer.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/optimizer.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1d6704f74cf6d5f9a352f3b2ce102ac3d683c11
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/optimizer.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/parser.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/parser.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1b3d03f9e08cac2b2f12a5044b1294197a4783c0
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/parser.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/runtime.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/runtime.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ace3c61c764cf7ac32e2b763f782fa44c8a0273c
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/runtime.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/sandbox.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/sandbox.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..913afb6125df06f2dc91aedb7fdf63eac0f43bba
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/sandbox.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/tests.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/tests.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2eb0733ab26d33be7f9f38c8ca1d3d2e2adb2e6c
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/tests.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/utils.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9cc4ba2f98d9d5a3879d778d6a5dc5ee18811a98
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/__pycache__/visitor.cpython-37.pyc b/lib/python3.7/site-packages/jinja2/__pycache__/visitor.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2230a29f0f0dfe48b84c86394455cef146e7089
Binary files /dev/null and b/lib/python3.7/site-packages/jinja2/__pycache__/visitor.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/jinja2/_compat.py b/lib/python3.7/site-packages/jinja2/_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..61d85301a4a9efa4738624fb7751fbe8e6fbfe67
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/_compat.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2._compat
+    ~~~~~~~~~~~~~~
+
+    Some py2/py3 compatibility support based on a stripped-down
+    version of six so we don't have to depend on a specific version
+    of it.
+
+    :copyright: Copyright 2013 by the Jinja team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+import sys
+
+PY2 = sys.version_info[0] == 2
+PYPY = hasattr(sys, 'pypy_translation_info')
+_identity = lambda x: x
+
+
+if not PY2:
+    unichr = chr
+    range_type = range
+    text_type = str
+    string_types = (str,)
+    integer_types = (int,)
+
+    iterkeys = lambda d: iter(d.keys())
+    itervalues = lambda d: iter(d.values())
+    iteritems = lambda d: iter(d.items())
+
+    import pickle
+    from io import BytesIO, StringIO
+    NativeStringIO = StringIO
+
+    def reraise(tp, value, tb=None):
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+    ifilter = filter
+    imap = map
+    izip = zip
+    intern = sys.intern
+
+    implements_iterator = _identity
+    implements_to_string = _identity
+    encode_filename = _identity
+
+else:
+    unichr = unichr
+    text_type = unicode
+    range_type = xrange
+    string_types = (str, unicode)
+    integer_types = (int, long)
+
+    iterkeys = lambda d: d.iterkeys()
+    itervalues = lambda d: d.itervalues()
+    iteritems = lambda d: d.iteritems()
+
+    import cPickle as pickle
+    from cStringIO import StringIO as BytesIO, StringIO
+    NativeStringIO = BytesIO
+
+    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
+
+    from itertools import imap, izip, ifilter
+    intern = intern
+
+    def implements_iterator(cls):
+        cls.next = cls.__next__
+        del cls.__next__
+        return cls
+
+    def implements_to_string(cls):
+        cls.__unicode__ = cls.__str__
+        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
+        return cls
+
+    def encode_filename(filename):
+        if isinstance(filename, unicode):
+            return filename.encode('utf-8')
+        return filename
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    # This requires a bit of explanation: the basic idea is to make a
+    # dummy metaclass for one level of class instantiation that replaces
+    # itself with the actual metaclass.
+    class metaclass(type):
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+    return type.__new__(metaclass, 'temporary_class', (), {})
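+
+# Illustrative usage (SomeMeta and SomeBase are hypothetical names,
+# not part of this module):
+#
+#     class MyClass(with_metaclass(SomeMeta, SomeBase)):
+#         pass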
+
+
+try:
+    from urllib.parse import quote_from_bytes as url_quote
+except ImportError:
+    from urllib import quote as url_quote
diff --git a/lib/python3.7/site-packages/jinja2/_identifier.py b/lib/python3.7/site-packages/jinja2/_identifier.py
new file mode 100644
index 0000000000000000000000000000000000000000..2eac35d5c35531b2b63e3fbdcae4bfd02dd89240
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/_identifier.py
@@ -0,0 +1,2 @@
+# generated by scripts/generate_identifier_pattern.py
+pattern = '·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯'
diff --git a/lib/python3.7/site-packages/jinja2/asyncfilters.py b/lib/python3.7/site-packages/jinja2/asyncfilters.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c1f46d7fa7d46cff7711321b477f287632ef5e1
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/asyncfilters.py
@@ -0,0 +1,146 @@
+from functools import wraps
+
+from jinja2.asyncsupport import auto_aiter
+from jinja2 import filters
+
+
+async def auto_to_seq(value):
+    seq = []
+    if hasattr(value, '__aiter__'):
+        async for item in value:
+            seq.append(item)
+    else:
+        for item in value:
+            seq.append(item)
+    return seq
+
+
+async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
+    seq, func = filters.prepare_select_or_reject(
+        args, kwargs, modfunc, lookup_attr)
+    if seq:
+        async for item in auto_aiter(seq):
+            if func(item):
+                yield item
+
+
+def dualfilter(normal_filter, async_filter):
+    wrap_evalctx = False
+    if getattr(normal_filter, 'environmentfilter', False):
+        is_async = lambda args: args[0].is_async
+        wrap_evalctx = False
+    else:
+        if not getattr(normal_filter, 'evalcontextfilter', False) and \
+           not getattr(normal_filter, 'contextfilter', False):
+            wrap_evalctx = True
+        is_async = lambda args: args[0].environment.is_async
+
+    @wraps(normal_filter)
+    def wrapper(*args, **kwargs):
+        b = is_async(args)
+        if wrap_evalctx:
+            args = args[1:]
+        if b:
+            return async_filter(*args, **kwargs)
+        return normal_filter(*args, **kwargs)
+
+    if wrap_evalctx:
+        wrapper.evalcontextfilter = True
+
+    wrapper.asyncfiltervariant = True
+
+    return wrapper
+
+
+def asyncfiltervariant(original):
+    def decorator(f):
+        return dualfilter(original, f)
+    return decorator
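+
+# Each decorator application below pairs a sync filter with an async
+# variant; the wrapper built by dualfilter() dispatches to one of them
+# at call time based on whether the environment is in async mode.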
+
+
+@asyncfiltervariant(filters.do_first)
+async def do_first(environment, seq):
+    try:
+        return await auto_aiter(seq).__anext__()
+    except StopAsyncIteration:
+        return environment.undefined('No first item, sequence was empty.')
+
+
+@asyncfiltervariant(filters.do_groupby)
+async def do_groupby(environment, value, attribute):
+    expr = filters.make_attrgetter(environment, attribute)
+    return [filters._GroupTuple(key, await auto_to_seq(values))
+            for key, values in filters.groupby(sorted(
+                await auto_to_seq(value), key=expr), expr)]
+
+
+@asyncfiltervariant(filters.do_join)
+async def do_join(eval_ctx, value, d=u'', attribute=None):
+    return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
+
+
+@asyncfiltervariant(filters.do_list)
+async def do_list(value):
+    return await auto_to_seq(value)
+
+
+@asyncfiltervariant(filters.do_reject)
+async def do_reject(*args, **kwargs):
+    return async_select_or_reject(args, kwargs, lambda x: not x, False)
+
+
+@asyncfiltervariant(filters.do_rejectattr)
+async def do_rejectattr(*args, **kwargs):
+    return async_select_or_reject(args, kwargs, lambda x: not x, True)
+
+
+@asyncfiltervariant(filters.do_select)
+async def do_select(*args, **kwargs):
+    return async_select_or_reject(args, kwargs, lambda x: x, False)
+
+
+@asyncfiltervariant(filters.do_selectattr)
+async def do_selectattr(*args, **kwargs):
+    return async_select_or_reject(args, kwargs, lambda x: x, True)
+
+
+@asyncfiltervariant(filters.do_map)
+async def do_map(*args, **kwargs):
+    seq, func = filters.prepare_map(args, kwargs)
+    if seq:
+        async for item in auto_aiter(seq):
+            yield func(item)
+
+
+@asyncfiltervariant(filters.do_sum)
+async def do_sum(environment, iterable, attribute=None, start=0):
+    rv = start
+    if attribute is not None:
+        func = filters.make_attrgetter(environment, attribute)
+    else:
+        func = lambda x: x
+    async for item in auto_aiter(iterable):
+        rv += func(item)
+    return rv
+
+
+@asyncfiltervariant(filters.do_slice)
+async def do_slice(value, slices, fill_with=None):
+    return filters.do_slice(await auto_to_seq(value), slices, fill_with)
+
+
+ASYNC_FILTERS = {
+    'first':        do_first,
+    'groupby':      do_groupby,
+    'join':         do_join,
+    'list':         do_list,
+    # we intentionally do not support do_last because that would be
+    # ridiculous
+    'reject':       do_reject,
+    'rejectattr':   do_rejectattr,
+    'map':          do_map,
+    'select':       do_select,
+    'selectattr':   do_selectattr,
+    'sum':          do_sum,
+    'slice':        do_slice,
+}
diff --git a/lib/python3.7/site-packages/jinja2/asyncsupport.py b/lib/python3.7/site-packages/jinja2/asyncsupport.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1e7b5ce9a27a6e069abfab9d36f80117ce6a74c
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/asyncsupport.py
@@ -0,0 +1,256 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.asyncsupport
+    ~~~~~~~~~~~~~~~~~~~
+
+    Has all the code for async support which is implemented as a patch
+    for supported Python versions.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+import sys
+import asyncio
+import inspect
+from functools import update_wrapper
+
+from jinja2.utils import concat, internalcode, Markup
+from jinja2.environment import TemplateModule
+from jinja2.runtime import LoopContextBase, _last_iteration
+
+
+async def concat_async(async_gen):
+    rv = []
+    async def collect():
+        async for event in async_gen:
+            rv.append(event)
+    await collect()
+    return concat(rv)
+
+
+async def generate_async(self, *args, **kwargs):
+    vars = dict(*args, **kwargs)
+    try:
+        async for event in self.root_render_func(self.new_context(vars)):
+            yield event
+    except Exception:
+        exc_info = sys.exc_info()
+    else:
+        return
+    yield self.environment.handle_exception(exc_info, True)
+
+
+def wrap_generate_func(original_generate):
+    def _convert_generator(self, loop, args, kwargs):
+        async_gen = self.generate_async(*args, **kwargs)
+        try:
+            while 1:
+                yield loop.run_until_complete(async_gen.__anext__())
+        except StopAsyncIteration:
+            pass
+    def generate(self, *args, **kwargs):
+        if not self.environment.is_async:
+            return original_generate(self, *args, **kwargs)
+        return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
+    return update_wrapper(generate, original_generate)
+
+
+async def render_async(self, *args, **kwargs):
+    if not self.environment.is_async:
+        raise RuntimeError('The environment was not created with async mode '
+                           'enabled.')
+
+    vars = dict(*args, **kwargs)
+    ctx = self.new_context(vars)
+
+    try:
+        return await concat_async(self.root_render_func(ctx))
+    except Exception:
+        exc_info = sys.exc_info()
+    return self.environment.handle_exception(exc_info, True)
+
+
+def wrap_render_func(original_render):
+    def render(self, *args, **kwargs):
+        if not self.environment.is_async:
+            return original_render(self, *args, **kwargs)
+        loop = asyncio.get_event_loop()
+        return loop.run_until_complete(self.render_async(*args, **kwargs))
+    return update_wrapper(render, original_render)
+
+
+def wrap_block_reference_call(original_call):
+    @internalcode
+    async def async_call(self):
+        rv = await concat_async(self._stack[self._depth](self._context))
+        if self._context.eval_ctx.autoescape:
+            rv = Markup(rv)
+        return rv
+
+    @internalcode
+    def __call__(self):
+        if not self._context.environment.is_async:
+            return original_call(self)
+        return async_call(self)
+
+    return update_wrapper(__call__, original_call)
+
+
+def wrap_macro_invoke(original_invoke):
+    @internalcode
+    async def async_invoke(self, arguments, autoescape):
+        rv = await self._func(*arguments)
+        if autoescape:
+            rv = Markup(rv)
+        return rv
+
+    @internalcode
+    def _invoke(self, arguments, autoescape):
+        if not self._environment.is_async:
+            return original_invoke(self, arguments, autoescape)
+        return async_invoke(self, arguments, autoescape)
+    return update_wrapper(_invoke, original_invoke)
+
+
+@internalcode
+async def get_default_module_async(self):
+    if self._module is not None:
+        return self._module
+    self._module = rv = await self.make_module_async()
+    return rv
+
+
+def wrap_default_module(original_default_module):
+    @internalcode
+    def _get_default_module(self):
+        if self.environment.is_async:
+            raise RuntimeError('Template module attribute is unavailable '
+                               'in async mode')
+        return original_default_module(self)
+    return _get_default_module
+
+
+async def make_module_async(self, vars=None, shared=False, locals=None):
+    context = self.new_context(vars, shared, locals)
+    body_stream = []
+    async for item in self.root_render_func(context):
+        body_stream.append(item)
+    return TemplateModule(self, context, body_stream)
+
+
+def patch_template():
+    from jinja2 import Template
+    Template.generate = wrap_generate_func(Template.generate)
+    Template.generate_async = update_wrapper(
+        generate_async, Template.generate_async)
+    Template.render_async = update_wrapper(
+        render_async, Template.render_async)
+    Template.render = wrap_render_func(Template.render)
+    Template._get_default_module = wrap_default_module(
+        Template._get_default_module)
+    Template._get_default_module_async = get_default_module_async
+    Template.make_module_async = update_wrapper(
+        make_module_async, Template.make_module_async)
+
+
+def patch_runtime():
+    from jinja2.runtime import BlockReference, Macro
+    BlockReference.__call__ = wrap_block_reference_call(
+        BlockReference.__call__)
+    Macro._invoke = wrap_macro_invoke(Macro._invoke)
+
+
+def patch_filters():
+    from jinja2.filters import FILTERS
+    from jinja2.asyncfilters import ASYNC_FILTERS
+    FILTERS.update(ASYNC_FILTERS)
+
+
+def patch_all():
+    patch_template()
+    patch_runtime()
+    patch_filters()
+
+
+async def auto_await(value):
+    if inspect.isawaitable(value):
+        return await value
+    return value
+
+
+async def auto_aiter(iterable):
+    if hasattr(iterable, '__aiter__'):
+        async for item in iterable:
+            yield item
+        return
+    for item in iterable:
+        yield item
+
+
+class AsyncLoopContext(LoopContextBase):
+
+    def __init__(self, async_iterator, undefined, after, length, recurse=None,
+                 depth0=0):
+        LoopContextBase.__init__(self, undefined, recurse, depth0)
+        self._async_iterator = async_iterator
+        self._after = after
+        self._length = length
+
+    @property
+    def length(self):
+        if self._length is None:
+            raise TypeError('Loop length for some iterators cannot be '
+                            'lazily calculated in async mode')
+        return self._length
+
+    def __aiter__(self):
+        return AsyncLoopContextIterator(self)
+
+
+class AsyncLoopContextIterator(object):
+    __slots__ = ('context',)
+
+    def __init__(self, context):
+        self.context = context
+
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
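+        # the context keeps one item of lookahead in ctx._after so that
+        # properties such as loop.last can be answered without consuming
+        # more of the underlying iterator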
+        ctx = self.context
+        ctx.index0 += 1
+        if ctx._after is _last_iteration:
+            raise StopAsyncIteration()
+        ctx._before = ctx._current
+        ctx._current = ctx._after
+        try:
+            ctx._after = await ctx._async_iterator.__anext__()
+        except StopAsyncIteration:
+            ctx._after = _last_iteration
+        return ctx._current, ctx
+
+
+async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
+    # Length is more complicated and less efficient in async mode.  The
+    # reason for this is that we cannot know if length will be used
+    # upfront but because length is a property we cannot lazily execute it
+    # later.  This means that we need to buffer it up and measure :(
+    #
+    # We however only do this for actual iterators, not for async
+    # iterators as blocking here does not seem like the best idea in the
+    # world.
+    try:
+        length = len(iterable)
+    except (TypeError, AttributeError):
+        if not hasattr(iterable, '__aiter__'):
+            iterable = tuple(iterable)
+            length = len(iterable)
+        else:
+            length = None
+    async_iterator = auto_aiter(iterable)
+    try:
+        after = await async_iterator.__anext__()
+    except StopAsyncIteration:
+        after = _last_iteration
+    return AsyncLoopContext(async_iterator, undefined, after, length, recurse,
+                            depth0)
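+
+# Illustrative consequence of the buffering above: with a plain generator,
+# ``{{ loop.length }}`` still works because the items are buffered into a
+# tuple first, while with a true async iterator the length stays None and
+# accessing ``loop.length`` raises the TypeError from AsyncLoopContext.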
diff --git a/lib/python3.7/site-packages/jinja2/bccache.py b/lib/python3.7/site-packages/jinja2/bccache.py
new file mode 100644
index 0000000000000000000000000000000000000000..080e527cabf33b0422f6b8e5b172c17d7c039d39
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/bccache.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.bccache
+    ~~~~~~~~~~~~~~
+
+    This module implements the bytecode cache system Jinja is optionally
+    using.  This is useful if you have very complex template situations and
+    the compilation of all those templates slows down your application too
+    much.
+
+    Situations where this is useful are often forking web applications that
+    are initialized on the first request.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD.
+"""
+from os import path, listdir
+import os
+import sys
+import stat
+import errno
+import marshal
+import tempfile
+import fnmatch
+from hashlib import sha1
+from jinja2.utils import open_if_exists
+from jinja2._compat import BytesIO, pickle, PY2, text_type
+
+
+# marshal works better on 3.x, one hack less required
+if not PY2:
+    marshal_dump = marshal.dump
+    marshal_load = marshal.load
+else:
+
+    def marshal_dump(code, f):
+        if isinstance(f, file):
+            marshal.dump(code, f)
+        else:
+            f.write(marshal.dumps(code))
+
+    def marshal_load(f):
+        if isinstance(f, file):
+            return marshal.load(f)
+        return marshal.loads(f.read())
+
+
+bc_version = 3
+
+# magic version used to only change with new jinja versions.  With 2.6
+# we change this to also take Python version changes into account.  The
+# reason for this is that Python tends to segfault if fed earlier bytecode
+# versions because someone thought it would be a good idea to reuse opcodes
+# or make Python incompatible with earlier versions.
+bc_magic = 'j2'.encode('ascii') + \
+    pickle.dumps(bc_version, 2) + \
+    pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
+
+
+class Bucket(object):
+    """Buckets are used to store the bytecode for one template.  It's created
+    and initialized by the bytecode cache and passed to the loading functions.
+
+    The buckets get an internal checksum from the cache assigned and use this
+    to automatically reject outdated cache material.  Individual bytecode
+    cache subclasses don't have to care about cache invalidation.
+    """
+
+    def __init__(self, environment, key, checksum):
+        self.environment = environment
+        self.key = key
+        self.checksum = checksum
+        self.reset()
+
+    def reset(self):
+        """Resets the bucket (unloads the bytecode)."""
+        self.code = None
+
+    def load_bytecode(self, f):
+        """Loads bytecode from a file or file like object."""
+        # make sure the magic header is correct
+        magic = f.read(len(bc_magic))
+        if magic != bc_magic:
+            self.reset()
+            return
+        # the source code of the file changed, we need to reload
+        checksum = pickle.load(f)
+        if self.checksum != checksum:
+            self.reset()
+            return
+        # if marshal_load fails then we need to reload
+        try:
+            self.code = marshal_load(f)
+        except (EOFError, ValueError, TypeError):
+            self.reset()
+            return
+
+    def write_bytecode(self, f):
+        """Dump the bytecode into the file or file like object passed."""
+        if self.code is None:
+            raise TypeError('can\'t write empty bucket')
+        f.write(bc_magic)
+        pickle.dump(self.checksum, f, 2)
+        marshal_dump(self.code, f)
+
+    def bytecode_from_string(self, string):
+        """Load bytecode from a string."""
+        self.load_bytecode(BytesIO(string))
+
+    def bytecode_to_string(self):
+        """Return the bytecode as string."""
+        out = BytesIO()
+        self.write_bytecode(out)
+        return out.getvalue()
+
+
+class BytecodeCache(object):
+    """To implement your own bytecode cache you have to subclass this class
+    and override :meth:`load_bytecode` and :meth:`dump_bytecode`.  Both of
+    these methods are passed a :class:`~jinja2.bccache.Bucket`.
+
+    A very basic bytecode cache that saves the bytecode on the file system::
+
+        from os import path
+
+        class MyCache(BytecodeCache):
+
+            def __init__(self, directory):
+                self.directory = directory
+
+            def load_bytecode(self, bucket):
+                filename = path.join(self.directory, bucket.key)
+                if path.exists(filename):
+                    with open(filename, 'rb') as f:
+                        bucket.load_bytecode(f)
+
+            def dump_bytecode(self, bucket):
+                filename = path.join(self.directory, bucket.key)
+                with open(filename, 'wb') as f:
+                    bucket.write_bytecode(f)
+
+    A more advanced version of a filesystem based bytecode cache is part of
+    Jinja2.
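+
+    To activate a cache, pass it to the environment; ``bytecode_cache`` is
+    the relevant :class:`Environment` argument (shown here with the example
+    class from above)::
+
+        from jinja2 import Environment
+
+        env = Environment(bytecode_cache=MyCache('/tmp/jinja_cache'))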
+    """
+
+    def load_bytecode(self, bucket):
+        """Subclasses have to override this method to load bytecode into a
+        bucket.  If they are not able to find code in the cache for the
+        bucket, it must not do anything.
+        """
+        raise NotImplementedError()
+
+    def dump_bytecode(self, bucket):
+        """Subclasses have to override this method to write the bytecode
+        from a bucket back to the cache.  If it unable to do so it must not
+        fail silently but raise an exception.
+        """
+        raise NotImplementedError()
+
+    def clear(self):
+        """Clears the cache.  This method is not used by Jinja2 but should be
+        implemented to allow applications to clear the bytecode cache used
+        by a particular environment.
+        """
+
+    def get_cache_key(self, name, filename=None):
+        """Returns the unique hash key for this template name."""
+        hash = sha1(name.encode('utf-8'))
+        if filename is not None:
+            filename = '|' + filename
+            if isinstance(filename, text_type):
+                filename = filename.encode('utf-8')
+            hash.update(filename)
+        return hash.hexdigest()
+
+    def get_source_checksum(self, source):
+        """Returns a checksum for the source."""
+        return sha1(source.encode('utf-8')).hexdigest()
+
+    def get_bucket(self, environment, name, filename, source):
+        """Return a cache bucket for the given template.  All arguments are
+        mandatory but filename may be `None`.
+        """
+        key = self.get_cache_key(name, filename)
+        checksum = self.get_source_checksum(source)
+        bucket = Bucket(environment, key, checksum)
+        self.load_bytecode(bucket)
+        return bucket
+
+    def set_bucket(self, bucket):
+        """Put the bucket into the cache."""
+        self.dump_bytecode(bucket)
+
+
+class FileSystemBytecodeCache(BytecodeCache):
+    """A bytecode cache that stores bytecode on the filesystem.  It accepts
+    two arguments: The directory where the cache items are stored and a
+    pattern string that is used to build the filename.
+
+    If no directory is specified a default cache directory is selected.  On
+    Windows the user's temp directory is used, on UNIX systems a directory
+    is created for the user in the system temp directory.
+
+    The pattern can be used to have multiple separate caches operate on the
+    same directory.  The default pattern is ``'__jinja2_%s.cache'``.  ``%s``
+    is replaced with the cache key.
+
+    >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
+
+    This bytecode cache supports clearing of the cache using the clear method.
+    """
+
+    def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
+        if directory is None:
+            directory = self._get_default_cache_dir()
+        self.directory = directory
+        self.pattern = pattern
+
+    def _get_default_cache_dir(self):
+        def _unsafe_dir():
+            raise RuntimeError('Cannot determine safe temp directory.  You '
+                               'need to explicitly provide one.')
+
+        tmpdir = tempfile.gettempdir()
+
+        # On Windows the temporary directory is user-specific unless
+        # explicitly forced otherwise.  We can just use that.
+        if os.name == 'nt':
+            return tmpdir
+        if not hasattr(os, 'getuid'):
+            _unsafe_dir()
+
+        dirname = '_jinja2-cache-%d' % os.getuid()
+        actual_dir = os.path.join(tmpdir, dirname)
+
+        try:
+            os.mkdir(actual_dir, stat.S_IRWXU)
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+        try:
+            os.chmod(actual_dir, stat.S_IRWXU)
+            actual_dir_stat = os.lstat(actual_dir)
+            if actual_dir_stat.st_uid != os.getuid() \
+               or not stat.S_ISDIR(actual_dir_stat.st_mode) \
+               or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
+                _unsafe_dir()
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+        actual_dir_stat = os.lstat(actual_dir)
+        if actual_dir_stat.st_uid != os.getuid() \
+           or not stat.S_ISDIR(actual_dir_stat.st_mode) \
+           or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
+            _unsafe_dir()
+
+        return actual_dir
+
+    def _get_cache_filename(self, bucket):
+        return path.join(self.directory, self.pattern % bucket.key)
+
+    def load_bytecode(self, bucket):
+        f = open_if_exists(self._get_cache_filename(bucket), 'rb')
+        if f is not None:
+            try:
+                bucket.load_bytecode(f)
+            finally:
+                f.close()
+
+    def dump_bytecode(self, bucket):
+        f = open(self._get_cache_filename(bucket), 'wb')
+        try:
+            bucket.write_bytecode(f)
+        finally:
+            f.close()
+
+    def clear(self):
+        # imported lazily here because Google App Engine doesn't support
+        # write access on the file system, so the function normally does
+        # not exist there.
+        from os import remove
+        files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
+        for filename in files:
+            try:
+                remove(path.join(self.directory, filename))
+            except OSError:
+                pass
+
+
+class MemcachedBytecodeCache(BytecodeCache):
+    """This class implements a bytecode cache that uses a memcache cache for
+    storing the information.  It does not enforce a specific memcache library
+    (tummy's memcache or cmemcache) but will accept any class that provides
+    the minimal interface required.
+
+    Libraries compatible with this class:
+
+    -   `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
+    -   `python-memcached <https://www.tummy.com/Community/software/python-memcached/>`_
+    -   `cmemcache <http://gijsbert.org/cmemcache/>`_
+
+    (Unfortunately the django cache interface is not compatible because it
+    does not support storing binary data, only unicode.  You can however pass
+    the underlying cache client to the bytecode cache which is available
+    as `django.core.cache.cache._client`.)
+
+    The minimal interface for the client passed to the constructor is this:
+
+    .. class:: MinimalClientInterface
+
+        .. method:: set(key, value[, timeout])
+
+            Stores the bytecode in the cache.  `value` is a string and
+            `timeout` the timeout of the key.  If timeout is not provided,
+            a default timeout or no timeout should be assumed; if it is
+            provided, it is an integer with the number of seconds the
+            cache item should exist.
+
+        .. method:: get(key)
+
+            Returns the value for the cache key.  If the item does not
+            exist in the cache the return value must be `None`.
+
+    The other arguments to the constructor are the prefix that is added in
+    front of every cache key and the timeout for the bytecode in the cache
+    system.  We recommend a high (or no) timeout.
+
+    This bytecode cache does not support clearing of used items in the cache.
+    The clear method is a no-operation function.
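+
+    A minimal in-memory client satisfying the interface above could look
+    like this (an illustrative sketch, not a real memcache client)::
+
+        class DictClient(object):
+            # stores values in a plain dict and ignores the timeout
+            def __init__(self):
+                self._data = {}
+
+            def set(self, key, value, timeout=None):
+                self._data[key] = value
+
+            def get(self, key):
+                return self._data.get(key)
+
+        bcc = MemcachedBytecodeCache(DictClient())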
+
+    .. versionadded:: 2.7
+       Added support for ignoring memcache errors through the
+       `ignore_memcache_errors` parameter.
+    """
+
+    def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
+                 ignore_memcache_errors=True):
+        self.client = client
+        self.prefix = prefix
+        self.timeout = timeout
+        self.ignore_memcache_errors = ignore_memcache_errors
+
+    def load_bytecode(self, bucket):
+        try:
+            code = self.client.get(self.prefix + bucket.key)
+        except Exception:
+            if not self.ignore_memcache_errors:
+                raise
+            code = None
+        if code is not None:
+            bucket.bytecode_from_string(code)
+
+    def dump_bytecode(self, bucket):
+        args = (self.prefix + bucket.key, bucket.bytecode_to_string())
+        if self.timeout is not None:
+            args += (self.timeout,)
+        try:
+            self.client.set(*args)
+        except Exception:
+            if not self.ignore_memcache_errors:
+                raise
diff --git a/lib/python3.7/site-packages/jinja2/compiler.py b/lib/python3.7/site-packages/jinja2/compiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..d534a827391aeb6f81f94c1ce5688af8d05c5505
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/compiler.py
@@ -0,0 +1,1721 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.compiler
+    ~~~~~~~~~~~~~~~
+
+    Compiles nodes into python code.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+from itertools import chain
+from copy import deepcopy
+from keyword import iskeyword as is_python_keyword
+from functools import update_wrapper
+from jinja2 import nodes
+from jinja2.nodes import EvalContext
+from jinja2.visitor import NodeVisitor
+from jinja2.optimizer import Optimizer
+from jinja2.exceptions import TemplateAssertionError
+from jinja2.utils import Markup, concat, escape
+from jinja2._compat import range_type, text_type, string_types, \
+     iteritems, NativeStringIO, imap, izip
+from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \
+     VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED
+
+
+operators = {
+    'eq':       '==',
+    'ne':       '!=',
+    'gt':       '>',
+    'gteq':     '>=',
+    'lt':       '<',
+    'lteq':     '<=',
+    'in':       'in',
+    'notin':    'not in'
+}
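+
+# for example, a Compare node with operator 'gteq' is written into the
+# generated Python source as the infix ``>=`` using this table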
+
+# which method do we want to use for dict iteration in generated code?
+# on 2.x let's go with iteritems, on 3.x with items
+if hasattr(dict, 'iteritems'):
+    dict_item_iter = 'iteritems'
+else:
+    dict_item_iter = 'items'
+
+code_features = ['division']
+
+# does this python version support generator stops? (PEP 0479)
+try:
+    exec('from __future__ import generator_stop')
+    code_features.append('generator_stop')
+except SyntaxError:
+    pass
+
+# does this python version support yield from?
+try:
+    exec('def f(): yield from x()')
+except SyntaxError:
+    supports_yield_from = False
+else:
+    supports_yield_from = True
+
+
+def optimizeconst(f):
+    def new_func(self, node, frame, **kwargs):
+        # Only optimize if the frame is not volatile
+        if self.optimized and not frame.eval_ctx.volatile:
+            new_node = self.optimizer.visit(node, frame.eval_ctx)
+            if new_node != node:
+                return self.visit(new_node, frame)
+        return f(self, node, frame, **kwargs)
+    return update_wrapper(new_func, f)
+
+
+def generate(node, environment, name, filename, stream=None,
+             defer_init=False, optimized=True):
+    """Generate the python source for a node tree."""
+    if not isinstance(node, nodes.Template):
+        raise TypeError('Can\'t compile non template nodes')
+    generator = environment.code_generator_class(environment, name, filename,
+                                                 stream, defer_init,
+                                                 optimized)
+    generator.visit(node)
+    if stream is None:
+        return generator.stream.getvalue()
+
+
+def has_safe_repr(value):
+    """Does the node have a safe representation?"""
+    if value is None or value is NotImplemented or value is Ellipsis:
+        return True
+    if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
+        return True
+    if type(value) in (tuple, list, set, frozenset):
+        for item in value:
+            if not has_safe_repr(item):
+                return False
+        return True
+    elif type(value) is dict:
+        for key, value in iteritems(value):
+            if not has_safe_repr(key):
+                return False
+            if not has_safe_repr(value):
+                return False
+        return True
+    return False
+
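+# Illustrative behavior: has_safe_repr((1, 'a', None)) is True because every
+# element round-trips through repr(); has_safe_repr(object()) is False, so
+# such a value cannot be written into the generated source as a literal.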
+
+def find_undeclared(nodes, names):
+    """Check if the names passed are accessed undeclared.  The return value
+    is the set of those names that are found to be accessed undeclared.
+    """
+    visitor = UndeclaredNameVisitor(names)
+    try:
+        for node in nodes:
+            visitor.visit(node)
+    except VisitorExit:
+        pass
+    return visitor.undeclared
+
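+# Example (mirrors the call in visit_Template below): the compiler asks
+# whether special names are actually used before materializing them, e.g.
+#
+#     if 'self' in find_undeclared(node.body, ('self',)):
+#         ...  # emit: self = TemplateReference(context)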
+
+class MacroRef(object):
+
+    def __init__(self, node):
+        self.node = node
+        self.accesses_caller = False
+        self.accesses_kwargs = False
+        self.accesses_varargs = False
+
+
+class Frame(object):
+    """Holds compile time information for us."""
+
+    def __init__(self, eval_ctx, parent=None, level=None):
+        self.eval_ctx = eval_ctx
+        self.symbols = Symbols(parent and parent.symbols or None,
+                               level=level)
+
+        # a toplevel frame is the root + soft frames such as if conditions.
+        self.toplevel = False
+
+        # the root frame is basically just the outermost frame, so no if
+        # conditions.  This information is used to optimize inheritance
+        # situations.
+        self.rootlevel = False
+
+        # in some dynamic inheritance situations the compiler needs to add
+        # write tests around output statements.
+        self.require_output_check = parent and parent.require_output_check
+
+        # inside some tags we are using a buffer rather than yield statements.
+        # this for example affects {% filter %} or {% macro %}.  If a frame
+        # is buffered this variable points to the name of the list used as
+        # buffer.
+        self.buffer = None
+
+        # the name of the block we're in, otherwise None.
+        self.block = parent and parent.block or None
+
+        # the parent of this frame
+        self.parent = parent
+
+        if parent is not None:
+            self.buffer = parent.buffer
+
+    def copy(self):
+        """Create a copy of the current one."""
+        rv = object.__new__(self.__class__)
+        rv.__dict__.update(self.__dict__)
+        rv.symbols = self.symbols.copy()
+        return rv
+
+    def inner(self, isolated=False):
+        """Return an inner frame."""
+        if isolated:
+            return Frame(self.eval_ctx, level=self.symbols.level + 1)
+        return Frame(self.eval_ctx, self)
+
+    def soft(self):
+        """Return a soft frame.  A soft frame may not be modified as
+        standalone thing as it shares the resources with the frame it
+        was created of, but it's not a rootlevel frame any longer.
+
+        This is only used to implement if-statements.
+        """
+        rv = self.copy()
+        rv.rootlevel = False
+        return rv
+
+    __copy__ = copy
+
+
+class VisitorExit(RuntimeError):
+    """Exception used by the `UndeclaredNameVisitor` to signal a stop."""
+
+
+class DependencyFinderVisitor(NodeVisitor):
+    """A visitor that collects filter and test calls."""
+
+    def __init__(self):
+        self.filters = set()
+        self.tests = set()
+
+    def visit_Filter(self, node):
+        self.generic_visit(node)
+        self.filters.add(node.name)
+
+    def visit_Test(self, node):
+        self.generic_visit(node)
+        self.tests.add(node.name)
+
+    def visit_Block(self, node):
+        """Stop visiting at blocks."""
+
+
+class UndeclaredNameVisitor(NodeVisitor):
+    """A visitor that checks if a name is accessed without being
+    declared.  This is different from the frame visitor as it will
+    not stop at closure frames.
+    """
+
+    def __init__(self, names):
+        self.names = set(names)
+        self.undeclared = set()
+
+    def visit_Name(self, node):
+        if node.ctx == 'load' and node.name in self.names:
+            self.undeclared.add(node.name)
+            if self.undeclared == self.names:
+                raise VisitorExit()
+        else:
+            self.names.discard(node.name)
+
+    def visit_Block(self, node):
+        """Stop visiting a blocks."""
+
+
+class CompilerExit(Exception):
+    """Raised if the compiler encountered a situation where it just
+    doesn't make sense to further process the code.  Any block that
+    raises such an exception is not further processed.
+    """
+
+
+class CodeGenerator(NodeVisitor):
+
+    def __init__(self, environment, name, filename, stream=None,
+                 defer_init=False, optimized=True):
+        if stream is None:
+            stream = NativeStringIO()
+        self.environment = environment
+        self.name = name
+        self.filename = filename
+        self.stream = stream
+        self.created_block_context = False
+        self.defer_init = defer_init
+        self.optimized = optimized
+        if optimized:
+            self.optimizer = Optimizer(environment)
+
+        # aliases for imports
+        self.import_aliases = {}
+
+        # a registry for all blocks.  Because blocks are moved out
+        # into the global python scope they are registered here
+        self.blocks = {}
+
+        # the number of extends statements so far
+        self.extends_so_far = 0
+
+        # some templates have a rootlevel extends.  In this case we
+        # can safely assume that we're a child template and do some
+        # more optimizations.
+        self.has_known_extends = False
+
+        # the current line number
+        self.code_lineno = 1
+
+        # registry of all filters and tests (global, not block local)
+        self.tests = {}
+        self.filters = {}
+
+        # the debug information
+        self.debug_info = []
+        self._write_debug_info = None
+
+        # the number of new lines before the next write()
+        self._new_lines = 0
+
+        # the line number of the last written statement
+        self._last_line = 0
+
+        # true if nothing was written so far.
+        self._first_write = True
+
+        # used by the `temporary_identifier` method to get new
+        # unique, temporary identifier
+        self._last_identifier = 0
+
+        # the current indentation
+        self._indentation = 0
+
+        # Tracks toplevel assignments
+        self._assign_stack = []
+
+        # Tracks parameter definition blocks
+        self._param_def_block = []
+
+        # Tracks the current context.
+        self._context_reference_stack = ['context']
+
+    # -- Various compilation helpers
+
+    def fail(self, msg, lineno):
+        """Fail with a :exc:`TemplateAssertionError`."""
+        raise TemplateAssertionError(msg, lineno, self.name, self.filename)
+
+    def temporary_identifier(self):
+        """Get a new unique identifier."""
+        self._last_identifier += 1
+        return 't_%d' % self._last_identifier
+
+    def buffer(self, frame):
+        """Enable buffering for the frame from that point onwards."""
+        frame.buffer = self.temporary_identifier()
+        self.writeline('%s = []' % frame.buffer)
+
+    def return_buffer_contents(self, frame, force_unescaped=False):
+        """Return the buffer contents of the frame."""
+        if not force_unescaped:
+            if frame.eval_ctx.volatile:
+                self.writeline('if context.eval_ctx.autoescape:')
+                self.indent()
+                self.writeline('return Markup(concat(%s))' % frame.buffer)
+                self.outdent()
+                self.writeline('else:')
+                self.indent()
+                self.writeline('return concat(%s)' % frame.buffer)
+                self.outdent()
+                return
+            elif frame.eval_ctx.autoescape:
+                self.writeline('return Markup(concat(%s))' % frame.buffer)
+                return
+        self.writeline('return concat(%s)' % frame.buffer)
+
+    def indent(self):
+        """Indent by one."""
+        self._indentation += 1
+
+    def outdent(self, step=1):
+        """Outdent by step."""
+        self._indentation -= step
+
+    def start_write(self, frame, node=None):
+        """Yield or write into the frame buffer."""
+        if frame.buffer is None:
+            self.writeline('yield ', node)
+        else:
+            self.writeline('%s.append(' % frame.buffer, node)
+
+    def end_write(self, frame):
+        """End the writing process started by `start_write`."""
+        if frame.buffer is not None:
+            self.write(')')
+
+    def simple_write(self, s, frame, node=None):
+        """Simple shortcut for start_write + write + end_write."""
+        self.start_write(frame, node)
+        self.write(s)
+        self.end_write(frame)
+
+    def blockvisit(self, nodes, frame):
+        """Visit a list of nodes as block in a frame.  If the current frame
+        is no buffer a dummy ``if 0: yield None`` is written automatically.
+        """
+        try:
+            self.writeline('pass')
+            for node in nodes:
+                self.visit(node, frame)
+        except CompilerExit:
+            pass
+
+    def write(self, x):
+        """Write a string into the output stream."""
+        if self._new_lines:
+            if not self._first_write:
+                self.stream.write('\n' * self._new_lines)
+                self.code_lineno += self._new_lines
+                if self._write_debug_info is not None:
+                    self.debug_info.append((self._write_debug_info,
+                                            self.code_lineno))
+                    self._write_debug_info = None
+            self._first_write = False
+            self.stream.write('    ' * self._indentation)
+            self._new_lines = 0
+        self.stream.write(x)
+
+    def writeline(self, x, node=None, extra=0):
+        """Combination of newline and write."""
+        self.newline(node, extra)
+        self.write(x)
+
+    def newline(self, node=None, extra=0):
+        """Add one or more newlines before the next write."""
+        self._new_lines = max(self._new_lines, 1 + extra)
+        if node is not None and node.lineno != self._last_line:
+            self._write_debug_info = node.lineno
+            self._last_line = node.lineno
+
+    def signature(self, node, frame, extra_kwargs=None):
+        """Writes a function call to the stream for the current node.
+        A leading comma is added automatically.  The extra keyword
+        arguments may not include python keywords, otherwise a syntax
+        error could occur.  The extra keyword arguments should be given
+        as a python dict.
+        """
+        # if any of the given keyword arguments is a python keyword
+        # we have to make sure that no invalid call is created.
+        kwarg_workaround = False
+        for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
+            if is_python_keyword(kwarg):
+                kwarg_workaround = True
+                break
+
+        for arg in node.args:
+            self.write(', ')
+            self.visit(arg, frame)
+
+        if not kwarg_workaround:
+            for kwarg in node.kwargs:
+                self.write(', ')
+                self.visit(kwarg, frame)
+            if extra_kwargs is not None:
+                for key, value in iteritems(extra_kwargs):
+                    self.write(', %s=%s' % (key, value))
+        if node.dyn_args:
+            self.write(', *')
+            self.visit(node.dyn_args, frame)
+
+        if kwarg_workaround:
+            if node.dyn_kwargs is not None:
+                self.write(', **dict({')
+            else:
+                self.write(', **{')
+            for kwarg in node.kwargs:
+                self.write('%r: ' % kwarg.key)
+                self.visit(kwarg.value, frame)
+                self.write(', ')
+            if extra_kwargs is not None:
+                for key, value in iteritems(extra_kwargs):
+                    self.write('%r: %s, ' % (key, value))
+            if node.dyn_kwargs is not None:
+                self.write('}, **')
+                self.visit(node.dyn_kwargs, frame)
+                self.write(')')
+            else:
+                self.write('}')
+
+        elif node.dyn_kwargs is not None:
+            self.write(', **')
+            self.visit(node.dyn_kwargs, frame)
+
+    def pull_dependencies(self, nodes):
+        """Pull all the dependencies."""
+        visitor = DependencyFinderVisitor()
+        for node in nodes:
+            visitor.visit(node)
+        for dependency in 'filters', 'tests':
+            mapping = getattr(self, dependency)
+            for name in getattr(visitor, dependency):
+                if name not in mapping:
+                    mapping[name] = self.temporary_identifier()
+                self.writeline('%s = environment.%s[%r]' %
+                               (mapping[name], dependency, name))
+
+    def enter_frame(self, frame):
+        undefs = []
+        for target, (action, param) in iteritems(frame.symbols.loads):
+            if action == VAR_LOAD_PARAMETER:
+                pass
+            elif action == VAR_LOAD_RESOLVE:
+                self.writeline('%s = %s(%r)' %
+                               (target, self.get_resolve_func(), param))
+            elif action == VAR_LOAD_ALIAS:
+                self.writeline('%s = %s' % (target, param))
+            elif action == VAR_LOAD_UNDEFINED:
+                undefs.append(target)
+            else:
+                raise NotImplementedError('unknown load instruction')
+        if undefs:
+            self.writeline('%s = missing' % ' = '.join(undefs))
+
+    def leave_frame(self, frame, with_python_scope=False):
+        if not with_python_scope:
+            undefs = []
+            for target, _ in iteritems(frame.symbols.loads):
+                undefs.append(target)
+            if undefs:
+                self.writeline('%s = missing' % ' = '.join(undefs))
+
+    def func(self, name):
+        if self.environment.is_async:
+            return 'async def %s' % name
+        return 'def %s' % name
+
+    def macro_body(self, node, frame):
+        """Dump the function def of a macro or call block."""
+        frame = frame.inner()
+        frame.symbols.analyze_node(node)
+        macro_ref = MacroRef(node)
+
+        explicit_caller = None
+        skip_special_params = set()
+        args = []
+        for idx, arg in enumerate(node.args):
+            if arg.name == 'caller':
+                explicit_caller = idx
+            if arg.name in ('kwargs', 'varargs'):
+                skip_special_params.add(arg.name)
+            args.append(frame.symbols.ref(arg.name))
+
+        undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs'))
+
+        if 'caller' in undeclared:
+            # In older Jinja2 versions there was a bug that allowed caller
+            # to retain the special behavior even if it was mentioned in
+            # the argument list.  However thankfully this was only really
+            # working if it was the last argument.  So we are explicitly
+            # checking this now and erroring out if it is anywhere else in
+            # the argument list.
+            if explicit_caller is not None:
+                try:
+                    node.defaults[explicit_caller - len(node.args)]
+                except IndexError:
+                    self.fail('When defining macros or call blocks the '
+                              'special "caller" argument must be omitted '
+                              'or be given a default.', node.lineno)
+            else:
+                args.append(frame.symbols.declare_parameter('caller'))
+            macro_ref.accesses_caller = True
+        if 'kwargs' in undeclared and not 'kwargs' in skip_special_params:
+            args.append(frame.symbols.declare_parameter('kwargs'))
+            macro_ref.accesses_kwargs = True
+        if 'varargs' in undeclared and not 'varargs' in skip_special_params:
+            args.append(frame.symbols.declare_parameter('varargs'))
+            macro_ref.accesses_varargs = True
+
+        # macros are delayed, they never require output checks
+        frame.require_output_check = False
+        frame.symbols.analyze_node(node)
+        self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node)
+        self.indent()
+
+        self.buffer(frame)
+        self.enter_frame(frame)
+
+        self.push_parameter_definitions(frame)
+        for idx, arg in enumerate(node.args):
+            ref = frame.symbols.ref(arg.name)
+            self.writeline('if %s is missing:' % ref)
+            self.indent()
+            try:
+                default = node.defaults[idx - len(node.args)]
+            except IndexError:
+                self.writeline('%s = undefined(%r, name=%r)' % (
+                    ref,
+                    'parameter %r was not provided' % arg.name,
+                    arg.name))
+            else:
+                self.writeline('%s = ' % ref)
+                self.visit(default, frame)
+            self.mark_parameter_stored(ref)
+            self.outdent()
+        self.pop_parameter_definitions()
+
+        self.blockvisit(node.body, frame)
+        self.return_buffer_contents(frame, force_unescaped=True)
+        self.leave_frame(frame, with_python_scope=True)
+        self.outdent()
+
+        return frame, macro_ref
+
+    def macro_def(self, macro_ref, frame):
+        """Dump the macro definition for the def created by macro_body."""
+        arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args)
+        name = getattr(macro_ref.node, 'name', None)
+        if len(macro_ref.node.args) == 1:
+            arg_tuple += ','
+        self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, '
+                   'context.eval_ctx.autoescape)' %
+                   (name, arg_tuple, macro_ref.accesses_kwargs,
+                    macro_ref.accesses_varargs, macro_ref.accesses_caller))
+
+    def position(self, node):
+        """Return a human readable position for the node."""
+        rv = 'line %d' % node.lineno
+        if self.name is not None:
+            rv += ' in ' + repr(self.name)
+        return rv
+
+    def dump_local_context(self, frame):
+        return '{%s}' % ', '.join(
+            '%r: %s' % (name, target) for name, target
+            in iteritems(frame.symbols.dump_stores()))
+
+    def write_commons(self):
+        """Writes a common preamble that is used by root and block functions.
+        Primarily this sets up common local helpers and enforces a generator
+        through a dead branch.
+        """
+        self.writeline('resolve = context.resolve_or_missing')
+        self.writeline('undefined = environment.undefined')
+        self.writeline('if 0: yield None')
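+        # the dead ``if 0: yield None`` branch forces the emitted function
+        # to be a generator even when the template body itself never yields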
+
+    def push_parameter_definitions(self, frame):
+        """Pushes all parameter targets from the given frame into a local
+        stack that permits tracking of yet to be assigned parameters.  In
+        particular this enables the optimization from `visit_Name` to skip
+        undefined expressions for parameters in macros as macros can reference
+        otherwise unbound parameters.
+        """
+        self._param_def_block.append(frame.symbols.dump_param_targets())
+
+    def pop_parameter_definitions(self):
+        """Pops the current parameter definitions set."""
+        self._param_def_block.pop()
+
+    def mark_parameter_stored(self, target):
+        """Marks a parameter in the current parameter definitions as stored.
+        This will skip the enforced undefined checks.
+        """
+        if self._param_def_block:
+            self._param_def_block[-1].discard(target)
+
+    def push_context_reference(self, target):
+        self._context_reference_stack.append(target)
+
+    def pop_context_reference(self):
+        self._context_reference_stack.pop()
+
+    def get_context_ref(self):
+        return self._context_reference_stack[-1]
+
+    def get_resolve_func(self):
+        target = self._context_reference_stack[-1]
+        if target == 'context':
+            return 'resolve'
+        return '%s.resolve' % target
+
+    def derive_context(self, frame):
+        return '%s.derived(%s)' % (
+            self.get_context_ref(),
+            self.dump_local_context(frame),
+        )
+
+    def parameter_is_undeclared(self, target):
+        """Checks if a given target is an undeclared parameter."""
+        if not self._param_def_block:
+            return False
+        return target in self._param_def_block[-1]
+
+    def push_assign_tracking(self):
+        """Pushes a new layer for assignment tracking."""
+        self._assign_stack.append(set())
+
+    def pop_assign_tracking(self, frame):
+        """Pops the topmost level for assignment tracking and updates the
+        context variables if necessary.
+        """
+        vars = self._assign_stack.pop()
+        if not frame.toplevel or not vars:
+            return
+        public_names = [x for x in vars if x[:1] != '_']
+        if len(vars) == 1:
+            name = next(iter(vars))
+            ref = frame.symbols.ref(name)
+            self.writeline('context.vars[%r] = %s' % (name, ref))
+        else:
+            self.writeline('context.vars.update({')
+            for idx, name in enumerate(vars):
+                if idx:
+                    self.write(', ')
+                ref = frame.symbols.ref(name)
+                self.write('%r: %s' % (name, ref))
+            self.write('})')
+        if public_names:
+            if len(public_names) == 1:
+                self.writeline('context.exported_vars.add(%r)' %
+                               public_names[0])
+            else:
+                self.writeline('context.exported_vars.update((%s))' %
+                               ', '.join(imap(repr, public_names)))
+
+    # -- Statement Visitors
+
+    def visit_Template(self, node, frame=None):
+        assert frame is None, 'no root frame allowed'
+        eval_ctx = EvalContext(self.environment, self.name)
+
+        from jinja2.runtime import __all__ as exported
+        self.writeline('from __future__ import %s' % ', '.join(code_features))
+        self.writeline('from jinja2.runtime import ' + ', '.join(exported))
+
+        if self.environment.is_async:
+            self.writeline('from jinja2.asyncsupport import auto_await, '
+                           'auto_aiter, make_async_loop_context')
+
+        # if we want a deferred initialization we cannot move the
+        # environment into a local name
+        envenv = not self.defer_init and ', environment=environment' or ''
+
+        # do we have an extends tag at all?  If not, we can save some
+        # overhead by just not processing any inheritance code.
+        have_extends = node.find(nodes.Extends) is not None
+
+        # find all blocks
+        for block in node.find_all(nodes.Block):
+            if block.name in self.blocks:
+                self.fail('block %r defined twice' % block.name, block.lineno)
+            self.blocks[block.name] = block
+
+        # find all imports and import them
+        for import_ in node.find_all(nodes.ImportedName):
+            if import_.importname not in self.import_aliases:
+                imp = import_.importname
+                self.import_aliases[imp] = alias = self.temporary_identifier()
+                if '.' in imp:
+                    module, obj = imp.rsplit('.', 1)
+                    self.writeline('from %s import %s as %s' %
+                                   (module, obj, alias))
+                else:
+                    self.writeline('import %s as %s' % (imp, alias))
+
+        # add the load name
+        self.writeline('name = %r' % self.name)
+
+        # generate the root render function.
+        self.writeline('%s(context, missing=missing%s):' %
+                       (self.func('root'), envenv), extra=1)
+        self.indent()
+        self.write_commons()
+
+        # process the root
+        frame = Frame(eval_ctx)
+        if 'self' in find_undeclared(node.body, ('self',)):
+            ref = frame.symbols.declare_parameter('self')
+            self.writeline('%s = TemplateReference(context)' % ref)
+        frame.symbols.analyze_node(node)
+        frame.toplevel = frame.rootlevel = True
+        frame.require_output_check = have_extends and not self.has_known_extends
+        if have_extends:
+            self.writeline('parent_template = None')
+        self.enter_frame(frame)
+        self.pull_dependencies(node.body)
+        self.blockvisit(node.body, frame)
+        self.leave_frame(frame, with_python_scope=True)
+        self.outdent()
+
+        # make sure that the parent root is called.
+        if have_extends:
+            if not self.has_known_extends:
+                self.indent()
+                self.writeline('if parent_template is not None:')
+            self.indent()
+            if supports_yield_from and not self.environment.is_async:
+                self.writeline('yield from parent_template.'
+                               'root_render_func(context)')
+            else:
+                self.writeline('%sfor event in parent_template.'
+                               'root_render_func(context):' %
+                               (self.environment.is_async and 'async ' or ''))
+                self.indent()
+                self.writeline('yield event')
+                self.outdent()
+            self.outdent(1 + (not self.has_known_extends))
+
+        # at this point we now have the blocks collected and can visit them too.
+        for name, block in iteritems(self.blocks):
+            self.writeline('%s(context, missing=missing%s):' %
+                           (self.func('block_' + name), envenv),
+                           block, 1)
+            self.indent()
+            self.write_commons()
+            # It's important that we do not make this frame a child of the
+            # toplevel template.  This would cause a variety of
+            # interesting issues with identifier tracking.
+            block_frame = Frame(eval_ctx)
+            undeclared = find_undeclared(block.body, ('self', 'super'))
+            if 'self' in undeclared:
+                ref = block_frame.symbols.declare_parameter('self')
+                self.writeline('%s = TemplateReference(context)' % ref)
+            if 'super' in undeclared:
+                ref = block_frame.symbols.declare_parameter('super')
+                self.writeline('%s = context.super(%r, '
+                               'block_%s)' % (ref, name, name))
+            block_frame.symbols.analyze_node(block)
+            block_frame.block = name
+            self.enter_frame(block_frame)
+            self.pull_dependencies(block.body)
+            self.blockvisit(block.body, block_frame)
+            self.leave_frame(block_frame, with_python_scope=True)
+            self.outdent()
+
+        self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
+                                                   for x in self.blocks),
+                       extra=1)
+
+        # add a function that returns the debug info
+        self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
+                                                    in self.debug_info))
+
+    def visit_Block(self, node, frame):
+        """Call a block and register it for the template."""
+        level = 0
+        if frame.toplevel:
+            # if we know that we are a child template, there is no need to
+            # check if we are one
+            if self.has_known_extends:
+                return
+            if self.extends_so_far > 0:
+                self.writeline('if parent_template is None:')
+                self.indent()
+                level += 1
+
+        if node.scoped:
+            context = self.derive_context(frame)
+        else:
+            context = self.get_context_ref()
+
+        if supports_yield_from and not self.environment.is_async and \
+           frame.buffer is None:
+            self.writeline('yield from context.blocks[%r][0](%s)' % (
+                           node.name, context), node)
+        else:
+            loop = self.environment.is_async and 'async for' or 'for'
+            self.writeline('%s event in context.blocks[%r][0](%s):' % (
+                           loop, node.name, context), node)
+            self.indent()
+            self.simple_write('event', frame)
+            self.outdent()
+
+        self.outdent(level)
+
+    def visit_Extends(self, node, frame):
+        """Calls the extender."""
+        if not frame.toplevel:
+            self.fail('cannot use extend from a non top-level scope',
+                      node.lineno)
+
+        # if no extends statement has been seen so far, we don't have to
+        # add a check whether something extended the template before this
+        # one.
+        if self.extends_so_far > 0:
+
+            # if we have a known extends we just add a template runtime
+            # error into the generated code.  We could catch that at compile
+            # time too, but we avoid that so as not to confuse users by
+            # throwing the same error at different times just "because we can".
+            if not self.has_known_extends:
+                self.writeline('if parent_template is not None:')
+                self.indent()
+            self.writeline('raise TemplateRuntimeError(%r)' %
+                           'extended multiple times')
+
+            # if we have a known extends already we don't need that code here
+            # as we know that the template execution will end here.
+            if self.has_known_extends:
+                raise CompilerExit()
+            else:
+                self.outdent()
+
+        self.writeline('parent_template = environment.get_template(', node)
+        self.visit(node.template, frame)
+        self.write(', %r)' % self.name)
+        self.writeline('for name, parent_block in parent_template.'
+                       'blocks.%s():' % dict_item_iter)
+        self.indent()
+        self.writeline('context.blocks.setdefault(name, []).'
+                       'append(parent_block)')
+        self.outdent()
+
+        # if this extends statement was in the root level we can take
+        # advantage of that information and simplify the generated code
+        # in the top level from this point onwards
+        if frame.rootlevel:
+            self.has_known_extends = True
+
+        # and now we have one more
+        self.extends_so_far += 1
+
+    def visit_Include(self, node, frame):
+        """Handles includes."""
+        if node.ignore_missing:
+            self.writeline('try:')
+            self.indent()
+
+        func_name = 'get_or_select_template'
+        if isinstance(node.template, nodes.Const):
+            if isinstance(node.template.value, string_types):
+                func_name = 'get_template'
+            elif isinstance(node.template.value, (tuple, list)):
+                func_name = 'select_template'
+        elif isinstance(node.template, (nodes.Tuple, nodes.List)):
+            func_name = 'select_template'
+
+        self.writeline('template = environment.%s(' % func_name, node)
+        self.visit(node.template, frame)
+        self.write(', %r)' % self.name)
+        if node.ignore_missing:
+            self.outdent()
+            self.writeline('except TemplateNotFound:')
+            self.indent()
+            self.writeline('pass')
+            self.outdent()
+            self.writeline('else:')
+            self.indent()
+
+        skip_event_yield = False
+        if node.with_context:
+            loop = self.environment.is_async and 'async for' or 'for'
+            self.writeline('%s event in template.root_render_func('
+                           'template.new_context(context.get_all(), True, '
+                           '%s)):' % (loop, self.dump_local_context(frame)))
+        elif self.environment.is_async:
+            self.writeline('for event in (await '
+                           'template._get_default_module_async())'
+                           '._body_stream:')
+        else:
+            if supports_yield_from:
+                self.writeline('yield from template._get_default_module()'
+                               '._body_stream')
+                skip_event_yield = True
+            else:
+                self.writeline('for event in template._get_default_module()'
+                               '._body_stream:')
+
+        if not skip_event_yield:
+            self.indent()
+            self.simple_write('event', frame)
+            self.outdent()
+
+        if node.ignore_missing:
+            self.outdent()
+
+    def visit_Import(self, node, frame):
+        """Visit regular imports."""
+        self.writeline('%s = ' % frame.symbols.ref(node.target), node)
+        if frame.toplevel:
+            self.write('context.vars[%r] = ' % node.target)
+        if self.environment.is_async:
+            self.write('await ')
+        self.write('environment.get_template(')
+        self.visit(node.template, frame)
+        self.write(', %r).' % self.name)
+        if node.with_context:
+            self.write('make_module%s(context.get_all(), True, %s)'
+                       % (self.environment.is_async and '_async' or '',
+                          self.dump_local_context(frame)))
+        elif self.environment.is_async:
+            self.write('_get_default_module_async()')
+        else:
+            self.write('_get_default_module()')
+        if frame.toplevel and not node.target.startswith('_'):
+            self.writeline('context.exported_vars.discard(%r)' % node.target)
+
+    def visit_FromImport(self, node, frame):
+        """Visit named imports."""
+        self.newline(node)
+        self.write('included_template = %senvironment.get_template('
+                   % (self.environment.is_async and 'await ' or ''))
+        self.visit(node.template, frame)
+        self.write(', %r).' % self.name)
+        if node.with_context:
+            self.write('make_module%s(context.get_all(), True, %s)'
+                       % (self.environment.is_async and '_async' or '',
+                          self.dump_local_context(frame)))
+        elif self.environment.is_async:
+            self.write('_get_default_module_async()')
+        else:
+            self.write('_get_default_module()')
+
+        var_names = []
+        discarded_names = []
+        for name in node.names:
+            if isinstance(name, tuple):
+                name, alias = name
+            else:
+                alias = name
+            self.writeline('%s = getattr(included_template, '
+                           '%r, missing)' % (frame.symbols.ref(alias), name))
+            self.writeline('if %s is missing:' % frame.symbols.ref(alias))
+            self.indent()
+            self.writeline('%s = undefined(%r %% '
+                           'included_template.__name__, '
+                           'name=%r)' %
+                           (frame.symbols.ref(alias),
+                            'the template %%r (imported on %s) does '
+                            'not export the requested name %s' % (
+                                self.position(node),
+                                repr(name)
+                           ), name))
+            self.outdent()
+            if frame.toplevel:
+                var_names.append(alias)
+                if not alias.startswith('_'):
+                    discarded_names.append(alias)
+
+        if var_names:
+            if len(var_names) == 1:
+                name = var_names[0]
+                self.writeline('context.vars[%r] = %s' %
+                               (name, frame.symbols.ref(name)))
+            else:
+                self.writeline('context.vars.update({%s})' % ', '.join(
+                    '%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
+                ))
+        if discarded_names:
+            if len(discarded_names) == 1:
+                self.writeline('context.exported_vars.discard(%r)' %
+                               discarded_names[0])
+            else:
+                self.writeline('context.exported_vars.difference_'
+                               'update((%s))' % ', '.join(imap(repr, discarded_names)))
+
+    def visit_For(self, node, frame):
+        loop_frame = frame.inner()
+        test_frame = frame.inner()
+        else_frame = frame.inner()
+
+        # try to figure out if we have an extended loop.  An extended loop
+        # is necessary if the loop is in recursive mode or if the special loop
+        # variable is accessed in the body.
+        extended_loop = node.recursive or 'loop' in \
+                        find_undeclared(node.iter_child_nodes(
+                            only=('body',)), ('loop',))
+
+        loop_ref = None
+        if extended_loop:
+            loop_ref = loop_frame.symbols.declare_parameter('loop')
+
+        loop_frame.symbols.analyze_node(node, for_branch='body')
+        if node.else_:
+            else_frame.symbols.analyze_node(node, for_branch='else')
+
+        if node.test:
+            loop_filter_func = self.temporary_identifier()
+            test_frame.symbols.analyze_node(node, for_branch='test')
+            self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test)
+            self.indent()
+            self.enter_frame(test_frame)
+            self.writeline(self.environment.is_async and 'async for ' or 'for ')
+            self.visit(node.target, loop_frame)
+            self.write(' in ')
+            self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter')
+            self.write(':')
+            self.indent()
+            self.writeline('if ', node.test)
+            self.visit(node.test, test_frame)
+            self.write(':')
+            self.indent()
+            self.writeline('yield ')
+            self.visit(node.target, loop_frame)
+            self.outdent(3)
+            self.leave_frame(test_frame, with_python_scope=True)
+
+        # if we don't have a recursive loop we have to find the shadowed
+        # variables at that point.  Because loops can be nested but the loop
+        # variable is a special one we have to enforce aliasing for it.
+        if node.recursive:
+            self.writeline('%s(reciter, loop_render_func, depth=0):' %
+                           self.func('loop'), node)
+            self.indent()
+            self.buffer(loop_frame)
+
+            # Use the same buffer for the else frame
+            else_frame.buffer = loop_frame.buffer
+
+        # make sure the loop variable is a special one and raise a template
+        # assertion error if a loop tries to write to loop
+        if extended_loop:
+            self.writeline('%s = missing' % loop_ref)
+
+        for name in node.find_all(nodes.Name):
+            if name.ctx == 'store' and name.name == 'loop':
+                self.fail('Can\'t assign to special loop variable '
+                          'in for-loop target', name.lineno)
+
+        if node.else_:
+            iteration_indicator = self.temporary_identifier()
+            self.writeline('%s = 1' % iteration_indicator)
+
+        self.writeline(self.environment.is_async and 'async for ' or 'for ', node)
+        self.visit(node.target, loop_frame)
+        if extended_loop:
+            if self.environment.is_async:
+                self.write(', %s in await make_async_loop_context(' % loop_ref)
+            else:
+                self.write(', %s in LoopContext(' % loop_ref)
+        else:
+            self.write(' in ')
+
+        if node.test:
+            self.write('%s(' % loop_filter_func)
+        if node.recursive:
+            self.write('reciter')
+        else:
+            if self.environment.is_async and not extended_loop:
+                self.write('auto_aiter(')
+            self.visit(node.iter, frame)
+            if self.environment.is_async and not extended_loop:
+                self.write(')')
+        if node.test:
+            self.write(')')
+
+        if node.recursive:
+            self.write(', undefined, loop_render_func, depth):')
+        else:
+            self.write(extended_loop and ', undefined):' or ':')
+
+        self.indent()
+        self.enter_frame(loop_frame)
+
+        self.blockvisit(node.body, loop_frame)
+        if node.else_:
+            self.writeline('%s = 0' % iteration_indicator)
+        self.outdent()
+        self.leave_frame(loop_frame, with_python_scope=node.recursive
+                         and not node.else_)
+
+        if node.else_:
+            self.writeline('if %s:' % iteration_indicator)
+            self.indent()
+            self.enter_frame(else_frame)
+            self.blockvisit(node.else_, else_frame)
+            self.leave_frame(else_frame)
+            self.outdent()
+
+        # if the node was recursive we have to return the buffer contents
+        # and start the iteration code
+        if node.recursive:
+            self.return_buffer_contents(loop_frame)
+            self.outdent()
+            self.start_write(frame, node)
+            if self.environment.is_async:
+                self.write('await ')
+            self.write('loop(')
+            if self.environment.is_async:
+                self.write('auto_aiter(')
+            self.visit(node.iter, frame)
+            if self.environment.is_async:
+                self.write(')')
+            self.write(', loop)')
+            self.end_write(frame)
+
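+    # For orientation, a loop like ``{% for item in seq %}x{% endfor %}``
+    # compiles to roughly the following (a sketch; the actual names are the
+    # depth-scoped refs this class generates, e.g. ``l_1_item``):
+    #
+    #     for l_1_item in l_0_seq:
+    #         yield 'x'
+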
+    def visit_If(self, node, frame):
+        if_frame = frame.soft()
+        self.writeline('if ', node)
+        self.visit(node.test, if_frame)
+        self.write(':')
+        self.indent()
+        self.blockvisit(node.body, if_frame)
+        self.outdent()
+        for elif_ in node.elif_:
+            self.writeline('elif ', elif_)
+            self.visit(elif_.test, if_frame)
+            self.write(':')
+            self.indent()
+            self.blockvisit(elif_.body, if_frame)
+            self.outdent()
+        if node.else_:
+            self.writeline('else:')
+            self.indent()
+            self.blockvisit(node.else_, if_frame)
+            self.outdent()
+
+    def visit_Macro(self, node, frame):
+        macro_frame, macro_ref = self.macro_body(node, frame)
+        self.newline()
+        if frame.toplevel:
+            if not node.name.startswith('_'):
+                self.write('context.exported_vars.add(%r)' % node.name)
+            self.writeline('context.vars[%r] = ' % node.name)
+        self.write('%s = ' % frame.symbols.ref(node.name))
+        self.macro_def(macro_ref, macro_frame)
+
+    def visit_CallBlock(self, node, frame):
+        call_frame, macro_ref = self.macro_body(node, frame)
+        self.writeline('caller = ')
+        self.macro_def(macro_ref, call_frame)
+        self.start_write(frame, node)
+        self.visit_Call(node.call, frame, forward_caller=True)
+        self.end_write(frame)
+
+    def visit_FilterBlock(self, node, frame):
+        filter_frame = frame.inner()
+        filter_frame.symbols.analyze_node(node)
+        self.enter_frame(filter_frame)
+        self.buffer(filter_frame)
+        self.blockvisit(node.body, filter_frame)
+        self.start_write(frame, node)
+        self.visit_Filter(node.filter, filter_frame)
+        self.end_write(frame)
+        self.leave_frame(filter_frame)
+
+    def visit_With(self, node, frame):
+        with_frame = frame.inner()
+        with_frame.symbols.analyze_node(node)
+        self.enter_frame(with_frame)
+        for idx, (target, expr) in enumerate(izip(node.targets, node.values)):
+            self.newline()
+            self.visit(target, with_frame)
+            self.write(' = ')
+            self.visit(expr, frame)
+        self.blockvisit(node.body, with_frame)
+        self.leave_frame(with_frame)
+
+    def visit_ExprStmt(self, node, frame):
+        self.newline(node)
+        self.visit(node.node, frame)
+
+    def visit_Output(self, node, frame):
+        # if we have a known extends statement, we don't output anything
+        # if we are in a require_output_check section
+        if self.has_known_extends and frame.require_output_check:
+            return
+
+        allow_constant_finalize = True
+        if self.environment.finalize:
+            func = self.environment.finalize
+            if getattr(func, 'contextfunction', False) or \
+               getattr(func, 'evalcontextfunction', False):
+                allow_constant_finalize = False
+            elif getattr(func, 'environmentfunction', False):
+                finalize = lambda x: text_type(
+                    self.environment.finalize(self.environment, x))
+            else:
+                finalize = lambda x: text_type(self.environment.finalize(x))
+        else:
+            finalize = text_type
+
+        # if we are inside a frame that requires output checking, we do so
+        outdent_later = False
+        if frame.require_output_check:
+            self.writeline('if parent_template is None:')
+            self.indent()
+            outdent_later = True
+
+        # try to evaluate as many chunks as possible into a static
+        # string at compile time.
+        body = []
+        for child in node.nodes:
+            try:
+                if not allow_constant_finalize:
+                    raise nodes.Impossible()
+                const = child.as_const(frame.eval_ctx)
+            except nodes.Impossible:
+                body.append(child)
+                continue
+            # the frame can't be volatile here, because otherwise the
+            # as_const() function would raise an Impossible exception
+            # at that point.
+            try:
+                if frame.eval_ctx.autoescape:
+                    if hasattr(const, '__html__'):
+                        const = const.__html__()
+                    else:
+                        const = escape(const)
+                const = finalize(const)
+            except Exception:
+                # if something goes wrong here we evaluate the node
+                # at runtime for easier debugging
+                body.append(child)
+                continue
+            if body and isinstance(body[-1], list):
+                body[-1].append(const)
+            else:
+                body.append([const])
+
+        # if we have fewer than 3 nodes or a buffer we yield or extend/append
+        if len(body) < 3 or frame.buffer is not None:
+            if frame.buffer is not None:
+                # for one item we append, for more we extend
+                if len(body) == 1:
+                    self.writeline('%s.append(' % frame.buffer)
+                else:
+                    self.writeline('%s.extend((' % frame.buffer)
+                self.indent()
+            for item in body:
+                if isinstance(item, list):
+                    val = repr(concat(item))
+                    if frame.buffer is None:
+                        self.writeline('yield ' + val)
+                    else:
+                        self.writeline(val + ',')
+                else:
+                    if frame.buffer is None:
+                        self.writeline('yield ', item)
+                    else:
+                        self.newline(item)
+                    close = 1
+                    if frame.eval_ctx.volatile:
+                        self.write('(escape if context.eval_ctx.autoescape'
+                                   ' else to_string)(')
+                    elif frame.eval_ctx.autoescape:
+                        self.write('escape(')
+                    else:
+                        self.write('to_string(')
+                    if self.environment.finalize is not None:
+                        self.write('environment.finalize(')
+                        if getattr(self.environment.finalize,
+                                   "contextfunction", False):
+                            self.write('context, ')
+                        close += 1
+                    self.visit(item, frame)
+                    self.write(')' * close)
+                    if frame.buffer is not None:
+                        self.write(',')
+            if frame.buffer is not None:
+                # close the open parentheses
+                self.outdent()
+                self.writeline(len(body) == 1 and ')' or '))')
+
+        # otherwise we create a format string as this is faster in that case
+        else:
+            format = []
+            arguments = []
+            for item in body:
+                if isinstance(item, list):
+                    format.append(concat(item).replace('%', '%%'))
+                else:
+                    format.append('%s')
+                    arguments.append(item)
+            self.writeline('yield ')
+            self.write(repr(concat(format)) + ' % (')
+            self.indent()
+            for argument in arguments:
+                self.newline(argument)
+                close = 0
+                if frame.eval_ctx.volatile:
+                    self.write('(escape if context.eval_ctx.autoescape else'
+                               ' to_string)(')
+                    close += 1
+                elif frame.eval_ctx.autoescape:
+                    self.write('escape(')
+                    close += 1
+                if self.environment.finalize is not None:
+                    self.write('environment.finalize(')
+                    if getattr(self.environment.finalize,
+                               'contextfunction', False):
+                        self.write('context, ')
+                    elif getattr(self.environment.finalize,
+                               'evalcontextfunction', False):
+                        self.write('context.eval_ctx, ')
+                    elif getattr(self.environment.finalize,
+                               'environmentfunction', False):
+                        self.write('environment, ')
+                    close += 1
+                self.visit(argument, frame)
+                self.write(')' * close + ', ')
+            self.outdent()
+            self.writeline(')')
+
+        if outdent_later:
+            self.outdent()
+
+    def visit_Assign(self, node, frame):
+        self.push_assign_tracking()
+        self.newline(node)
+        self.visit(node.target, frame)
+        self.write(' = ')
+        self.visit(node.node, frame)
+        self.pop_assign_tracking(frame)
+
+    def visit_AssignBlock(self, node, frame):
+        self.push_assign_tracking()
+        block_frame = frame.inner()
+        # This is a special case.  Since a set block always captures we
+        # will disable output checks.  This way one can use set blocks
+        # toplevel even in extended templates.
+        block_frame.require_output_check = False
+        block_frame.symbols.analyze_node(node)
+        self.enter_frame(block_frame)
+        self.buffer(block_frame)
+        self.blockvisit(node.body, block_frame)
+        self.newline(node)
+        self.visit(node.target, frame)
+        self.write(' = (Markup if context.eval_ctx.autoescape '
+                   'else identity)(')
+        if node.filter is not None:
+            self.visit_Filter(node.filter, block_frame)
+        else:
+            self.write('concat(%s)' % block_frame.buffer)
+        self.write(')')
+        self.pop_assign_tracking(frame)
+        self.leave_frame(block_frame)
+
+    # -- Expression Visitors
+
+    def visit_Name(self, node, frame):
+        if node.ctx == 'store' and frame.toplevel:
+            if self._assign_stack:
+                self._assign_stack[-1].add(node.name)
+        ref = frame.symbols.ref(node.name)
+
+        # If we are looking up a variable we might have to deal with the
+        # case where it's undefined.  We can skip that case if the load
+        # instruction indicates a parameter which are always defined.
+        if node.ctx == 'load':
+            load = frame.symbols.find_load(ref)
+            if not (load is not None and load[0] == VAR_LOAD_PARAMETER and
+                    not self.parameter_is_undeclared(ref)):
+                self.write('(undefined(name=%r) if %s is missing else %s)' %
+                           (node.name, ref, ref))
+                return
+
+        self.write(ref)
+
+    def visit_NSRef(self, node, frame):
+        # NSRefs can only be used to store values; since they use the normal
+        # `foo.bar` notation they will be parsed as a normal attribute access
+        # when used anywhere but in a `set` context
+        ref = frame.symbols.ref(node.name)
+        self.writeline('if not isinstance(%s, Namespace):' % ref)
+        self.indent()
+        self.writeline('raise TemplateRuntimeError(%r)' %
+                       'cannot assign attribute on non-namespace object')
+        self.outdent()
+        self.writeline('%s[%r]' % (ref, node.attr))
+
+    def visit_Const(self, node, frame):
+        val = node.as_const(frame.eval_ctx)
+        if isinstance(val, float):
+            self.write(str(val))
+        else:
+            self.write(repr(val))
+
+    def visit_TemplateData(self, node, frame):
+        try:
+            self.write(repr(node.as_const(frame.eval_ctx)))
+        except nodes.Impossible:
+            self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)'
+                       % node.data)
+
+    def visit_Tuple(self, node, frame):
+        self.write('(')
+        idx = -1
+        for idx, item in enumerate(node.items):
+            if idx:
+                self.write(', ')
+            self.visit(item, frame)
+        self.write(idx == 0 and ',)' or ')')
+
+    def visit_List(self, node, frame):
+        self.write('[')
+        for idx, item in enumerate(node.items):
+            if idx:
+                self.write(', ')
+            self.visit(item, frame)
+        self.write(']')
+
+    def visit_Dict(self, node, frame):
+        self.write('{')
+        for idx, item in enumerate(node.items):
+            if idx:
+                self.write(', ')
+            self.visit(item.key, frame)
+            self.write(': ')
+            self.visit(item.value, frame)
+        self.write('}')
+
+    def binop(operator, interceptable=True):
+        @optimizeconst
+        def visitor(self, node, frame):
+            if self.environment.sandboxed and \
+               operator in self.environment.intercepted_binops:
+                self.write('environment.call_binop(context, %r, ' % operator)
+                self.visit(node.left, frame)
+                self.write(', ')
+                self.visit(node.right, frame)
+            else:
+                self.write('(')
+                self.visit(node.left, frame)
+                self.write(' %s ' % operator)
+                self.visit(node.right, frame)
+            self.write(')')
+        return visitor
+
+    def uaop(operator, interceptable=True):
+        @optimizeconst
+        def visitor(self, node, frame):
+            if self.environment.sandboxed and \
+               operator in self.environment.intercepted_unops:
+                self.write('environment.call_unop(context, %r, ' % operator)
+                self.visit(node.node, frame)
+            else:
+                self.write('(' + operator)
+                self.visit(node.node, frame)
+            self.write(')')
+        return visitor
+
+    visit_Add = binop('+')
+    visit_Sub = binop('-')
+    visit_Mul = binop('*')
+    visit_Div = binop('/')
+    visit_FloorDiv = binop('//')
+    visit_Pow = binop('**')
+    visit_Mod = binop('%')
+    visit_And = binop('and', interceptable=False)
+    visit_Or = binop('or', interceptable=False)
+    visit_Pos = uaop('+')
+    visit_Neg = uaop('-')
+    visit_Not = uaop('not ', interceptable=False)
+    del binop, uaop
+
+    @optimizeconst
+    def visit_Concat(self, node, frame):
+        if frame.eval_ctx.volatile:
+            func_name = '(context.eval_ctx.volatile and' \
+                        ' markup_join or unicode_join)'
+        elif frame.eval_ctx.autoescape:
+            func_name = 'markup_join'
+        else:
+            func_name = 'unicode_join'
+        self.write('%s((' % func_name)
+        for arg in node.nodes:
+            self.visit(arg, frame)
+            self.write(', ')
+        self.write('))')
+
+    @optimizeconst
+    def visit_Compare(self, node, frame):
+        self.visit(node.expr, frame)
+        for op in node.ops:
+            self.visit(op, frame)
+
+    def visit_Operand(self, node, frame):
+        self.write(' %s ' % operators[node.op])
+        self.visit(node.expr, frame)
+
+    @optimizeconst
+    def visit_Getattr(self, node, frame):
+        self.write('environment.getattr(')
+        self.visit(node.node, frame)
+        self.write(', %r)' % node.attr)
+
+    @optimizeconst
+    def visit_Getitem(self, node, frame):
+        # slices bypass the environment getitem method.
+        if isinstance(node.arg, nodes.Slice):
+            self.visit(node.node, frame)
+            self.write('[')
+            self.visit(node.arg, frame)
+            self.write(']')
+        else:
+            self.write('environment.getitem(')
+            self.visit(node.node, frame)
+            self.write(', ')
+            self.visit(node.arg, frame)
+            self.write(')')
+
+    def visit_Slice(self, node, frame):
+        if node.start is not None:
+            self.visit(node.start, frame)
+        self.write(':')
+        if node.stop is not None:
+            self.visit(node.stop, frame)
+        if node.step is not None:
+            self.write(':')
+            self.visit(node.step, frame)
+
+    @optimizeconst
+    def visit_Filter(self, node, frame):
+        if self.environment.is_async:
+            self.write('await auto_await(')
+        self.write(self.filters[node.name] + '(')
+        func = self.environment.filters.get(node.name)
+        if func is None:
+            self.fail('no filter named %r' % node.name, node.lineno)
+        if getattr(func, 'contextfilter', False):
+            self.write('context, ')
+        elif getattr(func, 'evalcontextfilter', False):
+            self.write('context.eval_ctx, ')
+        elif getattr(func, 'environmentfilter', False):
+            self.write('environment, ')
+
+        # if the filter node is None we are inside a filter block
+        # and want to write to the current buffer
+        if node.node is not None:
+            self.visit(node.node, frame)
+        elif frame.eval_ctx.volatile:
+            self.write('(context.eval_ctx.autoescape and'
+                       ' Markup(concat(%s)) or concat(%s))' %
+                       (frame.buffer, frame.buffer))
+        elif frame.eval_ctx.autoescape:
+            self.write('Markup(concat(%s))' % frame.buffer)
+        else:
+            self.write('concat(%s)' % frame.buffer)
+        self.signature(node, frame)
+        self.write(')')
+        if self.environment.is_async:
+            self.write(')')
+
+    @optimizeconst
+    def visit_Test(self, node, frame):
+        self.write(self.tests[node.name] + '(')
+        if node.name not in self.environment.tests:
+            self.fail('no test named %r' % node.name, node.lineno)
+        self.visit(node.node, frame)
+        self.signature(node, frame)
+        self.write(')')
+
+    @optimizeconst
+    def visit_CondExpr(self, node, frame):
+        def write_expr2():
+            if node.expr2 is not None:
+                return self.visit(node.expr2, frame)
+            self.write('undefined(%r)' % ('the inline if-'
+                       'expression on %s evaluated to false and '
+                       'no else section was defined.' % self.position(node)))
+
+        self.write('(')
+        self.visit(node.expr1, frame)
+        self.write(' if ')
+        self.visit(node.test, frame)
+        self.write(' else ')
+        write_expr2()
+        self.write(')')
+
+    @optimizeconst
+    def visit_Call(self, node, frame, forward_caller=False):
+        if self.environment.is_async:
+            self.write('await auto_await(')
+        if self.environment.sandboxed:
+            self.write('environment.call(context, ')
+        else:
+            self.write('context.call(')
+        self.visit(node.node, frame)
+        extra_kwargs = forward_caller and {'caller': 'caller'} or None
+        self.signature(node, frame, extra_kwargs)
+        self.write(')')
+        if self.environment.is_async:
+            self.write(')')
+
+    def visit_Keyword(self, node, frame):
+        self.write(node.key + '=')
+        self.visit(node.value, frame)
+
+    # -- Unused nodes for extensions
+
+    def visit_MarkSafe(self, node, frame):
+        self.write('Markup(')
+        self.visit(node.expr, frame)
+        self.write(')')
+
+    def visit_MarkSafeIfAutoescape(self, node, frame):
+        self.write('(context.eval_ctx.autoescape and Markup or identity)(')
+        self.visit(node.expr, frame)
+        self.write(')')
+
+    def visit_EnvironmentAttribute(self, node, frame):
+        self.write('environment.' + node.name)
+
+    def visit_ExtensionAttribute(self, node, frame):
+        self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
+
+    def visit_ImportedName(self, node, frame):
+        self.write(self.import_aliases[node.importname])
+
+    def visit_InternalName(self, node, frame):
+        self.write(node.name)
+
+    def visit_ContextReference(self, node, frame):
+        self.write('context')
+
+    def visit_Continue(self, node, frame):
+        self.writeline('continue', node)
+
+    def visit_Break(self, node, frame):
+        self.writeline('break', node)
+
+    def visit_Scope(self, node, frame):
+        scope_frame = frame.inner()
+        scope_frame.symbols.analyze_node(node)
+        self.enter_frame(scope_frame)
+        self.blockvisit(node.body, scope_frame)
+        self.leave_frame(scope_frame)
+
+    def visit_OverlayScope(self, node, frame):
+        ctx = self.temporary_identifier()
+        self.writeline('%s = %s' % (ctx, self.derive_context(frame)))
+        self.writeline('%s.vars = ' % ctx)
+        self.visit(node.context, frame)
+        self.push_context_reference(ctx)
+
+        scope_frame = frame.inner(isolated=True)
+        scope_frame.symbols.analyze_node(node)
+        self.enter_frame(scope_frame)
+        self.blockvisit(node.body, scope_frame)
+        self.leave_frame(scope_frame)
+        self.pop_context_reference()
+
+    def visit_EvalContextModifier(self, node, frame):
+        for keyword in node.options:
+            self.writeline('context.eval_ctx.%s = ' % keyword.key)
+            self.visit(keyword.value, frame)
+            try:
+                val = keyword.value.as_const(frame.eval_ctx)
+            except nodes.Impossible:
+                frame.eval_ctx.volatile = True
+            else:
+                setattr(frame.eval_ctx, keyword.key, val)
+
+    def visit_ScopedEvalContextModifier(self, node, frame):
+        old_ctx_name = self.temporary_identifier()
+        saved_ctx = frame.eval_ctx.save()
+        self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
+        self.visit_EvalContextModifier(node, frame)
+        for child in node.body:
+            self.visit(child, frame)
+        frame.eval_ctx.revert(saved_ctx)
+        self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
diff --git a/lib/python3.7/site-packages/jinja2/constants.py b/lib/python3.7/site-packages/jinja2/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..11efd1ed15832d51acef200d1ce93efc57297664
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/constants.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja.constants
+    ~~~~~~~~~~~~~~~
+
+    Various constants.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+
+
+#: list of lorem ipsum words used by the lipsum() helper function
+LOREM_IPSUM_WORDS = u'''\
+a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
+auctor augue bibendum blandit class commodo condimentum congue consectetuer
+consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
+diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
+elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
+faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
+hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
+justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
+luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
+mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
+nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
+penatibus per pharetra phasellus placerat platea porta porttitor posuere
+potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
+ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
+sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
+tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
+ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
+viverra volutpat vulputate'''
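+
+# (consumed by jinja2.utils.generate_lorem_ipsum, which templates reach as
+# the ``lipsum()`` global registered in jinja2.defaults)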
diff --git a/lib/python3.7/site-packages/jinja2/debug.py b/lib/python3.7/site-packages/jinja2/debug.py
new file mode 100644
index 0000000000000000000000000000000000000000..b61139f0cde17f4914b66bdd541ff149f492fe1d
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/debug.py
@@ -0,0 +1,372 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.debug
+    ~~~~~~~~~~~~
+
+    Implements the debug interface for Jinja.  This module does some pretty
+    ugly stuff with the Python traceback system in order to achieve tracebacks
+    with correct line numbers, locals and contents.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+import sys
+import traceback
+from types import TracebackType, CodeType
+from jinja2.utils import missing, internal_code
+from jinja2.exceptions import TemplateSyntaxError
+from jinja2._compat import iteritems, reraise, PY2
+
+# on pypy we can take advantage of transparent proxies
+try:
+    from __pypy__ import tproxy
+except ImportError:
+    tproxy = None
+
+
+# what does the raise helper look like?
+try:
+    exec("raise TypeError, 'foo'")
+except SyntaxError:
+    raise_helper = 'raise __jinja_exception__[1]'
+except TypeError:
+    raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
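+# (the probe above works because ``raise TypeError, 'foo'`` is a SyntaxError
+# on Python 3, leaving the single-argument raise form, while on Python 2 it
+# parses and raises TypeError, so the two-argument form is available)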
+
+
+class TracebackFrameProxy(object):
+    """Proxies a traceback frame."""
+
+    def __init__(self, tb):
+        self.tb = tb
+        self._tb_next = None
+
+    @property
+    def tb_next(self):
+        return self._tb_next
+
+    def set_next(self, next):
+        if tb_set_next is not None:
+            try:
+                tb_set_next(self.tb, next and next.tb or None)
+            except Exception:
+                # this function can fail due to all the hackery it does
+                # on various python implementations.  We just catch errors
+                # down and ignore them if necessary.
+                pass
+        self._tb_next = next
+
+    @property
+    def is_jinja_frame(self):
+        return '__jinja_template__' in self.tb.tb_frame.f_globals
+
+    def __getattr__(self, name):
+        return getattr(self.tb, name)
+
+
+def make_frame_proxy(frame):
+    proxy = TracebackFrameProxy(frame)
+    if tproxy is None:
+        return proxy
+    def operation_handler(operation, *args, **kwargs):
+        if operation in ('__getattribute__', '__getattr__'):
+            return getattr(proxy, args[0])
+        elif operation == '__setattr__':
+            proxy.__setattr__(*args, **kwargs)
+        else:
+            return getattr(proxy, operation)(*args, **kwargs)
+    return tproxy(TracebackType, operation_handler)
+
+
+class ProcessedTraceback(object):
+    """Holds a Jinja preprocessed traceback for printing or reraising."""
+
+    def __init__(self, exc_type, exc_value, frames):
+        assert frames, 'no frames for this traceback?'
+        self.exc_type = exc_type
+        self.exc_value = exc_value
+        self.frames = frames
+
+        # chain the frames (which are proxies) together again
+        prev_tb = None
+        for tb in self.frames:
+            if prev_tb is not None:
+                prev_tb.set_next(tb)
+            prev_tb = tb
+        prev_tb.set_next(None)
+
+    def render_as_text(self, limit=None):
+        """Return a string with the traceback."""
+        lines = traceback.format_exception(self.exc_type, self.exc_value,
+                                           self.frames[0], limit=limit)
+        return ''.join(lines).rstrip()
+
+    def render_as_html(self, full=False):
+        """Return a unicode string with the traceback as rendered HTML."""
+        from jinja2.debugrenderer import render_traceback
+        rendered = self.render_as_text()
+        if PY2:
+            # format_exception returns bytes on Python 2 only; Python 3
+            # strings have no .decode()
+            rendered = rendered.decode('utf-8', 'replace')
+        return u'%s\n\n<!--\n%s\n-->' % (
+            render_traceback(self, full=full),
+            rendered
+        )
+
+    @property
+    def is_template_syntax_error(self):
+        """`True` if this is a template syntax error."""
+        return isinstance(self.exc_value, TemplateSyntaxError)
+
+    @property
+    def exc_info(self):
+        """Exception info tuple with a proxy around the frame objects."""
+        return self.exc_type, self.exc_value, self.frames[0]
+
+    @property
+    def standard_exc_info(self):
+        """Standard python exc_info for re-raising"""
+        tb = self.frames[0]
+        # the frame will be an actual traceback (or transparent proxy) if
+        # we are on pypy or a python implementation with support for tproxy
+        if type(tb) is not TracebackType:
+            tb = tb.tb
+        return self.exc_type, self.exc_value, tb
+
+
+def make_traceback(exc_info, source_hint=None):
+    """Creates a processed traceback object from the exc_info."""
+    exc_type, exc_value, tb = exc_info
+    if isinstance(exc_value, TemplateSyntaxError):
+        exc_info = translate_syntax_error(exc_value, source_hint)
+        initial_skip = 0
+    else:
+        initial_skip = 1
+    return translate_exception(exc_info, initial_skip)
+
+
+def translate_syntax_error(error, source=None):
+    """Rewrites a syntax error to please traceback systems."""
+    error.source = source
+    error.translated = True
+    exc_info = (error.__class__, error, None)
+    filename = error.filename
+    if filename is None:
+        filename = '<unknown>'
+    return fake_exc_info(exc_info, filename, error.lineno)
+
+
+def translate_exception(exc_info, initial_skip=0):
+    """If passed an exc_info it will automatically rewrite the exceptions
+    all the way down to the correct line numbers and frames.
+    """
+    tb = exc_info[2]
+    frames = []
+
+    # skip some internal frames if wanted
+    for x in range(initial_skip):
+        if tb is not None:
+            tb = tb.tb_next
+    initial_tb = tb
+
+    while tb is not None:
+        # skip frames decorated with @internalcode.  These are internal
+        # calls we can't avoid and that are useless in template debugging
+        # output.
+        if tb.tb_frame.f_code in internal_code:
+            tb = tb.tb_next
+            continue
+
+        # save a reference to the next frame if we override the current
+        # one with a faked one.
+        next = tb.tb_next
+
+        # fake template exceptions
+        template = tb.tb_frame.f_globals.get('__jinja_template__')
+        if template is not None:
+            lineno = template.get_corresponding_lineno(tb.tb_lineno)
+            tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
+                               lineno)[2]
+
+        frames.append(make_frame_proxy(tb))
+        tb = next
+
+    # if no frames are left after filtering we have to re-raise the
+    # exception unchanged.
+    # XXX: can we backup here?  when could this happen?
+    if not frames:
+        reraise(exc_info[0], exc_info[1], exc_info[2])
+
+    return ProcessedTraceback(exc_info[0], exc_info[1], frames)
+
+
+def get_jinja_locals(real_locals):
+    ctx = real_locals.get('context')
+    if ctx:
+        locals = ctx.get_all().copy()
+    else:
+        locals = {}
+
+    local_overrides = {}
+
+    for name, value in iteritems(real_locals):
+        if not name.startswith('l_') or value is missing:
+            continue
+        try:
+            _, depth, name = name.split('_', 2)
+            depth = int(depth)
+        except ValueError:
+            continue
+        cur_depth = local_overrides.get(name, (-1,))[0]
+        if cur_depth < depth:
+            local_overrides[name] = (depth, value)
+
+    for name, (_, value) in iteritems(local_overrides):
+        if value is missing:
+            locals.pop(name, None)
+        else:
+            locals[name] = value
+
+    return locals
+
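+# For orientation: compiled templates store template variables as depth-scoped
+# Python locals named ``l_<depth>_<name>`` (see jinja2.compiler), so e.g.
+# ``{'l_0_foo': 1, 'l_1_foo': 2}`` resolves to ``{'foo': 2}``: the deepest
+# binding wins, and a value of ``missing`` removes the name entirely.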
+
+def fake_exc_info(exc_info, filename, lineno):
+    """Helper for `translate_exception`."""
+    exc_type, exc_value, tb = exc_info
+
+    # figure the real context out
+    if tb is not None:
+        locals = get_jinja_locals(tb.tb_frame.f_locals)
+
+        # if there is a local called __jinja_exception__, remove it so
+        # that it does not break the debug functionality.
+        locals.pop('__jinja_exception__', None)
+    else:
+        locals = {}
+
+    # assemble the fake globals we need
+    globals = {
+        '__name__':             filename,
+        '__file__':             filename,
+        '__jinja_exception__':  exc_info[:2],
+
+        # we don't want to keep the reference to the template around
+        # so as not to cause circular dependencies, but we mark it as a
+        # Jinja frame for the ProcessedTraceback
+        '__jinja_template__':   None
+    }
+
+    # and fake the exception
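+    # the compiled source is (lineno - 1) newlines followed by the raise
+    # helper, so the frame created when it is executed below reports
+    # `lineno` in `filename`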
+    code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
+
+    # if it's possible, change the name of the code.  This won't work
+    # on some Python environments such as Google App Engine
+    try:
+        if tb is None:
+            location = 'template'
+        else:
+            function = tb.tb_frame.f_code.co_name
+            if function == 'root':
+                location = 'top-level template code'
+            elif function.startswith('block_'):
+                location = 'block "%s"' % function[6:]
+            else:
+                location = 'template'
+
+        if PY2:
+            code = CodeType(0, code.co_nlocals, code.co_stacksize,
+                            code.co_flags, code.co_code, code.co_consts,
+                            code.co_names, code.co_varnames, filename,
+                            location, code.co_firstlineno,
+                            code.co_lnotab, (), ())
+        else:
+            code = CodeType(0, code.co_kwonlyargcount,
+                            code.co_nlocals, code.co_stacksize,
+                            code.co_flags, code.co_code, code.co_consts,
+                            code.co_names, code.co_varnames, filename,
+                            location, code.co_firstlineno,
+                            code.co_lnotab, (), ())
+    except Exception:
+        pass
+
+    # execute the code and catch the new traceback
+    try:
+        exec(code, globals, locals)
+    except:
+        exc_info = sys.exc_info()
+        new_tb = exc_info[2].tb_next
+
+    # return without this frame
+    return exc_info[:2] + (new_tb,)
+
+
+def _init_ugly_crap():
+    """This function implements a few ugly things so that we can patch the
+    traceback objects.  The function returned allows resetting `tb_next` on
+    any Python traceback object.  Do not attempt to use this on non-CPython
+    interpreters.
+    """
+    import ctypes
+    from types import TracebackType
+
+    if PY2:
+        # figure out size of _Py_ssize_t for Python 2:
+        if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
+            _Py_ssize_t = ctypes.c_int64
+        else:
+            _Py_ssize_t = ctypes.c_int
+    else:
+        # platform ssize_t on Python 3
+        _Py_ssize_t = ctypes.c_ssize_t
+
+    # regular python
+    class _PyObject(ctypes.Structure):
+        pass
+    _PyObject._fields_ = [
+        ('ob_refcnt', _Py_ssize_t),
+        ('ob_type', ctypes.POINTER(_PyObject))
+    ]
+
+    # python with trace
+    if hasattr(sys, 'getobjects'):
+        class _PyObject(ctypes.Structure):
+            pass
+        _PyObject._fields_ = [
+            ('_ob_next', ctypes.POINTER(_PyObject)),
+            ('_ob_prev', ctypes.POINTER(_PyObject)),
+            ('ob_refcnt', _Py_ssize_t),
+            ('ob_type', ctypes.POINTER(_PyObject))
+        ]
+
+    class _Traceback(_PyObject):
+        pass
+    _Traceback._fields_ = [
+        ('tb_next', ctypes.POINTER(_Traceback)),
+        ('tb_frame', ctypes.POINTER(_PyObject)),
+        ('tb_lasti', ctypes.c_int),
+        ('tb_lineno', ctypes.c_int)
+    ]
+
+    def tb_set_next(tb, next):
+        """Set the tb_next attribute of a traceback object."""
+        if not (isinstance(tb, TracebackType) and
+                (next is None or isinstance(next, TracebackType))):
+            raise TypeError('tb_set_next arguments must be traceback objects')
+        obj = _Traceback.from_address(id(tb))
+        if tb.tb_next is not None:
+            old = _Traceback.from_address(id(tb.tb_next))
+            old.ob_refcnt -= 1
+        if next is None:
+            obj.tb_next = ctypes.POINTER(_Traceback)()
+        else:
+            next = _Traceback.from_address(id(next))
+            next.ob_refcnt += 1
+            obj.tb_next = ctypes.pointer(next)
+
+    return tb_set_next
+
+
+# try to get a tb_set_next implementation if we don't have transparent
+# proxies.
+tb_set_next = None
+if tproxy is None:
+    try:
+        tb_set_next = _init_ugly_crap()
+    except:
+        pass
+    del _init_ugly_crap
diff --git a/lib/python3.7/site-packages/jinja2/defaults.py b/lib/python3.7/site-packages/jinja2/defaults.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c93dec0aeb202495ad4c3e7d25e2d11c507e775
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/defaults.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.defaults
+    ~~~~~~~~~~~~~~~
+
+    Jinja default filters and tags.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+from jinja2._compat import range_type
+from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner, Namespace
+
+
+# defaults for the parser / lexer
+BLOCK_START_STRING = '{%'
+BLOCK_END_STRING = '%}'
+VARIABLE_START_STRING = '{{'
+VARIABLE_END_STRING = '}}'
+COMMENT_START_STRING = '{#'
+COMMENT_END_STRING = '#}'
+LINE_STATEMENT_PREFIX = None
+LINE_COMMENT_PREFIX = None
+TRIM_BLOCKS = False
+LSTRIP_BLOCKS = False
+NEWLINE_SEQUENCE = '\n'
+KEEP_TRAILING_NEWLINE = False
+
+
+# default filters, tests and namespace
+from jinja2.filters import FILTERS as DEFAULT_FILTERS
+from jinja2.tests import TESTS as DEFAULT_TESTS
+DEFAULT_NAMESPACE = {
+    'range':        range_type,
+    'dict':         dict,
+    'lipsum':       generate_lorem_ipsum,
+    'cycler':       Cycler,
+    'joiner':       Joiner,
+    'namespace':    Namespace
+}
+
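+# These names are injected as globals into every template; e.g.
+# ``{% set ns = namespace(found=false) %}`` or
+# ``{% set row = cycler('odd', 'even') %}`` followed by ``{{ row.next() }}``.
+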
+
+# default policies
+DEFAULT_POLICIES = {
+    'compiler.ascii_str':   True,
+    'urlize.rel':           'noopener',
+    'urlize.target':        None,
+    'truncate.leeway':      5,
+    'json.dumps_function':  None,
+    'json.dumps_kwargs':    {'sort_keys': True},
+    'ext.i18n.trimmed':     False,
+}
+
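+# Policies are read at runtime through ``environment.policies``, so
+# overriding one is a plain dict write, e.g.
+# ``env.policies['truncate.leeway'] = 0``.
+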
+
+# export all constants
+__all__ = tuple(x for x in locals().keys() if x.isupper())
diff --git a/lib/python3.7/site-packages/jinja2/environment.py b/lib/python3.7/site-packages/jinja2/environment.py
new file mode 100644
index 0000000000000000000000000000000000000000..549d9afab456b45032945fd47d5c2900e3797a1b
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/environment.py
@@ -0,0 +1,1276 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.environment
+    ~~~~~~~~~~~~~~~~~~
+
+    Provides a class that holds runtime and parsing time options.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+import os
+import sys
+import weakref
+from functools import reduce, partial
+from jinja2 import nodes
+from jinja2.defaults import BLOCK_START_STRING, \
+     BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
+     COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
+     LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
+     DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
+     DEFAULT_POLICIES, KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
+from jinja2.lexer import get_lexer, TokenStream
+from jinja2.parser import Parser
+from jinja2.nodes import EvalContext
+from jinja2.compiler import generate, CodeGenerator
+from jinja2.runtime import Undefined, new_context, Context
+from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
+     TemplatesNotFound, TemplateRuntimeError
+from jinja2.utils import import_string, LRUCache, Markup, missing, \
+     concat, consume, internalcode, have_async_gen
+from jinja2._compat import imap, ifilter, string_types, iteritems, \
+     text_type, reraise, implements_iterator, implements_to_string, \
+     encode_filename, PY2, PYPY
+
+
+# for direct template usage we have up to ten living environments
+_spontaneous_environments = LRUCache(10)
+
+# the function to create jinja traceback objects.  This is dynamically
+# imported on the first exception in the exception handler.
+_make_traceback = None
+
+
+def get_spontaneous_environment(*args):
+    """Return a new spontaneous environment.  A spontaneous environment is an
+    unnamed and (in theory) inaccessible environment that is used for
+    templates generated from a string and not from the file system.
+    """
+    try:
+        env = _spontaneous_environments.get(args)
+    except TypeError:
+        return Environment(*args)
+    if env is not None:
+        return env
+    _spontaneous_environments[args] = env = Environment(*args)
+    env.shared = True
+    return env
+
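+# Note: ``Template('...')`` funnels through this helper with positional
+# arguments that double as the LRU cache key -- which is why unhashable
+# arguments take the TypeError branch above and get a fresh, uncached
+# Environment.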
+
+def create_cache(size):
+    """Return the cache class for the given size."""
+    if size == 0:
+        return None
+    if size < 0:
+        return {}
+    return LRUCache(size)
+
+
+def copy_cache(cache):
+    """Create an empty copy of the given cache."""
+    if cache is None:
+        return None
+    elif type(cache) is dict:
+        return {}
+    return LRUCache(cache.capacity)
+
+
+def load_extensions(environment, extensions):
+    """Load the extensions from the list and bind it to the environment.
+    Returns a dict of instantiated environments.
+    """
+    result = {}
+    for extension in extensions:
+        if isinstance(extension, string_types):
+            extension = import_string(extension)
+        result[extension.identifier] = extension(environment)
+    return result
+
+
+def fail_for_missing_callable(string, name):
+    msg = string % name
+    if isinstance(name, Undefined):
+        try:
+            name._fail_with_undefined_error()
+        except Exception as e:
+            msg = '%s (%s; did you forget to quote the callable name?)' % (msg, e)
+    raise TemplateRuntimeError(msg)
+
+
+def _environment_sanity_check(environment):
+    """Perform a sanity check on the environment."""
+    assert issubclass(environment.undefined, Undefined), 'undefined must ' \
+        'be a subclass of Undefined because filters depend on it.'
+    assert environment.block_start_string != \
+        environment.variable_start_string != \
+        environment.comment_start_string, 'block, variable and comment ' \
+        'start strings must be different'
+    assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
+        'newline_sequence set to unknown line ending string.'
+    return environment
+
+
+class Environment(object):
+    r"""The core component of Jinja is the `Environment`.  It contains
+    important shared variables like configuration, filters, tests,
+    globals and others.  Instances of this class may be modified if
+    they are not shared and if no template has been loaded so far.
+    Modifying an environment after the first template was loaded
+    will lead to surprising effects and undefined behavior.
+
+    Here are the possible initialization parameters:
+
+        `block_start_string`
+            The string marking the beginning of a block.  Defaults to ``'{%'``.
+
+        `block_end_string`
+            The string marking the end of a block.  Defaults to ``'%}'``.
+
+        `variable_start_string`
+            The string marking the beginning of a print statement.
+            Defaults to ``'{{'``.
+
+        `variable_end_string`
+            The string marking the end of a print statement.  Defaults to
+            ``'}}'``.
+
+        `comment_start_string`
+            The string marking the beginning of a comment.  Defaults to ``'{#'``.
+
+        `comment_end_string`
+            The string marking the end of a comment.  Defaults to ``'#}'``.
+
+        `line_statement_prefix`
+            If given and a string, this will be used as prefix for line based
+            statements.  See also :ref:`line-statements`.
+
+        `line_comment_prefix`
+            If given and a string, this will be used as prefix for line based
+            comments.  See also :ref:`line-statements`.
+
+            .. versionadded:: 2.2
+
+        `trim_blocks`
+            If this is set to ``True`` the first newline after a block is
+            removed (block, not variable tag!).  Defaults to `False`.
+
+        `lstrip_blocks`
+            If this is set to ``True`` leading spaces and tabs are stripped
+            from the start of a line to a block.  Defaults to `False`.
+
+        `newline_sequence`
+            The sequence that starts a newline.  Must be one of ``'\r'``,
+            ``'\n'`` or ``'\r\n'``.  The default is ``'\n'`` which is a
+            useful default for Linux and OS X systems as well as web
+            applications.
+
+        `keep_trailing_newline`
+            Preserve the trailing newline when rendering templates.
+            The default is ``False``, which causes a single newline,
+            if present, to be stripped from the end of the template.
+
+            .. versionadded:: 2.7
+
+        `extensions`
+            List of Jinja extensions to use.  This can either be import paths
+            as strings or extension classes.  For more information have a
+            look at :ref:`the extensions documentation <jinja-extensions>`.
+
+        `optimized`
+            should the optimizer be enabled?  Default is ``True``.
+
+        `undefined`
+            :class:`Undefined` or a subclass of it that is used to represent
+            undefined values in the template.
+
+        `finalize`
+            A callable that can be used to process the result of a variable
+            expression before it is output.  For example one can convert
+            ``None`` implicitly into an empty string here.
+
+        `autoescape`
+            If set to ``True`` the XML/HTML autoescaping feature is enabled by
+            default.  For more details about autoescaping see
+            :class:`~jinja2.utils.Markup`.  As of Jinja 2.4 this can also
+            be a callable that is passed the template name and has to
+            return ``True`` or ``False`` depending on whether autoescape
+            should be enabled by default.
+
+            .. versionchanged:: 2.4
+               `autoescape` can now be a function
+
+        `loader`
+            The template loader for this environment.
+
+        `cache_size`
+            The size of the cache.  Per default this is ``400`` which means
+            that if more than 400 templates are loaded the loader will clean
+            out the least recently used template.  If the cache size is set to
+            ``0`` templates are recompiled all the time, if the cache size is
+            ``-1`` the cache will not be cleaned.
+
+            .. versionchanged:: 2.8
+               The cache size was increased to 400 from a low 50.
+
+        `auto_reload`
+            Some loaders load templates from locations where the template
+            sources may change (i.e. the file system or a database).  If
+            ``auto_reload`` is set to ``True`` (default) every time a template is
+            requested the loader checks if the source changed and if yes, it
+            will reload the template.  For higher performance it's possible to
+            disable that.
+
+        `bytecode_cache`
+            If set to a bytecode cache object, this object will provide a
+            cache for the internal Jinja bytecode so that templates don't
+            have to be parsed if they were not changed.
+
+            See :ref:`bytecode-cache` for more information.
+
+        `enable_async`
+            If set to true this enables async template execution which allows
+            you to take advantage of newer Python features.  This requires
+            Python 3.6 or later.
+    """
+
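+    # A minimal usage sketch (``FileSystemLoader`` lives in jinja2.loaders):
+    #
+    #     from jinja2 import Environment, FileSystemLoader
+    #     env = Environment(loader=FileSystemLoader('templates'),
+    #                       autoescape=True)
+    #     print(env.get_template('index.html').render(name='World'))
+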
+    #: if this environment is sandboxed.  Modifying this variable won't make
+    #: the environment sandboxed though.  For a real sandboxed environment
+    #: have a look at jinja2.sandbox.  This flag alone controls the code
+    #: generation by the compiler.
+    sandboxed = False
+
+    #: True if the environment is just an overlay
+    overlayed = False
+
+    #: the environment this environment is linked to if it is an overlay
+    linked_to = None
+
+    #: shared environments have this set to `True`.  A shared environment
+    #: must not be modified
+    shared = False
+
+    #: these are currently EXPERIMENTAL undocumented features.
+    exception_handler = None
+    exception_formatter = None
+
+    #: the class that is used for code generation.  See
+    #: :class:`~jinja2.compiler.CodeGenerator` for more information.
+    code_generator_class = CodeGenerator
+
+    #: the context class that is used for templates.  See
+    #: :class:`~jinja2.runtime.Context` for more information.
+    context_class = Context
+
+    def __init__(self,
+                 block_start_string=BLOCK_START_STRING,
+                 block_end_string=BLOCK_END_STRING,
+                 variable_start_string=VARIABLE_START_STRING,
+                 variable_end_string=VARIABLE_END_STRING,
+                 comment_start_string=COMMENT_START_STRING,
+                 comment_end_string=COMMENT_END_STRING,
+                 line_statement_prefix=LINE_STATEMENT_PREFIX,
+                 line_comment_prefix=LINE_COMMENT_PREFIX,
+                 trim_blocks=TRIM_BLOCKS,
+                 lstrip_blocks=LSTRIP_BLOCKS,
+                 newline_sequence=NEWLINE_SEQUENCE,
+                 keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+                 extensions=(),
+                 optimized=True,
+                 undefined=Undefined,
+                 finalize=None,
+                 autoescape=False,
+                 loader=None,
+                 cache_size=400,
+                 auto_reload=True,
+                 bytecode_cache=None,
+                 enable_async=False):
+        # !!Important notice!!
+        #   The constructor accepts quite a few arguments that should be
+        #   passed by keyword rather than position.  However it's important
+        #   not to change the order of the arguments because they are passed
+        #   positionally at least in these cases:
+        #       -   spontaneous environments (i18n extension and Template)
+        #       -   unittests
+        #   If parameter changes are required, only add parameters at the
+        #   end and don't change the order (or the defaults!) of the
+        #   existing arguments.
+
+        # lexer / parser information
+        self.block_start_string = block_start_string
+        self.block_end_string = block_end_string
+        self.variable_start_string = variable_start_string
+        self.variable_end_string = variable_end_string
+        self.comment_start_string = comment_start_string
+        self.comment_end_string = comment_end_string
+        self.line_statement_prefix = line_statement_prefix
+        self.line_comment_prefix = line_comment_prefix
+        self.trim_blocks = trim_blocks
+        self.lstrip_blocks = lstrip_blocks
+        self.newline_sequence = newline_sequence
+        self.keep_trailing_newline = keep_trailing_newline
+
+        # runtime information
+        self.undefined = undefined
+        self.optimized = optimized
+        self.finalize = finalize
+        self.autoescape = autoescape
+
+        # defaults
+        self.filters = DEFAULT_FILTERS.copy()
+        self.tests = DEFAULT_TESTS.copy()
+        self.globals = DEFAULT_NAMESPACE.copy()
+
+        # set the loader provided
+        self.loader = loader
+        self.cache = create_cache(cache_size)
+        self.bytecode_cache = bytecode_cache
+        self.auto_reload = auto_reload
+
+        # configurable policies
+        self.policies = DEFAULT_POLICIES.copy()
+
+        # load extensions
+        self.extensions = load_extensions(self, extensions)
+
+        self.enable_async = enable_async
+        self.is_async = self.enable_async and have_async_gen
+
+        _environment_sanity_check(self)
+
+    def add_extension(self, extension):
+        """Adds an extension after the environment was created.
+
+        .. versionadded:: 2.5
+        """
+        self.extensions.update(load_extensions(self, [extension]))
+
+    def extend(self, **attributes):
+        """Add the items to the instance of the environment if they do not exist
+        yet.  This is used by :ref:`extensions <writing-extensions>` to register
+        callbacks and configuration values without breaking inheritance.
+        """
+        for key, value in iteritems(attributes):
+            if not hasattr(self, key):
+                setattr(self, key, value)
+
+    def overlay(self, block_start_string=missing, block_end_string=missing,
+                variable_start_string=missing, variable_end_string=missing,
+                comment_start_string=missing, comment_end_string=missing,
+                line_statement_prefix=missing, line_comment_prefix=missing,
+                trim_blocks=missing, lstrip_blocks=missing,
+                extensions=missing, optimized=missing,
+                undefined=missing, finalize=missing, autoescape=missing,
+                loader=missing, cache_size=missing, auto_reload=missing,
+                bytecode_cache=missing):
+        """Create a new overlay environment that shares all the data with the
+        current environment except for cache and the overridden attributes.
+        Extensions cannot be removed for an overlayed environment.  An overlayed
+        environment automatically gets all the extensions of the environment it
+        is linked to plus optional extra extensions.
+
+        Creating overlays should happen after the initial environment has
+        been set up completely.  Not all attributes are truly linked; some
+        are just copied over, so modifications on the original environment
+        may not be reflected.
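+
+        A minimal sketch (the delimiters are illustrative)::
+
+            text_env = env.overlay(variable_start_string='${',
+                                   variable_end_string='}')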
+        """
+        args = dict(locals())
+        del args['self'], args['cache_size'], args['extensions']
+
+        rv = object.__new__(self.__class__)
+        rv.__dict__.update(self.__dict__)
+        rv.overlayed = True
+        rv.linked_to = self
+
+        for key, value in iteritems(args):
+            if value is not missing:
+                setattr(rv, key, value)
+
+        if cache_size is not missing:
+            rv.cache = create_cache(cache_size)
+        else:
+            rv.cache = copy_cache(self.cache)
+
+        rv.extensions = {}
+        for key, value in iteritems(self.extensions):
+            rv.extensions[key] = value.bind(rv)
+        if extensions is not missing:
+            rv.extensions.update(load_extensions(rv, extensions))
+
+        return _environment_sanity_check(rv)
+
+    lexer = property(get_lexer, doc="The lexer for this environment.")
+
+    def iter_extensions(self):
+        """Iterates over the extensions by priority."""
+        return iter(sorted(self.extensions.values(),
+                           key=lambda x: x.priority))
+
+    def getitem(self, obj, argument):
+        """Get an item or attribute of an object but prefer the item."""
+        try:
+            return obj[argument]
+        except (AttributeError, TypeError, LookupError):
+            if isinstance(argument, string_types):
+                try:
+                    attr = str(argument)
+                except Exception:
+                    pass
+                else:
+                    try:
+                        return getattr(obj, attr)
+                    except AttributeError:
+                        pass
+            return self.undefined(obj=obj, name=argument)
+
+    def getattr(self, obj, attribute):
+        """Get an item or attribute of an object but prefer the attribute.
+        Unlike :meth:`getitem` the attribute *must* be a string.
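+
+        A sketch of the lookup preference (plain ``dict`` example)::
+
+            >>> env = Environment()
+            >>> env.getitem({'items': 42}, 'items')
+            42
+
+        :meth:`getattr` on the same dict would return the bound
+        ``dict.items`` method instead, since attribute lookup is preferred.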
+        """
+        try:
+            return getattr(obj, attribute)
+        except AttributeError:
+            pass
+        try:
+            return obj[attribute]
+        except (TypeError, LookupError, AttributeError):
+            return self.undefined(obj=obj, name=attribute)
+
+    def call_filter(self, name, value, args=None, kwargs=None,
+                    context=None, eval_ctx=None):
+        """Invokes a filter on a value the same way the compiler does it.
+
+        Note that on Python 3 this might return a coroutine in case the
+        filter is running from an environment in async mode and the filter
+        supports async execution.  It's your responsibility to await this
+        if needed.
+
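+        Example (a sketch using the built-in ``upper`` filter)::
+
+            >>> Environment().call_filter('upper', 'hello') == 'HELLO'
+            True
+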
+        .. versionadded:: 2.7
+        """
+        func = self.filters.get(name)
+        if func is None:
+            fail_for_missing_callable('no filter named %r', name)
+        args = [value] + list(args or ())
+        if getattr(func, 'contextfilter', False):
+            if context is None:
+                raise TemplateRuntimeError('Attempted to invoke context '
+                                           'filter without context')
+            args.insert(0, context)
+        elif getattr(func, 'evalcontextfilter', False):
+            if eval_ctx is None:
+                if context is not None:
+                    eval_ctx = context.eval_ctx
+                else:
+                    eval_ctx = EvalContext(self)
+            args.insert(0, eval_ctx)
+        elif getattr(func, 'environmentfilter', False):
+            args.insert(0, self)
+        return func(*args, **(kwargs or {}))
+
+    def call_test(self, name, value, args=None, kwargs=None):
+        """Invokes a test on a value the same way the compiler does it.
+
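+        Example (a sketch using the built-in ``odd`` test)::
+
+            >>> Environment().call_test('odd', 3)
+            True
+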
+        .. versionadded:: 2.7
+        """
+        func = self.tests.get(name)
+        if func is None:
+            fail_for_missing_callable('no test named %r', name)
+        return func(value, *(args or ()), **(kwargs or {}))
+
+    @internalcode
+    def parse(self, source, name=None, filename=None):
+        """Parse the sourcecode and return the abstract syntax tree.  This
+        tree of nodes is used by the compiler to convert the template into
+        executable source- or bytecode.  This is useful for debugging and for
+        extracting information from templates.
+
+        If you are :ref:`developing Jinja2 extensions <writing-extensions>`
+        this gives you a good overview of the node tree generated.
+        """
+        try:
+            return self._parse(source, name, filename)
+        except TemplateSyntaxError:
+            exc_info = sys.exc_info()
+        self.handle_exception(exc_info, source_hint=source)
+
+    def _parse(self, source, name, filename):
+        """Internal parsing function used by `parse` and `compile`."""
+        return Parser(self, source, name, encode_filename(filename)).parse()
+
+    def lex(self, source, name=None, filename=None):
+        """Lex the given sourcecode and return a generator that yields
+        tokens as tuples in the form ``(lineno, token_type, value)``.
+        This can be useful for :ref:`extension development <writing-extensions>`
+        and debugging templates.
+
+        This does not perform preprocessing.  If you want the preprocessing
+        of the extensions to be applied you have to filter source through
+        the :meth:`preprocess` method.
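+
+        For example, ``env.lex('{{ name }}')`` yields tuples along the
+        lines of ``(1, 'variable_begin', '{{')`` followed by the tokens of
+        the inner expression (exact values depend on the configured
+        delimiters).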
+        """
+        source = text_type(source)
+        try:
+            return self.lexer.tokeniter(source, name, filename)
+        except TemplateSyntaxError:
+            exc_info = sys.exc_info()
+        self.handle_exception(exc_info, source_hint=source)
+
+    def preprocess(self, source, name=None, filename=None):
+        """Preprocesses the source with all extensions.  This is automatically
+        called for all parsing and compiling methods but *not* for :meth:`lex`
+        because there you usually only want the actual source tokenized.
+        """
+        return reduce(lambda s, e: e.preprocess(s, name, filename),
+                      self.iter_extensions(), text_type(source))
+
+    def _tokenize(self, source, name, filename=None, state=None):
+        """Called by the parser to do the preprocessing and filtering
+        for all the extensions.  Returns a :class:`~jinja2.lexer.TokenStream`.
+        """
+        source = self.preprocess(source, name, filename)
+        stream = self.lexer.tokenize(source, name, filename, state)
+        for ext in self.iter_extensions():
+            stream = ext.filter_stream(stream)
+            if not isinstance(stream, TokenStream):
+                stream = TokenStream(stream, name, filename)
+        return stream
+
+    def _generate(self, source, name, filename, defer_init=False):
+        """Internal hook that can be overridden to hook a different generate
+        method in.
+
+        .. versionadded:: 2.5
+        """
+        return generate(source, self, name, filename, defer_init=defer_init,
+                        optimized=self.optimized)
+
+    def _compile(self, source, filename):
+        """Internal hook that can be overridden to hook a different compile
+        method in.
+
+        .. versionadded:: 2.5
+        """
+        return compile(source, filename, 'exec')
+
+    @internalcode
+    def compile(self, source, name=None, filename=None, raw=False,
+                defer_init=False):
+        """Compile a node or template source code.  The `name` parameter is
+        the load name of the template after it was joined using
+        :meth:`join_path` if necessary, not the filename on the file system.
+        The `filename` parameter is the estimated filename of the template on
+        the file system.  If the template came from a database or memory this
+        can be omitted.
+
+        The return value of this method is a python code object.  If the `raw`
+        parameter is `True` the return value will be a string with python
+        code equivalent to the bytecode returned otherwise.  This method is
+        mainly used internally.
+
+        `defer_init` is used internally to aid the module code generator.  It
+        allows the generated code to be imported without the global
+        environment variable being set.
+
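+        Example (a minimal sketch)::
+
+            code = env.compile('Hello {{ name }}!', name='hello')
+            # with raw=True the generated Python source is returned instead
+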
+        .. versionadded:: 2.4
+           `defer_init` parameter added.
+        """
+        source_hint = None
+        try:
+            if isinstance(source, string_types):
+                source_hint = source
+                source = self._parse(source, name, filename)
+            source = self._generate(source, name, filename,
+                                    defer_init=defer_init)
+            if raw:
+                return source
+            if filename is None:
+                filename = '<template>'
+            else:
+                filename = encode_filename(filename)
+            return self._compile(source, filename)
+        except TemplateSyntaxError:
+            exc_info = sys.exc_info()
+        self.handle_exception(exc_info, source_hint=source_hint)
+
+    def compile_expression(self, source, undefined_to_none=True):
+        """A handy helper method that returns a callable that accepts keyword
+        arguments that appear as variables in the expression.  If called it
+        returns the result of the expression.
+
+        This is useful if applications want to use the same rules as Jinja
+        in template "configuration files" or similar situations.
+
+        Example usage:
+
+        >>> env = Environment()
+        >>> expr = env.compile_expression('foo == 42')
+        >>> expr(foo=23)
+        False
+        >>> expr(foo=42)
+        True
+
+        By default the return value is converted to `None` if the
+        expression returns an undefined value.  This can be changed
+        by setting `undefined_to_none` to `False`.
+
+        >>> env.compile_expression('var')() is None
+        True
+        >>> env.compile_expression('var', undefined_to_none=False)()
+        Undefined
+
+        .. versionadded:: 2.1
+        """
+        parser = Parser(self, source, state='variable')
+        exc_info = None
+        try:
+            expr = parser.parse_expression()
+            if not parser.stream.eos:
+                raise TemplateSyntaxError('chunk after expression',
+                                          parser.stream.current.lineno,
+                                          None, None)
+            expr.set_environment(self)
+        except TemplateSyntaxError:
+            exc_info = sys.exc_info()
+        if exc_info is not None:
+            self.handle_exception(exc_info, source_hint=source)
+        body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
+        template = self.from_string(nodes.Template(body, lineno=1))
+        return TemplateExpression(template, undefined_to_none)
+
+    def compile_templates(self, target, extensions=None, filter_func=None,
+                          zip='deflated', log_function=None,
+                          ignore_errors=True, py_compile=False):
+        """Finds all the templates the loader can find, compiles them
+        and stores them in `target`.  If `zip` is `None`, instead of in a
+        zipfile, the templates will be stored in a directory.
+        By default a deflate zip algorithm is used. To switch to
+        the stored algorithm, `zip` can be set to ``'stored'``.
+
+        `extensions` and `filter_func` are passed to :meth:`list_templates`.
+        Each template returned will be compiled to the target folder or
+        zipfile.
+
+        By default template compilation errors are ignored.  In case a
+        log function is provided, errors are logged.  If you want template
+        syntax errors to abort the compilation you can set `ignore_errors`
+        to `False` and you will get an exception on syntax errors.
+
+        If `py_compile` is set to `True`, .pyc files will be written to the
+        target instead of standard .py files.  This flag has no effect on
+        PyPy and Python 3, where .pyc files are not picked up automatically
+        and provide little benefit.
+
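+        Example (a sketch; the target path is hypothetical)::
+
+            env.compile_templates('compiled_templates.zip')
+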
+        .. versionadded:: 2.4
+        """
+        from jinja2.loaders import ModuleLoader
+
+        if log_function is None:
+            log_function = lambda x: None
+
+        if py_compile:
+            if not PY2 or PYPY:
+                from warnings import warn
+                warn(Warning('py_compile has no effect on pypy or Python 3'))
+                py_compile = False
+            else:
+                import imp
+                import marshal
+                py_header = imp.get_magic() + \
+                    u'\xff\xff\xff\xff'.encode('iso-8859-15')
+
+                # Python 3.3 added a source filesize to the header
+                if sys.version_info >= (3, 3):
+                    py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
+
+        def write_file(filename, data, mode):
+            if zip:
+                info = ZipInfo(filename)
+                info.external_attr = 0o755 << 16
+                zip_file.writestr(info, data)
+            else:
+                f = open(os.path.join(target, filename), mode)
+                try:
+                    f.write(data)
+                finally:
+                    f.close()
+
+        if zip is not None:
+            from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
+            zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
+                                                 stored=ZIP_STORED)[zip])
+            log_function('Compiling into Zip archive "%s"' % target)
+        else:
+            if not os.path.isdir(target):
+                os.makedirs(target)
+            log_function('Compiling into folder "%s"' % target)
+
+        try:
+            for name in self.list_templates(extensions, filter_func):
+                source, filename, _ = self.loader.get_source(self, name)
+                try:
+                    code = self.compile(source, name, filename, True, True)
+                except TemplateSyntaxError as e:
+                    if not ignore_errors:
+                        raise
+                    log_function('Could not compile "%s": %s' % (name, e))
+                    continue
+
+                filename = ModuleLoader.get_module_filename(name)
+
+                if py_compile:
+                    c = self._compile(code, encode_filename(filename))
+                    write_file(filename + 'c', py_header +
+                               marshal.dumps(c), 'wb')
+                    log_function('Byte-compiled "%s" as %s' %
+                                 (name, filename + 'c'))
+                else:
+                    write_file(filename, code, 'w')
+                    log_function('Compiled "%s" as %s' % (name, filename))
+        finally:
+            if zip:
+                zip_file.close()
+
+        log_function('Finished compiling templates')
+
+    def list_templates(self, extensions=None, filter_func=None):
+        """Returns a list of templates for this environment.  This requires
+        that the loader supports the loader's
+        :meth:`~BaseLoader.list_templates` method.
+
+        If there are other files in the template folder besides the
+        actual templates, the returned list can be filtered.  There are two
+        ways: either `extensions` is set to a list of file extensions for
+        templates, or a `filter_func` can be provided which is a callable that
+        is passed a template name and should return `True` if it should end up
+        in the result list.
+
+        If the loader does not support that, a :exc:`TypeError` is raised.
+
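+        Example (a sketch)::
+
+            env.list_templates(extensions=['html'])
+            env.list_templates(filter_func=lambda n: n.startswith('mail/'))
+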
+        .. versionadded:: 2.4
+        """
+        x = self.loader.list_templates()
+        if extensions is not None:
+            if filter_func is not None:
+                raise TypeError('either extensions or filter_func '
+                                'can be passed, but not both')
+            filter_func = lambda x: '.' in x and \
+                                    x.rsplit('.', 1)[1] in extensions
+        if filter_func is not None:
+            x = list(ifilter(filter_func, x))
+        return x
+
+    def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
+        """Exception handling helper.  This is used internally to either raise
+        rewritten exceptions or return a rendered traceback for the template.
+        """
+        global _make_traceback
+        if exc_info is None:
+            exc_info = sys.exc_info()
+
+        # the debugging module is imported when it's used for the first time.
+        # we're doing a lot of stuff there and for applications that do not
+        # get any exceptions in template rendering there is no need to load
+        # all of that.
+        if _make_traceback is None:
+            from jinja2.debug import make_traceback as _make_traceback
+        traceback = _make_traceback(exc_info, source_hint)
+        if rendered and self.exception_formatter is not None:
+            return self.exception_formatter(traceback)
+        if self.exception_handler is not None:
+            self.exception_handler(traceback)
+        exc_type, exc_value, tb = traceback.standard_exc_info
+        reraise(exc_type, exc_value, tb)
+
+    def join_path(self, template, parent):
+        """Join a template with the parent.  By default all the lookups are
+        relative to the loader root so this method returns the `template`
+        parameter unchanged, but if the paths should be relative to the
+        parent template, this function can be used to calculate the real
+        template name.
+
+        Subclasses may override this method and implement template path
+        joining here.
+        """
+        return template
+
+    @internalcode
+    def _load_template(self, name, globals):
+        if self.loader is None:
+            raise TypeError('no loader for this environment specified')
+        cache_key = (weakref.ref(self.loader), name)
+        if self.cache is not None:
+            template = self.cache.get(cache_key)
+            if template is not None and (not self.auto_reload or
+                                         template.is_up_to_date):
+                return template
+        template = self.loader.load(self, name, globals)
+        if self.cache is not None:
+            self.cache[cache_key] = template
+        return template
+
+    @internalcode
+    def get_template(self, name, parent=None, globals=None):
+        """Load a template from the loader.  If a loader is configured this
+        method asks the loader for the template and returns a :class:`Template`.
+        If the `parent` parameter is not `None`, :meth:`join_path` is called
+        to get the real template name before loading.
+
+        The `globals` parameter can be used to provide template wide globals.
+        These variables are available in the context at render time.
+
+        If the template does not exist a :exc:`TemplateNotFound` exception is
+        raised.
+
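+        Example (a sketch with a hypothetical template name)::
+
+            template = env.get_template('index.html')
+            html = template.render(title='Home')
+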
+        .. versionchanged:: 2.4
+           If `name` is a :class:`Template` object it is returned from the
+           function unchanged.
+        """
+        if isinstance(name, Template):
+            return name
+        if parent is not None:
+            name = self.join_path(name, parent)
+        return self._load_template(name, self.make_globals(globals))
+
+    @internalcode
+    def select_template(self, names, parent=None, globals=None):
+        """Works like :meth:`get_template` but tries a number of templates
+        before it fails.  If it cannot find any of the templates, it will
+        raise a :exc:`TemplatesNotFound` exception.
+
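+        Example (a sketch with hypothetical names)::
+
+            template = env.select_template(['special/index.html',
+                                            'index.html'])
+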
+        .. versionadded:: 2.3
+
+        .. versionchanged:: 2.4
+           If `names` contains a :class:`Template` object it is returned
+           from the function unchanged.
+        """
+        if not names:
+            raise TemplatesNotFound(message=u'Tried to select from an empty list '
+                                            u'of templates.')
+        globals = self.make_globals(globals)
+        for name in names:
+            if isinstance(name, Template):
+                return name
+            if parent is not None:
+                name = self.join_path(name, parent)
+            try:
+                return self._load_template(name, globals)
+            except TemplateNotFound:
+                pass
+        raise TemplatesNotFound(names)
+
+    @internalcode
+    def get_or_select_template(self, template_name_or_list,
+                               parent=None, globals=None):
+        """Does a typecheck and dispatches to :meth:`select_template`
+        if an iterable of template names is given, otherwise to
+        :meth:`get_template`.
+
+        .. versionadded:: 2.3
+        """
+        if isinstance(template_name_or_list, string_types):
+            return self.get_template(template_name_or_list, parent, globals)
+        elif isinstance(template_name_or_list, Template):
+            return template_name_or_list
+        return self.select_template(template_name_or_list, parent, globals)
+
+    def from_string(self, source, globals=None, template_class=None):
+        """Load a template from a string.  This parses the source given and
+        returns a :class:`Template` object.
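+
+        Example::
+
+            template = env.from_string('Hello {{ name }}!')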
+        """
+        globals = self.make_globals(globals)
+        cls = template_class or self.template_class
+        return cls.from_code(self, self.compile(source), globals, None)
+
+    def make_globals(self, d):
+        """Return a dict for the globals."""
+        if not d:
+            return self.globals
+        return dict(self.globals, **d)
+
+
+class Template(object):
+    """The central template object.  This class represents a compiled template
+    and is used to evaluate it.
+
+    Normally the template object is generated from an :class:`Environment` but
+    it also has a constructor that makes it possible to create a template
+    instance directly using the constructor.  It takes the same arguments as
+    the environment constructor but it's not possible to specify a loader.
+
+    Every template object has a few methods and members that are guaranteed
+    to exist.  However, a template object should be considered immutable;
+    modifications on the object are not supported.
+
+    Template objects created from the constructor rather than an environment
+    do have an `environment` attribute that points to a temporary environment
+    that is probably shared with other templates created with the constructor
+    and compatible settings.
+
+    >>> template = Template('Hello {{ name }}!')
+    >>> template.render(name='John Doe') == u'Hello John Doe!'
+    True
+    >>> stream = template.stream(name='John Doe')
+    >>> next(stream) == u'Hello John Doe!'
+    True
+    >>> next(stream)
+    Traceback (most recent call last):
+        ...
+    StopIteration
+    """
+
+    def __new__(cls, source,
+                block_start_string=BLOCK_START_STRING,
+                block_end_string=BLOCK_END_STRING,
+                variable_start_string=VARIABLE_START_STRING,
+                variable_end_string=VARIABLE_END_STRING,
+                comment_start_string=COMMENT_START_STRING,
+                comment_end_string=COMMENT_END_STRING,
+                line_statement_prefix=LINE_STATEMENT_PREFIX,
+                line_comment_prefix=LINE_COMMENT_PREFIX,
+                trim_blocks=TRIM_BLOCKS,
+                lstrip_blocks=LSTRIP_BLOCKS,
+                newline_sequence=NEWLINE_SEQUENCE,
+                keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+                extensions=(),
+                optimized=True,
+                undefined=Undefined,
+                finalize=None,
+                autoescape=False,
+                enable_async=False):
+        env = get_spontaneous_environment(
+            block_start_string, block_end_string, variable_start_string,
+            variable_end_string, comment_start_string, comment_end_string,
+            line_statement_prefix, line_comment_prefix, trim_blocks,
+            lstrip_blocks, newline_sequence, keep_trailing_newline,
+            frozenset(extensions), optimized, undefined, finalize, autoescape,
+            None, 0, False, None, enable_async)
+        return env.from_string(source, template_class=cls)
+
+    @classmethod
+    def from_code(cls, environment, code, globals, uptodate=None):
+        """Creates a template object from compiled code and the globals.  This
+        is used by the loaders and environment to create a template object.
+        """
+        namespace = {
+            'environment':  environment,
+            '__file__':     code.co_filename
+        }
+        exec(code, namespace)
+        rv = cls._from_namespace(environment, namespace, globals)
+        rv._uptodate = uptodate
+        return rv
+
+    @classmethod
+    def from_module_dict(cls, environment, module_dict, globals):
+        """Creates a template object from a module.  This is used by the
+        module loader to create a template object.
+
+        .. versionadded:: 2.4
+        """
+        return cls._from_namespace(environment, module_dict, globals)
+
+    @classmethod
+    def _from_namespace(cls, environment, namespace, globals):
+        t = object.__new__(cls)
+        t.environment = environment
+        t.globals = globals
+        t.name = namespace['name']
+        t.filename = namespace['__file__']
+        t.blocks = namespace['blocks']
+
+        # render function and module
+        t.root_render_func = namespace['root']
+        t._module = None
+
+        # debug and loader helpers
+        t._debug_info = namespace['debug_info']
+        t._uptodate = None
+
+        # store the reference
+        namespace['environment'] = environment
+        namespace['__jinja_template__'] = t
+
+        return t
+
+    def render(self, *args, **kwargs):
+        """This method accepts the same arguments as the `dict` constructor:
+        A dict, a dict subclass or some keyword arguments.  If no arguments
+        are given the context will be empty.  These two calls do the same::
+
+            template.render(knights='that say nih')
+            template.render({'knights': 'that say nih'})
+
+        This will return the rendered template as a unicode string.
+        """
+        vars = dict(*args, **kwargs)
+        try:
+            return concat(self.root_render_func(self.new_context(vars)))
+        except Exception:
+            exc_info = sys.exc_info()
+        return self.environment.handle_exception(exc_info, True)
+
+    def render_async(self, *args, **kwargs):
+        """This works similar to :meth:`render` but returns a coroutine
+        that when awaited returns the entire rendered template string.  This
+        requires the async feature to be enabled.
+
+        Example usage::
+
+            await template.render_async(knights='that say nih; asynchronously')
+        """
+        # see asyncsupport for the actual implementation
+        raise NotImplementedError('This feature is not available for this '
+                                  'version of Python')
+
+    def stream(self, *args, **kwargs):
+        """Works exactly like :meth:`generate` but returns a
+        :class:`TemplateStream`.
+        """
+        return TemplateStream(self.generate(*args, **kwargs))
+
+    def generate(self, *args, **kwargs):
+        """For very large templates it can be useful to not render the whole
+        template at once but evaluate each statement after another and yield
+        piece for piece.  This method basically does exactly that and returns
+        a generator that yields one item after another as unicode strings.
+
+        It accepts the same arguments as :meth:`render`.
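+
+        Example (a sketch; ``fp`` is a hypothetical file-like object)::
+
+            for chunk in template.generate(name='World'):
+                fp.write(chunk)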
+        """
+        vars = dict(*args, **kwargs)
+        try:
+            for event in self.root_render_func(self.new_context(vars)):
+                yield event
+        except Exception:
+            exc_info = sys.exc_info()
+        else:
+            return
+        yield self.environment.handle_exception(exc_info, True)
+
+    def generate_async(self, *args, **kwargs):
+        """An async version of :meth:`generate`.  Works very similarly but
+        returns an async iterator instead.
+        """
+        # see asyncsupport for the actual implementation
+        raise NotImplementedError('This feature is not available for this '
+                                  'version of Python')
+
+    def new_context(self, vars=None, shared=False, locals=None):
+        """Create a new :class:`Context` for this template.  The vars
+        provided will be passed to the template.  By default the globals
+        are added to the context.  If shared is set to `True` the data
+        is passed as-is to the context without adding the globals.
+
+        `locals` can be a dict of local variables for internal usage.
+        """
+        return new_context(self.environment, self.name, self.blocks,
+                           vars, shared, self.globals, locals)
+
+    def make_module(self, vars=None, shared=False, locals=None):
+        """This method works like the :attr:`module` attribute when called
+        without arguments but it will evaluate the template on every call
+        rather than caching it.  It's also possible to provide
+        a dict which is then used as context.  The arguments are the same
+        as for the :meth:`new_context` method.
+        """
+        return TemplateModule(self, self.new_context(vars, shared, locals))
+
+    def make_module_async(self, vars=None, shared=False, locals=None):
+        """As template module creation can invoke template code for
+        asynchronous executions, this method must be used instead of the
+        normal :meth:`make_module` one.  Likewise the module attribute
+        becomes unavailable in async mode.
+        """
+        # see asyncsupport for the actual implementation
+        raise NotImplementedError('This feature is not available for this '
+                                  'version of Python')
+
+    @internalcode
+    def _get_default_module(self):
+        if self._module is not None:
+            return self._module
+        self._module = rv = self.make_module()
+        return rv
+
+    @property
+    def module(self):
+        """The template as module.  This is used for imports in the
+        template runtime but is also useful if one wants to access
+        exported template variables from the Python layer:
+
+        >>> t = Template('{% macro foo() %}42{% endmacro %}23')
+        >>> str(t.module)
+        '23'
+        >>> t.module.foo() == u'42'
+        True
+
+        This attribute is not available if async mode is enabled.
+        """
+        return self._get_default_module()
+
+    def get_corresponding_lineno(self, lineno):
+        """Return the source line number of a line number in the
+        generated bytecode as they are not in sync.
+        """
+        for template_line, code_line in reversed(self.debug_info):
+            if code_line <= lineno:
+                return template_line
+        return 1
+
+    @property
+    def is_up_to_date(self):
+        """If this variable is `False` there is a newer version available."""
+        if self._uptodate is None:
+            return True
+        return self._uptodate()
+
+    @property
+    def debug_info(self):
+        """The debug info mapping."""
+        return [tuple(imap(int, x.split('='))) for x in
+                self._debug_info.split('&')]
+
+    def __repr__(self):
+        if self.name is None:
+            name = 'memory:%x' % id(self)
+        else:
+            name = repr(self.name)
+        return '<%s %s>' % (self.__class__.__name__, name)
+
+
+@implements_to_string
+class TemplateModule(object):
+    """Represents an imported template.  All the exported names of the
+    template are available as attributes on this object.  Additionally,
+    converting it into a unicode string or bytestring renders the contents.
+    """
+
+    def __init__(self, template, context, body_stream=None):
+        if body_stream is None:
+            if context.environment.is_async:
+                raise RuntimeError('Async mode requires a body stream '
+                                   'to be passed to a template module.  Use '
+                                   'the async methods of the API you are '
+                                   'using.')
+            body_stream = list(template.root_render_func(context))
+        self._body_stream = body_stream
+        self.__dict__.update(context.get_exported())
+        self.__name__ = template.name
+
+    def __html__(self):
+        return Markup(concat(self._body_stream))
+
+    def __str__(self):
+        return concat(self._body_stream)
+
+    def __repr__(self):
+        if self.__name__ is None:
+            name = 'memory:%x' % id(self)
+        else:
+            name = repr(self.__name__)
+        return '<%s %s>' % (self.__class__.__name__, name)
+
+
+class TemplateExpression(object):
+    """The :meth:`jinja2.Environment.compile_expression` method returns an
+    instance of this object.  It provides expression-like access to the
+    template that wraps the expression.
+    """
+
+    def __init__(self, template, undefined_to_none):
+        self._template = template
+        self._undefined_to_none = undefined_to_none
+
+    def __call__(self, *args, **kwargs):
+        context = self._template.new_context(dict(*args, **kwargs))
+        consume(self._template.root_render_func(context))
+        rv = context.vars['result']
+        if self._undefined_to_none and isinstance(rv, Undefined):
+            rv = None
+        return rv
+
+
+@implements_iterator
+class TemplateStream(object):
+    """A template stream works pretty much like an ordinary python generator
+    but it can buffer multiple items to reduce the number of total iterations.
+    By default the output is unbuffered, which means that for every unbuffered
+    instruction in the template one unicode string is yielded.
+
+    If buffering is enabled with a buffer size of 5, five items are combined
+    into a new unicode string.  This is mainly useful if you are streaming
+    big templates to a client via WSGI which flushes after each iteration.
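+
+    A sketch of typical usage::
+
+        stream = template.stream(name='World')
+        stream.enable_buffering(size=5)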
+    """
+
+    def __init__(self, gen):
+        self._gen = gen
+        self.disable_buffering()
+
+    def dump(self, fp, encoding=None, errors='strict'):
+        """Dump the complete stream into a file or file-like object.
+        By default unicode strings are written; if you want to encode
+        before writing, specify an `encoding`.
+
+        Example usage::
+
+            Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
+        """
+        close = False
+        if isinstance(fp, string_types):
+            if encoding is None:
+                encoding = 'utf-8'
+            fp = open(fp, 'wb')
+            close = True
+        try:
+            if encoding is not None:
+                iterable = (x.encode(encoding, errors) for x in self)
+            else:
+                iterable = self
+            if hasattr(fp, 'writelines'):
+                fp.writelines(iterable)
+            else:
+                for item in iterable:
+                    fp.write(item)
+        finally:
+            if close:
+                fp.close()
+
+    def disable_buffering(self):
+        """Disable the output buffering."""
+        self._next = partial(next, self._gen)
+        self.buffered = False
+
+    def _buffered_generator(self, size):
+        buf = []
+        c_size = 0
+        push = buf.append
+
+        while 1:
+            try:
+                while c_size < size:
+                    c = next(self._gen)
+                    push(c)
+                    if c:
+                        c_size += 1
+            except StopIteration:
+                if not c_size:
+                    return
+            yield concat(buf)
+            del buf[:]
+            c_size = 0
+
+    def enable_buffering(self, size=5):
+        """Enable buffering.  Buffer `size` items before yielding them."""
+        if size <= 1:
+            raise ValueError('buffer size too small')
+
+        self.buffered = True
+        self._next = partial(next, self._buffered_generator(size))
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return self._next()
+
+
+# hook in default template class.  if anyone reads this comment: ignore that
+# it's possible to use custom templates ;-)
+Environment.template_class = Template
diff --git a/lib/python3.7/site-packages/jinja2/exceptions.py b/lib/python3.7/site-packages/jinja2/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..c018a33e323cc5682900e5be3bd0daf2c25bdda6
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/exceptions.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.exceptions
+    ~~~~~~~~~~~~~~~~~
+
+    Jinja exceptions.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+from jinja2._compat import imap, text_type, PY2, implements_to_string
+
+
+class TemplateError(Exception):
+    """Baseclass for all template errors."""
+
+    if PY2:
+        def __init__(self, message=None):
+            if message is not None:
+                message = text_type(message).encode('utf-8')
+            Exception.__init__(self, message)
+
+        @property
+        def message(self):
+            if self.args:
+                message = self.args[0]
+                if message is not None:
+                    return message.decode('utf-8', 'replace')
+
+        def __unicode__(self):
+            return self.message or u''
+    else:
+        def __init__(self, message=None):
+            Exception.__init__(self, message)
+
+        @property
+        def message(self):
+            if self.args:
+                message = self.args[0]
+                if message is not None:
+                    return message
+
+
+@implements_to_string
+class TemplateNotFound(IOError, LookupError, TemplateError):
+    """Raised if a template does not exist."""
+
+    # looks weird, but removes the warning descriptor that just
+    # bogusly warns us about message being deprecated
+    message = None
+
+    def __init__(self, name, message=None):
+        IOError.__init__(self)
+        if message is None:
+            message = name
+        self.message = message
+        self.name = name
+        self.templates = [name]
+
+    def __str__(self):
+        return self.message
+
+
+class TemplatesNotFound(TemplateNotFound):
+    """Like :class:`TemplateNotFound` but raised if multiple templates
+    are selected.  This is a subclass of :class:`TemplateNotFound`
+    exception, so just catching the base exception will catch both.
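+
+    Example (a sketch)::
+
+        try:
+            env.select_template(['a.html', 'b.html'])
+        except TemplateNotFound:
+            pass  # also catches TemplatesNotFound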
+
+    .. versionadded:: 2.2
+    """
+
+    def __init__(self, names=(), message=None):
+        if message is None:
+            message = u'none of the templates given were found: ' + \
+                      u', '.join(imap(text_type, names))
+        TemplateNotFound.__init__(self, names and names[-1] or None, message)
+        self.templates = list(names)
+
+
+@implements_to_string
+class TemplateSyntaxError(TemplateError):
+    """Raised to tell the user that there is a problem with the template."""
+
+    def __init__(self, message, lineno, name=None, filename=None):
+        TemplateError.__init__(self, message)
+        self.lineno = lineno
+        self.name = name
+        self.filename = filename
+        self.source = None
+
+        # this is set to True if the debug.translate_syntax_error
+        # function translated the syntax error into a new traceback
+        self.translated = False
+
+    def __str__(self):
+        # for translated errors we only return the message
+        if self.translated:
+            return self.message
+
+        # otherwise attach some stuff
+        location = 'line %d' % self.lineno
+        name = self.filename or self.name
+        if name:
+            location = 'File "%s", %s' % (name, location)
+        lines = [self.message, '  ' + location]
+
+        # if the source is set, add the line to the output
+        if self.source is not None:
+            try:
+                line = self.source.splitlines()[self.lineno - 1]
+            except IndexError:
+                line = None
+            if line:
+                lines.append('    ' + line.strip())
+
+        return u'\n'.join(lines)
+
+
+class TemplateAssertionError(TemplateSyntaxError):
+    """Like a template syntax error, but covers cases where something in the
+    template caused an error at compile time that wasn't necessarily caused
+    by a syntax error.  However it's a direct subclass of
+    :exc:`TemplateSyntaxError` and has the same attributes.
+    """
+
+
+class TemplateRuntimeError(TemplateError):
+    """A generic runtime error in the template engine.  Under some situations
+    Jinja may raise this exception.
+    """
+
+
+class UndefinedError(TemplateRuntimeError):
+    """Raised if a template tries to operate on :class:`Undefined`."""
+
+
+class SecurityError(TemplateRuntimeError):
+    """Raised if a template tries to do something insecure if the
+    sandbox is enabled.
+    """
+
+
+class FilterArgumentError(TemplateRuntimeError):
+    """This error is raised if a filter was called with inappropriate
+    arguments.
+    """
diff --git a/lib/python3.7/site-packages/jinja2/ext.py b/lib/python3.7/site-packages/jinja2/ext.py
new file mode 100644
index 0000000000000000000000000000000000000000..0734a84f73d0ddb735a004c9e0416e018a7d50bd
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/ext.py
@@ -0,0 +1,627 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.ext
+    ~~~~~~~~~~
+
+    Jinja extensions allow adding custom tags similar to the way Django
+    custom tags work.  By default two example extensions exist: an i18n and
+    a cache extension.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD.
+"""
+import re
+
+from jinja2 import nodes
+from jinja2.defaults import BLOCK_START_STRING, \
+     BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
+     COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
+     LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
+     KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
+from jinja2.environment import Environment
+from jinja2.runtime import concat
+from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
+from jinja2.utils import contextfunction, import_string, Markup
+from jinja2._compat import with_metaclass, string_types, iteritems
+
+
+# the only really useful gettext functions for a Jinja template.  Note
+# that ugettext must be assigned to gettext, as Jinja doesn't support
+# non-unicode strings.
+GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
+
+
+class ExtensionRegistry(type):
+    """Gives the extension an unique identifier."""
+
+    def __new__(cls, name, bases, d):
+        rv = type.__new__(cls, name, bases, d)
+        rv.identifier = rv.__module__ + '.' + rv.__name__
+        return rv
+
+
+class Extension(with_metaclass(ExtensionRegistry, object)):
+    """Extensions can be used to add extra functionality to the Jinja template
+    system at the parser level.  Custom extensions are bound to an environment
+    but may not store environment specific data on `self`.  The reason for
+    this is that an extension can be bound to another environment (for
+    overlays) by creating a copy and reassigning the `environment` attribute.
+
+    As extensions are created by the environment, they cannot accept any
+    arguments for configuration.  One might want to work around that by using
+    a factory function, but that is not possible as extensions are identified
+    by their import name.  The correct way to configure the extension is to
+    store the configuration values on the environment.  Because the
+    environment then ends up acting as central configuration storage, the
+    attributes may clash, which is why extensions have to ensure that the
+    names they choose for configuration are not too generic.  ``prefix``,
+    for example, is a terrible name; ``fragment_cache_prefix``, on the other
+    hand, is a good name as it includes the name of the extension
+    (fragment cache).
+    """
+
+    #: if this extension parses, this is the set of tags it's listening for.
+    tags = set()
+
+    #: the priority of that extension.  This is especially useful for
+    #: extensions that preprocess values.  A lower value means higher
+    #: priority.
+    #:
+    #: .. versionadded:: 2.4
+    priority = 100
+
+    def __init__(self, environment):
+        self.environment = environment
+
+    def bind(self, environment):
+        """Create a copy of this extension bound to another environment."""
+        rv = object.__new__(self.__class__)
+        rv.__dict__.update(self.__dict__)
+        rv.environment = environment
+        return rv
+
+    def preprocess(self, source, name, filename=None):
+        """This method is called before the actual lexing and can be used to
+        preprocess the source.  The `filename` is optional.  The return value
+        must be the preprocessed source.
+        """
+        return source
+
+    def filter_stream(self, stream):
+        """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
+        to filter tokens returned.  This method has to return an iterable of
+        :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
+        :class:`~jinja2.lexer.TokenStream`.
+
+        In the `ext` folder of the Jinja2 source distribution there is a file
+        called `inlinegettext.py` which implements a filter that utilizes this
+        method.
+        """
+        return stream
+
+    def parse(self, parser):
+        """If any of the :attr:`tags` matched this method is called with the
+        parser as first argument.  The token the parser stream is pointing at
+        is the name token that matched.  This method has to return one node
+        or a list of multiple nodes.
+        """
+        raise NotImplementedError()
+
+    def attr(self, name, lineno=None):
+        """Return an attribute node for the current extension.  This is useful
+        to pass constants on extensions to generated template code.
+
+        ::
+
+            self.attr('_my_attribute', lineno=lineno)
+        """
+        return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
+
+    def call_method(self, name, args=None, kwargs=None, dyn_args=None,
+                    dyn_kwargs=None, lineno=None):
+        """Call a method of the extension.  This is a shortcut for
+        :meth:`attr` + :class:`jinja2.nodes.Call`.
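+
+        Example (a sketch; ``_my_method`` and ``name`` are hypothetical)::
+
+            self.call_method('_my_method', [nodes.Const(name)],
+                             lineno=lineno)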
+        """
+        if args is None:
+            args = []
+        if kwargs is None:
+            kwargs = []
+        return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
+                          dyn_args, dyn_kwargs, lineno=lineno)
+
+
+@contextfunction
+def _gettext_alias(__context, *args, **kwargs):
+    return __context.call(__context.resolve('gettext'), *args, **kwargs)
+
+
+def _make_new_gettext(func):
+    @contextfunction
+    def gettext(__context, __string, **variables):
+        rv = __context.call(func, __string)
+        if __context.eval_ctx.autoescape:
+            rv = Markup(rv)
+        return rv % variables
+    return gettext
+
+
+def _make_new_ngettext(func):
+    @contextfunction
+    def ngettext(__context, __singular, __plural, __num, **variables):
+        variables.setdefault('num', __num)
+        rv = __context.call(func, __singular, __plural, __num)
+        if __context.eval_ctx.autoescape:
+            rv = Markup(rv)
+        return rv % variables
+    return ngettext
+
+
+class InternationalizationExtension(Extension):
+    """This extension adds gettext support to Jinja2."""
+    tags = set(['trans'])
+
+    # TODO: the i18n extension is currently reevaluating values in a few
+    # situations.  Take this example:
+    #   {% trans count=something() %}{{ count }} foo{% pluralize
+    #     %}{{ count }} fooss{% endtrans %}
+    # something is called twice here.  One time for the gettext value and
+    # the other time for the n-parameter of the ngettext function.
+
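+    # Usage sketch (an assumption about typical setup): enable the extension
+    # and install translations, e.g. a gettext.GNUTranslations instance:
+    #   env = Environment(extensions=['jinja2.ext.i18n'])
+    #   env.install_gettext_translations(translations)
+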
+    def __init__(self, environment):
+        Extension.__init__(self, environment)
+        environment.globals['_'] = _gettext_alias
+        environment.extend(
+            install_gettext_translations=self._install,
+            install_null_translations=self._install_null,
+            install_gettext_callables=self._install_callables,
+            uninstall_gettext_translations=self._uninstall,
+            extract_translations=self._extract,
+            newstyle_gettext=False
+        )
+
+    def _install(self, translations, newstyle=None):
+        gettext = getattr(translations, 'ugettext', None)
+        if gettext is None:
+            gettext = translations.gettext
+        ngettext = getattr(translations, 'ungettext', None)
+        if ngettext is None:
+            ngettext = translations.ngettext
+        self._install_callables(gettext, ngettext, newstyle)
+
+    def _install_null(self, newstyle=None):
+        self._install_callables(
+            lambda x: x,
+            lambda s, p, n: (n != 1 and (p,) or (s,))[0],
+            newstyle
+        )
+
+    def _install_callables(self, gettext, ngettext, newstyle=None):
+        if newstyle is not None:
+            self.environment.newstyle_gettext = newstyle
+        if self.environment.newstyle_gettext:
+            gettext = _make_new_gettext(gettext)
+            ngettext = _make_new_ngettext(ngettext)
+        self.environment.globals.update(
+            gettext=gettext,
+            ngettext=ngettext
+        )
+
+    def _uninstall(self, translations):
+        for key in 'gettext', 'ngettext':
+            self.environment.globals.pop(key, None)
+
+    def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
+        if isinstance(source, string_types):
+            source = self.environment.parse(source)
+        return extract_from_ast(source, gettext_functions)
+
+    def parse(self, parser):
+        """Parse a translatable tag."""
+        lineno = next(parser.stream).lineno
+        num_called_num = False
+
+        # find all the variables referenced.  Additionally a variable can be
+        # defined in the body of the trans block too, but this is checked at
+        # a later stage.
+        plural_expr = None
+        plural_expr_assignment = None
+        variables = {}
+        trimmed = None
+        while parser.stream.current.type != 'block_end':
+            if variables:
+                parser.stream.expect('comma')
+
+            # skip colon for python compatibility
+            if parser.stream.skip_if('colon'):
+                break
+
+            name = parser.stream.expect('name')
+            if name.value in variables:
+                parser.fail('translatable variable %r defined twice.' %
+                            name.value, name.lineno,
+                            exc=TemplateAssertionError)
+
+            # expressions
+            if parser.stream.current.type == 'assign':
+                next(parser.stream)
+                variables[name.value] = var = parser.parse_expression()
+            elif trimmed is None and name.value in ('trimmed', 'notrimmed'):
+                trimmed = name.value == 'trimmed'
+                continue
+            else:
+                variables[name.value] = var = nodes.Name(name.value, 'load')
+
+            if plural_expr is None:
+                if isinstance(var, nodes.Call):
+                    plural_expr = nodes.Name('_trans', 'load')
+                    variables[name.value] = plural_expr
+                    plural_expr_assignment = nodes.Assign(
+                        nodes.Name('_trans', 'store'), var)
+                else:
+                    plural_expr = var
+                num_called_num = name.value == 'num'
+
+        parser.stream.expect('block_end')
+
+        plural = None
+        have_plural = False
+        referenced = set()
+
+        # now parse until endtrans or pluralize
+        singular_names, singular = self._parse_block(parser, True)
+        if singular_names:
+            referenced.update(singular_names)
+            if plural_expr is None:
+                plural_expr = nodes.Name(singular_names[0], 'load')
+                num_called_num = singular_names[0] == 'num'
+
+        # if we have a pluralize block, we parse that too
+        if parser.stream.current.test('name:pluralize'):
+            have_plural = True
+            next(parser.stream)
+            if parser.stream.current.type != 'block_end':
+                name = parser.stream.expect('name')
+                if name.value not in variables:
+                    parser.fail('unknown variable %r for pluralization' %
+                                name.value, name.lineno,
+                                exc=TemplateAssertionError)
+                plural_expr = variables[name.value]
+                num_called_num = name.value == 'num'
+            parser.stream.expect('block_end')
+            plural_names, plural = self._parse_block(parser, False)
+            next(parser.stream)
+            referenced.update(plural_names)
+        else:
+            next(parser.stream)
+
+        # register free names as simple name expressions
+        for var in referenced:
+            if var not in variables:
+                variables[var] = nodes.Name(var, 'load')
+
+        if not have_plural:
+            plural_expr = None
+        elif plural_expr is None:
+            parser.fail('pluralize without variables', lineno)
+
+        if trimmed is None:
+            trimmed = self.environment.policies['ext.i18n.trimmed']
+        if trimmed:
+            singular = self._trim_whitespace(singular)
+            if plural:
+                plural = self._trim_whitespace(plural)
+
+        node = self._make_node(singular, plural, variables, plural_expr,
+                               bool(referenced),
+                               num_called_num and have_plural)
+        node.set_lineno(lineno)
+        if plural_expr_assignment is not None:
+            return [plural_expr_assignment, node]
+        else:
+            return node
+
+    def _trim_whitespace(self, string, _ws_re=re.compile(r'\s*\n\s*')):
+        return _ws_re.sub(' ', string.strip())
+
+    def _parse_block(self, parser, allow_pluralize):
+        """Parse until the next block tag with a given name."""
+        referenced = []
+        buf = []
+        while 1:
+            if parser.stream.current.type == 'data':
+                buf.append(parser.stream.current.value.replace('%', '%%'))
+                next(parser.stream)
+            elif parser.stream.current.type == 'variable_begin':
+                next(parser.stream)
+                name = parser.stream.expect('name').value
+                referenced.append(name)
+                buf.append('%%(%s)s' % name)
+                parser.stream.expect('variable_end')
+            elif parser.stream.current.type == 'block_begin':
+                next(parser.stream)
+                if parser.stream.current.test('name:endtrans'):
+                    break
+                elif parser.stream.current.test('name:pluralize'):
+                    if allow_pluralize:
+                        break
+                    parser.fail('a translatable section can have only one '
+                                'pluralize section')
+                parser.fail('control structures in translatable sections are '
+                            'not allowed')
+            elif parser.stream.eos:
+                parser.fail('unclosed translation block')
+            else:
+                assert False, 'internal parser error'
+
+        return referenced, concat(buf)
+
+    def _make_node(self, singular, plural, variables, plural_expr,
+                   vars_referenced, num_called_num):
+        """Generates a useful node from the data provided."""
+        # No variables referenced?  Then the '%'-escaping applied during
+        # parsing is only needed for old style gettext invocations that
+        # actually interpolate variables, so undo it here.
+        if not vars_referenced and not self.environment.newstyle_gettext:
+            singular = singular.replace('%%', '%')
+            if plural:
+                plural = plural.replace('%%', '%')
+
+        # singular only:
+        if plural_expr is None:
+            gettext = nodes.Name('gettext', 'load')
+            node = nodes.Call(gettext, [nodes.Const(singular)],
+                              [], None, None)
+
+        # singular and plural
+        else:
+            ngettext = nodes.Name('ngettext', 'load')
+            node = nodes.Call(ngettext, [
+                nodes.Const(singular),
+                nodes.Const(plural),
+                plural_expr
+            ], [], None, None)
+
+        # in case newstyle gettext is used, the method is powerful
+        # enough to handle the variable expansion and autoescape
+        # handling itself
+        if self.environment.newstyle_gettext:
+            for key, value in iteritems(variables):
+                # the function adds that variable back later anyway if
+                # num was called num, so just skip it here.
+                if num_called_num and key == 'num':
+                    continue
+                node.kwargs.append(nodes.Keyword(key, value))
+
+        # otherwise do that here
+        else:
+            # mark the return value as safe if we are in an
+            # environment with autoescaping turned on
+            node = nodes.MarkSafeIfAutoescape(node)
+            if variables:
+                node = nodes.Mod(node, nodes.Dict([
+                    nodes.Pair(nodes.Const(key), value)
+                    for key, value in variables.items()
+                ]))
+        return nodes.Output([node])
+
+
+class ExprStmtExtension(Extension):
+    """Adds a `do` tag to Jinja2 that works like the print statement just
+    that it doesn't print the return value.
+    """
+    tags = set(['do'])
+
+    def parse(self, parser):
+        node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
+        node.node = parser.parse_tuple()
+        return node
+
+
+class LoopControlExtension(Extension):
+    """Adds break and continue to the template engine."""
+    tags = set(['break', 'continue'])
+
+    def parse(self, parser):
+        token = next(parser.stream)
+        if token.value == 'break':
+            return nodes.Break(lineno=token.lineno)
+        return nodes.Continue(lineno=token.lineno)
+
+
+class WithExtension(Extension):
+    pass
+
+
+class AutoEscapeExtension(Extension):
+    pass
+
+
+def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
+                     babel_style=True):
+    """Extract localizable strings from the given template node.  Per
+    default this function returns matches in babel style that means non string
+    parameters as well as keyword arguments are returned as `None`.  This
+    allows Babel to figure out what you really meant if you are using
+    gettext functions that allow keyword arguments for placeholder expansion.
+    If you don't want that behavior set the `babel_style` parameter to `False`
+    which causes only strings to be returned and parameters are always stored
+    in tuples.  As a consequence invalid gettext calls (calls without a single
+    string parameter or string parameters after non-string parameters) are
+    skipped.
+
+    This example explains the behavior:
+
+    >>> from jinja2 import Environment
+    >>> env = Environment()
+    >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
+    >>> list(extract_from_ast(node))
+    [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
+    >>> list(extract_from_ast(node, babel_style=False))
+    [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
+
+    For every string found this function yields a ``(lineno, function,
+    message)`` tuple, where:
+
+    * ``lineno`` is the number of the line on which the string was found,
+    * ``function`` is the name of the ``gettext`` function used (if the
+      string was extracted from embedded Python code), and
+    * ``message`` is the string itself (a ``unicode`` object, or a tuple
+      of ``unicode`` objects for functions with multiple string arguments).
+
+    This extraction function operates on the AST and is therefore unable
+    to extract any comments.  For comment support you have to use the
+    Babel extraction interface or extract comments yourself.
+    """
+    for node in node.find_all(nodes.Call):
+        if not isinstance(node.node, nodes.Name) or \
+           node.node.name not in gettext_functions:
+            continue
+
+        strings = []
+        for arg in node.args:
+            if isinstance(arg, nodes.Const) and \
+               isinstance(arg.value, string_types):
+                strings.append(arg.value)
+            else:
+                strings.append(None)
+
+        for arg in node.kwargs:
+            strings.append(None)
+        if node.dyn_args is not None:
+            strings.append(None)
+        if node.dyn_kwargs is not None:
+            strings.append(None)
+
+        if not babel_style:
+            strings = tuple(x for x in strings if x is not None)
+            if not strings:
+                continue
+        else:
+            if len(strings) == 1:
+                strings = strings[0]
+            else:
+                strings = tuple(strings)
+        yield node.lineno, node.node.name, strings
+
+
+class _CommentFinder(object):
+    """Helper class to find comments in a token stream.  Can only
+    find comments for gettext calls forwards.  Once the comment
+    from line 4 is found, a comment for line 1 will not return a
+    usable value.
+    """
+
+    def __init__(self, tokens, comment_tags):
+        self.tokens = tokens
+        self.comment_tags = comment_tags
+        self.offset = 0
+        self.last_lineno = 0
+
+    def find_backwards(self, offset):
+        try:
+            for _, token_type, token_value in \
+                    reversed(self.tokens[self.offset:offset]):
+                if token_type in ('comment', 'linecomment'):
+                    try:
+                        prefix, comment = token_value.split(None, 1)
+                    except ValueError:
+                        continue
+                    if prefix in self.comment_tags:
+                        return [comment.rstrip()]
+            return []
+        finally:
+            self.offset = offset
+
+    def find_comments(self, lineno):
+        if not self.comment_tags or self.last_lineno > lineno:
+            return []
+        for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
+            if token_lineno > lineno:
+                return self.find_backwards(self.offset + idx)
+        return self.find_backwards(len(self.tokens))
+
+
+def babel_extract(fileobj, keywords, comment_tags, options):
+    """Babel extraction method for Jinja templates.
+
+    .. versionchanged:: 2.3
+       Basic support for translation comments was added.  If `comment_tags`
+       is now set to a list of keywords for extraction, the extractor will
+       try to find the best preceding comment that begins with one of the
+       keywords.  For best results, make sure not to have more than one
+       gettext call in one line of code and to keep the matching comment
+       on the same line or the line before.
+
+    .. versionchanged:: 2.5.1
+       The `newstyle_gettext` flag can be set to `True` to enable newstyle
+       gettext calls.
+
+    .. versionchanged:: 2.7
+       A `silent` option can now be provided.  If set to `False` template
+       syntax errors are propagated instead of being ignored.
+
+    :param fileobj: the file-like object the messages should be extracted from
+    :param keywords: a list of keywords (i.e. function names) that should be
+                     recognized as translation functions
+    :param comment_tags: a list of translator tags to search for and include
+                         in the results.
+    :param options: a dictionary of additional options (optional)
+    :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
+             (comments will be empty unless `comment_tags` is given)
+    """
+    extensions = set()
+    for extension in options.get('extensions', '').split(','):
+        extension = extension.strip()
+        if not extension:
+            continue
+        extensions.add(import_string(extension))
+    if InternationalizationExtension not in extensions:
+        extensions.add(InternationalizationExtension)
+
+    def getbool(options, key, default=False):
+        return options.get(key, str(default)).lower() in \
+            ('1', 'on', 'yes', 'true')
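+
+    # For reference, a Babel mapping file entry that feeds these options
+    # could look like this (paths and values are purely illustrative):
+    #
+    #     [jinja2: templates/**.html]
+    #     encoding = utf-8
+    #     silent = false
+    #     trimmed = true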
+
+    silent = getbool(options, 'silent', True)
+    environment = Environment(
+        options.get('block_start_string', BLOCK_START_STRING),
+        options.get('block_end_string', BLOCK_END_STRING),
+        options.get('variable_start_string', VARIABLE_START_STRING),
+        options.get('variable_end_string', VARIABLE_END_STRING),
+        options.get('comment_start_string', COMMENT_START_STRING),
+        options.get('comment_end_string', COMMENT_END_STRING),
+        options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
+        options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
+        getbool(options, 'trim_blocks', TRIM_BLOCKS),
+        getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
+        NEWLINE_SEQUENCE,
+        getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
+        frozenset(extensions),
+        cache_size=0,
+        auto_reload=False
+    )
+
+    if getbool(options, 'trimmed'):
+        environment.policies['ext.i18n.trimmed'] = True
+    if getbool(options, 'newstyle_gettext'):
+        environment.newstyle_gettext = True
+
+    source = fileobj.read().decode(options.get('encoding', 'utf-8'))
+    try:
+        node = environment.parse(source)
+        tokens = list(environment.lex(environment.preprocess(source)))
+    except TemplateSyntaxError as e:
+        if not silent:
+            raise
+        # skip templates with syntax errors
+        return
+
+    finder = _CommentFinder(tokens, comment_tags)
+    for lineno, func, message in extract_from_ast(node, keywords):
+        yield lineno, func, message, finder.find_comments(lineno)
+
+
+#: nicer import names
+i18n = InternationalizationExtension
+do = ExprStmtExtension
+loopcontrols = LoopControlExtension
+with_ = WithExtension
+autoescape = AutoEscapeExtension
diff --git a/lib/python3.7/site-packages/jinja2/filters.py b/lib/python3.7/site-packages/jinja2/filters.py
new file mode 100644
index 0000000000000000000000000000000000000000..267ddddaa03478ed77d640c011b8f2c0d6641d04
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/filters.py
@@ -0,0 +1,1190 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.filters
+    ~~~~~~~~~~~~~~
+
+    Bundled jinja filters.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+import math
+import random
+import textwrap
+import warnings
+
+from itertools import groupby, chain
+from collections import namedtuple
+from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
+     unicode_urlencode, htmlsafe_json_dumps
+from jinja2.runtime import Undefined
+from jinja2.exceptions import FilterArgumentError
+from jinja2._compat import imap, string_types, text_type, iteritems, PY2
+
+
+_word_re = re.compile(r'\w+', re.UNICODE)
+_word_beginning_split_re = re.compile(r'([-\s\(\{\[\<]+)', re.UNICODE)
+
+
+def contextfilter(f):
+    """Decorator for marking context dependent filters. The current
+    :class:`Context` will be passed as first argument.
+    """
+    f.contextfilter = True
+    return f
+
+
+def evalcontextfilter(f):
+    """Decorator for marking eval-context dependent filters.  An eval
+    context object is passed as first argument.  For more information
+    about the eval context, see :ref:`eval-context`.
+
+    .. versionadded:: 2.4
+    """
+    f.evalcontextfilter = True
+    return f
+
+
+def environmentfilter(f):
+    """Decorator for marking environment dependent filters.  The current
+    :class:`Environment` is passed to the filter as first argument.
+    """
+    f.environmentfilter = True
+    return f
+
+
+def ignore_case(value):
+    """For use as a postprocessor for :func:`make_attrgetter`. Converts strings
+    to lowercase and returns other types as-is."""
+    return value.lower() if isinstance(value, string_types) else value
+
+
+def make_attrgetter(environment, attribute, postprocess=None):
+    """Returns a callable that looks up the given attribute from a
+    passed object with the rules of the environment.  Dots are allowed
+    to access attributes of attributes.  Integer parts in paths are
+    looked up as integers.
+    """
+    if attribute is None:
+        attribute = []
+    elif isinstance(attribute, string_types):
+        attribute = [int(x) if x.isdigit() else x for x in attribute.split('.')]
+    else:
+        attribute = [attribute]
+
+    def attrgetter(item):
+        for part in attribute:
+            item = environment.getitem(item, part)
+
+        if postprocess is not None:
+            item = postprocess(item)
+
+        return item
+
+    return attrgetter
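+
+# A minimal usage sketch (hypothetical data, default Environment; the
+# environment's getitem handles both item and attribute access):
+#
+#     >>> from jinja2 import Environment
+#     >>> getter = make_attrgetter(Environment(), 'user.name')
+#     >>> getter({'user': {'name': 'alice'}})
+#     'alice'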
+
+
+def do_forceescape(value):
+    """Enforce HTML escaping.  This will probably double escape variables."""
+    if hasattr(value, '__html__'):
+        value = value.__html__()
+    return escape(text_type(value))
+
+
+def do_urlencode(value):
+    """Escape strings for use in URLs (uses UTF-8 encoding).  It accepts both
+    dictionaries and regular strings as well as pairwise iterables.
+
+    .. versionadded:: 2.7
+    """
+    itemiter = None
+    if isinstance(value, dict):
+        itemiter = iteritems(value)
+    elif not isinstance(value, string_types):
+        try:
+            itemiter = iter(value)
+        except TypeError:
+            pass
+    if itemiter is None:
+        return unicode_urlencode(value)
+    return u'&'.join(unicode_urlencode(k) + '=' +
+                     unicode_urlencode(v, for_qs=True)
+                     for k, v in itemiter)
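+
+# A sketch of the two input shapes (values are made up):
+#
+#     >>> do_urlencode({'q': 'a b'})     # dict becomes a query string
+#     'q=a%20b'
+#     >>> do_urlencode('a b/c')          # plain string, '/' kept safe
+#     'a%20b/c'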
+
+
+@evalcontextfilter
+def do_replace(eval_ctx, s, old, new, count=None):
+    """Return a copy of the value with all occurrences of a substring
+    replaced with a new one. The first argument is the substring
+    that should be replaced, the second is the replacement string.
+    If the optional third argument ``count`` is given, only the first
+    ``count`` occurrences are replaced:
+
+    .. sourcecode:: jinja
+
+        {{ "Hello World"|replace("Hello", "Goodbye") }}
+            -> Goodbye World
+
+        {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
+            -> d'oh, d'oh, aaargh
+    """
+    if count is None:
+        count = -1
+    if not eval_ctx.autoescape:
+        return text_type(s).replace(text_type(old), text_type(new), count)
+    if hasattr(old, '__html__') or \
+       (hasattr(new, '__html__') and not hasattr(s, '__html__')):
+        s = escape(s)
+    else:
+        s = soft_unicode(s)
+    return s.replace(soft_unicode(old), soft_unicode(new), count)
+
+
+def do_upper(s):
+    """Convert a value to uppercase."""
+    return soft_unicode(s).upper()
+
+
+def do_lower(s):
+    """Convert a value to lowercase."""
+    return soft_unicode(s).lower()
+
+
+@evalcontextfilter
+def do_xmlattr(_eval_ctx, d, autospace=True):
+    """Create an SGML/XML attribute string based on the items in a dict.
+    All values that are neither `none` nor `undefined` are automatically
+    escaped:
+
+    .. sourcecode:: html+jinja
+
+        <ul{{ {'class': 'my_list', 'missing': none,
+                'id': 'list-%d'|format(variable)}|xmlattr }}>
+        ...
+        </ul>
+
+    Results in something like this:
+
+    .. sourcecode:: html
+
+        <ul class="my_list" id="list-42">
+        ...
+        </ul>
+
+    As you can see it automatically prepends a space in front of the result
+    if the filter returned something, unless the second parameter is false.
+    """
+    rv = u' '.join(
+        u'%s="%s"' % (escape(key), escape(value))
+        for key, value in iteritems(d)
+        if value is not None and not isinstance(value, Undefined)
+    )
+    if autospace and rv:
+        rv = u' ' + rv
+    if _eval_ctx.autoescape:
+        rv = Markup(rv)
+    return rv
+
+
+def do_capitalize(s):
+    """Capitalize a value. The first character will be uppercase, all others
+    lowercase.
+    """
+    return soft_unicode(s).capitalize()
+
+
+def do_title(s):
+    """Return a titlecased version of the value. I.e. words will start with
+    uppercase letters, all remaining characters are lowercase.
+    """
+    return ''.join(
+        [item[0].upper() + item[1:].lower()
+         for item in _word_beginning_split_re.split(soft_unicode(s))
+         if item])
+
+
+def do_dictsort(value, case_sensitive=False, by='key', reverse=False):
+    """Sort a dict and yield (key, value) pairs. Because python dicts are
+    unsorted you may want to use this function to order them by either
+    key or value:
+
+    .. sourcecode:: jinja
+
+        {% for item in mydict|dictsort %}
+            sort the dict by key, case insensitive
+
+        {% for item in mydict|dictsort(reverse=true) %}
+            sort the dict by key, case insensitive, reverse order
+
+        {% for item in mydict|dictsort(true) %}
+            sort the dict by key, case sensitive
+
+        {% for item in mydict|dictsort(false, 'value') %}
+            sort the dict by value, case insensitive
+    """
+    if by == 'key':
+        pos = 0
+    elif by == 'value':
+        pos = 1
+    else:
+        raise FilterArgumentError(
+            'You can only sort by either "key" or "value"'
+        )
+
+    def sort_func(item):
+        value = item[pos]
+
+        if not case_sensitive:
+            value = ignore_case(value)
+
+        return value
+
+    return sorted(value.items(), key=sort_func, reverse=reverse)
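+
+# Case-insensitive by default, so 'B' sorts after 'a' (made-up data):
+#
+#     >>> do_dictsort({'B': 0, 'a': 1})
+#     [('a', 1), ('B', 0)]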
+
+
+@environmentfilter
+def do_sort(
+    environment, value, reverse=False, case_sensitive=False, attribute=None
+):
+    """Sort an iterable.  Per default it sorts ascending, if you pass it
+    true as first argument it will reverse the sorting.
+
+    If the iterable is made of strings the third parameter can be used to
+    control the case sensitiveness of the comparison which is disabled by
+    default.
+
+    .. sourcecode:: jinja
+
+        {% for item in iterable|sort %}
+            ...
+        {% endfor %}
+
+    It is also possible to sort by an attribute (for example to sort
+    by the date of an object) by specifying the `attribute` parameter:
+
+    .. sourcecode:: jinja
+
+        {% for item in iterable|sort(attribute='date') %}
+            ...
+        {% endfor %}
+
+    .. versionchanged:: 2.6
+       The `attribute` parameter was added.
+    """
+    key_func = make_attrgetter(
+        environment, attribute,
+        postprocess=ignore_case if not case_sensitive else None
+    )
+    return sorted(value, key=key_func, reverse=reverse)
+
+
+@environmentfilter
+def do_unique(environment, value, case_sensitive=False, attribute=None):
+    """Returns a list of unique items from the the given iterable.
+
+    .. sourcecode:: jinja
+
+        {{ ['foo', 'bar', 'foobar', 'FooBar']|unique }}
+            -> ['foo', 'bar', 'foobar']
+
+    The unique items are yielded in the same order as their first occurrence in
+    the iterable passed to the filter.
+
+    :param case_sensitive: Treat upper and lower case strings as distinct.
+    :param attribute: Filter objects with unique values for this attribute.
+    """
+    getter = make_attrgetter(
+        environment, attribute,
+        postprocess=ignore_case if not case_sensitive else None
+    )
+    seen = set()
+
+    for item in value:
+        key = getter(item)
+
+        if key not in seen:
+            seen.add(key)
+            yield item
+
+
+def _min_or_max(environment, value, func, case_sensitive, attribute):
+    it = iter(value)
+
+    try:
+        first = next(it)
+    except StopIteration:
+        return environment.undefined('No aggregated item, sequence was empty.')
+
+    key_func = make_attrgetter(
+        environment, attribute,
+        ignore_case if not case_sensitive else None
+    )
+    return func(chain([first], it), key=key_func)
+
+
+@environmentfilter
+def do_min(environment, value, case_sensitive=False, attribute=None):
+    """Return the smallest item from the sequence.
+
+    .. sourcecode:: jinja
+
+        {{ [1, 2, 3]|min }}
+            -> 1
+
+    :param case_sensitive: Treat upper and lower case strings as distinct.
+    :param attribute: Get the object with the min value of this attribute.
+    """
+    return _min_or_max(environment, value, min, case_sensitive, attribute)
+
+
+@environmentfilter
+def do_max(environment, value, case_sensitive=False, attribute=None):
+    """Return the largest item from the sequence.
+
+    .. sourcecode:: jinja
+
+        {{ [1, 2, 3]|max }}
+            -> 3
+
+    :param case_sensitive: Treat upper and lower case strings as distinct.
+    :param attribute: Get the object with the max value of this attribute.
+    """
+    return _min_or_max(environment, value, max, case_sensitive, attribute)
+
+
+def do_default(value, default_value=u'', boolean=False):
+    """If the value is undefined it will return the passed default value,
+    otherwise the value of the variable:
+
+    .. sourcecode:: jinja
+
+        {{ my_variable|default('my_variable is not defined') }}
+
+    This will output the value of ``my_variable`` if the variable was
+    defined, otherwise ``'my_variable is not defined'``. If you want
+    to use default with variables that evaluate to false you have to
+    set the second parameter to `true`:
+
+    .. sourcecode:: jinja
+
+        {{ ''|default('the string was empty', true) }}
+    """
+    if isinstance(value, Undefined) or (boolean and not value):
+        return default_value
+    return value
+
+
+@evalcontextfilter
+def do_join(eval_ctx, value, d=u'', attribute=None):
+    """Return a string which is the concatenation of the strings in the
+    sequence. The separator between elements is an empty string by
+    default; you can define it with the optional parameter:
+
+    .. sourcecode:: jinja
+
+        {{ [1, 2, 3]|join('|') }}
+            -> 1|2|3
+
+        {{ [1, 2, 3]|join }}
+            -> 123
+
+    It is also possible to join certain attributes of an object:
+
+    .. sourcecode:: jinja
+
+        {{ users|join(', ', attribute='username') }}
+
+    .. versionadded:: 2.6
+       The `attribute` parameter was added.
+    """
+    if attribute is not None:
+        value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
+
+    # no automatic escaping?  joining is a lot easier then
+    if not eval_ctx.autoescape:
+        return text_type(d).join(imap(text_type, value))
+
+    # if the delimiter doesn't have an html representation we check
+    # if any of the items has.  If yes we do a coercion to Markup
+    if not hasattr(d, '__html__'):
+        value = list(value)
+        do_escape = False
+        for idx, item in enumerate(value):
+            if hasattr(item, '__html__'):
+                do_escape = True
+            else:
+                value[idx] = text_type(item)
+        if do_escape:
+            d = escape(d)
+        else:
+            d = text_type(d)
+        return d.join(value)
+
+    # no html involved, so do normal joining
+    return soft_unicode(d).join(imap(soft_unicode, value))
+
+
+def do_center(value, width=80):
+    """Centers the value in a field of a given width."""
+    return text_type(value).center(width)
+
+
+@environmentfilter
+def do_first(environment, seq):
+    """Return the first item of a sequence."""
+    try:
+        return next(iter(seq))
+    except StopIteration:
+        return environment.undefined('No first item, sequence was empty.')
+
+
+@environmentfilter
+def do_last(environment, seq):
+    """Return the last item of a sequence."""
+    try:
+        return next(iter(reversed(seq)))
+    except StopIteration:
+        return environment.undefined('No last item, sequence was empty.')
+
+
+@contextfilter
+def do_random(context, seq):
+    """Return a random item from the sequence."""
+    try:
+        return random.choice(seq)
+    except IndexError:
+        return context.environment.undefined('No random item, sequence was empty.')
+
+
+def do_filesizeformat(value, binary=False):
+    """Format the value like a 'human-readable' file size (i.e. 13 kB,
+    4.1 MB, 102 Bytes, etc).  By default decimal prefixes are used (Mega,
+    Giga, etc.); if the second parameter is set to `True` the binary
+    prefixes are used (Mebi, Gibi).
+    """
+    bytes = float(value)
+    base = 1024 if binary else 1000
+    prefixes = (
+        ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
+        if binary else
+        ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
+    )
+    if bytes == 1:
+        return '1 Byte'
+    elif bytes < base:
+        return '%d Bytes' % bytes
+    else:
+        for i, prefix in enumerate(prefixes):
+            unit = base ** (i + 2)
+            if bytes < unit:
+                return '%.1f %s' % ((base * bytes / unit), prefix)
+        return '%.1f %s' % ((base * bytes / unit), prefix)
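+
+# Decimal vs. binary prefixes at a glance (values are illustrative):
+#
+#     >>> do_filesizeformat(100)
+#     '100 Bytes'
+#     >>> do_filesizeformat(1000)
+#     '1.0 kB'
+#     >>> do_filesizeformat(1024, binary=True)
+#     '1.0 KiB'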
+
+
+def do_pprint(value, verbose=False):
+    """Pretty print a variable. Useful for debugging.
+
+    From Jinja 1.2 onwards you can pass it a parameter.  If this parameter
+    is truthy the output will be more verbose (this requires the `pretty`
+    library).
+    """
+    return pformat(value, verbose=verbose)
+
+
+@evalcontextfilter
+def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
+              target=None, rel=None):
+    """Converts URLs in plain text into clickable links.
+
+    If you pass the filter an additional integer it will shorten the urls
+    to that number of characters. A third argument exists that makes the
+    urls "nofollow":
+
+    .. sourcecode:: jinja
+
+        {{ mytext|urlize(40, true) }}
+            links are shortened to 40 chars and defined with rel="nofollow"
+
+    If *target* is specified, the ``target`` attribute will be added to the
+    ``<a>`` tag:
+
+    .. sourcecode:: jinja
+
+       {{ mytext|urlize(40, target='_blank') }}
+
+    .. versionchanged:: 2.8
+       The *target* parameter was added.
+    """
+    policies = eval_ctx.environment.policies
+    rel = set((rel or '').split() or [])
+    if nofollow:
+        rel.add('nofollow')
+    rel.update((policies['urlize.rel'] or '').split())
+    if target is None:
+        target = policies['urlize.target']
+    rel = ' '.join(sorted(rel)) or None
+    rv = urlize(value, trim_url_limit, rel=rel, target=target)
+    if eval_ctx.autoescape:
+        rv = Markup(rv)
+    return rv
+
+
+def do_indent(
+    s, width=4, first=False, blank=False, indentfirst=None
+):
+    """Return a copy of the string with each line indented by 4 spaces. The
+    first line and blank lines are not indented by default.
+
+    :param width: Number of spaces to indent by.
+    :param first: Don't skip indenting the first line.
+    :param blank: Don't skip indenting empty lines.
+
+    .. versionchanged:: 2.10
+        Blank lines are not indented by default.
+
+        Rename the ``indentfirst`` argument to ``first``.
+    """
+    if indentfirst is not None:
+        warnings.warn(DeprecationWarning(
+            'The "indentfirst" argument is renamed to "first".'
+        ), stacklevel=2)
+        first = indentfirst
+
+    s += u'\n'  # this quirk is necessary for splitlines method
+    indention = u' ' * width
+
+    if blank:
+        rv = (u'\n' + indention).join(s.splitlines())
+    else:
+        lines = s.splitlines()
+        rv = lines.pop(0)
+
+        if lines:
+            rv += u'\n' + u'\n'.join(
+                indention + line if line else line for line in lines
+            )
+
+    if first:
+        rv = indention + rv
+
+    return rv
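+
+# A sketch: only lines after the first are indented unless ``first`` is set
+# (input strings are made up):
+#
+#     >>> do_indent(u'line one\nline two', width=2)
+#     'line one\n  line two'
+#     >>> do_indent(u'line one\nline two', width=2, first=True)
+#     '  line one\n  line two'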
+
+
+@environmentfilter
+def do_truncate(env, s, length=255, killwords=False, end='...', leeway=None):
+    """Return a truncated copy of the string. The length is specified
+    with the first parameter which defaults to ``255``. If the second
+    parameter is ``true`` the filter will cut the text at length. Otherwise
+    it will discard the last word. If the text was in fact
+    truncated it will append an ellipsis sign (``"..."``). If you want a
+    different ellipsis sign than ``"..."`` you can specify it using the
+    third parameter. Strings that only exceed the length by the tolerance
+    margin given in the fourth parameter will not be truncated.
+
+    .. sourcecode:: jinja
+
+        {{ "foo bar baz qux"|truncate(9) }}
+            -> "foo..."
+        {{ "foo bar baz qux"|truncate(9, True) }}
+            -> "foo ba..."
+        {{ "foo bar baz qux"|truncate(11) }}
+            -> "foo bar baz qux"
+        {{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
+            -> "foo bar..."
+
+    The default leeway is 5 on newer Jinja2 versions (it was 0 before) and
+    can be reconfigured globally via the ``truncate.leeway`` policy.
+    """
+    if leeway is None:
+        leeway = env.policies['truncate.leeway']
+    assert length >= len(end), 'expected length >= %s, got %s' % (len(end), length)
+    assert leeway >= 0, 'expected leeway >= 0, got %s' % leeway
+    if len(s) <= length + leeway:
+        return s
+    if killwords:
+        return s[:length - len(end)] + end
+    result = s[:length - len(end)].rsplit(' ', 1)[0]
+    return result + end
+
+
+@environmentfilter
+def do_wordwrap(environment, s, width=79, break_long_words=True,
+                wrapstring=None):
+    """
+    Return a copy of the string passed to the filter wrapped after
+    ``79`` characters.  You can override this default using the first
+    parameter.  If you set the second parameter to `false` Jinja will not
+    split words apart if they are longer than `width`. By default, the newlines
+    will be the default newlines for the environment, but this can be changed
+    using the wrapstring keyword argument.
+
+    .. versionadded:: 2.7
+       Added support for the `wrapstring` parameter.
+    """
+    if not wrapstring:
+        wrapstring = environment.newline_sequence
+    return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
+                                         replace_whitespace=False,
+                                         break_long_words=break_long_words))
+
+
+def do_wordcount(s):
+    """Count the words in that string."""
+    return len(_word_re.findall(s))
+
+
+def do_int(value, default=0, base=10):
+    """Convert the value into an integer. If the
+    conversion doesn't work it will return ``0``. You can
+    override this default using the first parameter. You
+    can also override the default base (10) in the second
+    parameter, which handles input with prefixes such as
+    0b, 0o and 0x for bases 2, 8 and 16 respectively.
+    The base is ignored for decimal numbers and non-string values.
+    """
+    try:
+        if isinstance(value, string_types):
+            return int(value, base)
+        return int(value)
+    except (TypeError, ValueError):
+        # this quirk is necessary so that "42.23"|int gives 42.
+        try:
+            return int(float(value))
+        except (TypeError, ValueError):
+            return default
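+
+# The conversion fallbacks in action (inputs are made up):
+#
+#     >>> do_int('42.23')            # float-looking strings are truncated
+#     42
+#     >>> do_int('0x4d32', base=16)  # prefixes work with a matching base
+#     19762
+#     >>> do_int('nope', default=-1)
+#     -1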
+
+
+def do_float(value, default=0.0):
+    """Convert the value into a floating point number. If the
+    conversion doesn't work it will return ``0.0``. You can
+    override this default using the first parameter.
+    """
+    try:
+        return float(value)
+    except (TypeError, ValueError):
+        return default
+
+
+def do_format(value, *args, **kwargs):
+    """
+    Apply python string formatting on an object:
+
+    .. sourcecode:: jinja
+
+        {{ "%s - %s"|format("Hello?", "Foo!") }}
+            -> Hello? - Foo!
+    """
+    if args and kwargs:
+        raise FilterArgumentError('can\'t handle positional and keyword '
+                                  'arguments at the same time')
+    return soft_unicode(value) % (kwargs or args)
+
+
+def do_trim(value):
+    """Strip leading and trailing whitespace."""
+    return soft_unicode(value).strip()
+
+
+def do_striptags(value):
+    """Strip SGML/XML tags and replace adjacent whitespace by one space.
+    """
+    if hasattr(value, '__html__'):
+        value = value.__html__()
+    return Markup(text_type(value)).striptags()
+
+
+def do_slice(value, slices, fill_with=None):
+    """Slice an iterator and return a list of lists containing
+    those items. Useful if you want to create a div containing
+    three ul tags that represent columns:
+
+    .. sourcecode:: html+jinja
+
+        <div class="columwrapper">
+          {%- for column in items|slice(3) %}
+            <ul class="column-{{ loop.index }}">
+            {%- for item in column %}
+              <li>{{ item }}</li>
+            {%- endfor %}
+            </ul>
+          {%- endfor %}
+        </div>
+
+    If you pass it a second argument it's used to fill missing
+    values on the last iteration.
+    """
+    seq = list(value)
+    length = len(seq)
+    items_per_slice = length // slices
+    slices_with_extra = length % slices
+    offset = 0
+    for slice_number in range(slices):
+        start = offset + slice_number * items_per_slice
+        if slice_number < slices_with_extra:
+            offset += 1
+        end = offset + (slice_number + 1) * items_per_slice
+        tmp = seq[start:end]
+        if fill_with is not None and slice_number >= slices_with_extra:
+            tmp.append(fill_with)
+        yield tmp
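+
+# Earlier slices absorb the remainder; ``fill_with`` pads the short ones
+# (illustrative input):
+#
+#     >>> list(do_slice(range(5), 3))
+#     [[0, 1], [2, 3], [4]]
+#     >>> list(do_slice(range(5), 3, fill_with=0))
+#     [[0, 1], [2, 3], [4, 0]]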
+
+
+def do_batch(value, linecount, fill_with=None):
+    """
+    A filter that batches items. It works pretty much like `slice`
+    just the other way round. It returns a list of lists with the
+    given number of items. If you provide a second parameter this
+    is used to fill up missing items. See this example:
+
+    .. sourcecode:: html+jinja
+
+        <table>
+        {%- for row in items|batch(3, '&nbsp;') %}
+          <tr>
+          {%- for column in row %}
+            <td>{{ column }}</td>
+          {%- endfor %}
+          </tr>
+        {%- endfor %}
+        </table>
+    """
+    tmp = []
+    for item in value:
+        if len(tmp) == linecount:
+            yield tmp
+            tmp = []
+        tmp.append(item)
+    if tmp:
+        if fill_with is not None and len(tmp) < linecount:
+            tmp += [fill_with] * (linecount - len(tmp))
+        yield tmp
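+
+# Compare with ``slice``: ``batch`` fixes the row length instead of the
+# number of rows (made-up data):
+#
+#     >>> list(do_batch([1, 2, 3, 4, 5], 2))
+#     [[1, 2], [3, 4], [5]]
+#     >>> list(do_batch([1, 2, 3, 4, 5], 2, fill_with=0))
+#     [[1, 2], [3, 4], [5, 0]]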
+
+
+def do_round(value, precision=0, method='common'):
+    """Round the number to a given precision. The first
+    parameter specifies the precision (default is ``0``), the
+    second the rounding method:
+
+    - ``'common'`` rounds either up or down
+    - ``'ceil'`` always rounds up
+    - ``'floor'`` always rounds down
+
+    If you don't specify a method ``'common'`` is used.
+
+    .. sourcecode:: jinja
+
+        {{ 42.55|round }}
+            -> 43.0
+        {{ 42.55|round(1, 'floor') }}
+            -> 42.5
+
+    Note that even if rounded to 0 precision, a float is returned.  If
+    you need a real integer, pipe it through `int`:
+
+    .. sourcecode:: jinja
+
+        {{ 42.55|round|int }}
+            -> 43
+    """
+    if method not in ('common', 'ceil', 'floor'):
+        raise FilterArgumentError('method must be common, ceil or floor')
+    if method == 'common':
+        return round(value, precision)
+    func = getattr(math, method)
+    return func(value * (10 ** precision)) / (10 ** precision)
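+
+# The non-'common' methods scale, round, then scale back (illustrative):
+#
+#     >>> do_round(42.55, 1, 'floor')   # floor(425.5) / 10
+#     42.5
+#     >>> do_round(42.55, 0, 'ceil')    # a float comes back either way
+#     43.0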
+
+
+# Use a regular tuple repr here.  This is what we did in the past and we
+# really want to hide this custom type as much as possible.  In particular
+# we do not want to accidentally expose an auto generated repr in case
+# people start to print this out in comments or something similar for
+# debugging.
+_GroupTuple = namedtuple('_GroupTuple', ['grouper', 'list'])
+_GroupTuple.__repr__ = tuple.__repr__
+_GroupTuple.__str__ = tuple.__str__
+
+@environmentfilter
+def do_groupby(environment, value, attribute):
+    """Group a sequence of objects by a common attribute.
+
+    If you, for example, have a list of dicts or objects that represent
+    persons with `gender`, `first_name` and `last_name` attributes and you
+    want to group all users by gender, you can do something like the
+    following snippet:
+
+    .. sourcecode:: html+jinja
+
+        <ul>
+        {% for group in persons|groupby('gender') %}
+            <li>{{ group.grouper }}<ul>
+            {% for person in group.list %}
+                <li>{{ person.first_name }} {{ person.last_name }}</li>
+            {% endfor %}</ul></li>
+        {% endfor %}
+        </ul>
+
+    Additionally it's possible to use tuple unpacking for the grouper and
+    list:
+
+    .. sourcecode:: html+jinja
+
+        <ul>
+        {% for grouper, list in persons|groupby('gender') %}
+            ...
+        {% endfor %}
+        </ul>
+
+    As you can see the item we're grouping by is stored in the `grouper`
+    attribute and the `list` contains all the objects that have this grouper
+    in common.
+
+    .. versionchanged:: 2.6
+       It's now possible to use dotted notation to group by the child
+       attribute of another attribute.
+    """
+    expr = make_attrgetter(environment, attribute)
+    return [_GroupTuple(key, list(values)) for key, values
+            in groupby(sorted(value, key=expr), expr)]
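+
+# A sketch with made-up rows (an Environment is needed for the attribute
+# lookup; the group tuples print with a plain tuple repr):
+#
+#     >>> from jinja2 import Environment
+#     >>> rows = [{'g': 'a', 'v': 1}, {'g': 'b', 'v': 2}, {'g': 'a', 'v': 3}]
+#     >>> do_groupby(Environment(), rows, 'g')
+#     [('a', [{'g': 'a', 'v': 1}, {'g': 'a', 'v': 3}]), ('b', [{'g': 'b', 'v': 2}])]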
+
+
+@environmentfilter
+def do_sum(environment, iterable, attribute=None, start=0):
+    """Returns the sum of a sequence of numbers plus the value of parameter
+    'start' (which defaults to 0).  When the sequence is empty it returns
+    start.
+
+    It is also possible to sum up only certain attributes:
+
+    .. sourcecode:: jinja
+
+        Total: {{ items|sum(attribute='price') }}
+
+    .. versionchanged:: 2.6
+       The `attribute` parameter was added to allow summing up over
+       attributes.  Also the `start` parameter was moved to the right.
+    """
+    if attribute is not None:
+        iterable = imap(make_attrgetter(environment, attribute), iterable)
+    return sum(iterable, start)
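+
+# Attribute summing sketch (the data is invented):
+#
+#     >>> from jinja2 import Environment
+#     >>> do_sum(Environment(), [{'price': 5}, {'price': 7}], attribute='price')
+#     12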
+
+
+def do_list(value):
+    """Convert the value into a list.  If it was a string the returned list
+    will be a list of characters.
+    """
+    return list(value)
+
+
+def do_mark_safe(value):
+    """Mark the value as safe which means that in an environment with automatic
+    escaping enabled this variable will not be escaped.
+    """
+    return Markup(value)
+
+
+def do_mark_unsafe(value):
+    """Mark a value as unsafe.  This is the reverse operation for :func:`safe`."""
+    return text_type(value)
+
+
+def do_reverse(value):
+    """Reverse the object or return an iterator that iterates over it the other
+    way round.
+    """
+    if isinstance(value, string_types):
+        return value[::-1]
+    try:
+        return reversed(value)
+    except TypeError:
+        try:
+            rv = list(value)
+            rv.reverse()
+            return rv
+        except TypeError:
+            raise FilterArgumentError('argument must be iterable')
+
+
+@environmentfilter
+def do_attr(environment, obj, name):
+    """Get an attribute of an object.  ``foo|attr("bar")`` works like
+    ``foo.bar``, except that an attribute is always returned and items
+    are not looked up.
+
+    See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
+    """
+    try:
+        name = str(name)
+    except UnicodeError:
+        pass
+    else:
+        try:
+            value = getattr(obj, name)
+        except AttributeError:
+            pass
+        else:
+            if environment.sandboxed and not \
+               environment.is_safe_attribute(obj, name, value):
+                return environment.unsafe_undefined(obj, name)
+            return value
+    return environment.undefined(obj=obj, name=name)
+
+
+@contextfilter
+def do_map(*args, **kwargs):
+    """Applies a filter on a sequence of objects or looks up an attribute.
+    This is useful when dealing with lists of objects but you are really
+    only interested in a certain value of each.
+
+    The basic usage is mapping on an attribute.  Imagine you have a list
+    of users but you are only interested in a list of usernames:
+
+    .. sourcecode:: jinja
+
+        Users on this page: {{ users|map(attribute='username')|join(', ') }}
+
+    Alternatively you can let it invoke a filter by passing the name of the
+    filter and the arguments afterwards.  A good example would be applying a
+    text conversion filter on a sequence:
+
+    .. sourcecode:: jinja
+
+        Users on this page: {{ titles|map('lower')|join(', ') }}
+
+    .. versionadded:: 2.7
+    """
+    seq, func = prepare_map(args, kwargs)
+    if seq:
+        for item in seq:
+            yield func(item)
+
+
+@contextfilter
+def do_select(*args, **kwargs):
+    """Filters a sequence of objects by applying a test to each object,
+    and only selecting the objects with the test succeeding.
+
+    If no test is specified, each object will be evaluated as a boolean.
+
+    Example usage:
+
+    .. sourcecode:: jinja
+
+        {{ numbers|select("odd") }}
+        {{ numbers|select("odd") }}
+        {{ numbers|select("divisibleby", 3) }}
+        {{ numbers|select("lessthan", 42) }}
+        {{ strings|select("equalto", "mystring") }}
+
+    .. versionadded:: 2.7
+    """
+    return select_or_reject(args, kwargs, lambda x: x, False)
+
+
+@contextfilter
+def do_reject(*args, **kwargs):
+    """Filters a sequence of objects by applying a test to each object,
+    and rejecting the objects with the test succeeding.
+
+    If no test is specified, each object will be evaluated as a boolean.
+
+    Example usage:
+
+    .. sourcecode:: jinja
+
+        {{ numbers|reject("odd") }}
+
+    .. versionadded:: 2.7
+    """
+    return select_or_reject(args, kwargs, lambda x: not x, False)
+
+
+@contextfilter
+def do_selectattr(*args, **kwargs):
+    """Filters a sequence of objects by applying a test to the specified
+    attribute of each object, and only selecting the objects with the
+    test succeeding.
+
+    If no test is specified, the attribute's value will be evaluated as
+    a boolean.
+
+    Example usage:
+
+    .. sourcecode:: jinja
+
+        {{ users|selectattr("is_active") }}
+        {{ users|selectattr("email", "none") }}
+
+    .. versionadded:: 2.7
+    """
+    return select_or_reject(args, kwargs, lambda x: x, True)
+
+
+@contextfilter
+def do_rejectattr(*args, **kwargs):
+    """Filters a sequence of objects by applying a test to the specified
+    attribute of each object, and rejecting the objects with the test
+    succeeding.
+
+    If no test is specified, the attribute's value will be evaluated as
+    a boolean.
+
+    .. sourcecode:: jinja
+
+        {{ users|rejectattr("is_active") }}
+        {{ users|rejectattr("email", "none") }}
+
+    .. versionadded:: 2.7
+    """
+    return select_or_reject(args, kwargs, lambda x: not x, True)
+
+
+@evalcontextfilter
+def do_tojson(eval_ctx, value, indent=None):
+    """Dumps a structure to JSON so that it's safe to use in ``<script>``
+    tags.  It accepts the same arguments as the configured dump function
+    and returns a JSON string.  Note that
+    this is available in templates through the ``|tojson`` filter which will
+    also mark the result as safe.  Due to how this function escapes certain
+    characters this is safe even if used outside of ``<script>`` tags.
+
+    The following characters are escaped in strings:
+
+    -   ``<``
+    -   ``>``
+    -   ``&``
+    -   ``'``
+
+    This makes it safe to embed such strings in any place in HTML with the
+    notable exception of double quoted attributes.  In that case single
+    quote your attributes or HTML escape it in addition.
+
+    The indent parameter can be used to enable pretty printing.  Set it to
+    the number of spaces that the structures should be indented with.
+
+    Note that this filter is for use in HTML contexts only.
+
+    .. versionadded:: 2.9
+    """
+    policies = eval_ctx.environment.policies
+    dumper = policies['json.dumps_function']
+    options = policies['json.dumps_kwargs']
+    if indent is not None:
+        options = dict(options)
+        options['indent'] = indent
+    return htmlsafe_json_dumps(value, dumper=dumper, **options)
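+
+# The dump behavior is policy-driven; a sketch of adjusting it globally
+# (``json.dumps_kwargs`` is a standard policy key, the value illustrative):
+#
+#     >>> from jinja2 import Environment
+#     >>> env = Environment()
+#     >>> env.policies['json.dumps_kwargs'] = {'sort_keys': True}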
+
+
+def prepare_map(args, kwargs):
+    context = args[0]
+    seq = args[1]
+
+    if len(args) == 2 and 'attribute' in kwargs:
+        attribute = kwargs.pop('attribute')
+        if kwargs:
+            raise FilterArgumentError('Unexpected keyword argument %r' %
+                next(iter(kwargs)))
+        func = make_attrgetter(context.environment, attribute)
+    else:
+        try:
+            name = args[2]
+            args = args[3:]
+        except LookupError:
+            raise FilterArgumentError('map requires a filter argument')
+        func = lambda item: context.environment.call_filter(
+            name, item, args, kwargs, context=context)
+
+    return seq, func
+
+
+def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr):
+    context = args[0]
+    seq = args[1]
+    if lookup_attr:
+        try:
+            attr = args[2]
+        except LookupError:
+            raise FilterArgumentError('Missing parameter for attribute name')
+        transfunc = make_attrgetter(context.environment, attr)
+        off = 1
+    else:
+        off = 0
+        transfunc = lambda x: x
+
+    try:
+        name = args[2 + off]
+        args = args[3 + off:]
+        func = lambda item: context.environment.call_test(
+            name, item, args, kwargs)
+    except LookupError:
+        func = bool
+
+    return seq, lambda item: modfunc(func(transfunc(item)))
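+
+# For orientation (an invented call): ``users|selectattr('email', 'none')``
+# arrives here as args = (context, users, 'email', 'none'), so the test
+# name sits at index 2 + off, with off == 1 when an attribute is present.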
+
+
+def select_or_reject(args, kwargs, modfunc, lookup_attr):
+    seq, func = prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
+    if seq:
+        for item in seq:
+            if func(item):
+                yield item
+
+
+FILTERS = {
+    'abs':                  abs,
+    'attr':                 do_attr,
+    'batch':                do_batch,
+    'capitalize':           do_capitalize,
+    'center':               do_center,
+    'count':                len,
+    'd':                    do_default,
+    'default':              do_default,
+    'dictsort':             do_dictsort,
+    'e':                    escape,
+    'escape':               escape,
+    'filesizeformat':       do_filesizeformat,
+    'first':                do_first,
+    'float':                do_float,
+    'forceescape':          do_forceescape,
+    'format':               do_format,
+    'groupby':              do_groupby,
+    'indent':               do_indent,
+    'int':                  do_int,
+    'join':                 do_join,
+    'last':                 do_last,
+    'length':               len,
+    'list':                 do_list,
+    'lower':                do_lower,
+    'map':                  do_map,
+    'min':                  do_min,
+    'max':                  do_max,
+    'pprint':               do_pprint,
+    'random':               do_random,
+    'reject':               do_reject,
+    'rejectattr':           do_rejectattr,
+    'replace':              do_replace,
+    'reverse':              do_reverse,
+    'round':                do_round,
+    'safe':                 do_mark_safe,
+    'select':               do_select,
+    'selectattr':           do_selectattr,
+    'slice':                do_slice,
+    'sort':                 do_sort,
+    'string':               soft_unicode,
+    'striptags':            do_striptags,
+    'sum':                  do_sum,
+    'title':                do_title,
+    'trim':                 do_trim,
+    'truncate':             do_truncate,
+    'unique':               do_unique,
+    'upper':                do_upper,
+    'urlencode':            do_urlencode,
+    'urlize':               do_urlize,
+    'wordcount':            do_wordcount,
+    'wordwrap':             do_wordwrap,
+    'xmlattr':              do_xmlattr,
+    'tojson':               do_tojson,
+}
diff --git a/lib/python3.7/site-packages/jinja2/idtracking.py b/lib/python3.7/site-packages/jinja2/idtracking.py
new file mode 100644
index 0000000000000000000000000000000000000000..491bfe083647c9d914b1641432000c3dcd625c22
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/idtracking.py
@@ -0,0 +1,286 @@
+from jinja2.visitor import NodeVisitor
+from jinja2._compat import iteritems
+
+
+VAR_LOAD_PARAMETER = 'param'
+VAR_LOAD_RESOLVE = 'resolve'
+VAR_LOAD_ALIAS = 'alias'
+VAR_LOAD_UNDEFINED = 'undefined'
+
+
+def find_symbols(nodes, parent_symbols=None):
+    sym = Symbols(parent=parent_symbols)
+    visitor = FrameSymbolVisitor(sym)
+    for node in nodes:
+        visitor.visit(node)
+    return sym
+
+
+def symbols_for_node(node, parent_symbols=None):
+    sym = Symbols(parent=parent_symbols)
+    sym.analyze_node(node)
+    return sym
+
+
+class Symbols(object):
+
+    def __init__(self, parent=None, level=None):
+        if level is None:
+            if parent is None:
+                level = 0
+            else:
+                level = parent.level + 1
+        self.level = level
+        self.parent = parent
+        self.refs = {}
+        self.loads = {}
+        self.stores = set()
+
+    def analyze_node(self, node, **kwargs):
+        visitor = RootVisitor(self)
+        visitor.visit(node, **kwargs)
+
+    def _define_ref(self, name, load=None):
+        ident = 'l_%d_%s' % (self.level, name)
+        self.refs[name] = ident
+        if load is not None:
+            self.loads[ident] = load
+        return ident
+
+    def find_load(self, target):
+        if target in self.loads:
+            return self.loads[target]
+        if self.parent is not None:
+            return self.parent.find_load(target)
+
+    def find_ref(self, name):
+        if name in self.refs:
+            return self.refs[name]
+        if self.parent is not None:
+            return self.parent.find_ref(name)
+
+    def ref(self, name):
+        rv = self.find_ref(name)
+        if rv is None:
+            raise AssertionError('Tried to resolve a name to a reference that '
+                                 'was unknown to the frame (%r)' % name)
+        return rv
+
+    def copy(self):
+        rv = object.__new__(self.__class__)
+        rv.__dict__.update(self.__dict__)
+        rv.refs = self.refs.copy()
+        rv.loads = self.loads.copy()
+        rv.stores = self.stores.copy()
+        return rv
+
+    def store(self, name):
+        self.stores.add(name)
+
+        # If we have not seen the name referenced yet, we need to figure
+        # out what to set it to.
+        if name not in self.refs:
+            # If there is a parent scope we check if the name has a
+            # reference there.  If it does it means we might have to alias
+            # to a variable there.
+            if self.parent is not None:
+                outer_ref = self.parent.find_ref(name)
+                if outer_ref is not None:
+                    self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
+                    return
+
+            # Otherwise we can just set it to undefined.
+            self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
+
+    def declare_parameter(self, name):
+        self.stores.add(name)
+        return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
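+
+    # References are prefixed with their scope level, e.g. (illustrative):
+    #
+    #     >>> sym = Symbols()
+    #     >>> sym.declare_parameter('item')
+    #     'l_0_item'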
+
+    def load(self, name):
+        target = self.find_ref(name)
+        if target is None:
+            self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
+
+    def branch_update(self, branch_symbols):
+        stores = {}
+        for branch in branch_symbols:
+            for target in branch.stores:
+                if target in self.stores:
+                    continue
+                stores[target] = stores.get(target, 0) + 1
+
+        for sym in branch_symbols:
+            self.refs.update(sym.refs)
+            self.loads.update(sym.loads)
+            self.stores.update(sym.stores)
+
+        for name, branch_count in iteritems(stores):
+            if branch_count == len(branch_symbols):
+                continue
+            target = self.find_ref(name)
+            assert target is not None, 'should not happen'
+
+            if self.parent is not None:
+                outer_target = self.parent.find_ref(name)
+                if outer_target is not None:
+                    self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
+                    continue
+            self.loads[target] = (VAR_LOAD_RESOLVE, name)
+
+    def dump_stores(self):
+        rv = {}
+        node = self
+        while node is not None:
+            for name in node.stores:
+                if name not in rv:
+                    rv[name] = self.find_ref(name)
+            node = node.parent
+        return rv
+
+    def dump_param_targets(self):
+        rv = set()
+        node = self
+        while node is not None:
+            # inspect each scope's own loads while walking up the chain
+            for target, (instr, _) in iteritems(node.loads):
+                if instr == VAR_LOAD_PARAMETER:
+                    rv.add(target)
+            node = node.parent
+        return rv
+
+
+class RootVisitor(NodeVisitor):
+
+    def __init__(self, symbols):
+        self.sym_visitor = FrameSymbolVisitor(symbols)
+
+    def _simple_visit(self, node, **kwargs):
+        for child in node.iter_child_nodes():
+            self.sym_visitor.visit(child)
+
+    visit_Template = visit_Block = visit_Macro = visit_FilterBlock = \
+        visit_Scope = visit_If = visit_ScopedEvalContextModifier = \
+        _simple_visit
+
+    def visit_AssignBlock(self, node, **kwargs):
+        for child in node.body:
+            self.sym_visitor.visit(child)
+
+    def visit_CallBlock(self, node, **kwargs):
+        for child in node.iter_child_nodes(exclude=('call',)):
+            self.sym_visitor.visit(child)
+
+    def visit_OverlayScope(self, node, **kwargs):
+        for child in node.body:
+            self.sym_visitor.visit(child)
+
+    def visit_For(self, node, for_branch='body', **kwargs):
+        if for_branch == 'body':
+            self.sym_visitor.visit(node.target, store_as_param=True)
+            branch = node.body
+        elif for_branch == 'else':
+            branch = node.else_
+        elif for_branch == 'test':
+            self.sym_visitor.visit(node.target, store_as_param=True)
+            if node.test is not None:
+                self.sym_visitor.visit(node.test)
+            return
+        else:
+            raise RuntimeError('Unknown for branch')
+        for item in branch or ():
+            self.sym_visitor.visit(item)
+
+    def visit_With(self, node, **kwargs):
+        for target in node.targets:
+            self.sym_visitor.visit(target)
+        for child in node.body:
+            self.sym_visitor.visit(child)
+
+    def generic_visit(self, node, *args, **kwargs):
+        raise NotImplementedError('Cannot find symbols for %r' %
+                                  node.__class__.__name__)
+
+
+class FrameSymbolVisitor(NodeVisitor):
+    """A visitor for `Frame.inspect`."""
+
+    def __init__(self, symbols):
+        self.symbols = symbols
+
+    def visit_Name(self, node, store_as_param=False, **kwargs):
+        """All assignments to names go through this function."""
+        if store_as_param or node.ctx == 'param':
+            self.symbols.declare_parameter(node.name)
+        elif node.ctx == 'store':
+            self.symbols.store(node.name)
+        elif node.ctx == 'load':
+            self.symbols.load(node.name)
+
+    def visit_NSRef(self, node, **kwargs):
+        self.symbols.load(node.name)
+
+    def visit_If(self, node, **kwargs):
+        self.visit(node.test, **kwargs)
+
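+        # each branch is visited with its own copy of the symbol table so
+        # that stores in one branch do not leak into the others; the copies
+        # are merged again via branch_update afterwards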
+        original_symbols = self.symbols
+
+        def inner_visit(nodes):
+            self.symbols = rv = original_symbols.copy()
+            for subnode in nodes:
+                self.visit(subnode, **kwargs)
+            self.symbols = original_symbols
+            return rv
+
+        body_symbols = inner_visit(node.body)
+        elif_symbols = inner_visit(node.elif_)
+        else_symbols = inner_visit(node.else_ or ())
+
+        self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
+
+    def visit_Macro(self, node, **kwargs):
+        self.symbols.store(node.name)
+
+    def visit_Import(self, node, **kwargs):
+        self.generic_visit(node, **kwargs)
+        self.symbols.store(node.target)
+
+    def visit_FromImport(self, node, **kwargs):
+        self.generic_visit(node, **kwargs)
+        for name in node.names:
+            if isinstance(name, tuple):
+                self.symbols.store(name[1])
+            else:
+                self.symbols.store(name)
+
+    def visit_Assign(self, node, **kwargs):
+        """Visit assignments in the correct order."""
+        self.visit(node.node, **kwargs)
+        self.visit(node.target, **kwargs)
+
+    def visit_For(self, node, **kwargs):
+        """Visiting stops at for blocks.  However the block sequence
+        is visited as part of the outer scope.
+        """
+        self.visit(node.iter, **kwargs)
+
+    def visit_CallBlock(self, node, **kwargs):
+        self.visit(node.call, **kwargs)
+
+    def visit_FilterBlock(self, node, **kwargs):
+        self.visit(node.filter, **kwargs)
+
+    def visit_With(self, node, **kwargs):
+        for target in node.values:
+            self.visit(target)
+
+    def visit_AssignBlock(self, node, **kwargs):
+        """Stop visiting at block assigns."""
+        self.visit(node.target, **kwargs)
+
+    def visit_Scope(self, node, **kwargs):
+        """Stop visiting at scopes."""
+
+    def visit_Block(self, node, **kwargs):
+        """Stop visiting at blocks."""
+
+    def visit_OverlayScope(self, node, **kwargs):
+        """Do not visit into overlay scopes."""
diff --git a/lib/python3.7/site-packages/jinja2/lexer.py b/lib/python3.7/site-packages/jinja2/lexer.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fd135dd5b0a3a31896794ab2f25cd68582752ce
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/lexer.py
@@ -0,0 +1,739 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.lexer
+    ~~~~~~~~~~~~
+
+    This module implements a Jinja / Python combination lexer. The
+    `Lexer` class provided by this module is used to do some preprocessing
+    for Jinja.
+
+    On the one hand it filters out invalid operators like the bitshift
+    operators we don't allow in templates. On the other hand it separates
+    template code and python code in expressions.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+from collections import deque
+from operator import itemgetter
+
+from jinja2._compat import implements_iterator, intern, iteritems, text_type
+from jinja2.exceptions import TemplateSyntaxError
+from jinja2.utils import LRUCache
+
+# cache for the lexers. Exists in order to be able to have multiple
+# environments with the same lexer
+_lexer_cache = LRUCache(50)
+
+# static regular expressions
+whitespace_re = re.compile(r'\s+', re.U)
+string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
+                       r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
+integer_re = re.compile(r'\d+')
+
+try:
+    # check if this Python supports Unicode identifiers
+    compile('föö', '<unknown>', 'eval')
+except SyntaxError:
+    # no Unicode support, use ASCII identifiers
+    name_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
+    check_ident = False
+else:
+    # Unicode support, build a pattern to match valid characters, and set flag
+    # to use str.isidentifier to validate during lexing
+    from jinja2 import _identifier
+    name_re = re.compile(r'[\w{0}]+'.format(_identifier.pattern))
+    check_ident = True
+    # remove the pattern from memory after building the regex
+    import sys
+    del sys.modules['jinja2._identifier']
+    import jinja2
+    del jinja2._identifier
+    del _identifier
+
+float_re = re.compile(r'(?<!\.)\d+\.\d+')
+newline_re = re.compile(r'(\r\n|\r|\n)')
+
+# intern the tokens and keep references to them
+TOKEN_ADD = intern('add')
+TOKEN_ASSIGN = intern('assign')
+TOKEN_COLON = intern('colon')
+TOKEN_COMMA = intern('comma')
+TOKEN_DIV = intern('div')
+TOKEN_DOT = intern('dot')
+TOKEN_EQ = intern('eq')
+TOKEN_FLOORDIV = intern('floordiv')
+TOKEN_GT = intern('gt')
+TOKEN_GTEQ = intern('gteq')
+TOKEN_LBRACE = intern('lbrace')
+TOKEN_LBRACKET = intern('lbracket')
+TOKEN_LPAREN = intern('lparen')
+TOKEN_LT = intern('lt')
+TOKEN_LTEQ = intern('lteq')
+TOKEN_MOD = intern('mod')
+TOKEN_MUL = intern('mul')
+TOKEN_NE = intern('ne')
+TOKEN_PIPE = intern('pipe')
+TOKEN_POW = intern('pow')
+TOKEN_RBRACE = intern('rbrace')
+TOKEN_RBRACKET = intern('rbracket')
+TOKEN_RPAREN = intern('rparen')
+TOKEN_SEMICOLON = intern('semicolon')
+TOKEN_SUB = intern('sub')
+TOKEN_TILDE = intern('tilde')
+TOKEN_WHITESPACE = intern('whitespace')
+TOKEN_FLOAT = intern('float')
+TOKEN_INTEGER = intern('integer')
+TOKEN_NAME = intern('name')
+TOKEN_STRING = intern('string')
+TOKEN_OPERATOR = intern('operator')
+TOKEN_BLOCK_BEGIN = intern('block_begin')
+TOKEN_BLOCK_END = intern('block_end')
+TOKEN_VARIABLE_BEGIN = intern('variable_begin')
+TOKEN_VARIABLE_END = intern('variable_end')
+TOKEN_RAW_BEGIN = intern('raw_begin')
+TOKEN_RAW_END = intern('raw_end')
+TOKEN_COMMENT_BEGIN = intern('comment_begin')
+TOKEN_COMMENT_END = intern('comment_end')
+TOKEN_COMMENT = intern('comment')
+TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
+TOKEN_LINESTATEMENT_END = intern('linestatement_end')
+TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
+TOKEN_LINECOMMENT_END = intern('linecomment_end')
+TOKEN_LINECOMMENT = intern('linecomment')
+TOKEN_DATA = intern('data')
+TOKEN_INITIAL = intern('initial')
+TOKEN_EOF = intern('eof')
+
+# bind operators to token types
+operators = {
+    '+':            TOKEN_ADD,
+    '-':            TOKEN_SUB,
+    '/':            TOKEN_DIV,
+    '//':           TOKEN_FLOORDIV,
+    '*':            TOKEN_MUL,
+    '%':            TOKEN_MOD,
+    '**':           TOKEN_POW,
+    '~':            TOKEN_TILDE,
+    '[':            TOKEN_LBRACKET,
+    ']':            TOKEN_RBRACKET,
+    '(':            TOKEN_LPAREN,
+    ')':            TOKEN_RPAREN,
+    '{':            TOKEN_LBRACE,
+    '}':            TOKEN_RBRACE,
+    '==':           TOKEN_EQ,
+    '!=':           TOKEN_NE,
+    '>':            TOKEN_GT,
+    '>=':           TOKEN_GTEQ,
+    '<':            TOKEN_LT,
+    '<=':           TOKEN_LTEQ,
+    '=':            TOKEN_ASSIGN,
+    '.':            TOKEN_DOT,
+    ':':            TOKEN_COLON,
+    '|':            TOKEN_PIPE,
+    ',':            TOKEN_COMMA,
+    ';':            TOKEN_SEMICOLON
+}
+
+reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
+assert len(operators) == len(reverse_operators), 'operators dropped'
+operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
+                         sorted(operators, key=lambda x: -len(x))))
+
+ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
+                            TOKEN_COMMENT_END, TOKEN_WHITESPACE,
+                            TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
+                            TOKEN_LINECOMMENT])
+ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
+                             TOKEN_COMMENT, TOKEN_LINECOMMENT])
+
+
+def _describe_token_type(token_type):
+    if token_type in reverse_operators:
+        return reverse_operators[token_type]
+    return {
+        TOKEN_COMMENT_BEGIN:        'begin of comment',
+        TOKEN_COMMENT_END:          'end of comment',
+        TOKEN_COMMENT:              'comment',
+        TOKEN_LINECOMMENT:          'comment',
+        TOKEN_BLOCK_BEGIN:          'begin of statement block',
+        TOKEN_BLOCK_END:            'end of statement block',
+        TOKEN_VARIABLE_BEGIN:       'begin of print statement',
+        TOKEN_VARIABLE_END:         'end of print statement',
+        TOKEN_LINESTATEMENT_BEGIN:  'begin of line statement',
+        TOKEN_LINESTATEMENT_END:    'end of line statement',
+        TOKEN_DATA:                 'template data / text',
+        TOKEN_EOF:                  'end of template'
+    }.get(token_type, token_type)
+
+
+def describe_token(token):
+    """Returns a description of the token."""
+    if token.type == 'name':
+        return token.value
+    return _describe_token_type(token.type)
+
+
+def describe_token_expr(expr):
+    """Like `describe_token` but for token expressions."""
+    if ':' in expr:
+        type, value = expr.split(':', 1)
+        if type == 'name':
+            return value
+    else:
+        type = expr
+    return _describe_token_type(type)
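+
+# For example (illustration added here, not from upstream):
+#   describe_token_expr('name:endfor')  -> 'endfor'
+#   describe_token_expr('block_end')    -> 'end of statement block'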
+
+
+def count_newlines(value):
+    """Count the number of newline characters in the string.  This is
+    useful for extensions that filter a stream.
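+
+    A quick illustration (example added here, not from upstream)::
+
+        >>> count_newlines(u'foo\\r\\nbar\\nbaz\\r')
+        3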
+    """
+    return len(newline_re.findall(value))
+
+
+def compile_rules(environment):
+    """Compiles all the rules from the environment into a list of rules."""
+    e = re.escape
+    rules = [
+        (len(environment.comment_start_string), 'comment',
+         e(environment.comment_start_string)),
+        (len(environment.block_start_string), 'block',
+         e(environment.block_start_string)),
+        (len(environment.variable_start_string), 'variable',
+         e(environment.variable_start_string))
+    ]
+
+    if environment.line_statement_prefix is not None:
+        rules.append((len(environment.line_statement_prefix), 'linestatement',
+                      r'^[ \t\v]*' + e(environment.line_statement_prefix)))
+    if environment.line_comment_prefix is not None:
+        rules.append((len(environment.line_comment_prefix), 'linecomment',
+                      r'(?:^|(?<=\S))[^\S\r\n]*' +
+                      e(environment.line_comment_prefix)))
+
+    return [x[1:] for x in sorted(rules, reverse=True)]
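+
+# An illustration of the ordering (example added here, not upstream): with
+# the default delimiters all start strings have length two, so the reverse
+# sort falls back to the rule name::
+#
+#     >>> from jinja2 import Environment
+#     >>> [name for name, _ in compile_rules(Environment())]
+#     ['variable', 'comment', 'block']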
+
+
+class Failure(object):
+    """Class that raises a `TemplateSyntaxError` if called.
+    Used by the `Lexer` to specify known errors.
+    """
+
+    def __init__(self, message, cls=TemplateSyntaxError):
+        self.message = message
+        self.error_class = cls
+
+    def __call__(self, lineno, filename):
+        raise self.error_class(self.message, lineno, filename)
+
+
+class Token(tuple):
+    """Token class."""
+    __slots__ = ()
+    lineno, type, value = (property(itemgetter(x)) for x in range(3))
+
+    def __new__(cls, lineno, type, value):
+        return tuple.__new__(cls, (lineno, intern(str(type)), value))
+
+    def __str__(self):
+        if self.type in reverse_operators:
+            return reverse_operators[self.type]
+        elif self.type == 'name':
+            return self.value
+        return self.type
+
+    def test(self, expr):
+        """Test a token against a token expression.  This can either be a
+        token type or ``'token_type:token_value'``.  This can only test
+        against string values and types.
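+
+        A sketch of the semantics (illustrative example added here)::
+
+            >>> tok = Token(1, 'name', 'foo')
+            >>> tok.test('name'), tok.test('name:foo'), tok.test('name:bar')
+            (True, True, False)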
+        """
+        # here we do a regular string equality check as test_any is usually
+        # passed an iterable of non-interned strings.
+        if self.type == expr:
+            return True
+        elif ':' in expr:
+            return expr.split(':', 1) == [self.type, self.value]
+        return False
+
+    def test_any(self, *iterable):
+        """Test against multiple token expressions."""
+        for expr in iterable:
+            if self.test(expr):
+                return True
+        return False
+
+    def __repr__(self):
+        return 'Token(%r, %r, %r)' % (
+            self.lineno,
+            self.type,
+            self.value
+        )
+
+
+@implements_iterator
+class TokenStreamIterator(object):
+    """The iterator for tokenstreams.  Iterate over the stream
+    until the eof token is reached.
+    """
+
+    def __init__(self, stream):
+        self.stream = stream
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        token = self.stream.current
+        if token.type is TOKEN_EOF:
+            self.stream.close()
+            raise StopIteration()
+        next(self.stream)
+        return token
+
+
+@implements_iterator
+class TokenStream(object):
+    """A token stream is an iterable that yields :class:`Token`\\s.  The
+    parser however does not iterate over it but calls :meth:`next` to go
+    one token ahead.  The current active token is stored as :attr:`current`.
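+
+    A minimal sketch of the interface (illustrative example added here)::
+
+        >>> stream = TokenStream(iter([Token(1, 'name', 'foo')]), None, None)
+        >>> stream.current.value
+        'foo'
+        >>> next(stream).value
+        'foo'
+        >>> stream.current.type
+        'eof'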
+    """
+
+    def __init__(self, generator, name, filename):
+        self._iter = iter(generator)
+        self._pushed = deque()
+        self.name = name
+        self.filename = filename
+        self.closed = False
+        self.current = Token(1, TOKEN_INITIAL, '')
+        next(self)
+
+    def __iter__(self):
+        return TokenStreamIterator(self)
+
+    def __bool__(self):
+        return bool(self._pushed) or self.current.type is not TOKEN_EOF
+    __nonzero__ = __bool__  # py2
+
+    eos = property(lambda x: not x, doc="Are we at the end of the stream?")
+
+    def push(self, token):
+        """Push a token back to the stream."""
+        self._pushed.append(token)
+
+    def look(self):
+        """Look at the next token."""
+        old_token = next(self)
+        result = self.current
+        self.push(result)
+        self.current = old_token
+        return result
+
+    def skip(self, n=1):
+        """Got n tokens ahead."""
+        for x in range(n):
+            next(self)
+
+    def next_if(self, expr):
+        """Perform the token test and return the token if it matched.
+        Otherwise the return value is `None`.
+        """
+        if self.current.test(expr):
+            return next(self)
+
+    def skip_if(self, expr):
+        """Like :meth:`next_if` but only returns `True` or `False`."""
+        return self.next_if(expr) is not None
+
+    def __next__(self):
+        """Go one token ahead and return the old one.
+
+        Use the built-in :func:`next` instead of calling this directly.
+        """
+        rv = self.current
+        if self._pushed:
+            self.current = self._pushed.popleft()
+        elif self.current.type is not TOKEN_EOF:
+            try:
+                self.current = next(self._iter)
+            except StopIteration:
+                self.close()
+        return rv
+
+    def close(self):
+        """Close the stream."""
+        self.current = Token(self.current.lineno, TOKEN_EOF, '')
+        self._iter = None
+        self.closed = True
+
+    def expect(self, expr):
+        """Expect a given token type and return it.  This accepts the same
+        argument as :meth:`jinja2.lexer.Token.test`.
+        """
+        if not self.current.test(expr):
+            expr = describe_token_expr(expr)
+            if self.current.type is TOKEN_EOF:
+                raise TemplateSyntaxError('unexpected end of template, '
+                                          'expected %r.' % expr,
+                                          self.current.lineno,
+                                          self.name, self.filename)
+            raise TemplateSyntaxError("expected token %r, got %r" %
+                                      (expr, describe_token(self.current)),
+                                      self.current.lineno,
+                                      self.name, self.filename)
+        try:
+            return self.current
+        finally:
+            next(self)
+
+
+def get_lexer(environment):
+    """Return a lexer which is probably cached."""
+    key = (environment.block_start_string,
+           environment.block_end_string,
+           environment.variable_start_string,
+           environment.variable_end_string,
+           environment.comment_start_string,
+           environment.comment_end_string,
+           environment.line_statement_prefix,
+           environment.line_comment_prefix,
+           environment.trim_blocks,
+           environment.lstrip_blocks,
+           environment.newline_sequence,
+           environment.keep_trailing_newline)
+    lexer = _lexer_cache.get(key)
+    if lexer is None:
+        lexer = Lexer(environment)
+        _lexer_cache[key] = lexer
+    return lexer
+
+
+class Lexer(object):
+    """Class that implements a lexer for a given environment. Automatically
+    created by the environment class, usually you don't have to do that.
+
+    Note that the lexer is not automatically bound to an environment.
+    Multiple environments can share the same lexer.
+    """
+
+    def __init__(self, environment):
+        # shortcuts
+        c = lambda x: re.compile(x, re.M | re.S)
+        e = re.escape
+
+        # lexing rules for tags
+        tag_rules = [
+            (whitespace_re, TOKEN_WHITESPACE, None),
+            (float_re, TOKEN_FLOAT, None),
+            (integer_re, TOKEN_INTEGER, None),
+            (name_re, TOKEN_NAME, None),
+            (string_re, TOKEN_STRING, None),
+            (operator_re, TOKEN_OPERATOR, None)
+        ]
+
+        # assemble the root lexing rule. because "|" is not greedy (the
+        # first matching alternative wins) we have to sort the rules by
+        # length so that the lexer keeps working as expected when we have
+        # parsing rules like <% for blocks and <%= for variables (if
+        # someone wants asp-like syntax). variables are just part of the
+        # rules if variable processing is required.
+        root_tag_rules = compile_rules(environment)
+
+        # block suffix if trimming is enabled
+        block_suffix_re = environment.trim_blocks and '\\n?' or ''
+
+        # strip leading spaces if lstrip_blocks is enabled
+        prefix_re = {}
+        if environment.lstrip_blocks:
+            # use '{%+' to manually disable lstrip_blocks behavior
+            no_lstrip_re = e('+')
+            # detect overlap between block and variable or comment strings
+            block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
+            # make sure we don't mistake a block for a variable or a comment
+            m = block_diff.match(environment.comment_start_string)
+            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
+            m = block_diff.match(environment.variable_start_string)
+            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
+
+            # detect overlap between comment and variable strings
+            comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
+            m = comment_diff.match(environment.variable_start_string)
+            no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''
+
+            lstrip_re = r'^[ \t]*'
+            block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
+                    lstrip_re,
+                    e(environment.block_start_string),
+                    no_lstrip_re,
+                    e(environment.block_start_string),
+                    )
+            comment_prefix_re = r'%s%s%s|%s\+?' % (
+                    lstrip_re,
+                    e(environment.comment_start_string),
+                    no_variable_re,
+                    e(environment.comment_start_string),
+                    )
+            prefix_re['block'] = block_prefix_re
+            prefix_re['comment'] = comment_prefix_re
+        else:
+            block_prefix_re = '%s' % e(environment.block_start_string)
+
+        self.newline_sequence = environment.newline_sequence
+        self.keep_trailing_newline = environment.keep_trailing_newline
+
+        # global lexing rules
+        self.rules = {
+            'root': [
+                # directives
+                (c('(.*?)(?:%s)' % '|'.join(
+                    [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
+                        e(environment.block_start_string),
+                        block_prefix_re,
+                        e(environment.block_end_string),
+                        e(environment.block_end_string)
+                    )] + [
+                        r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
+                        for n, r in root_tag_rules
+                    ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
+                # data
+                (c('.+'), TOKEN_DATA, None)
+            ],
+            # comments
+            TOKEN_COMMENT_BEGIN: [
+                (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
+                    e(environment.comment_end_string),
+                    e(environment.comment_end_string),
+                    block_suffix_re
+                )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
+                (c('(.)'), (Failure('Missing end of comment tag'),), None)
+            ],
+            # blocks
+            TOKEN_BLOCK_BEGIN: [
+                (c(r'(?:\-%s\s*|%s)%s' % (
+                    e(environment.block_end_string),
+                    e(environment.block_end_string),
+                    block_suffix_re
+                )), TOKEN_BLOCK_END, '#pop'),
+            ] + tag_rules,
+            # variables
+            TOKEN_VARIABLE_BEGIN: [
+                (c(r'\-%s\s*|%s' % (
+                    e(environment.variable_end_string),
+                    e(environment.variable_end_string)
+                )), TOKEN_VARIABLE_END, '#pop')
+            ] + tag_rules,
+            # raw block
+            TOKEN_RAW_BEGIN: [
+                (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
+                    e(environment.block_start_string),
+                    block_prefix_re,
+                    e(environment.block_end_string),
+                    e(environment.block_end_string),
+                    block_suffix_re
+                )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
+                (c('(.)'), (Failure('Missing end of raw directive'),), None)
+            ],
+            # line statements
+            TOKEN_LINESTATEMENT_BEGIN: [
+                (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
+            ] + tag_rules,
+            # line comments
+            TOKEN_LINECOMMENT_BEGIN: [
+                (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
+                 TOKEN_LINECOMMENT_END), '#pop')
+            ]
+        }
+
+    def _normalize_newlines(self, value):
+        """Called for strings and template data to normalize it to unicode."""
+        return newline_re.sub(self.newline_sequence, value)
+
+    def tokenize(self, source, name=None, filename=None, state=None):
+        """Calls tokeniter + tokenize and wraps it in a token stream.
+        """
+        stream = self.tokeniter(source, name, filename, state)
+        return TokenStream(self.wrap(stream, name, filename), name, filename)
+
+    def wrap(self, stream, name=None, filename=None):
+        """This is called with the stream as returned by `tokenize` and wraps
+        every token in a :class:`Token` and converts the value.
+        """
+        for lineno, token, value in stream:
+            if token in ignored_tokens:
+                continue
+            elif token == 'linestatement_begin':
+                token = 'block_begin'
+            elif token == 'linestatement_end':
+                token = 'block_end'
+            # we are not interested in those tokens in the parser
+            elif token in ('raw_begin', 'raw_end'):
+                continue
+            elif token == 'data':
+                value = self._normalize_newlines(value)
+            elif token == 'keyword':
+                token = value
+            elif token == 'name':
+                value = str(value)
+                if check_ident and not value.isidentifier():
+                    raise TemplateSyntaxError(
+                        'Invalid character in identifier',
+                        lineno, name, filename)
+            elif token == 'string':
+                # try to unescape string
+                try:
+                    value = self._normalize_newlines(value[1:-1]) \
+                        .encode('ascii', 'backslashreplace') \
+                        .decode('unicode-escape')
+                except Exception as e:
+                    msg = str(e).split(':')[-1].strip()
+                    raise TemplateSyntaxError(msg, lineno, name, filename)
+            elif token == 'integer':
+                value = int(value)
+            elif token == 'float':
+                value = float(value)
+            elif token == 'operator':
+                token = operators[value]
+            yield Token(lineno, token, value)
+
+    def tokeniter(self, source, name, filename=None, state=None):
+        """This method tokenizes the text and returns the tokens in a
+        generator.  Use this method if you just want to tokenize a template.
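+
+        A rough sketch of the raw ``(lineno, token, value)`` tuples this
+        yields (illustrative example added here; assumes the default
+        delimiters -- note that whitespace tokens are filtered out later
+        by :meth:`wrap`)::
+
+            >>> from jinja2 import Environment
+            >>> lexer = Environment().lexer
+            >>> [t for _, t, _ in lexer.tokeniter(u'{{ foo }}', 'demo')]
+            ['variable_begin', 'whitespace', 'name', 'whitespace', 'variable_end']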
+        """
+        source = text_type(source)
+        lines = source.splitlines()
+        if self.keep_trailing_newline and source:
+            for newline in ('\r\n', '\r', '\n'):
+                if source.endswith(newline):
+                    lines.append('')
+                    break
+        source = '\n'.join(lines)
+        pos = 0
+        lineno = 1
+        stack = ['root']
+        if state is not None and state != 'root':
+            assert state in ('variable', 'block'), 'invalid state'
+            stack.append(state + '_begin')
+        else:
+            state = 'root'
+        statetokens = self.rules[stack[-1]]
+        source_length = len(source)
+
+        balancing_stack = []
+
+        while 1:
+            # tokenizer loop
+            for regex, tokens, new_state in statetokens:
+                m = regex.match(source, pos)
+                # if no match we try again with the next rule
+                if m is None:
+                    continue
+
+                # we only match blocks and variables if braces / parentheses
+                # are balanced. continue parsing with the lower rule which
+                # is the operator rule. do this only if the end tags look
+                # like operators
+                if balancing_stack and \
+                   tokens in ('variable_end', 'block_end',
+                              'linestatement_end'):
+                    continue
+
+                # tuples support more options
+                if isinstance(tokens, tuple):
+                    for idx, token in enumerate(tokens):
+                        # failure group
+                        if token.__class__ is Failure:
+                            raise token(lineno, filename)
+                        # bygroup is a bit more complex, in that case we
+                        # yield for the current token the first named
+                        # group that matched
+                        elif token == '#bygroup':
+                            for key, value in iteritems(m.groupdict()):
+                                if value is not None:
+                                    yield lineno, key, value
+                                    lineno += value.count('\n')
+                                    break
+                            else:
+                                raise RuntimeError('%r wanted to resolve '
+                                                   'the token dynamically'
+                                                   ' but no group matched'
+                                                   % regex)
+                        # normal group
+                        else:
+                            data = m.group(idx + 1)
+                            if data or token not in ignore_if_empty:
+                                yield lineno, token, data
+                            lineno += data.count('\n')
+
+                # strings as tokens are just yielded as-is.
+                else:
+                    data = m.group()
+                    # update brace/parentheses balance
+                    if tokens == 'operator':
+                        if data == '{':
+                            balancing_stack.append('}')
+                        elif data == '(':
+                            balancing_stack.append(')')
+                        elif data == '[':
+                            balancing_stack.append(']')
+                        elif data in ('}', ')', ']'):
+                            if not balancing_stack:
+                                raise TemplateSyntaxError('unexpected \'%s\'' %
+                                                          data, lineno, name,
+                                                          filename)
+                            expected_op = balancing_stack.pop()
+                            if expected_op != data:
+                                raise TemplateSyntaxError('unexpected \'%s\', '
+                                                          'expected \'%s\'' %
+                                                          (data, expected_op),
+                                                          lineno, name,
+                                                          filename)
+                    # yield items
+                    if data or tokens not in ignore_if_empty:
+                        yield lineno, tokens, data
+                    lineno += data.count('\n')
+
+                # fetch the new position into a new variable so that we can
+                # check if there is an internal parsing error which would
+                # result in an infinite loop
+                pos2 = m.end()
+
+                # handle state changes
+                if new_state is not None:
+                    # remove the uppermost state
+                    if new_state == '#pop':
+                        stack.pop()
+                    # resolve the new state by group checking
+                    elif new_state == '#bygroup':
+                        for key, value in iteritems(m.groupdict()):
+                            if value is not None:
+                                stack.append(key)
+                                break
+                        else:
+                            raise RuntimeError('%r wanted to resolve the '
+                                               'new state dynamically but'
+                                               ' no group matched' %
+                                               regex)
+                    # direct state name given
+                    else:
+                        stack.append(new_state)
+                    statetokens = self.rules[stack[-1]]
+                # if we are still at the same position and the stack did not
+                # change we have a rule that would loop forever; avoid that
+                # and raise an error instead
+                elif pos2 == pos:
+                    raise RuntimeError('%r yielded empty string without '
+                                       'stack change' % regex)
+                # publish the new position and start again
+                pos = pos2
+                break
+            # if the loop terminated without a break we haven't found a
+            # single match; either we are at the end of the file or we
+            # have a problem
+            else:
+                # end of text
+                if pos >= source_length:
+                    return
+                # something went wrong
+                raise TemplateSyntaxError('unexpected char %r at %d' %
+                                          (source[pos], pos), lineno,
+                                          name, filename)
diff --git a/lib/python3.7/site-packages/jinja2/loaders.py b/lib/python3.7/site-packages/jinja2/loaders.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c7979376065907d2adca51fbe76ff71079b8000
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/loaders.py
@@ -0,0 +1,481 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.loaders
+    ~~~~~~~~~~~~~~
+
+    Jinja loader classes.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+import os
+import sys
+import weakref
+from types import ModuleType
+from os import path
+from hashlib import sha1
+from jinja2.exceptions import TemplateNotFound
+from jinja2.utils import open_if_exists, internalcode
+from jinja2._compat import string_types, iteritems
+
+
+def split_template_path(template):
+    """Split a path into segments and perform a sanity check.  If it detects
+    '..' in the path it will raise a `TemplateNotFound` error.
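+
+    A quick illustration (example added here)::
+
+        >>> split_template_path('foo/./bar.html')
+        ['foo', 'bar.html']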
+    """
+    pieces = []
+    for piece in template.split('/'):
+        if path.sep in piece \
+           or (path.altsep and path.altsep in piece) or \
+           piece == path.pardir:
+            raise TemplateNotFound(template)
+        elif piece and piece != '.':
+            pieces.append(piece)
+    return pieces
+
+
+class BaseLoader(object):
+    """Baseclass for all loaders.  Subclass this and override `get_source` to
+    implement a custom loading mechanism.  The environment provides a
+    `get_template` method that calls the loader's `load` method to get the
+    :class:`Template` object.
+
+    A very basic example for a loader that looks up templates on the file
+    system could look like this::
+
+        from jinja2 import BaseLoader, TemplateNotFound
+        from os.path import join, exists, getmtime
+
+        class MyLoader(BaseLoader):
+
+            def __init__(self, path):
+                self.path = path
+
+            def get_source(self, environment, template):
+                path = join(self.path, template)
+                if not exists(path):
+                    raise TemplateNotFound(template)
+                mtime = getmtime(path)
+                with open(path) as f:
+                    source = f.read()
+                return source, path, lambda: mtime == getmtime(path)
+    """
+
+    #: if set to `False` it indicates that the loader cannot provide access
+    #: to the source of templates.
+    #:
+    #: .. versionadded:: 2.4
+    has_source_access = True
+
+    def get_source(self, environment, template):
+        """Get the template source, filename and reload helper for a template.
+        It's passed the environment and template name and has to return a
+        tuple in the form ``(source, filename, uptodate)`` or raise a
+        `TemplateNotFound` error if it can't locate the template.
+
+        The source part of the returned tuple must be the source of the
+        template as a unicode string or an ASCII bytestring.  The filename
+        should be the name of the file on the filesystem if it was loaded
+        from there, otherwise `None`.  The filename is used by Python for
+        tracebacks if no loader extension is used.
+
+        The last item in the tuple is the `uptodate` function.  If auto
+        reloading is enabled it's always called to check if the template
+        changed.  No arguments are passed so the function must store the
+        old state somewhere (for example in a closure).  If it returns `False`
+        the template will be reloaded.
+        """
+        if not self.has_source_access:
+            raise RuntimeError('%s cannot provide access to the source' %
+                               self.__class__.__name__)
+        raise TemplateNotFound(template)
+
+    def list_templates(self):
+        """Iterates over all templates.  If the loader does not support that
+        it should raise a :exc:`TypeError` which is the default behavior.
+        """
+        raise TypeError('this loader cannot iterate over all templates')
+
+    @internalcode
+    def load(self, environment, name, globals=None):
+        """Loads a template.  This method looks up the template in the cache
+        or loads one by calling :meth:`get_source`.  Subclasses should not
+        override this method as loaders working on collections of other
+        loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
+        will not call this method but `get_source` directly.
+        """
+        code = None
+        if globals is None:
+            globals = {}
+
+        # first we try to get the source for this template together
+        # with the filename and the uptodate function.
+        source, filename, uptodate = self.get_source(environment, name)
+
+        # try to load the code from the bytecode cache if there is a
+        # bytecode cache configured.
+        bcc = environment.bytecode_cache
+        if bcc is not None:
+            bucket = bcc.get_bucket(environment, name, filename, source)
+            code = bucket.code
+
+        # if we don't have code so far (not cached, no longer up to
+        # date) etc. we compile the template
+        if code is None:
+            code = environment.compile(source, name, filename)
+
+        # if the bytecode cache is available and the bucket doesn't
+        # have a code so far, we give the bucket the new code and put
+        # it back to the bytecode cache.
+        if bcc is not None and bucket.code is None:
+            bucket.code = code
+            bcc.set_bucket(bucket)
+
+        return environment.template_class.from_code(environment, code,
+                                                    globals, uptodate)
+
+
+class FileSystemLoader(BaseLoader):
+    """Loads templates from the file system.  This loader can find templates
+    in folders on the file system and is the preferred way to load them.
+
+    The loader takes the path to the templates as a string, or, if multiple
+    locations are wanted, a list of paths which are searched in the
+    given order::
+
+    >>> loader = FileSystemLoader('/path/to/templates')
+    >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
+
+    By default the template encoding is ``'utf-8'``, which can be changed
+    by setting the `encoding` parameter to something else.
+
+    To follow symbolic links, set the *followlinks* parameter to ``True``::
+
+    >>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
+
+    .. versionchanged:: 2.8+
+       The *followlinks* parameter was added.
+    """
+
+    def __init__(self, searchpath, encoding='utf-8', followlinks=False):
+        if isinstance(searchpath, string_types):
+            searchpath = [searchpath]
+        self.searchpath = list(searchpath)
+        self.encoding = encoding
+        self.followlinks = followlinks
+
+    def get_source(self, environment, template):
+        pieces = split_template_path(template)
+        for searchpath in self.searchpath:
+            filename = path.join(searchpath, *pieces)
+            f = open_if_exists(filename)
+            if f is None:
+                continue
+            try:
+                contents = f.read().decode(self.encoding)
+            finally:
+                f.close()
+
+            mtime = path.getmtime(filename)
+
+            def uptodate():
+                try:
+                    return path.getmtime(filename) == mtime
+                except OSError:
+                    return False
+            return contents, filename, uptodate
+        raise TemplateNotFound(template)
+
+    def list_templates(self):
+        found = set()
+        for searchpath in self.searchpath:
+            walk_dir = os.walk(searchpath, followlinks=self.followlinks)
+            for dirpath, dirnames, filenames in walk_dir:
+                for filename in filenames:
+                    template = os.path.join(dirpath, filename)
+                    template = template[len(searchpath):] \
+                        .strip(os.path.sep).replace(os.path.sep, '/')
+                    if template[:2] == './':
+                        template = template[2:]
+                    if template not in found:
+                        found.add(template)
+        return sorted(found)
+
+
+class PackageLoader(BaseLoader):
+    """Load templates from python eggs or packages.  It is constructed with
+    the name of the python package and the path to the templates in that
+    package::
+
+        loader = PackageLoader('mypackage', 'views')
+
+    If the package path is not given, ``'templates'`` is assumed.
+
+    By default the template encoding is ``'utf-8'``, which can be changed
+    by setting the `encoding` parameter to something else.  Due to the nature
+    of eggs it's only possible to reload templates if the package was loaded
+    from the file system and not a zip file.
+    """
+
+    def __init__(self, package_name, package_path='templates',
+                 encoding='utf-8'):
+        from pkg_resources import DefaultProvider, ResourceManager, \
+                                  get_provider
+        provider = get_provider(package_name)
+        self.encoding = encoding
+        self.manager = ResourceManager()
+        self.filesystem_bound = isinstance(provider, DefaultProvider)
+        self.provider = provider
+        self.package_path = package_path
+
+    def get_source(self, environment, template):
+        pieces = split_template_path(template)
+        p = '/'.join((self.package_path,) + tuple(pieces))
+        if not self.provider.has_resource(p):
+            raise TemplateNotFound(template)
+
+        filename = uptodate = None
+        if self.filesystem_bound:
+            filename = self.provider.get_resource_filename(self.manager, p)
+            mtime = path.getmtime(filename)
+            def uptodate():
+                try:
+                    return path.getmtime(filename) == mtime
+                except OSError:
+                    return False
+
+        source = self.provider.get_resource_string(self.manager, p)
+        return source.decode(self.encoding), filename, uptodate
+
+    def list_templates(self):
+        path = self.package_path
+        if path[:2] == './':
+            path = path[2:]
+        elif path == '.':
+            path = ''
+        offset = len(path)
+        results = []
+        def _walk(path):
+            for filename in self.provider.resource_listdir(path):
+                fullname = path + '/' + filename
+                if self.provider.resource_isdir(fullname):
+                    _walk(fullname)
+                else:
+                    results.append(fullname[offset:].lstrip('/'))
+        _walk(path)
+        results.sort()
+        return results
+
+
+class DictLoader(BaseLoader):
+    """Loads a template from a python dict.  It's passed a dict of unicode
+    strings bound to template names.  This loader is useful for unit testing:
+
+    >>> loader = DictLoader({'index.html': 'source here'})
+
+    Because auto reloading is rarely useful this is disabled by default.
+    """
+
+    def __init__(self, mapping):
+        self.mapping = mapping
+
+    def get_source(self, environment, template):
+        if template in self.mapping:
+            source = self.mapping[template]
+            return source, None, lambda: source == self.mapping.get(template)
+        raise TemplateNotFound(template)
+
+    def list_templates(self):
+        return sorted(self.mapping)
+
+
+class FunctionLoader(BaseLoader):
+    """A loader that is passed a function which does the loading.  The
+    function receives the name of the template and has to return either
+    a unicode string with the template source, a tuple in the form ``(source,
+    filename, uptodatefunc)`` or `None` if the template does not exist.
+
+    >>> def load_template(name):
+    ...     if name == 'index.html':
+    ...         return '...'
+    ...
+    >>> loader = FunctionLoader(load_template)
+
+    The `uptodatefunc` is a function that is called if autoreload is enabled
+    and has to return `True` if the template is still up to date.  For more
+    details have a look at :meth:`BaseLoader.get_source` which has the same
+    return value.
+    """
+
+    def __init__(self, load_func):
+        self.load_func = load_func
+
+    def get_source(self, environment, template):
+        rv = self.load_func(template)
+        if rv is None:
+            raise TemplateNotFound(template)
+        elif isinstance(rv, string_types):
+            return rv, None, None
+        return rv
+
+
+class PrefixLoader(BaseLoader):
+    """A loader that is passed a dict of loaders where each loader is bound
+    to a prefix.  The prefix is delimited from the template by a slash by
+    default, which can be changed by setting the `delimiter` argument to
+    something else::
+
+        loader = PrefixLoader({
+            'app1':     PackageLoader('mypackage.app1'),
+            'app2':     PackageLoader('mypackage.app2')
+        })
+
+    Loading ``'app1/index.html'`` retrieves the file from the app1 package;
+    loading ``'app2/index.html'`` retrieves it from the second.
+    """
+
+    def __init__(self, mapping, delimiter='/'):
+        self.mapping = mapping
+        self.delimiter = delimiter
+
+    def get_loader(self, template):
+        try:
+            prefix, name = template.split(self.delimiter, 1)
+            loader = self.mapping[prefix]
+        except (ValueError, KeyError):
+            raise TemplateNotFound(template)
+        return loader, name
+
+    def get_source(self, environment, template):
+        loader, name = self.get_loader(template)
+        try:
+            return loader.get_source(environment, name)
+        except TemplateNotFound:
+            # re-raise the exception with the correct filename here.
+            # (the one that includes the prefix)
+            raise TemplateNotFound(template)
+
+    @internalcode
+    def load(self, environment, name, globals=None):
+        loader, local_name = self.get_loader(name)
+        try:
+            return loader.load(environment, local_name, globals)
+        except TemplateNotFound:
+            # re-raise the exception with the correct filename here.
+            # (the one that includes the prefix)
+            raise TemplateNotFound(name)
+
+    def list_templates(self):
+        result = []
+        for prefix, loader in iteritems(self.mapping):
+            for template in loader.list_templates():
+                result.append(prefix + self.delimiter + template)
+        return result
+
+
+class ChoiceLoader(BaseLoader):
+    """This loader works like the `PrefixLoader` just that no prefix is
+    specified.  If a template could not be found by one loader the next one
+    is tried.
+
+    >>> loader = ChoiceLoader([
+    ...     FileSystemLoader('/path/to/user/templates'),
+    ...     FileSystemLoader('/path/to/system/templates')
+    ... ])
+
+    This is useful if you want to allow users to override builtin templates
+    from a different location.
+    """
+
+    def __init__(self, loaders):
+        self.loaders = loaders
+
+    def get_source(self, environment, template):
+        for loader in self.loaders:
+            try:
+                return loader.get_source(environment, template)
+            except TemplateNotFound:
+                pass
+        raise TemplateNotFound(template)
+
+    @internalcode
+    def load(self, environment, name, globals=None):
+        for loader in self.loaders:
+            try:
+                return loader.load(environment, name, globals)
+            except TemplateNotFound:
+                pass
+        raise TemplateNotFound(name)
+
+    def list_templates(self):
+        found = set()
+        for loader in self.loaders:
+            found.update(loader.list_templates())
+        return sorted(found)
+
+
+class _TemplateModule(ModuleType):
+    """Like a normal module but with support for weak references"""
+
+
+class ModuleLoader(BaseLoader):
+    """This loader loads templates from precompiled templates.
+
+    Example usage:
+
+    >>> loader = ChoiceLoader([
+    ...     ModuleLoader('/path/to/compiled/templates'),
+    ...     FileSystemLoader('/path/to/templates')
+    ... ])
+
+    Templates can be precompiled with :meth:`Environment.compile_templates`.
+    """
+
+    has_source_access = False
+
+    def __init__(self, path):
+        package_name = '_jinja2_module_templates_%x' % id(self)
+
+        # create a fake module that looks for the templates in the
+        # path given.
+        mod = _TemplateModule(package_name)
+        if isinstance(path, string_types):
+            path = [path]
+        else:
+            path = list(path)
+        mod.__path__ = path
+
+        sys.modules[package_name] = weakref.proxy(mod,
+            lambda x: sys.modules.pop(package_name, None))
+
+        # the only strong reference, the sys.modules entry is weak
+        # so that the garbage collector can remove it once the
+        # loader that created it goes out of business.
+        self.module = mod
+        self.package_name = package_name
+
+    @staticmethod
+    def get_template_key(name):
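+        # e.g. 'index.html' -> 'tmpl_' plus a 40 character sha1 hex digest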
+        return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
+
+    @staticmethod
+    def get_module_filename(name):
+        return ModuleLoader.get_template_key(name) + '.py'
+
+    @internalcode
+    def load(self, environment, name, globals=None):
+        key = self.get_template_key(name)
+        module = '%s.%s' % (self.package_name, key)
+        mod = getattr(self.module, module, None)
+        if mod is None:
+            try:
+                mod = __import__(module, None, None, ['root'])
+            except ImportError:
+                raise TemplateNotFound(name)
+
+            # remove the entry from sys.modules, we only want the attribute
+            # on the module object we have stored on the loader.
+            sys.modules.pop(module, None)
+
+        return environment.template_class.from_module_dict(
+            environment, mod.__dict__, globals)
diff --git a/lib/python3.7/site-packages/jinja2/meta.py b/lib/python3.7/site-packages/jinja2/meta.py
new file mode 100644
index 0000000000000000000000000000000000000000..7421914f77242d755ede1a43b9518e73b85e894e
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/meta.py
@@ -0,0 +1,106 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.meta
+    ~~~~~~~~~~~
+
+    This module implements various functions that expose information about
+    templates that might be interesting for various kinds of applications.
+
+    :copyright: (c) 2017 by the Jinja Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+from jinja2 import nodes
+from jinja2.compiler import CodeGenerator
+from jinja2._compat import string_types, iteritems
+
+
+class TrackingCodeGenerator(CodeGenerator):
+    """We abuse the code generator for introspection."""
+
+    def __init__(self, environment):
+        CodeGenerator.__init__(self, environment, '<introspection>',
+                               '<introspection>')
+        self.undeclared_identifiers = set()
+
+    def write(self, x):
+        """Don't write."""
+
+    def enter_frame(self, frame):
+        """Remember all undeclared identifiers."""
+        CodeGenerator.enter_frame(self, frame)
+        for _, (action, param) in iteritems(frame.symbols.loads):
+            if action == 'resolve':
+                self.undeclared_identifiers.add(param)
+
+
+def find_undeclared_variables(ast):
+    """Returns a set of all variables in the AST that will be looked up from
+    the context at runtime.  Because at compile time it's not known which
+    variables will be used depending on the path the execution takes at
+    runtime, all variables are returned.
+
+    >>> from jinja2 import Environment, meta
+    >>> env = Environment()
+    >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
+    >>> meta.find_undeclared_variables(ast) == set(['bar'])
+    True
+
+    .. admonition:: Implementation
+
+       Internally the code generator is used for finding undeclared variables.
+       This is good to know because the code generator might raise a
+       :exc:`TemplateAssertionError` during compilation and as a matter of
+       fact this function can currently raise that exception as well.
+    """
+    codegen = TrackingCodeGenerator(ast.environment)
+    codegen.visit(ast)
+    return codegen.undeclared_identifiers
+
+
+def find_referenced_templates(ast):
+    """Finds all the referenced templates from the AST.  This will return an
+    iterator over all the hardcoded template extensions, inclusions and
+    imports.  If dynamic inheritance or inclusion is used, `None` will be
+    yielded.
+
+    >>> from jinja2 import Environment, meta
+    >>> env = Environment()
+    >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
+    >>> list(meta.find_referenced_templates(ast))
+    ['layout.html', None]
+
+    This function is useful for dependency tracking; for example, to
+    rebuild parts of the website after a layout template has changed.
+    """
+    for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
+                              nodes.Include)):
+        if not isinstance(node.template, nodes.Const):
+            # a tuple with some non consts in there
+            if isinstance(node.template, (nodes.Tuple, nodes.List)):
+                for template_name in node.template.items:
+                    # something const, only yield the strings and ignore
+                    # non-string consts that really just make no sense
+                    if isinstance(template_name, nodes.Const):
+                        if isinstance(template_name.value, string_types):
+                            yield template_name.value
+                    # something dynamic in there
+                    else:
+                        yield None
+            # something dynamic we don't know about here
+            else:
+                yield None
+            continue
+        # constant is a basestring, direct template name
+        if isinstance(node.template.value, string_types):
+            yield node.template.value
+        # a tuple or list (latter *should* not happen) made of consts,
+        # yield the consts that are strings.  We could warn here for
+        # non string values
+        elif isinstance(node, nodes.Include) and \
+             isinstance(node.template.value, (tuple, list)):
+            for template_name in node.template.value:
+                if isinstance(template_name, string_types):
+                    yield template_name
+        # something else we don't care about, we could warn here
+        else:
+            yield None
diff --git a/lib/python3.7/site-packages/jinja2/nativetypes.py b/lib/python3.7/site-packages/jinja2/nativetypes.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe17e4138df3f1c1d3cf8d207135c847f5d66615
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/nativetypes.py
@@ -0,0 +1,220 @@
+import sys
+from ast import literal_eval
+from itertools import islice, chain
+from jinja2 import nodes
+from jinja2._compat import text_type
+from jinja2.compiler import CodeGenerator, has_safe_repr
+from jinja2.environment import Environment, Template
+from jinja2.utils import concat, escape
+
+
+def native_concat(nodes):
+    """Return a native Python type from the list of compiled nodes. If the
+    result is a single node, its value is returned. Otherwise, the nodes are
+    concatenated as strings. If the result can be parsed with
+    :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
+    string is returned.
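+
+    A sketch of the behaviour (illustrative examples added here)::
+
+        >>> native_concat(iter(['42']))
+        42
+        >>> native_concat(iter(['[1, ', '2]']))
+        [1, 2]
+        >>> native_concat(iter(['no', 'literal']))
+        'noliteral'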
+    """
+    head = list(islice(nodes, 2))
+
+    if not head:
+        return None
+
+    if len(head) == 1:
+        out = head[0]
+    else:
+        out = u''.join([text_type(v) for v in chain(head, nodes)])
+
+    try:
+        return literal_eval(out)
+    except (ValueError, SyntaxError, MemoryError):
+        return out
+
+
+class NativeCodeGenerator(CodeGenerator):
+    """A code generator which avoids injecting ``to_string()`` calls around the
+    internal code Jinja uses to render templates.
+    """
+
+    def visit_Output(self, node, frame):
+        """Same as :meth:`CodeGenerator.visit_Output`, but do not call
+        ``to_string`` on output nodes in generated code.
+        """
+        if self.has_known_extends and frame.require_output_check:
+            return
+
+        finalize = self.environment.finalize
+        finalize_context = getattr(finalize, 'contextfunction', False)
+        finalize_eval = getattr(finalize, 'evalcontextfunction', False)
+        finalize_env = getattr(finalize, 'environmentfunction', False)
+
+        if finalize is not None:
+            if finalize_context or finalize_eval:
+                const_finalize = None
+            elif finalize_env:
+                def const_finalize(x):
+                    return finalize(self.environment, x)
+            else:
+                const_finalize = finalize
+        else:
+            def const_finalize(x):
+                return x
+
+        # If we are inside a frame that requires output checking, we do so.
+        outdent_later = False
+
+        if frame.require_output_check:
+            self.writeline('if parent_template is None:')
+            self.indent()
+            outdent_later = True
+
+        # Try to evaluate as many chunks as possible into a static string at
+        # compile time.
+        body = []
+
+        for child in node.nodes:
+            try:
+                if const_finalize is None:
+                    raise nodes.Impossible()
+
+                const = child.as_const(frame.eval_ctx)
+                if not has_safe_repr(const):
+                    raise nodes.Impossible()
+            except nodes.Impossible:
+                body.append(child)
+                continue
+
+            # the frame can't be volatile here, because otherwise the as_const
+            # function would raise an Impossible exception at that point
+            try:
+                if frame.eval_ctx.autoescape:
+                    if hasattr(const, '__html__'):
+                        const = const.__html__()
+                    else:
+                        const = escape(const)
+
+                const = const_finalize(const)
+            except Exception:
+                # if something goes wrong here we evaluate the node at runtime
+                # for easier debugging
+                body.append(child)
+                continue
+
+            if body and isinstance(body[-1], list):
+                body[-1].append(const)
+            else:
+                body.append([const])
+
+        # if we have fewer than 3 nodes or a buffer we yield or extend/append
+        if len(body) < 3 or frame.buffer is not None:
+            if frame.buffer is not None:
+                # for one item we append, for more we extend
+                if len(body) == 1:
+                    self.writeline('%s.append(' % frame.buffer)
+                else:
+                    self.writeline('%s.extend((' % frame.buffer)
+
+                self.indent()
+
+            for item in body:
+                if isinstance(item, list):
+                    val = repr(native_concat(item))
+
+                    if frame.buffer is None:
+                        self.writeline('yield ' + val)
+                    else:
+                        self.writeline(val + ',')
+                else:
+                    if frame.buffer is None:
+                        self.writeline('yield ', item)
+                    else:
+                        self.newline(item)
+
+                    close = 0
+
+                    if finalize is not None:
+                        self.write('environment.finalize(')
+
+                        if finalize_context:
+                            self.write('context, ')
+
+                        close += 1
+
+                    self.visit(item, frame)
+
+                    if close > 0:
+                        self.write(')' * close)
+
+                    if frame.buffer is not None:
+                        self.write(',')
+
+            if frame.buffer is not None:
+                # close the open parentheses
+                self.outdent()
+                self.writeline(len(body) == 1 and ')' or '))')
+
+        # otherwise we create a format string as this is faster in that case
+        else:
+            format = []
+            arguments = []
+
+            for item in body:
+                if isinstance(item, list):
+                    format.append(native_concat(item).replace('%', '%%'))
+                else:
+                    format.append('%s')
+                    arguments.append(item)
+
+            self.writeline('yield ')
+            self.write(repr(concat(format)) + ' % (')
+            self.indent()
+
+            for argument in arguments:
+                self.newline(argument)
+                close = 0
+
+                if finalize is not None:
+                    self.write('environment.finalize(')
+
+                    if finalize_context:
+                        self.write('context, ')
+                    elif finalize_eval:
+                        self.write('context.eval_ctx, ')
+                    elif finalize_env:
+                        self.write('environment, ')
+
+                    close += 1
+
+                self.visit(argument, frame)
+                self.write(')' * close + ', ')
+
+            self.outdent()
+            self.writeline(')')
+
+        if outdent_later:
+            self.outdent()
+
+
+class NativeTemplate(Template):
+    def render(self, *args, **kwargs):
+        """Render the template to produce a native Python type. If the result
+        is a single node, its value is returned. Otherwise, the nodes are
+        concatenated as strings. If the result can be parsed with
+        :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
+        string is returned.
+        """
+        vars = dict(*args, **kwargs)
+
+        try:
+            return native_concat(self.root_render_func(self.new_context(vars)))
+        except Exception:
+            exc_info = sys.exc_info()
+
+        return self.environment.handle_exception(exc_info, True)
+
+
+class NativeEnvironment(Environment):
+    """An environment that renders templates to native Python types."""
+
+    code_generator_class = NativeCodeGenerator
+    template_class = NativeTemplate
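+
+# Typical usage (a brief sketch; the values are illustrative):
+#
+#     >>> env = NativeEnvironment()
+#     >>> tmpl = env.from_string('{{ x + y }}')
+#     >>> tmpl.render(x=4, y=2)
+#     6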
diff --git a/lib/python3.7/site-packages/jinja2/nodes.py b/lib/python3.7/site-packages/jinja2/nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d9a01ad8bb2e7b0b63ed483e7c4c34ed0a22033
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/nodes.py
@@ -0,0 +1,999 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.nodes
+    ~~~~~~~~~~~~
+
+    This module implements additional nodes derived from the ast base node.
+
+    It also provides some node tree helper functions like `in_lineno` and
+    `get_nodes` used by the parser and translator in order to normalize
+    python and jinja nodes.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+import types
+import operator
+
+from collections import deque
+from jinja2.utils import Markup
+from jinja2._compat import izip, with_metaclass, text_type, PY2
+
+
+#: the types we support for context functions
+_context_function_types = (types.FunctionType, types.MethodType)
+
+
+_binop_to_func = {
+    '*':        operator.mul,
+    '/':        operator.truediv,
+    '//':       operator.floordiv,
+    '**':       operator.pow,
+    '%':        operator.mod,
+    '+':        operator.add,
+    '-':        operator.sub
+}
+
+_uaop_to_func = {
+    'not':      operator.not_,
+    '+':        operator.pos,
+    '-':        operator.neg
+}
+
+_cmpop_to_func = {
+    'eq':       operator.eq,
+    'ne':       operator.ne,
+    'gt':       operator.gt,
+    'gteq':     operator.ge,
+    'lt':       operator.lt,
+    'lteq':     operator.le,
+    'in':       lambda a, b: a in b,
+    'notin':    lambda a, b: a not in b
+}
+
+
+class Impossible(Exception):
+    """Raised if the node could not perform a requested action."""
+
+
+class NodeType(type):
+    """A metaclass for nodes that handles the field and attribute
+    inheritance.  fields and attributes from the parent class are
+    automatically forwarded to the child."""
+
+    def __new__(cls, name, bases, d):
+        for attr in 'fields', 'attributes':
+            storage = []
+            storage.extend(getattr(bases[0], attr, ()))
+            storage.extend(d.get(attr, ()))
+            assert len(bases) == 1, 'multiple inheritance not allowed'
+            assert len(storage) == len(set(storage)), 'layout conflict'
+            d[attr] = tuple(storage)
+        d.setdefault('abstract', False)
+        return type.__new__(cls, name, bases, d)
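+
+    # Illustration (sketch): ``fields`` and ``attributes`` accumulate down
+    # the class hierarchy.  ``BinExpr`` below declares
+    # ``fields = ('left', 'right')``; concrete subclasses such as ``Add``
+    # inherit them unchanged, while the attributes ``('lineno',
+    # 'environment')`` come all the way from ``Node``.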
+
+
+class EvalContext(object):
+    """Holds evaluation time information.  Custom attributes can be attached
+    to it in extensions.
+    """
+
+    def __init__(self, environment, template_name=None):
+        self.environment = environment
+        if callable(environment.autoescape):
+            self.autoescape = environment.autoescape(template_name)
+        else:
+            self.autoescape = environment.autoescape
+        self.volatile = False
+
+    def save(self):
+        return self.__dict__.copy()
+
+    def revert(self, old):
+        self.__dict__.clear()
+        self.__dict__.update(old)
+
+
+def get_eval_context(node, ctx):
+    if ctx is None:
+        if node.environment is None:
+            raise RuntimeError('if no eval context is passed, the '
+                               'node must have an attached '
+                               'environment.')
+        return EvalContext(node.environment)
+    return ctx
+
+
+class Node(with_metaclass(NodeType, object)):
+    """Baseclass for all Jinja2 nodes.  There are a number of nodes available
+    of different types.  There are four major types:
+
+    -   :class:`Stmt`: statements
+    -   :class:`Expr`: expressions
+    -   :class:`Helper`: helper nodes
+    -   :class:`Template`: the outermost wrapper node
+
+    All nodes have fields and attributes.  Fields may be other nodes, lists,
+    or arbitrary values.  Fields are passed to the constructor as regular
+    positional arguments, attributes as keyword arguments.  Each node has
+    two attributes: `lineno` (the line number of the node) and `environment`.
+    The `environment` attribute is set at the end of the parsing process for
+    all nodes automatically.
+    """
+    fields = ()
+    attributes = ('lineno', 'environment')
+    abstract = True
+
+    def __init__(self, *fields, **attributes):
+        if self.abstract:
+            raise TypeError('abstract nodes are not instantiable')
+        if fields:
+            if len(fields) != len(self.fields):
+                if not self.fields:
+                    raise TypeError('%r takes 0 arguments' %
+                                    self.__class__.__name__)
+                raise TypeError('%r takes 0 or %d argument%s' % (
+                    self.__class__.__name__,
+                    len(self.fields),
+                    len(self.fields) != 1 and 's' or ''
+                ))
+            for name, arg in izip(self.fields, fields):
+                setattr(self, name, arg)
+        for attr in self.attributes:
+            setattr(self, attr, attributes.pop(attr, None))
+        if attributes:
+            raise TypeError('unknown attribute %r' %
+                            next(iter(attributes)))
+
+    def iter_fields(self, exclude=None, only=None):
+        """This method iterates over all fields that are defined and yields
+        ``(key, value)`` tuples.  By default all fields are returned, but
+        it's possible to limit that to some fields by providing the `only`
+        parameter or to exclude some using the `exclude` parameter.  Both
+        should be sets or tuples of field names.
+        """
+        for name in self.fields:
+            if (exclude is only is None) or \
+               (exclude is not None and name not in exclude) or \
+               (only is not None and name in only):
+                try:
+                    yield name, getattr(self, name)
+                except AttributeError:
+                    pass
+
+    def iter_child_nodes(self, exclude=None, only=None):
+        """Iterates over all direct child nodes of the node.  This iterates
+        over all fields and yields the values if they are nodes.  If the value
+        of a field is a list all the nodes in that list are returned.
+        """
+        for field, item in self.iter_fields(exclude, only):
+            if isinstance(item, list):
+                for n in item:
+                    if isinstance(n, Node):
+                        yield n
+            elif isinstance(item, Node):
+                yield item
+
+    def find(self, node_type):
+        """Find the first node of a given type.  If no such node exists the
+        return value is `None`.
+        """
+        for result in self.find_all(node_type):
+            return result
+
+    def find_all(self, node_type):
+        """Find all the nodes of a given type.  If the type is a tuple,
+        the check is performed for any of the tuple items.
+        """
+        for child in self.iter_child_nodes():
+            if isinstance(child, node_type):
+                yield child
+            for result in child.find_all(node_type):
+                yield result
+
+    def set_ctx(self, ctx):
+        """Reset the context of a node and all child nodes.  Per default the
+        parser will all generate nodes that have a 'load' context as it's the
+        most common one.  This method is used in the parser to set assignment
+        targets and other nodes to a store context.
+        """
+        todo = deque([self])
+        while todo:
+            node = todo.popleft()
+            if 'ctx' in node.fields:
+                node.ctx = ctx
+            todo.extend(node.iter_child_nodes())
+        return self
+
+    def set_lineno(self, lineno, override=False):
+        """Set the line numbers of the node and children."""
+        todo = deque([self])
+        while todo:
+            node = todo.popleft()
+            if 'lineno' in node.attributes:
+                if node.lineno is None or override:
+                    node.lineno = lineno
+            todo.extend(node.iter_child_nodes())
+        return self
+
+    def set_environment(self, environment):
+        """Set the environment for all nodes."""
+        todo = deque([self])
+        while todo:
+            node = todo.popleft()
+            node.environment = environment
+            todo.extend(node.iter_child_nodes())
+        return self
+
+    def __eq__(self, other):
+        return type(self) is type(other) and \
+               tuple(self.iter_fields()) == tuple(other.iter_fields())
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    # Restore Python 2 hashing behavior on Python 3
+    __hash__ = object.__hash__
+
+    def __repr__(self):
+        return '%s(%s)' % (
+            self.__class__.__name__,
+            ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
+                      arg in self.fields)
+        )
+
+    def dump(self):
+        def _dump(node):
+            if not isinstance(node, Node):
+                buf.append(repr(node))
+                return
+
+            buf.append('nodes.%s(' % node.__class__.__name__)
+            if not node.fields:
+                buf.append(')')
+                return
+            for idx, field in enumerate(node.fields):
+                if idx:
+                    buf.append(', ')
+                value = getattr(node, field)
+                if isinstance(value, list):
+                    buf.append('[')
+                    for idx, item in enumerate(value):
+                        if idx:
+                            buf.append(', ')
+                        _dump(item)
+                    buf.append(']')
+                else:
+                    _dump(value)
+            buf.append(')')
+        buf = []
+        _dump(self)
+        return ''.join(buf)
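+
+    # Small sketch of ``dump()`` output, assuming the module is imported
+    # as ``nodes``:
+    #
+    #     >>> nodes.Add(nodes.Const(1), nodes.Const(2)).dump()
+    #     'nodes.Add(nodes.Const(1), nodes.Const(2))'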
+
+
+class Stmt(Node):
+    """Base node for all statements."""
+    abstract = True
+
+
+class Helper(Node):
+    """Nodes that exist in a specific context only."""
+    abstract = True
+
+
+class Template(Node):
+    """Node that represents a template.  This must be the outermost node that
+    is passed to the compiler.
+    """
+    fields = ('body',)
+
+
+class Output(Stmt):
+    """A node that holds multiple expressions which are then printed out.
+    This is used both for the `print` statement and the regular template data.
+    """
+    fields = ('nodes',)
+
+
+class Extends(Stmt):
+    """Represents an extends statement."""
+    fields = ('template',)
+
+
+class For(Stmt):
+    """The for loop.  `target` is the target for the iteration (usually a
+    :class:`Name` or :class:`Tuple`), `iter` the iterable.  `body` is a list
+    of nodes that are used as loop-body, and `else_` a list of nodes for the
+    `else` block.  If no else node exists it has to be an empty list.
+
+    For filtered nodes an expression can be stored as `test`, otherwise `None`.
+    """
+    fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
+
+
+class If(Stmt):
+    """If `test` is true, `body` is rendered, else `else_`."""
+    fields = ('test', 'body', 'elif_', 'else_')
+
+
+class Macro(Stmt):
+    """A macro definition.  `name` is the name of the macro, `args` a list of
+    arguments and `defaults` a list of defaults if there are any.  `body` is
+    a list of nodes for the macro body.
+    """
+    fields = ('name', 'args', 'defaults', 'body')
+
+
+class CallBlock(Stmt):
+    """Like a macro without a name but a call instead.  `call` is called with
+    the unnamed macro as `caller` argument this node holds.
+    """
+    fields = ('call', 'args', 'defaults', 'body')
+
+
+class FilterBlock(Stmt):
+    """Node for filter sections."""
+    fields = ('body', 'filter')
+
+
+class With(Stmt):
+    """Specific node for with statements.  In older versions of Jinja the
+    with statement was implemented on top of the `Scope` node instead.
+
+    .. versionadded:: 2.9.3
+    """
+    fields = ('targets', 'values', 'body')
+
+
+class Block(Stmt):
+    """A node that represents a block."""
+    fields = ('name', 'body', 'scoped')
+
+
+class Include(Stmt):
+    """A node that represents the include tag."""
+    fields = ('template', 'with_context', 'ignore_missing')
+
+
+class Import(Stmt):
+    """A node that represents the import tag."""
+    fields = ('template', 'target', 'with_context')
+
+
+class FromImport(Stmt):
+    """A node that represents the from import tag.  It's important to not
+    pass unsafe names to the name attribute.  The compiler translates the
+    attribute lookups directly into getattr calls and does *not* use the
+    subscript callback of the interface.  As exported variables may not
+    start with double underscores (which the parser asserts) this is not a
+    problem for regular Jinja code, but if this node is used in an extension
+    extra care must be taken.
+
+    The list of names may contain tuples if aliases are wanted.
+    """
+    fields = ('template', 'names', 'with_context')
+
+
+class ExprStmt(Stmt):
+    """A statement that evaluates an expression and discards the result."""
+    fields = ('node',)
+
+
+class Assign(Stmt):
+    """Assigns an expression to a target."""
+    fields = ('target', 'node')
+
+
+class AssignBlock(Stmt):
+    """Assigns a block to a target."""
+    fields = ('target', 'filter', 'body')
+
+
+class Expr(Node):
+    """Baseclass for all expressions."""
+    abstract = True
+
+    def as_const(self, eval_ctx=None):
+        """Return the value of the expression as constant or raise
+        :exc:`Impossible` if this was not possible.
+
+        An :class:`EvalContext` can be provided, if none is given
+        a default context is created which requires the nodes to have
+        an attached environment.
+
+        .. versionchanged:: 2.4
+           the `eval_ctx` parameter was added.
+        """
+        raise Impossible()
+
+    def can_assign(self):
+        """Check if it's possible to assign something to this node."""
+        return False
+
+
+class BinExpr(Expr):
+    """Baseclass for all binary expressions."""
+    fields = ('left', 'right')
+    operator = None
+    abstract = True
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        # intercepted operators cannot be folded at compile time
+        if self.environment.sandboxed and \
+           self.operator in self.environment.intercepted_binops:
+            raise Impossible()
+        f = _binop_to_func[self.operator]
+        try:
+            return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
+        except Exception:
+            raise Impossible()
+
+
+class UnaryExpr(Expr):
+    """Baseclass for all unary expressions."""
+    fields = ('node',)
+    operator = None
+    abstract = True
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        # intercepted operators cannot be folded at compile time
+        if self.environment.sandboxed and \
+           self.operator in self.environment.intercepted_unops:
+            raise Impossible()
+        f = _uaop_to_func[self.operator]
+        try:
+            return f(self.node.as_const(eval_ctx))
+        except Exception:
+            raise Impossible()
+
+
+class Name(Expr):
+    """Looks up a name or stores a value in a name.
+    The `ctx` of the node can be one of the following values:
+
+    -   `store`: store a value in the name
+    -   `load`: load that name
+    -   `param`: like `store` but used when the name is defined as a function parameter.
+    """
+    fields = ('name', 'ctx')
+
+    def can_assign(self):
+        return self.name not in ('true', 'false', 'none',
+                                 'True', 'False', 'None')
+
+
+class NSRef(Expr):
+    """Reference to a namespace value assignment"""
+    fields = ('name', 'attr')
+
+    def can_assign(self):
+        # We don't need any special checks here; NSRef assignments have a
+        # runtime check to ensure the target is a namespace object which will
+        # have been checked already as it is created using a normal assignment
+        # which goes through a `Name` node.
+        return True
+
+
+class Literal(Expr):
+    """Baseclass for literals."""
+    abstract = True
+
+
+class Const(Literal):
+    """All constant values.  The parser will return this node for simple
+    constants such as ``42`` or ``"foo"`` but it can be used to store more
+    complex values such as lists too.  Only constants with a safe
+    representation (objects where ``eval(repr(x)) == x`` is true) are
+    supported.
+    """
+    fields = ('value',)
+
+    def as_const(self, eval_ctx=None):
+        rv = self.value
+        if PY2 and type(rv) is text_type and \
+           self.environment.policies['compiler.ascii_str']:
+            try:
+                rv = rv.encode('ascii')
+            except UnicodeError:
+                pass
+        return rv
+
+    @classmethod
+    def from_untrusted(cls, value, lineno=None, environment=None):
+        """Return a const object if the value is representable as
+        constant value in the generated code, otherwise it will raise
+        an `Impossible` exception.
+        """
+        from .compiler import has_safe_repr
+        if not has_safe_repr(value):
+            raise Impossible()
+        return cls(value, lineno=lineno, environment=environment)
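+
+    # Sketch: values with a round-trippable repr pass, anything else
+    # raises Impossible:
+    #
+    #     >>> Const.from_untrusted(42).value
+    #     42
+    #     >>> Const.from_untrusted(object())  # no safe repr
+    #     Traceback (most recent call last):
+    #       ...
+    #     Impossible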
+
+
+class TemplateData(Literal):
+    """A constant template string."""
+    fields = ('data',)
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        if eval_ctx.volatile:
+            raise Impossible()
+        if eval_ctx.autoescape:
+            return Markup(self.data)
+        return self.data
+
+
+class Tuple(Literal):
+    """For loop unpacking and some other things like multiple arguments
+    for subscripts.  As with :class:`Name`, `ctx` specifies whether the
+    tuple is used for loading or for storing names.
+    """
+    fields = ('items', 'ctx')
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        return tuple(x.as_const(eval_ctx) for x in self.items)
+
+    def can_assign(self):
+        for item in self.items:
+            if not item.can_assign():
+                return False
+        return True
+
+
+class List(Literal):
+    """Any list literal such as ``[1, 2, 3]``"""
+    fields = ('items',)
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        return [x.as_const(eval_ctx) for x in self.items]
+
+
+class Dict(Literal):
+    """Any dict literal such as ``{1: 2, 3: 4}``.  The items must be a list of
+    :class:`Pair` nodes.
+    """
+    fields = ('items',)
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        return dict(x.as_const(eval_ctx) for x in self.items)
+
+
+class Pair(Helper):
+    """A key, value pair for dicts."""
+    fields = ('key', 'value')
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
+
+
+class Keyword(Helper):
+    """A key, value pair for keyword arguments where key is a string."""
+    fields = ('key', 'value')
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        return self.key, self.value.as_const(eval_ctx)
+
+
+class CondExpr(Expr):
+    """A conditional expression (inline if expression).  (``{{
+    foo if bar else baz }}``)
+    """
+    fields = ('test', 'expr1', 'expr2')
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        if self.test.as_const(eval_ctx):
+            return self.expr1.as_const(eval_ctx)
+
+        # if we evaluate to an undefined object, we better do that at runtime
+        if self.expr2 is None:
+            raise Impossible()
+
+        return self.expr2.as_const(eval_ctx)
+
+
+def args_as_const(node, eval_ctx):
+    args = [x.as_const(eval_ctx) for x in node.args]
+    kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs)
+
+    if node.dyn_args is not None:
+        try:
+            args.extend(node.dyn_args.as_const(eval_ctx))
+        except Exception:
+            raise Impossible()
+
+    if node.dyn_kwargs is not None:
+        try:
+            kwargs.update(node.dyn_kwargs.as_const(eval_ctx))
+        except Exception:
+            raise Impossible()
+
+    return args, kwargs
+
+
+class Filter(Expr):
+    """This node applies a filter on an expression.  `name` is the name of
+    the filter, the rest of the fields are the same as for :class:`Call`.
+
+    If the `node` of a filter is `None` the contents of the last buffer are
+    filtered.  Buffers are created by macros and filter blocks.
+    """
+
+    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+
+        if eval_ctx.volatile or self.node is None:
+            raise Impossible()
+
+        # we have to be careful here because we call filter_ below.
+        # if this variable were called filter, 2to3 would wrap the
+        # call in a list because it is assuming we are talking about the
+        # builtin filter function here which no longer returns a list in
+        # python 3.  because of that, do not rename filter_ to filter!
+        filter_ = self.environment.filters.get(self.name)
+
+        if filter_ is None or getattr(filter_, 'contextfilter', False):
+            raise Impossible()
+
+        # We cannot evaluate async filters at compile time, so we need to
+        # make sure not to go down this path.
+        if (
+            eval_ctx.environment.is_async
+            and getattr(filter_, 'asyncfiltervariant', False)
+        ):
+            raise Impossible()
+
+        args, kwargs = args_as_const(self, eval_ctx)
+        args.insert(0, self.node.as_const(eval_ctx))
+
+        if getattr(filter_, 'evalcontextfilter', False):
+            args.insert(0, eval_ctx)
+        elif getattr(filter_, 'environmentfilter', False):
+            args.insert(0, self.environment)
+
+        try:
+            return filter_(*args, **kwargs)
+        except Exception:
+            raise Impossible()
+
+
+class Test(Expr):
+    """Applies a test on an expression.  `name` is the name of the test, the
+    rest of the fields are the same as for :class:`Call`.
+    """
+
+    fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+    def as_const(self, eval_ctx=None):
+        test = self.environment.tests.get(self.name)
+
+        if test is None:
+            raise Impossible()
+
+        eval_ctx = get_eval_context(self, eval_ctx)
+        args, kwargs = args_as_const(self, eval_ctx)
+        args.insert(0, self.node.as_const(eval_ctx))
+
+        try:
+            return test(*args, **kwargs)
+        except Exception:
+            raise Impossible()
+
+
+class Call(Expr):
+    """Calls an expression.  `args` is a list of arguments, `kwargs` a list
+    of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
+    and `dyn_kwargs` has to be either `None` or a node that is used as
+    node for dynamic positional (``*args``) or keyword (``**kwargs``)
+    arguments.
+    """
+    fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+
+class Getitem(Expr):
+    """Get an attribute or item from an expression and prefer the item."""
+    fields = ('node', 'arg', 'ctx')
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        if self.ctx != 'load':
+            raise Impossible()
+        try:
+            return self.environment.getitem(self.node.as_const(eval_ctx),
+                                            self.arg.as_const(eval_ctx))
+        except Exception:
+            raise Impossible()
+
+    def can_assign(self):
+        return False
+
+
+class Getattr(Expr):
+    """Get an attribute or item from an expression that is a ascii-only
+    bytestring and prefer the attribute.
+    """
+    fields = ('node', 'attr', 'ctx')
+
+    def as_const(self, eval_ctx=None):
+        if self.ctx != 'load':
+            raise Impossible()
+        try:
+            eval_ctx = get_eval_context(self, eval_ctx)
+            return self.environment.getattr(self.node.as_const(eval_ctx),
+                                            self.attr)
+        except Exception:
+            raise Impossible()
+
+    def can_assign(self):
+        return False
+
+
+class Slice(Expr):
+    """Represents a slice object.  This must only be used as argument for
+    :class:`Subscript`.
+    """
+    fields = ('start', 'stop', 'step')
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        def const(obj):
+            if obj is None:
+                return None
+            return obj.as_const(eval_ctx)
+        return slice(const(self.start), const(self.stop), const(self.step))
+
+
+class Concat(Expr):
+    """Concatenates the list of expressions provided after converting them to
+    unicode.
+    """
+    fields = ('nodes',)
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
+
+
+class Compare(Expr):
+    """Compares an expression with some other expressions.  `ops` must be a
+    list of :class:`Operand`\\s.
+    """
+    fields = ('expr', 'ops')
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        result = value = self.expr.as_const(eval_ctx)
+        try:
+            for op in self.ops:
+                new_value = op.expr.as_const(eval_ctx)
+                result = _cmpop_to_func[op.op](value, new_value)
+                value = new_value
+        except Exception:
+            raise Impossible()
+        return result
+
+
+class Operand(Helper):
+    """Holds an operator and an expression."""
+    fields = ('op', 'expr')
+
+if __debug__:
+    Operand.__doc__ += '\nThe following operators are available: ' + \
+        ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
+                  set(_uaop_to_func) | set(_cmpop_to_func)))
+
+
+class Mul(BinExpr):
+    """Multiplies the left with the right node."""
+    operator = '*'
+
+
+class Div(BinExpr):
+    """Divides the left by the right node."""
+    operator = '/'
+
+
+class FloorDiv(BinExpr):
+    """Divides the left by the right node and truncates conver the
+    result into an integer by truncating.
+    """
+    operator = '//'
+
+
+class Add(BinExpr):
+    """Add the left to the right node."""
+    operator = '+'
+
+
+class Sub(BinExpr):
+    """Subtract the right from the left node."""
+    operator = '-'
+
+
+class Mod(BinExpr):
+    """Left modulo right."""
+    operator = '%'
+
+
+class Pow(BinExpr):
+    """Left to the power of right."""
+    operator = '**'
+
+
+class And(BinExpr):
+    """Short circuited AND."""
+    operator = 'and'
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
+
+
+class Or(BinExpr):
+    """Short circuited OR."""
+    operator = 'or'
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
+
+
+class Not(UnaryExpr):
+    """Negate the expression."""
+    operator = 'not'
+
+
+class Neg(UnaryExpr):
+    """Make the expression negative."""
+    operator = '-'
+
+
+class Pos(UnaryExpr):
+    """Make the expression positive (noop for most expressions)"""
+    operator = '+'
+
+
+# Helpers for extensions
+
+
+class EnvironmentAttribute(Expr):
+    """Loads an attribute from the environment object.  This is useful for
+    extensions that want to call a callback stored on the environment.
+    """
+    fields = ('name',)
+
+
+class ExtensionAttribute(Expr):
+    """Returns the attribute of an extension bound to the environment.
+    The identifier is the identifier of the :class:`Extension`.
+
+    This node is usually constructed by calling the
+    :meth:`~jinja2.ext.Extension.attr` method on an extension.
+    """
+    fields = ('identifier', 'name')
+
+
+class ImportedName(Expr):
+    """If created with an import name the import name is returned on node
+    access.  For example ``ImportedName('cgi.escape')`` returns the `escape`
+    function from the cgi module on evaluation.  Imports are optimized by the
+    compiler so there is no need to assign them to local variables.
+    """
+    fields = ('importname',)
+
+
+class InternalName(Expr):
+    """An internal name in the compiler.  You cannot create these nodes
+    yourself but the parser provides a
+    :meth:`~jinja2.parser.Parser.free_identifier` method that creates
+    a new identifier for you.  This identifier is not available from the
+    template and is not treated specially by the compiler.
+    """
+    fields = ('name',)
+
+    def __init__(self):
+        raise TypeError('Can\'t create internal names.  Use the '
+                        '`free_identifier` method on a parser.')
+
+
+class MarkSafe(Expr):
+    """Mark the wrapped expression as safe (wrap it as `Markup`)."""
+    fields = ('expr',)
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        return Markup(self.expr.as_const(eval_ctx))
+
+
+class MarkSafeIfAutoescape(Expr):
+    """Mark the wrapped expression as safe (wrap it as `Markup`) but
+    only if autoescaping is active.
+
+    .. versionadded:: 2.5
+    """
+    fields = ('expr',)
+
+    def as_const(self, eval_ctx=None):
+        eval_ctx = get_eval_context(self, eval_ctx)
+        if eval_ctx.volatile:
+            raise Impossible()
+        expr = self.expr.as_const(eval_ctx)
+        if eval_ctx.autoescape:
+            return Markup(expr)
+        return expr
+
+
+class ContextReference(Expr):
+    """Returns the current template context.  It can be used like a
+    :class:`Name` node, with a ``'load'`` ctx and will return the
+    current :class:`~jinja2.runtime.Context` object.
+
+    Here is an example that assigns the current template name to a
+    variable named `foo`::
+
+        Assign(Name('foo', ctx='store'),
+               Getattr(ContextReference(), 'name'))
+    """
+
+
+class Continue(Stmt):
+    """Continue a loop."""
+
+
+class Break(Stmt):
+    """Break a loop."""
+
+
+class Scope(Stmt):
+    """An artificial scope."""
+    fields = ('body',)
+
+
+class OverlayScope(Stmt):
+    """An overlay scope for extensions.  This is a largely unoptimized scope
+    that however can be used to introduce completely arbitrary variables into
+    a sub scope from a dictionary or dictionary like object.  The `context`
+    field has to evaluate to a dictionary object.
+
+    Example usage::
+
+        OverlayScope(context=self.call_method('get_context'),
+                     body=[...])
+
+    .. versionadded:: 2.10
+    """
+    fields = ('context', 'body')
+
+
+class EvalContextModifier(Stmt):
+    """Modifies the eval context.  For each option that should be modified,
+    a :class:`Keyword` has to be added to the :attr:`options` list.
+
+    Example to change the `autoescape` setting::
+
+        EvalContextModifier(options=[Keyword('autoescape', Const(True))])
+    """
+    fields = ('options',)
+
+
+class ScopedEvalContextModifier(EvalContextModifier):
+    """Modifies the eval context and reverts it later.  Works exactly like
+    :class:`EvalContextModifier` but will only modify the
+    :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
+    """
+    fields = ('body',)
+
+
+# make sure nobody creates custom nodes
+def _failing_new(*args, **kwargs):
+    raise TypeError('can\'t create custom node types')
+NodeType.__new__ = staticmethod(_failing_new)
+del _failing_new
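+
+# Consequently, attempting to define a new node class raises (sketch):
+#
+#     >>> class MyNode(Stmt):
+#     ...     fields = ('x',)
+#     Traceback (most recent call last):
+#       ...
+#     TypeError: can't create custom node types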
diff --git a/lib/python3.7/site-packages/jinja2/optimizer.py b/lib/python3.7/site-packages/jinja2/optimizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..65ab3ceb71f6130e4229a8272c495a91cdea352c
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/optimizer.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.optimizer
+    ~~~~~~~~~~~~~~~~
+
+    The jinja optimizer currently tries to constant-fold a few expressions
+    and modify the AST in place so that it is easier to evaluate.
+
+    Because the AST does not contain all the scoping information and the
+    compiler has to find that out, we cannot do all the optimizations we
+    want.  For example loop unrolling doesn't work because unrolled loops would
+    have a different scoping.
+
+    The solution would be a second syntax tree that has the scoping rules stored.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD.
+"""
+from jinja2 import nodes
+from jinja2.visitor import NodeTransformer
+
+
+def optimize(node, environment):
+    """The context hint can be used to perform an static optimization
+    based on the context given."""
+    optimizer = Optimizer(environment)
+    return optimizer.visit(node)
+
+
+class Optimizer(NodeTransformer):
+
+    def __init__(self, environment):
+        self.environment = environment
+
+    def fold(self, node, eval_ctx=None):
+        """Do constant folding."""
+        node = self.generic_visit(node)
+        try:
+            return nodes.Const.from_untrusted(node.as_const(eval_ctx),
+                                              lineno=node.lineno,
+                                              environment=self.environment)
+        except nodes.Impossible:
+            return node
+
+    visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
+    visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
+    visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
+    visit_Filter = visit_Test = visit_CondExpr = fold
+    del fold
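+
+# Sketch of the effect (``optimize`` normally runs as part of template
+# compilation; here it is invoked directly for illustration):
+#
+#     >>> from jinja2 import Environment, nodes
+#     >>> env = Environment()
+#     >>> ast = optimize(env.parse('{{ 1 + 2 * 3 }}'), env)
+#     >>> ast.find(nodes.Const).value
+#     7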
diff --git a/lib/python3.7/site-packages/jinja2/parser.py b/lib/python3.7/site-packages/jinja2/parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed00d9708e962f16c6b9ce061be47068d9b9b1ad
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/parser.py
@@ -0,0 +1,903 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.parser
+    ~~~~~~~~~~~~~
+
+    Implements the template parser.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+from jinja2 import nodes
+from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
+from jinja2.lexer import describe_token, describe_token_expr
+from jinja2._compat import imap
+
+
+_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
+                                 'macro', 'include', 'from', 'import',
+                                 'set', 'with', 'autoescape'])
+_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
+
+_math_nodes = {
+    'add': nodes.Add,
+    'sub': nodes.Sub,
+    'mul': nodes.Mul,
+    'div': nodes.Div,
+    'floordiv': nodes.FloorDiv,
+    'mod': nodes.Mod,
+}
+
+
+class Parser(object):
+    """This is the central parsing class Jinja2 uses.  It's passed to
+    extensions and can be used to parse expressions or statements.
+    """
+
+    def __init__(self, environment, source, name=None, filename=None,
+                 state=None):
+        self.environment = environment
+        self.stream = environment._tokenize(source, name, filename, state)
+        self.name = name
+        self.filename = filename
+        self.closed = False
+        self.extensions = {}
+        for extension in environment.iter_extensions():
+            for tag in extension.tags:
+                self.extensions[tag] = extension.parse
+        self._last_identifier = 0
+        self._tag_stack = []
+        self._end_token_stack = []
+
+    def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
+        """Convenience method that raises `exc` with the message, passed
+        line number or last line number as well as the current name and
+        filename.
+        """
+        if lineno is None:
+            lineno = self.stream.current.lineno
+        raise exc(msg, lineno, self.name, self.filename)
+
+    def _fail_ut_eof(self, name, end_token_stack, lineno):
+        expected = []
+        for exprs in end_token_stack:
+            expected.extend(imap(describe_token_expr, exprs))
+        if end_token_stack:
+            currently_looking = ' or '.join(
+                "'%s'" % describe_token_expr(expr)
+                for expr in end_token_stack[-1])
+        else:
+            currently_looking = None
+
+        if name is None:
+            message = ['Unexpected end of template.']
+        else:
+            message = ['Encountered unknown tag \'%s\'.' % name]
+
+        if currently_looking:
+            if name is not None and name in expected:
+                message.append('You probably made a nesting mistake. Jinja '
+                               'is expecting this tag, but currently looking '
+                               'for %s.' % currently_looking)
+            else:
+                message.append('Jinja was looking for the following tags: '
+                               '%s.' % currently_looking)
+
+        if self._tag_stack:
+            message.append('The innermost block that needs to be '
+                           'closed is \'%s\'.' % self._tag_stack[-1])
+
+        self.fail(' '.join(message), lineno)
+
+    def fail_unknown_tag(self, name, lineno=None):
+        """Called if the parser encounters an unknown tag.  Tries to fail
+        with a human-readable error message that could help to identify
+        the problem.
+        """
+        return self._fail_ut_eof(name, self._end_token_stack, lineno)
+
+    def fail_eof(self, end_tokens=None, lineno=None):
+        """Like fail_unknown_tag but for end of template situations."""
+        stack = list(self._end_token_stack)
+        if end_tokens is not None:
+            stack.append(end_tokens)
+        return self._fail_ut_eof(None, stack, lineno)
+
+    def is_tuple_end(self, extra_end_rules=None):
+        """Are we at the end of a tuple?"""
+        if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
+            return True
+        elif extra_end_rules is not None:
+            return self.stream.current.test_any(extra_end_rules)
+        return False
+
+    def free_identifier(self, lineno=None):
+        """Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
+        self._last_identifier += 1
+        rv = object.__new__(nodes.InternalName)
+        nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
+        return rv
+
+    def parse_statement(self):
+        """Parse a single statement."""
+        token = self.stream.current
+        if token.type != 'name':
+            self.fail('tag name expected', token.lineno)
+        self._tag_stack.append(token.value)
+        pop_tag = True
+        try:
+            if token.value in _statement_keywords:
+                return getattr(self, 'parse_' + self.stream.current.value)()
+            if token.value == 'call':
+                return self.parse_call_block()
+            if token.value == 'filter':
+                return self.parse_filter_block()
+            ext = self.extensions.get(token.value)
+            if ext is not None:
+                return ext(self)
+
+            # did not work out, remove the token we pushed by accident
+            # from the stack so that the unknown tag fail function can
+            # produce a proper error message.
+            self._tag_stack.pop()
+            pop_tag = False
+            self.fail_unknown_tag(token.value, token.lineno)
+        finally:
+            if pop_tag:
+                self._tag_stack.pop()
+
+    def parse_statements(self, end_tokens, drop_needle=False):
+        """Parse multiple statements into a list until one of the end tokens
+        is reached.  This is used to parse the body of statements as it also
+        parses template data if appropriate.  The parser checks first if the
+        current token is a colon and skips it if there is one.  Then it checks
+        for the block end and parses until one of the `end_tokens` is
+        reached.  By default the active token in the stream at the end of
+        the call is the matched end token.  If this is not wanted `drop_needle`
+        can be set to `True` and the end token is removed.
+        """
+        # the first token may be a colon for python compatibility
+        self.stream.skip_if('colon')
+
+        # in the future it would be possible to add whole code sections
+        # by adding some sort of end of statement token and parsing those here.
+        self.stream.expect('block_end')
+        result = self.subparse(end_tokens)
+
+        # we reached the end of the template too early, the subparser
+        # does not check for this, so we do that now
+        if self.stream.current.type == 'eof':
+            self.fail_eof(end_tokens)
+
+        if drop_needle:
+            next(self.stream)
+        return result
+
+    def parse_set(self):
+        """Parse an assign statement."""
+        lineno = next(self.stream).lineno
+        target = self.parse_assign_target(with_namespace=True)
+        if self.stream.skip_if('assign'):
+            expr = self.parse_tuple()
+            return nodes.Assign(target, expr, lineno=lineno)
+        filter_node = self.parse_filter(None)
+        body = self.parse_statements(('name:endset',),
+                                     drop_needle=True)
+        return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
+
+    def parse_for(self):
+        """Parse a for loop."""
+        lineno = self.stream.expect('name:for').lineno
+        target = self.parse_assign_target(extra_end_rules=('name:in',))
+        self.stream.expect('name:in')
+        iter = self.parse_tuple(with_condexpr=False,
+                                extra_end_rules=('name:recursive',))
+        test = None
+        if self.stream.skip_if('name:if'):
+            test = self.parse_expression()
+        recursive = self.stream.skip_if('name:recursive')
+        body = self.parse_statements(('name:endfor', 'name:else'))
+        if next(self.stream).value == 'endfor':
+            else_ = []
+        else:
+            else_ = self.parse_statements(('name:endfor',), drop_needle=True)
+        return nodes.For(target, iter, body, else_, test,
+                         recursive, lineno=lineno)
+
+    def parse_if(self):
+        """Parse an if construct."""
+        node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
+        while 1:
+            node.test = self.parse_tuple(with_condexpr=False)
+            node.body = self.parse_statements(('name:elif', 'name:else',
+                                               'name:endif'))
+            node.elif_ = []
+            node.else_ = []
+            token = next(self.stream)
+            if token.test('name:elif'):
+                node = nodes.If(lineno=self.stream.current.lineno)
+                result.elif_.append(node)
+                continue
+            elif token.test('name:else'):
+                result.else_ = self.parse_statements(('name:endif',),
+                                                     drop_needle=True)
+            break
+        return result
+
+    def parse_with(self):
+        node = nodes.With(lineno=next(self.stream).lineno)
+        targets = []
+        values = []
+        while self.stream.current.type != 'block_end':
+            lineno = self.stream.current.lineno
+            if targets:
+                self.stream.expect('comma')
+            target = self.parse_assign_target()
+            target.set_ctx('param')
+            targets.append(target)
+            self.stream.expect('assign')
+            values.append(self.parse_expression())
+        node.targets = targets
+        node.values = values
+        node.body = self.parse_statements(('name:endwith',),
+                                          drop_needle=True)
+        return node
+
+    def parse_autoescape(self):
+        node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
+        node.options = [
+            nodes.Keyword('autoescape', self.parse_expression())
+        ]
+        node.body = self.parse_statements(('name:endautoescape',),
+                                          drop_needle=True)
+        return nodes.Scope([node])
+
+    def parse_block(self):
+        node = nodes.Block(lineno=next(self.stream).lineno)
+        node.name = self.stream.expect('name').value
+        node.scoped = self.stream.skip_if('name:scoped')
+
+        # common problem people encounter when switching from django
+        # to jinja.  we do not support hyphens in block names, so let's
+        # raise a nicer error message in that case.
+        if self.stream.current.type == 'sub':
+            self.fail('Block names in Jinja have to be valid Python '
+                      'identifiers and may not contain hyphens, use an '
+                      'underscore instead.')
+
+        node.body = self.parse_statements(('name:endblock',), drop_needle=True)
+        self.stream.skip_if('name:' + node.name)
+        return node
+
+    def parse_extends(self):
+        node = nodes.Extends(lineno=next(self.stream).lineno)
+        node.template = self.parse_expression()
+        return node
+
+    def parse_import_context(self, node, default):
+        if self.stream.current.test_any('name:with', 'name:without') and \
+           self.stream.look().test('name:context'):
+            node.with_context = next(self.stream).value == 'with'
+            self.stream.skip()
+        else:
+            node.with_context = default
+        return node
+
+    def parse_include(self):
+        node = nodes.Include(lineno=next(self.stream).lineno)
+        node.template = self.parse_expression()
+        if self.stream.current.test('name:ignore') and \
+           self.stream.look().test('name:missing'):
+            node.ignore_missing = True
+            self.stream.skip(2)
+        else:
+            node.ignore_missing = False
+        return self.parse_import_context(node, True)
+
+    def parse_import(self):
+        node = nodes.Import(lineno=next(self.stream).lineno)
+        node.template = self.parse_expression()
+        self.stream.expect('name:as')
+        node.target = self.parse_assign_target(name_only=True).name
+        return self.parse_import_context(node, False)
+
+    def parse_from(self):
+        node = nodes.FromImport(lineno=next(self.stream).lineno)
+        node.template = self.parse_expression()
+        self.stream.expect('name:import')
+        node.names = []
+
+        def parse_context():
+            if self.stream.current.value in ('with', 'without') and \
+               self.stream.look().test('name:context'):
+                node.with_context = next(self.stream).value == 'with'
+                self.stream.skip()
+                return True
+            return False
+
+        while 1:
+            if node.names:
+                self.stream.expect('comma')
+            if self.stream.current.type == 'name':
+                if parse_context():
+                    break
+                target = self.parse_assign_target(name_only=True)
+                if target.name.startswith('_'):
+                    self.fail('names starting with an underscore can not '
+                              'be imported', target.lineno,
+                              exc=TemplateAssertionError)
+                if self.stream.skip_if('name:as'):
+                    alias = self.parse_assign_target(name_only=True)
+                    node.names.append((target.name, alias.name))
+                else:
+                    node.names.append(target.name)
+                if parse_context() or self.stream.current.type != 'comma':
+                    break
+            else:
+                self.stream.expect('name')
+        if not hasattr(node, 'with_context'):
+            node.with_context = False
+        return node
+
+    def parse_signature(self, node):
+        node.args = args = []
+        node.defaults = defaults = []
+        self.stream.expect('lparen')
+        while self.stream.current.type != 'rparen':
+            if args:
+                self.stream.expect('comma')
+            arg = self.parse_assign_target(name_only=True)
+            arg.set_ctx('param')
+            if self.stream.skip_if('assign'):
+                defaults.append(self.parse_expression())
+            elif defaults:
+                self.fail('non-default argument follows default argument')
+            args.append(arg)
+        self.stream.expect('rparen')
+
+    def parse_call_block(self):
+        node = nodes.CallBlock(lineno=next(self.stream).lineno)
+        if self.stream.current.type == 'lparen':
+            self.parse_signature(node)
+        else:
+            node.args = []
+            node.defaults = []
+
+        node.call = self.parse_expression()
+        if not isinstance(node.call, nodes.Call):
+            self.fail('expected call', node.lineno)
+        node.body = self.parse_statements(('name:endcall',), drop_needle=True)
+        return node
+
+    def parse_filter_block(self):
+        node = nodes.FilterBlock(lineno=next(self.stream).lineno)
+        node.filter = self.parse_filter(None, start_inline=True)
+        node.body = self.parse_statements(('name:endfilter',),
+                                          drop_needle=True)
+        return node
+
+    def parse_macro(self):
+        node = nodes.Macro(lineno=next(self.stream).lineno)
+        node.name = self.parse_assign_target(name_only=True).name
+        self.parse_signature(node)
+        node.body = self.parse_statements(('name:endmacro',),
+                                          drop_needle=True)
+        return node
+
+    def parse_print(self):
+        node = nodes.Output(lineno=next(self.stream).lineno)
+        node.nodes = []
+        while self.stream.current.type != 'block_end':
+            if node.nodes:
+                self.stream.expect('comma')
+            node.nodes.append(self.parse_expression())
+        return node
+
+    def parse_assign_target(self, with_tuple=True, name_only=False,
+                            extra_end_rules=None, with_namespace=False):
+        """Parse an assignment target.  As Jinja2 allows assignments to
+        tuples, this function can parse all allowed assignment targets.  By
+        default assignments to tuples are parsed; that can be disabled by
+        setting `with_tuple` to `False`.  If only assignments to names are
+        wanted `name_only` can be set to `True`.  The `extra_end_rules`
+        parameter is forwarded to the tuple parsing function.  If
+        `with_namespace` is enabled, a namespace assignment may be parsed.
+        """
+        if with_namespace and self.stream.look().type == 'dot':
+            token = self.stream.expect('name')
+            next(self.stream)  # dot
+            attr = self.stream.expect('name')
+            target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
+        elif name_only:
+            token = self.stream.expect('name')
+            target = nodes.Name(token.value, 'store', lineno=token.lineno)
+        else:
+            if with_tuple:
+                target = self.parse_tuple(simplified=True,
+                                          extra_end_rules=extra_end_rules)
+            else:
+                target = self.parse_primary()
+            target.set_ctx('store')
+        if not target.can_assign():
+            self.fail('can\'t assign to %r'
+                      % target.__class__.__name__.lower(), target.lineno)
+        return target
+
+    def parse_expression(self, with_condexpr=True):
+        """Parse an expression.  Per default all expressions are parsed, if
+        the optional `with_condexpr` parameter is set to `False` conditional
+        expressions are not parsed.
+        """
+        if with_condexpr:
+            return self.parse_condexpr()
+        return self.parse_or()
+
+    def parse_condexpr(self):
+        lineno = self.stream.current.lineno
+        expr1 = self.parse_or()
+        while self.stream.skip_if('name:if'):
+            expr2 = self.parse_or()
+            if self.stream.skip_if('name:else'):
+                expr3 = self.parse_condexpr()
+            else:
+                expr3 = None
+            expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return expr1
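+
+    # For illustration: ``{{ 'yes' if x else 'no' }}`` becomes a
+    # CondExpr with ``x`` as the test; without an ``else`` branch
+    # ``expr3`` stays None and evaluates to undefined when the test
+    # fails.
+    #
+    # >>> from jinja2 import Environment
+    # >>> Environment().from_string("{{ 'yes' if x else 'no' }}").render(x=0)
+    # 'no'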
+
+    def parse_or(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_and()
+        while self.stream.skip_if('name:or'):
+            right = self.parse_and()
+            left = nodes.Or(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_and(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_not()
+        while self.stream.skip_if('name:and'):
+            right = self.parse_not()
+            left = nodes.And(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_not(self):
+        if self.stream.current.test('name:not'):
+            lineno = next(self.stream).lineno
+            return nodes.Not(self.parse_not(), lineno=lineno)
+        return self.parse_compare()
+
+    def parse_compare(self):
+        lineno = self.stream.current.lineno
+        expr = self.parse_math1()
+        ops = []
+        while 1:
+            token_type = self.stream.current.type
+            if token_type in _compare_operators:
+                next(self.stream)
+                ops.append(nodes.Operand(token_type, self.parse_math1()))
+            elif self.stream.skip_if('name:in'):
+                ops.append(nodes.Operand('in', self.parse_math1()))
+            elif (self.stream.current.test('name:not') and
+                  self.stream.look().test('name:in')):
+                self.stream.skip(2)
+                ops.append(nodes.Operand('notin', self.parse_math1()))
+            else:
+                break
+            lineno = self.stream.current.lineno
+        if not ops:
+            return expr
+        return nodes.Compare(expr, ops, lineno=lineno)
+
+    def parse_math1(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_concat()
+        while self.stream.current.type in ('add', 'sub'):
+            cls = _math_nodes[self.stream.current.type]
+            next(self.stream)
+            right = self.parse_concat()
+            left = cls(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_concat(self):
+        lineno = self.stream.current.lineno
+        args = [self.parse_math2()]
+        while self.stream.current.type == 'tilde':
+            next(self.stream)
+            args.append(self.parse_math2())
+        if len(args) == 1:
+            return args[0]
+        return nodes.Concat(args, lineno=lineno)
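+
+    # For illustration: ``~`` stringifies and joins its operands, so
+    # ``{{ 1 ~ 'x' }}`` renders '1x'.  With a single operand no Concat
+    # node is built and the plain expression is returned instead.
+    #
+    # >>> from jinja2 import Environment
+    # >>> Environment().from_string("{{ 1 ~ 'x' }}").render()
+    # '1x'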
+
+    def parse_math2(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_pow()
+        while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'):
+            cls = _math_nodes[self.stream.current.type]
+            next(self.stream)
+            right = self.parse_pow()
+            left = cls(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_pow(self):
+        lineno = self.stream.current.lineno
+        left = self.parse_unary()
+        while self.stream.current.type == 'pow':
+            next(self.stream)
+            right = self.parse_unary()
+            left = nodes.Pow(left, right, lineno=lineno)
+            lineno = self.stream.current.lineno
+        return left
+
+    def parse_unary(self, with_filter=True):
+        token_type = self.stream.current.type
+        lineno = self.stream.current.lineno
+        if token_type == 'sub':
+            next(self.stream)
+            node = nodes.Neg(self.parse_unary(False), lineno=lineno)
+        elif token_type == 'add':
+            next(self.stream)
+            node = nodes.Pos(self.parse_unary(False), lineno=lineno)
+        else:
+            node = self.parse_primary()
+        node = self.parse_postfix(node)
+        if with_filter:
+            node = self.parse_filter_expr(node)
+        return node
+
+    def parse_primary(self):
+        token = self.stream.current
+        if token.type == 'name':
+            if token.value in ('true', 'false', 'True', 'False'):
+                node = nodes.Const(token.value in ('true', 'True'),
+                                   lineno=token.lineno)
+            elif token.value in ('none', 'None'):
+                node = nodes.Const(None, lineno=token.lineno)
+            else:
+                node = nodes.Name(token.value, 'load', lineno=token.lineno)
+            next(self.stream)
+        elif token.type == 'string':
+            next(self.stream)
+            buf = [token.value]
+            lineno = token.lineno
+            while self.stream.current.type == 'string':
+                buf.append(self.stream.current.value)
+                next(self.stream)
+            node = nodes.Const(''.join(buf), lineno=lineno)
+        elif token.type in ('integer', 'float'):
+            next(self.stream)
+            node = nodes.Const(token.value, lineno=token.lineno)
+        elif token.type == 'lparen':
+            next(self.stream)
+            node = self.parse_tuple(explicit_parentheses=True)
+            self.stream.expect('rparen')
+        elif token.type == 'lbracket':
+            node = self.parse_list()
+        elif token.type == 'lbrace':
+            node = self.parse_dict()
+        else:
+            self.fail("unexpected '%s'" % describe_token(token), token.lineno)
+        return node
+
+    def parse_tuple(self, simplified=False, with_condexpr=True,
+                    extra_end_rules=None, explicit_parentheses=False):
+        """Works like `parse_expression` but if multiple expressions are
+        delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
+        This method could also return a regular expression instead of a tuple
+        if no commas where found.
+
+        The default parsing mode is a full tuple.  If `simplified` is `True`
+        only names and literals are parsed.  The `no_condexpr` parameter is
+        forwarded to :meth:`parse_expression`.
+
+        Because tuples do not require delimiters and may end in a bogus comma
+        an extra hint is needed that marks the end of a tuple.  For example
+        for loops support tuples between `for` and `in`.  In that case the
+        `extra_end_rules` is set to ``['name:in']``.
+
+        `explicit_parentheses` is true if the parsing was triggered by an
+        expression in parentheses.  This is used to figure out if an empty
+        tuple is a valid expression or not.
+        """
+        lineno = self.stream.current.lineno
+        if simplified:
+            parse = self.parse_primary
+        elif with_condexpr:
+            parse = self.parse_expression
+        else:
+            parse = lambda: self.parse_expression(with_condexpr=False)
+        args = []
+        is_tuple = False
+        while 1:
+            if args:
+                self.stream.expect('comma')
+            if self.is_tuple_end(extra_end_rules):
+                break
+            args.append(parse())
+            if self.stream.current.type == 'comma':
+                is_tuple = True
+            else:
+                break
+            lineno = self.stream.current.lineno
+
+        if not is_tuple:
+            if args:
+                return args[0]
+
+            # if we don't have explicit parentheses, an empty tuple is
+            # not a valid expression.  This would mean nothing (literally
+            # nothing) in the spot of an expression would be an empty
+            # tuple.
+            if not explicit_parentheses:
+                self.fail('Expected an expression, got \'%s\'' %
+                          describe_token(self.stream.current))
+
+        return nodes.Tuple(args, 'load', lineno=lineno)
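+
+    # For illustration: ``{{ 1, 2 }}`` parses as a Tuple and renders
+    # like the Python tuple, while ``(1)`` collapses to the bare
+    # expression because no comma was seen.
+    #
+    # >>> from jinja2 import Environment
+    # >>> Environment().from_string("{{ 1, 2 }}").render()
+    # '(1, 2)'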
+
+    def parse_list(self):
+        token = self.stream.expect('lbracket')
+        items = []
+        while self.stream.current.type != 'rbracket':
+            if items:
+                self.stream.expect('comma')
+            if self.stream.current.type == 'rbracket':
+                break
+            items.append(self.parse_expression())
+        self.stream.expect('rbracket')
+        return nodes.List(items, lineno=token.lineno)
+
+    def parse_dict(self):
+        token = self.stream.expect('lbrace')
+        items = []
+        while self.stream.current.type != 'rbrace':
+            if items:
+                self.stream.expect('comma')
+            if self.stream.current.type == 'rbrace':
+                break
+            key = self.parse_expression()
+            self.stream.expect('colon')
+            value = self.parse_expression()
+            items.append(nodes.Pair(key, value, lineno=key.lineno))
+        self.stream.expect('rbrace')
+        return nodes.Dict(items, lineno=token.lineno)
+
+    def parse_postfix(self, node):
+        while 1:
+            token_type = self.stream.current.type
+            if token_type == 'dot' or token_type == 'lbracket':
+                node = self.parse_subscript(node)
+            # calls are valid both after postfix expressions (getattr
+            # and getitem) and after filters and tests
+            elif token_type == 'lparen':
+                node = self.parse_call(node)
+            else:
+                break
+        return node
+
+    def parse_filter_expr(self, node):
+        while 1:
+            token_type = self.stream.current.type
+            if token_type == 'pipe':
+                node = self.parse_filter(node)
+            elif token_type == 'name' and self.stream.current.value == 'is':
+                node = self.parse_test(node)
+            # calls are valid both after postfix expressions (getattr
+            # and getitem) and after filters and tests
+            elif token_type == 'lparen':
+                node = self.parse_call(node)
+            else:
+                break
+        return node
+
+    def parse_subscript(self, node):
+        token = next(self.stream)
+        if token.type == 'dot':
+            attr_token = self.stream.current
+            next(self.stream)
+            if attr_token.type == 'name':
+                return nodes.Getattr(node, attr_token.value, 'load',
+                                     lineno=token.lineno)
+            elif attr_token.type != 'integer':
+                self.fail('expected name or number', attr_token.lineno)
+            arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
+            return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
+        if token.type == 'lbracket':
+            args = []
+            while self.stream.current.type != 'rbracket':
+                if args:
+                    self.stream.expect('comma')
+                args.append(self.parse_subscribed())
+            self.stream.expect('rbracket')
+            if len(args) == 1:
+                arg = args[0]
+            else:
+                arg = nodes.Tuple(args, 'load', lineno=token.lineno)
+            return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
+        self.fail('expected subscript expression', token.lineno)
+
+    def parse_subscribed(self):
+        lineno = self.stream.current.lineno
+
+        if self.stream.current.type == 'colon':
+            next(self.stream)
+            args = [None]
+        else:
+            node = self.parse_expression()
+            if self.stream.current.type != 'colon':
+                return node
+            next(self.stream)
+            args = [node]
+
+        if self.stream.current.type == 'colon':
+            args.append(None)
+        elif self.stream.current.type not in ('rbracket', 'comma'):
+            args.append(self.parse_expression())
+        else:
+            args.append(None)
+
+        if self.stream.current.type == 'colon':
+            next(self.stream)
+            if self.stream.current.type not in ('rbracket', 'comma'):
+                args.append(self.parse_expression())
+            else:
+                args.append(None)
+        else:
+            args.append(None)
+
+        return nodes.Slice(lineno=lineno, *args)
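+
+    # For illustration: each colon-separated part may be omitted, so
+    # ``x[1:]``, ``x[:2]`` and ``x[::2]`` all parse to Slice nodes with
+    # None standing in for the missing start/stop/step.
+    #
+    # >>> from jinja2 import Environment
+    # >>> Environment().from_string("{{ 'abcdef'[1:4] }}").render()
+    # 'bcd'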
+
+    def parse_call(self, node):
+        token = self.stream.expect('lparen')
+        args = []
+        kwargs = []
+        dyn_args = dyn_kwargs = None
+        require_comma = False
+
+        def ensure(expr):
+            if not expr:
+                self.fail('invalid syntax for function call expression',
+                          token.lineno)
+
+        while self.stream.current.type != 'rparen':
+            if require_comma:
+                self.stream.expect('comma')
+                # support for trailing comma
+                if self.stream.current.type == 'rparen':
+                    break
+            if self.stream.current.type == 'mul':
+                ensure(dyn_args is None and dyn_kwargs is None)
+                next(self.stream)
+                dyn_args = self.parse_expression()
+            elif self.stream.current.type == 'pow':
+                ensure(dyn_kwargs is None)
+                next(self.stream)
+                dyn_kwargs = self.parse_expression()
+            else:
+                ensure(dyn_args is None and dyn_kwargs is None)
+                if self.stream.current.type == 'name' and \
+                   self.stream.look().type == 'assign':
+                    key = self.stream.current.value
+                    self.stream.skip(2)
+                    value = self.parse_expression()
+                    kwargs.append(nodes.Keyword(key, value,
+                                                lineno=value.lineno))
+                else:
+                    ensure(not kwargs)
+                    args.append(self.parse_expression())
+
+            require_comma = True
+        self.stream.expect('rparen')
+
+        if node is None:
+            return args, kwargs, dyn_args, dyn_kwargs
+        return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
+                          lineno=token.lineno)
+
+    def parse_filter(self, node, start_inline=False):
+        while self.stream.current.type == 'pipe' or start_inline:
+            if not start_inline:
+                next(self.stream)
+            token = self.stream.expect('name')
+            name = token.value
+            while self.stream.current.type == 'dot':
+                next(self.stream)
+                name += '.' + self.stream.expect('name').value
+            if self.stream.current.type == 'lparen':
+                args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
+            else:
+                args = []
+                kwargs = []
+                dyn_args = dyn_kwargs = None
+            node = nodes.Filter(node, name, args, kwargs, dyn_args,
+                                dyn_kwargs, lineno=token.lineno)
+            start_inline = False
+        return node
+
+    def parse_test(self, node):
+        token = next(self.stream)
+        if self.stream.current.test('name:not'):
+            next(self.stream)
+            negated = True
+        else:
+            negated = False
+        name = self.stream.expect('name').value
+        while self.stream.current.type == 'dot':
+            next(self.stream)
+            name += '.' + self.stream.expect('name').value
+        dyn_args = dyn_kwargs = None
+        kwargs = []
+        if self.stream.current.type == 'lparen':
+            args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
+        elif (self.stream.current.type in ('name', 'string', 'integer',
+                                           'float', 'lparen', 'lbracket',
+                                           'lbrace') and not
+              self.stream.current.test_any('name:else', 'name:or',
+                                           'name:and')):
+            if self.stream.current.test('name:is'):
+                self.fail('You cannot chain multiple tests with is')
+            args = [self.parse_primary()]
+        else:
+            args = []
+        node = nodes.Test(node, name, args, kwargs, dyn_args,
+                          dyn_kwargs, lineno=token.lineno)
+        if negated:
+            node = nodes.Not(node, lineno=token.lineno)
+        return node
+
+    def subparse(self, end_tokens=None):
+        body = []
+        data_buffer = []
+        add_data = data_buffer.append
+
+        if end_tokens is not None:
+            self._end_token_stack.append(end_tokens)
+
+        def flush_data():
+            if data_buffer:
+                lineno = data_buffer[0].lineno
+                body.append(nodes.Output(data_buffer[:], lineno=lineno))
+                del data_buffer[:]
+
+        try:
+            while self.stream:
+                token = self.stream.current
+                if token.type == 'data':
+                    if token.value:
+                        add_data(nodes.TemplateData(token.value,
+                                                    lineno=token.lineno))
+                    next(self.stream)
+                elif token.type == 'variable_begin':
+                    next(self.stream)
+                    add_data(self.parse_tuple(with_condexpr=True))
+                    self.stream.expect('variable_end')
+                elif token.type == 'block_begin':
+                    flush_data()
+                    next(self.stream)
+                    if end_tokens is not None and \
+                       self.stream.current.test_any(*end_tokens):
+                        return body
+                    rv = self.parse_statement()
+                    if isinstance(rv, list):
+                        body.extend(rv)
+                    else:
+                        body.append(rv)
+                    self.stream.expect('block_end')
+                else:
+                    raise AssertionError('internal parsing error')
+
+            flush_data()
+        finally:
+            if end_tokens is not None:
+                self._end_token_stack.pop()
+
+        return body
+
+    def parse(self):
+        """Parse the whole template into a `Template` node."""
+        result = nodes.Template(self.subparse(), lineno=1)
+        result.set_environment(self.environment)
+        return result
diff --git a/lib/python3.7/site-packages/jinja2/runtime.py b/lib/python3.7/site-packages/jinja2/runtime.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9d7a6806caf54996c05f2e108883d380eeec923
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/runtime.py
@@ -0,0 +1,813 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.runtime
+    ~~~~~~~~~~~~~~
+
+    Runtime helpers.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD.
+"""
+import sys
+
+from itertools import chain
+from types import MethodType
+
+from jinja2.nodes import EvalContext, _context_function_types
+from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
+     internalcode, object_type_repr, evalcontextfunction, Namespace
+from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
+     TemplateNotFound
+from jinja2._compat import imap, text_type, iteritems, \
+     implements_iterator, implements_to_string, string_types, PY2, \
+     with_metaclass
+
+
+# these variables are exported to the template runtime
+__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
+           'TemplateRuntimeError', 'missing', 'concat', 'escape',
+           'markup_join', 'unicode_join', 'to_string', 'identity',
+           'TemplateNotFound', 'Namespace']
+
+#: the name of the function that is used to convert something into
+#: a string.  We can just use the text type here.
+to_string = text_type
+
+#: the identity function.  Useful for certain things in the environment
+identity = lambda x: x
+
+_first_iteration = object()
+_last_iteration = object()
+
+
+def markup_join(seq):
+    """Concatenation that escapes if necessary and converts to unicode."""
+    buf = []
+    iterator = imap(soft_unicode, seq)
+    for arg in iterator:
+        buf.append(arg)
+        if hasattr(arg, '__html__'):
+            return Markup(u'').join(chain(buf, iterator))
+    return concat(buf)
+
+
+def unicode_join(seq):
+    """Simple args to unicode conversion and concatenation."""
+    return concat(imap(text_type, seq))
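+
+
+# For illustration: the ``~`` concatenation operator compiles to
+# markup_join when autoescaping is active, so a single Markup element
+# upgrades the whole join and escapes the plain strings around it;
+# e.g. markup_join(['<b>', Markup('<i>ok</i>')]) returns
+# Markup('&lt;b&gt;<i>ok</i>'), while unicode_join would leave the
+# angle brackets untouched.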
+
+
+def new_context(environment, template_name, blocks, vars=None,
+                shared=None, globals=None, locals=None):
+    """Internal helper to for context creation."""
+    if vars is None:
+        vars = {}
+    if shared:
+        parent = vars
+    else:
+        parent = dict(globals or (), **vars)
+    if locals:
+        # if the parent is shared a copy should be created because
+        # we don't want to modify the dict passed
+        if shared:
+            parent = dict(parent)
+        for key, value in iteritems(locals):
+            if value is not missing:
+                parent[key] = value
+    return environment.context_class(environment, parent, template_name,
+                                     blocks)
+
+
+class TemplateReference(object):
+    """The `self` in templates."""
+
+    def __init__(self, context):
+        self.__context = context
+
+    def __getitem__(self, name):
+        blocks = self.__context.blocks[name]
+        return BlockReference(name, self.__context, blocks, 0)
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__,
+            self.__context.name
+        )
+
+
+def _get_func(x):
+    return getattr(x, '__func__', x)
+
+
+class ContextMeta(type):
+
+    def __new__(cls, name, bases, d):
+        rv = type.__new__(cls, name, bases, d)
+        if bases == ():
+            return rv
+
+        resolve = _get_func(rv.resolve)
+        default_resolve = _get_func(Context.resolve)
+        resolve_or_missing = _get_func(rv.resolve_or_missing)
+        default_resolve_or_missing = _get_func(Context.resolve_or_missing)
+
+        # If we have a changed resolve but no changed default or missing
+        # resolve we invert the call logic.
+        if resolve is not default_resolve and \
+           resolve_or_missing is default_resolve_or_missing:
+            rv._legacy_resolve_mode = True
+        elif resolve is default_resolve and \
+             resolve_or_missing is default_resolve_or_missing:
+            rv._fast_resolve_mode = True
+
+        return rv
+
+
+def resolve_or_missing(context, key, missing=missing):
+    if key in context.vars:
+        return context.vars[key]
+    if key in context.parent:
+        return context.parent[key]
+    return missing
+
+
+class Context(with_metaclass(ContextMeta)):
+    """The template context holds the variables of a template.  It stores the
+    values passed to the template and also the names the template exports.
+    Creating instances is neither supported nor useful as it's created
+    automatically at various stages of the template evaluation and should not
+    be created by hand.
+
+    The context is immutable.  Modifications on :attr:`parent` **must not**
+    happen and modifications on :attr:`vars` are allowed from generated
+    template code only.  Template filters and global functions marked as
+    :func:`contextfunction`\\s get the active context passed as first argument
+    and are allowed to access the context read-only.
+
+    The template context supports read only dict operations (`get`,
+    `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
+    `__getitem__`, `__contains__`).  Additionally there is a :meth:`resolve`
+    method that doesn't fail with a `KeyError` but returns an
+    :class:`Undefined` object for missing variables.
+    """
+    # XXX: we want to eventually make this a deprecation warning and
+    # remove it.
+    _legacy_resolve_mode = False
+    _fast_resolve_mode = False
+
+    def __init__(self, environment, parent, name, blocks):
+        self.parent = parent
+        self.vars = {}
+        self.environment = environment
+        self.eval_ctx = EvalContext(self.environment, name)
+        self.exported_vars = set()
+        self.name = name
+
+        # create the initial mapping of blocks.  Whenever template inheritance
+        # takes place the runtime will update this mapping with the new blocks
+        # from the template.
+        self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
+
+        # In case we detect the fast resolve mode we can set up an alias
+        # here that bypasses the legacy code logic.
+        if self._fast_resolve_mode:
+            self.resolve_or_missing = MethodType(resolve_or_missing, self)
+
+    def super(self, name, current):
+        """Render a parent block."""
+        try:
+            blocks = self.blocks[name]
+            index = blocks.index(current) + 1
+            blocks[index]  # raises IndexError if there is no parent block
+        except LookupError:
+            return self.environment.undefined('there is no parent block '
+                                              'called %r.' % name,
+                                              name='super')
+        return BlockReference(name, self, blocks, index)
+
+    def get(self, key, default=None):
+        """Returns an item from the template context, if it doesn't exist
+        `default` is returned.
+        """
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def resolve(self, key):
+        """Looks up a variable like `__getitem__` or `get` but returns an
+        :class:`Undefined` object with the name of the name looked up.
+        """
+        if self._legacy_resolve_mode:
+            rv = resolve_or_missing(self, key)
+        else:
+            rv = self.resolve_or_missing(key)
+        if rv is missing:
+            return self.environment.undefined(name=key)
+        return rv
+
+    def resolve_or_missing(self, key):
+        """Resolves a variable like :meth:`resolve` but returns the
+        special `missing` value if it cannot be found.
+        """
+        if self._legacy_resolve_mode:
+            rv = self.resolve(key)
+            if isinstance(rv, Undefined):
+                rv = missing
+            return rv
+        return resolve_or_missing(self, key)
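+
+    # Lookup order, for illustration: ``vars`` (names written by the
+    # template) shadows ``parent`` (globals plus the arguments passed to
+    # render); anything else resolves to the ``missing`` sentinel, which
+    # :meth:`resolve` converts into an :class:`Undefined` instance.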
+
+    def get_exported(self):
+        """Get a new dict with the exported variables."""
+        return dict((k, self.vars[k]) for k in self.exported_vars)
+
+    def get_all(self):
+        """Return the complete context as dict including the exported
+        variables.  For optimization reasons this might not return an
+        actual copy, so be careful when using it.
+        """
+        if not self.vars:
+            return self.parent
+        if not self.parent:
+            return self.vars
+        return dict(self.parent, **self.vars)
+
+    @internalcode
+    def call(__self, __obj, *args, **kwargs):
+        """Call the callable with the arguments and keyword arguments
+        provided but inject the active context or environment as first
+        argument if the callable is a :func:`contextfunction` or
+        :func:`environmentfunction`.
+        """
+        if __debug__:
+            __traceback_hide__ = True  # noqa
+
+        # Allow callable classes to take a context
+        if hasattr(__obj, '__call__'):
+            fn = __obj.__call__
+            for fn_type in ('contextfunction',
+                            'evalcontextfunction',
+                            'environmentfunction'):
+                if hasattr(fn, fn_type):
+                    __obj = fn
+                    break
+
+        if isinstance(__obj, _context_function_types):
+            if getattr(__obj, 'contextfunction', 0):
+                args = (__self,) + args
+            elif getattr(__obj, 'evalcontextfunction', 0):
+                args = (__self.eval_ctx,) + args
+            elif getattr(__obj, 'environmentfunction', 0):
+                args = (__self.environment,) + args
+        try:
+            return __obj(*args, **kwargs)
+        except StopIteration:
+            return __self.environment.undefined('value was undefined because '
+                                                'a callable raised a '
+                                                'StopIteration exception')
+
+    def derived(self, locals=None):
+        """Internal helper function to create a derived context.  This is
+        used in situations where the system needs a new context in the same
+        template that is independent.
+        """
+        context = new_context(self.environment, self.name, {},
+                              self.get_all(), True, None, locals)
+        context.eval_ctx = self.eval_ctx
+        context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
+        return context
+
+    def _all(meth):
+        proxy = lambda self: getattr(self.get_all(), meth)()
+        proxy.__doc__ = getattr(dict, meth).__doc__
+        proxy.__name__ = meth
+        return proxy
+
+    keys = _all('keys')
+    values = _all('values')
+    items = _all('items')
+
+    # not available on python 3
+    if PY2:
+        iterkeys = _all('iterkeys')
+        itervalues = _all('itervalues')
+        iteritems = _all('iteritems')
+    del _all
+
+    def __contains__(self, name):
+        return name in self.vars or name in self.parent
+
+    def __getitem__(self, key):
+        """Lookup a variable or raise `KeyError` if the variable is
+        undefined.
+        """
+        item = self.resolve_or_missing(key)
+        if item is missing:
+            raise KeyError(key)
+        return item
+
+    def __repr__(self):
+        return '<%s %s of %r>' % (
+            self.__class__.__name__,
+            repr(self.get_all()),
+            self.name
+        )
+
+
+# register the context as mapping if possible
+try:
+    from collections import Mapping
+    Mapping.register(Context)
+except ImportError:
+    pass
+
+
+class BlockReference(object):
+    """One block on a template reference."""
+
+    def __init__(self, name, context, stack, depth):
+        self.name = name
+        self._context = context
+        self._stack = stack
+        self._depth = depth
+
+    @property
+    def super(self):
+        """Super the block."""
+        if self._depth + 1 >= len(self._stack):
+            return self._context.environment. \
+                undefined('there is no parent block called %r.' %
+                          self.name, name='super')
+        return BlockReference(self.name, self._context, self._stack,
+                              self._depth + 1)
+
+    @internalcode
+    def __call__(self):
+        rv = concat(self._stack[self._depth](self._context))
+        if self._context.eval_ctx.autoescape:
+            rv = Markup(rv)
+        return rv
+
+
+class LoopContextBase(object):
+    """A loop context for dynamic iteration."""
+
+    _before = _first_iteration
+    _current = _first_iteration
+    _after = _last_iteration
+    _length = None
+
+    def __init__(self, undefined, recurse=None, depth0=0):
+        self._undefined = undefined
+        self._recurse = recurse
+        self.index0 = -1
+        self.depth0 = depth0
+        self._last_checked_value = missing
+
+    def cycle(self, *args):
+        """Cycles among the arguments with the current loop index."""
+        if not args:
+            raise TypeError('no items for cycling given')
+        return args[self.index0 % len(args)]
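+
+    # For illustration: the cycle follows ``index0``, so inside a for
+    # loop ``loop.cycle('a', 'b')`` yields 'a', 'b', 'a', ... per
+    # iteration.
+    #
+    # >>> from jinja2 import Environment
+    # >>> Environment().from_string(
+    # ...     "{% for i in [1, 2, 3] %}{{ loop.cycle('a', 'b') }}"
+    # ...     "{% endfor %}").render()
+    # 'aba'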
+
+    def changed(self, *value):
+        """Checks whether the value has changed since the last call."""
+        if self._last_checked_value != value:
+            self._last_checked_value = value
+            return True
+        return False
+
+    first = property(lambda x: x.index0 == 0)
+    last = property(lambda x: x._after is _last_iteration)
+    index = property(lambda x: x.index0 + 1)
+    revindex = property(lambda x: x.length - x.index0)
+    revindex0 = property(lambda x: x.length - x.index)
+    depth = property(lambda x: x.depth0 + 1)
+
+    @property
+    def previtem(self):
+        if self._before is _first_iteration:
+            return self._undefined('there is no previous item')
+        return self._before
+
+    @property
+    def nextitem(self):
+        if self._after is _last_iteration:
+            return self._undefined('there is no next item')
+        return self._after
+
+    def __len__(self):
+        return self.length
+
+    @internalcode
+    def loop(self, iterable):
+        if self._recurse is None:
+            raise TypeError('Tried to call non recursive loop.  Maybe you '
+                            "forgot the 'recursive' modifier.")
+        return self._recurse(iterable, self._recurse, self.depth0 + 1)
+
+    # a nifty trick to enhance the error message if someone tries to call
+    # the loop without arguments or with too many arguments.
+    __call__ = loop
+    del loop
+
+    def __repr__(self):
+        return '<%s %r/%r>' % (
+            self.__class__.__name__,
+            self.index,
+            self.length
+        )
+
+
+class LoopContext(LoopContextBase):
+
+    def __init__(self, iterable, undefined, recurse=None, depth0=0):
+        LoopContextBase.__init__(self, undefined, recurse, depth0)
+        self._iterator = iter(iterable)
+
+        # try to get the length of the iterable early.  This must be done
+        # here because there are some broken iterators around whose
+        # __len__ is the number of iterations left (I'm looking at you,
+        # listreverseiterator!).
+        try:
+            self._length = len(iterable)
+        except (TypeError, AttributeError):
+            self._length = None
+        self._after = self._safe_next()
+
+    @property
+    def length(self):
+        if self._length is None:
+            # if it was not possible to get the length of the iterator when
+            # the loop context was created (i.e. iterating over a generator)
+            # we have to convert the iterable into a sequence and use the
+            # length of that plus the number of iterations so far.
+            iterable = tuple(self._iterator)
+            self._iterator = iter(iterable)
+            iterations_done = self.index0 + 2
+            self._length = len(iterable) + iterations_done
+        return self._length
+
+    def __iter__(self):
+        return LoopContextIterator(self)
+
+    def _safe_next(self):
+        try:
+            return next(self._iterator)
+        except StopIteration:
+            return _last_iteration
+
+
+@implements_iterator
+class LoopContextIterator(object):
+    """The iterator for a loop context."""
+    __slots__ = ('context',)
+
+    def __init__(self, context):
+        self.context = context
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        ctx = self.context
+        ctx.index0 += 1
+        if ctx._after is _last_iteration:
+            raise StopIteration()
+        ctx._before = ctx._current
+        ctx._current = ctx._after
+        ctx._after = ctx._safe_next()
+        return ctx._current, ctx
+
+
+class Macro(object):
+    """Wraps a macro function."""
+
+    def __init__(self, environment, func, name, arguments,
+                 catch_kwargs, catch_varargs, caller,
+                 default_autoescape=None):
+        self._environment = environment
+        self._func = func
+        self._argument_count = len(arguments)
+        self.name = name
+        self.arguments = arguments
+        self.catch_kwargs = catch_kwargs
+        self.catch_varargs = catch_varargs
+        self.caller = caller
+        self.explicit_caller = 'caller' in arguments
+        if default_autoescape is None:
+            default_autoescape = environment.autoescape
+        self._default_autoescape = default_autoescape
+
+    @internalcode
+    @evalcontextfunction
+    def __call__(self, *args, **kwargs):
+        # This requires a bit of explanation.  In the past we used to
+        # decide largely based on compile-time information whether a
+        # macro is safe or unsafe.  While there was a volatile mode, it
+        # was largely unused for deciding on escaping.  This turns out
+        # to be problematic for macros because whether a macro is safe
+        # depends not so much on the escape mode when it was defined as
+        # on the mode when it is used.
+        #
+        # However, because we export macros from the module system and
+        # there are historic callers that do not pass an eval context
+        # (and will continue to not pass one), we need to perform an
+        # instance check here.
+        #
+        # This is considered safe because an eval context is not a valid
+        # argument to callables otherwise anyway.  The worst case here is
+        # that if no eval context is passed we fall back to the
+        # compile-time autoescape flag.
+        if args and isinstance(args[0], EvalContext):
+            autoescape = args[0].autoescape
+            args = args[1:]
+        else:
+            autoescape = self._default_autoescape
+
+        # try to consume the positional arguments
+        arguments = list(args[:self._argument_count])
+        off = len(arguments)
+
+        # For information why this is necessary refer to the handling
+        # of caller in the `macro_body` handler in the compiler.
+        found_caller = False
+
+        # if the number of arguments consumed is not the number of
+        # arguments expected we start filling in keyword arguments
+        # and defaults.
+        if off != self._argument_count:
+            for idx, name in enumerate(self.arguments[len(arguments):]):
+                try:
+                    value = kwargs.pop(name)
+                except KeyError:
+                    value = missing
+                if name == 'caller':
+                    found_caller = True
+                arguments.append(value)
+        else:
+            found_caller = self.explicit_caller
+
+        # it's important that the order of these arguments does not change
+        # if not also changed in the compiler's `function_scoping` method.
+        # the order is caller, keyword arguments, positional arguments!
+        if self.caller and not found_caller:
+            caller = kwargs.pop('caller', None)
+            if caller is None:
+                caller = self._environment.undefined('No caller defined',
+                                                     name='caller')
+            arguments.append(caller)
+
+        if self.catch_kwargs:
+            arguments.append(kwargs)
+        elif kwargs:
+            if 'caller' in kwargs:
+                raise TypeError('macro %r was invoked with two values for '
+                                'the special caller argument.  This is '
+                                'most likely a bug.' % self.name)
+            raise TypeError('macro %r takes no keyword argument %r' %
+                            (self.name, next(iter(kwargs))))
+        if self.catch_varargs:
+            arguments.append(args[self._argument_count:])
+        elif len(args) > self._argument_count:
+            raise TypeError('macro %r takes not more than %d argument(s)' %
+                            (self.name, len(self.arguments)))
+
+        return self._invoke(arguments, autoescape)
+
+    def _invoke(self, arguments, autoescape):
+        """This method is being swapped out by the async implementation."""
+        rv = self._func(*arguments)
+        if autoescape:
+            rv = Markup(rv)
+        return rv
+
+    def __repr__(self):
+        return '<%s %s>' % (
+            self.__class__.__name__,
+            self.name is None and 'anonymous' or repr(self.name)
+        )
+
+
+@implements_to_string
+class Undefined(object):
+    """The default undefined type.  This undefined type can be printed and
+    iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`:
+
+    >>> foo = Undefined(name='foo')
+    >>> str(foo)
+    ''
+    >>> not foo
+    True
+    >>> foo + 42
+    Traceback (most recent call last):
+      ...
+    jinja2.exceptions.UndefinedError: 'foo' is undefined
+    """
+    __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
+                 '_undefined_exception')
+
+    def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
+        self._undefined_hint = hint
+        self._undefined_obj = obj
+        self._undefined_name = name
+        self._undefined_exception = exc
+
+    @internalcode
+    def _fail_with_undefined_error(self, *args, **kwargs):
+        """Regular callback function for undefined objects that raises an
+        `jinja2.exceptions.UndefinedError` on call.
+        """
+        if self._undefined_hint is None:
+            if self._undefined_obj is missing:
+                hint = '%r is undefined' % self._undefined_name
+            elif not isinstance(self._undefined_name, string_types):
+                hint = '%s has no element %r' % (
+                    object_type_repr(self._undefined_obj),
+                    self._undefined_name
+                )
+            else:
+                hint = '%r has no attribute %r' % (
+                    object_type_repr(self._undefined_obj),
+                    self._undefined_name
+                )
+        else:
+            hint = self._undefined_hint
+        raise self._undefined_exception(hint)
+
+    @internalcode
+    def __getattr__(self, name):
+        if name[:2] == '__':
+            raise AttributeError(name)
+        return self._fail_with_undefined_error()
+
+    __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
+        __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
+        __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
+        __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
+        __float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \
+        __rsub__ = _fail_with_undefined_error
+
+    def __eq__(self, other):
+        return type(self) is type(other)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __hash__(self):
+        return id(type(self))
+
+    def __str__(self):
+        return u''
+
+    def __len__(self):
+        return 0
+
+    def __iter__(self):
+        if 0:
+            yield None
+
+    def __nonzero__(self):
+        return False
+    __bool__ = __nonzero__
+
+    def __repr__(self):
+        return 'Undefined'
+
+
+def make_logging_undefined(logger=None, base=None):
+    """Given a logger object this returns a new undefined class that will
+    log certain failures.  It will log iterations and printing.  If no
+    logger is given a default logger is created.
+
+    Example::
+
+        logger = logging.getLogger(__name__)
+        LoggingUndefined = make_logging_undefined(
+            logger=logger,
+            base=Undefined
+        )
+
+    .. versionadded:: 2.8
+
+    :param logger: the logger to use.  If not provided, a default logger
+                   is created.
+    :param base: the base class to add logging functionality to.  This
+                 defaults to :class:`Undefined`.
+    """
+    if logger is None:
+        import logging
+        logger = logging.getLogger(__name__)
+        logger.addHandler(logging.StreamHandler(sys.stderr))
+    if base is None:
+        base = Undefined
+
+    def _log_message(undef):
+        if undef._undefined_hint is None:
+            if undef._undefined_obj is missing:
+                hint = '%s is undefined' % undef._undefined_name
+            elif not isinstance(undef._undefined_name, string_types):
+                hint = '%s has no element %s' % (
+                    object_type_repr(undef._undefined_obj),
+                    undef._undefined_name)
+            else:
+                hint = '%s has no attribute %s' % (
+                    object_type_repr(undef._undefined_obj),
+                    undef._undefined_name)
+        else:
+            hint = undef._undefined_hint
+        logger.warning('Template variable warning: %s', hint)
+
+    class LoggingUndefined(base):
+
+        def _fail_with_undefined_error(self, *args, **kwargs):
+            try:
+                return base._fail_with_undefined_error(self, *args, **kwargs)
+            except self._undefined_exception as e:
+                logger.error('Template variable error: %s', str(e))
+                raise e
+
+        def __str__(self):
+            rv = base.__str__(self)
+            _log_message(self)
+            return rv
+
+        def __iter__(self):
+            rv = base.__iter__(self)
+            _log_message(self)
+            return rv
+
+        if PY2:
+            def __nonzero__(self):
+                rv = base.__nonzero__(self)
+                _log_message(self)
+                return rv
+
+            def __unicode__(self):
+                rv = base.__unicode__(self)
+                _log_message(self)
+                return rv
+        else:
+            def __bool__(self):
+                rv = base.__bool__(self)
+                _log_message(self)
+                return rv
+
+    return LoggingUndefined
+
+
+@implements_to_string
+class DebugUndefined(Undefined):
+    """An undefined that returns the debug info when printed.
+
+    >>> foo = DebugUndefined(name='foo')
+    >>> str(foo)
+    '{{ foo }}'
+    >>> not foo
+    True
+    >>> foo + 42
+    Traceback (most recent call last):
+      ...
+    jinja2.exceptions.UndefinedError: 'foo' is undefined
+    """
+    __slots__ = ()
+
+    def __str__(self):
+        if self._undefined_hint is None:
+            if self._undefined_obj is missing:
+                return u'{{ %s }}' % self._undefined_name
+            return '{{ no such element: %s[%r] }}' % (
+                object_type_repr(self._undefined_obj),
+                self._undefined_name
+            )
+        return u'{{ undefined value printed: %s }}' % self._undefined_hint
+
+
+@implements_to_string
+class StrictUndefined(Undefined):
+    """An undefined that barks on print and iteration as well as boolean
+    tests and all kinds of comparisons.  In other words: you can do nothing
+    with it except checking if it's defined using the `defined` test.
+
+    >>> foo = StrictUndefined(name='foo')
+    >>> str(foo)
+    Traceback (most recent call last):
+      ...
+    jinja2.exceptions.UndefinedError: 'foo' is undefined
+    >>> not foo
+    Traceback (most recent call last):
+      ...
+    jinja2.exceptions.UndefinedError: 'foo' is undefined
+    >>> foo + 42
+    Traceback (most recent call last):
+      ...
+    jinja2.exceptions.UndefinedError: 'foo' is undefined
+    """
+    __slots__ = ()
+    __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
+        __ne__ = __bool__ = __hash__ = \
+        Undefined._fail_with_undefined_error
+
+
+# remove remaining slots attributes, after the metaclass did the magic they
+# are unneeded and irritating as they contain wrong data for the subclasses.
+del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
diff --git a/lib/python3.7/site-packages/jinja2/sandbox.py b/lib/python3.7/site-packages/jinja2/sandbox.py
new file mode 100644
index 0000000000000000000000000000000000000000..752e81289fc6fe77d7655fca40179f0697583d2c
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/sandbox.py
@@ -0,0 +1,486 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.sandbox
+    ~~~~~~~~~~~~~~
+
+    Adds a sandbox layer to Jinja as it was the default behavior in the old
+    Jinja 1 releases.  This sandbox is slightly different from Jinja 1 as the
+    default behavior is easier to use.
+
+    The behavior can be changed by subclassing the environment.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD.
+"""
+import types
+import operator
+from collections import Mapping
+from jinja2.environment import Environment
+from jinja2.exceptions import SecurityError
+from jinja2._compat import string_types, PY2
+from jinja2.utils import Markup
+
+from markupsafe import EscapeFormatter
+from string import Formatter
+
+
+#: maximum number of items a range may produce
+MAX_RANGE = 100000
+
+#: attributes of function objects that are considered unsafe.
+if PY2:
+    UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
+                                      'func_defaults', 'func_globals'])
+else:
+    # On Python 3 the special attributes on functions are gone,
+    # but they remain on methods and generators for whatever reason.
+    UNSAFE_FUNCTION_ATTRIBUTES = set()
+
+
+#: unsafe method attributes.  Function attributes are unsafe for methods too
+UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
+
+#: unsafe generator attributes.
+UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
+
+#: unsafe attributes on coroutines
+UNSAFE_COROUTINE_ATTRIBUTES = set(['cr_frame', 'cr_code'])
+
+#: unsafe attributes on async generators
+UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = set(['ag_code', 'ag_frame'])
+
+import warnings
+
+# make sure we don't warn in python 2.6 about stuff we don't care about
+warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
+                        module='jinja2.sandbox')
+
+from collections import deque
+
+_mutable_set_types = (set,)
+_mutable_mapping_types = (dict,)
+_mutable_sequence_types = (list,)
+
+
+# on python 2.x we can register the user collection types
+try:
+    from UserDict import UserDict, DictMixin
+    from UserList import UserList
+    _mutable_mapping_types += (UserDict, DictMixin)
+    _mutable_sequence_types += (UserList,)
+except ImportError:
+    pass
+
+# if sets is still available, register the mutable set from there as well
+try:
+    from sets import Set
+    _mutable_set_types += (Set,)
+except ImportError:
+    pass
+
+#: register Python 2.6 abstract base classes
+from collections import MutableSet, MutableMapping, MutableSequence
+_mutable_set_types += (MutableSet,)
+_mutable_mapping_types += (MutableMapping,)
+_mutable_sequence_types += (MutableSequence,)
+
+
+_mutable_spec = (
+    (_mutable_set_types, frozenset([
+        'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
+        'symmetric_difference_update', 'update'
+    ])),
+    (_mutable_mapping_types, frozenset([
+        'clear', 'pop', 'popitem', 'setdefault', 'update'
+    ])),
+    (_mutable_sequence_types, frozenset([
+        'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
+    ])),
+    (deque, frozenset([
+        'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
+        'popleft', 'remove', 'rotate'
+    ]))
+)
+
+
+class _MagicFormatMapping(Mapping):
+    """This class implements a dummy wrapper to fix a bug in the Python
+    standard library for string formatting.
+
+    See https://bugs.python.org/issue13598 for information about why
+    this is necessary.
+    """
+
+    def __init__(self, args, kwargs):
+        self._args = args
+        self._kwargs = kwargs
+        self._last_index = 0
+
+    def __getitem__(self, key):
+        if key == '':
+            idx = self._last_index
+            self._last_index += 1
+            try:
+                return self._args[idx]
+            except LookupError:
+                pass
+            key = str(idx)
+        return self._kwargs[key]
+
+    def __iter__(self):
+        return iter(self._kwargs)
+
+    def __len__(self):
+        return len(self._kwargs)
+
+
+def inspect_format_method(callable):
+    if not isinstance(callable, (types.MethodType,
+                                 types.BuiltinMethodType)) or \
+       callable.__name__ not in ('format', 'format_map'):
+        return None
+    obj = callable.__self__
+    if isinstance(obj, string_types):
+        return obj
+
+
+def safe_range(*args):
+    """A range that can't generate ranges with a length of more than
+    MAX_RANGE items.
+    """
+    rng = range(*args)
+    if len(rng) > MAX_RANGE:
+        raise OverflowError('range too big, maximum size for range is %d' %
+                            MAX_RANGE)
+    return rng
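+
+
+# For illustration (Python 3 repr shown): the sandbox binds the
+# template-global ``range`` to safe_range, so oversized ranges fail
+# fast instead of exhausting memory.
+#
+# >>> safe_range(3)
+# range(0, 3)
+# >>> safe_range(10 ** 9)
+# Traceback (most recent call last):
+#   ...
+# OverflowError: range too big, maximum size for range is 100000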
+
+
+def unsafe(f):
+    """Marks a function or method as unsafe.
+
+    ::
+
+        @unsafe
+        def delete(self):
+            pass
+    """
+    f.unsafe_callable = True
+    return f
+
+
+def is_internal_attribute(obj, attr):
+    """Test if the attribute given is an internal python attribute.  For
+    example this function returns `True` for the `func_code` attribute of
+    python objects.  This is useful if the environment method
+    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
+
+    >>> from jinja2.sandbox import is_internal_attribute
+    >>> is_internal_attribute(str, "mro")
+    True
+    >>> is_internal_attribute(str, "upper")
+    False
+    """
+    if isinstance(obj, types.FunctionType):
+        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
+            return True
+    elif isinstance(obj, types.MethodType):
+        if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
+           attr in UNSAFE_METHOD_ATTRIBUTES:
+            return True
+    elif isinstance(obj, type):
+        if attr == 'mro':
+            return True
+    elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
+        return True
+    elif isinstance(obj, types.GeneratorType):
+        if attr in UNSAFE_GENERATOR_ATTRIBUTES:
+            return True
+    elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType):
+        if attr in UNSAFE_COROUTINE_ATTRIBUTES:
+            return True
+    elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType):
+        if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
+            return True
+    return attr.startswith('__')
+
+
+def modifies_known_mutable(obj, attr):
+    """This function checks if an attribute on a builtin mutable object
+    (list, dict, set or deque) would modify it if called.  It also supports
+    the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
+    with Python 2.6 onwards the abstract base classes `MutableSet`,
+    `MutableMapping`, and `MutableSequence`.
+
+    >>> modifies_known_mutable({}, "clear")
+    True
+    >>> modifies_known_mutable({}, "keys")
+    False
+    >>> modifies_known_mutable([], "append")
+    True
+    >>> modifies_known_mutable([], "index")
+    False
+
+    If called with an unsupported object (such as unicode) `False` is
+    returned.
+
+    >>> modifies_known_mutable("foo", "upper")
+    False
+    """
+    for typespec, unsafe in _mutable_spec:
+        if isinstance(obj, typespec):
+            return attr in unsafe
+    return False
+
+
+class SandboxedEnvironment(Environment):
+    """The sandboxed environment.  It works like the regular environment but
+    tells the compiler to generate sandboxed code.  Additionally subclasses of
+    this environment may override the methods that tell the runtime what
+    attributes or functions are safe to access.
+
+    If the template tries to access insecure code a :exc:`SecurityError` is
+    raised.  However, other exceptions may also occur during rendering, so
+    the caller has to ensure that all exceptions are caught.
+    """
+    sandboxed = True
+
+    #: default callback table for the binary operators.  A copy of this is
+    #: available on each instance of a sandboxed environment as
+    #: :attr:`binop_table`
+    default_binop_table = {
+        '+':        operator.add,
+        '-':        operator.sub,
+        '*':        operator.mul,
+        '/':        operator.truediv,
+        '//':       operator.floordiv,
+        '**':       operator.pow,
+        '%':        operator.mod
+    }
+
+    #: default callback table for the unary operators.  A copy of this is
+    #: available on each instance of a sandboxed environment as
+    #: :attr:`unop_table`
+    default_unop_table = {
+        '+':        operator.pos,
+        '-':        operator.neg
+    }
+
+    #: a set of binary operators that should be intercepted.  Each operator
+    #: that is added to this set (empty by default) is delegated to the
+    #: :meth:`call_binop` method that will perform the operator.  The default
+    #: operator callback is specified by :attr:`binop_table`.
+    #:
+    #: The following binary operators are interceptable:
+    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
+    #:
+    #: The default operation from the operator table corresponds to the
+    #: builtin function.  Intercepted calls are always slower than the native
+    #: operator call, so make sure only to intercept the ones you are
+    #: interested in.
+    #:
+    #: .. versionadded:: 2.6
+    intercepted_binops = frozenset()
+
+    #: a set of unary operators that should be intercepted.  Each operator
+    #: that is added to this set (empty by default) is delegated to the
+    #: :meth:`call_unop` method that will perform the operator.  The default
+    #: operator callback is specified by :attr:`unop_table`.
+    #:
+    #: The following unary operators are interceptable: ``+``, ``-``
+    #:
+    #: The default operation from the operator table corresponds to the
+    #: builtin function.  Intercepted calls are always slower than the native
+    #: operator call, so make sure only to intercept the ones you are
+    #: interested in.
+    #:
+    #: .. versionadded:: 2.6
+    intercepted_unops = frozenset()
+
+    def intercept_unop(self, operator):
+        """Called during template compilation with the name of a unary
+        operator to check if it should be intercepted at runtime.  If this
+        method returns `True`, :meth:`call_unop` is executed for this unary
+        operator.  The default implementation of :meth:`call_unop` will use
+        the :attr:`unop_table` dictionary to perform the operator with the
+        same logic as the builtin one.
+
+        The following unary operators are interceptable: ``+`` and ``-``
+
+        Intercepted calls are always slower than the native operator call,
+        so make sure only to intercept the ones you are interested in.
+
+        .. versionadded:: 2.6
+        """
+        return False
+
+    def __init__(self, *args, **kwargs):
+        Environment.__init__(self, *args, **kwargs)
+        self.globals['range'] = safe_range
+        self.binop_table = self.default_binop_table.copy()
+        self.unop_table = self.default_unop_table.copy()
+
+    def is_safe_attribute(self, obj, attr, value):
+        """The sandboxed environment will call this method to check if the
+        attribute of an object is safe to access.  Per default all attributes
+        starting with an underscore are considered private as well as the
+        special attributes of internal python objects as returned by the
+        :func:`is_internal_attribute` function.
+        """
+        return not (attr.startswith('_') or is_internal_attribute(obj, attr))
+
+    def is_safe_callable(self, obj):
+        """Check if an object is safely callable.  Per default a function is
+        considered safe unless the `unsafe_callable` attribute exists and is
+        True.  Override this method to alter the behavior, but this won't
+        affect the `unsafe` decorator from this module.
+        """
+        return not (getattr(obj, 'unsafe_callable', False) or
+                    getattr(obj, 'alters_data', False))
+
+    def call_binop(self, context, operator, left, right):
+        """For intercepted binary operator calls (:meth:`intercepted_binops`)
+        this function is executed instead of the builtin operator.  This can
+        be used to fine tune the behavior of certain operators.
+
+        .. versionadded:: 2.6
+        """
+        return self.binop_table[operator](left, right)
+
+    def call_unop(self, context, operator, arg):
+        """For intercepted unary operator calls (:meth:`intercepted_unops`)
+        this function is executed instead of the builtin operator.  This can
+        be used to fine tune the behavior of certain operators.
+
+        .. versionadded:: 2.6
+        """
+        return self.unop_table[operator](arg)
+
+    def getitem(self, obj, argument):
+        """Subscribe an object from sandboxed code."""
+        try:
+            return obj[argument]
+        except (TypeError, LookupError):
+            if isinstance(argument, string_types):
+                try:
+                    attr = str(argument)
+                except Exception:
+                    pass
+                else:
+                    try:
+                        value = getattr(obj, attr)
+                    except AttributeError:
+                        pass
+                    else:
+                        if self.is_safe_attribute(obj, argument, value):
+                            return value
+                        return self.unsafe_undefined(obj, argument)
+        return self.undefined(obj=obj, name=argument)
+
+    def getattr(self, obj, attribute):
+        """Subscribe an object from sandboxed code and prefer the
+        attribute.  The attribute passed *must* be a bytestring.
+        """
+        try:
+            value = getattr(obj, attribute)
+        except AttributeError:
+            try:
+                return obj[attribute]
+            except (TypeError, LookupError):
+                pass
+        else:
+            if self.is_safe_attribute(obj, attribute, value):
+                return value
+            return self.unsafe_undefined(obj, attribute)
+        return self.undefined(obj=obj, name=attribute)
+
+    def unsafe_undefined(self, obj, attribute):
+        """Return an undefined object for unsafe attributes."""
+        return self.undefined('access to attribute %r of %r '
+                              'object is unsafe.' % (
+            attribute,
+            obj.__class__.__name__
+        ), name=attribute, obj=obj, exc=SecurityError)
+
+    def format_string(self, s, args, kwargs, format_func=None):
+        """If a format call is detected, then this is routed through this
+        method so that our safety sandbox can be used for it.
+        """
+        if isinstance(s, Markup):
+            formatter = SandboxedEscapeFormatter(self, s.escape)
+        else:
+            formatter = SandboxedFormatter(self)
+
+        if format_func is not None and format_func.__name__ == 'format_map':
+            if len(args) != 1 or kwargs:
+                raise TypeError(
+                    'format_map() takes exactly one argument (%d given)'
+                    % (len(args) + bool(kwargs))
+                )
+
+            kwargs = args[0]
+            args = None
+
+        kwargs = _MagicFormatMapping(args, kwargs)
+        rv = formatter.vformat(s, args, kwargs)
+        return type(s)(rv)
+
+    def call(__self, __context, __obj, *args, **kwargs):
+        """Call an object from sandboxed code."""
+        fmt = inspect_format_method(__obj)
+        if fmt is not None:
+            return __self.format_string(fmt, args, kwargs, __obj)
+
+        # the double prefixes are to avoid double keyword argument
+        # errors when proxying the call.
+        if not __self.is_safe_callable(__obj):
+            raise SecurityError('%r is not safely callable' % (__obj,))
+        return __context.call(__obj, *args, **kwargs)
+
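+# A minimal subclassing sketch (illustrative, not part of the upstream
+# module): intercept ``**`` and cap the exponent.  :attr:`intercepted_binops`
+# and :meth:`call_binop` are the hooks documented above.
+#
+#   class PowCappedEnvironment(SandboxedEnvironment):
+#       intercepted_binops = frozenset(['**'])
+#
+#       def call_binop(self, context, operator, left, right):
+#           if operator == '**' and right > 100:
+#               raise SecurityError('exponent too large')
+#           return SandboxedEnvironment.call_binop(
+#               self, context, operator, left, right)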
+
+class ImmutableSandboxedEnvironment(SandboxedEnvironment):
+    """Works exactly like the regular `SandboxedEnvironment` but does not
+    permit modifications on the builtin mutable objects `list`, `set`, and
+    `dict` by using the :func:`modifies_known_mutable` function.
+    """
+
+    def is_safe_attribute(self, obj, attr, value):
+        if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
+            return False
+        return not modifies_known_mutable(obj, attr)
+
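+# Usage sketch (illustrative): mutating a list from a template raises
+# SecurityError under this environment (the message below is approximate).
+#
+#   >>> env = ImmutableSandboxedEnvironment()
+#   >>> env.from_string('{{ x.append(42) }}').render(x=[1, 2])
+#   Traceback (most recent call last):
+#       ...
+#   SecurityError: access to attribute 'append' of 'list' object is unsafe.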
+
+# This really is not a public API, apparently.
+try:
+    from _string import formatter_field_name_split
+except ImportError:
+    def formatter_field_name_split(field_name):
+        return field_name._formatter_field_name_split()
+
+
+class SandboxedFormatterMixin(object):
+
+    def __init__(self, env):
+        self._env = env
+
+    def get_field(self, field_name, args, kwargs):
+        first, rest = formatter_field_name_split(field_name)
+        obj = self.get_value(first, args, kwargs)
+        for is_attr, i in rest:
+            if is_attr:
+                obj = self._env.getattr(obj, i)
+            else:
+                obj = self._env.getitem(obj, i)
+        return obj, first
+
+class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
+
+    def __init__(self, env):
+        SandboxedFormatterMixin.__init__(self, env)
+        Formatter.__init__(self)
+
+class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
+
+    def __init__(self, env, escape):
+        SandboxedFormatterMixin.__init__(self, env)
+        EscapeFormatter.__init__(self, escape)
diff --git a/lib/python3.7/site-packages/jinja2/tests.py b/lib/python3.7/site-packages/jinja2/tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..0adc3d4dbcbb881910bfd90214534449fafddcb3
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/tests.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.tests
+    ~~~~~~~~~~~~
+
+    Jinja test functions. Used with the "is" operator.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+import operator
+import re
+try:
+    from collections.abc import Mapping
+except ImportError:  # Python 2
+    from collections import Mapping
+from jinja2.runtime import Undefined
+from jinja2._compat import text_type, string_types, integer_types
+import decimal
+
+number_re = re.compile(r'^-?\d+(\.\d+)?$')
+regex_type = type(number_re)
+
+
+test_callable = callable
+
+
+def test_odd(value):
+    """Return true if the variable is odd."""
+    return value % 2 == 1
+
+
+def test_even(value):
+    """Return true if the variable is even."""
+    return value % 2 == 0
+
+
+def test_divisibleby(value, num):
+    """Check if a variable is divisible by a number."""
+    return value % num == 0
+
+
+def test_defined(value):
+    """Return true if the variable is defined:
+
+    .. sourcecode:: jinja
+
+        {% if variable is defined %}
+            value of variable: {{ variable }}
+        {% else %}
+            variable is not defined
+        {% endif %}
+
+    See the :func:`default` filter for a simple way to set undefined
+    variables.
+    """
+    return not isinstance(value, Undefined)
+
+
+def test_undefined(value):
+    """Like :func:`defined` but the other way round."""
+    return isinstance(value, Undefined)
+
+
+def test_none(value):
+    """Return true if the variable is none."""
+    return value is None
+
+
+def test_lower(value):
+    """Return true if the variable is lowercased."""
+    return text_type(value).islower()
+
+
+def test_upper(value):
+    """Return true if the variable is uppercased."""
+    return text_type(value).isupper()
+
+
+def test_string(value):
+    """Return true if the object is a string."""
+    return isinstance(value, string_types)
+
+
+def test_mapping(value):
+    """Return true if the object is a mapping (dict etc.).
+
+    .. versionadded:: 2.6
+    """
+    return isinstance(value, Mapping)
+
+
+def test_number(value):
+    """Return true if the variable is a number."""
+    return isinstance(value, integer_types + (float, complex, decimal.Decimal))
+
+
+def test_sequence(value):
+    """Return true if the variable is a sequence. Sequences are variables
+    that are iterable.
+    """
+    try:
+        len(value)
+        value.__getitem__
+    except (TypeError, AttributeError):
+        return False
+    return True
+
+
+def test_sameas(value, other):
+    """Check if an object points to the same memory address than another
+    object:
+
+    .. sourcecode:: jinja
+
+        {% if foo.attribute is sameas false %}
+            the foo attribute really is the `False` singleton
+        {% endif %}
+    """
+    return value is other
+
+
+def test_iterable(value):
+    """Check if it's possible to iterate over an object."""
+    try:
+        iter(value)
+    except TypeError:
+        return False
+    return True
+
+
+def test_escaped(value):
+    """Check if the value is escaped."""
+    return hasattr(value, '__html__')
+
+
+def test_in(value, seq):
+    """Check if value is in seq.
+
+    .. versionadded:: 2.10
+    """
+    return value in seq
+
+
+TESTS = {
+    'odd':              test_odd,
+    'even':             test_even,
+    'divisibleby':      test_divisibleby,
+    'defined':          test_defined,
+    'undefined':        test_undefined,
+    'none':             test_none,
+    'lower':            test_lower,
+    'upper':            test_upper,
+    'string':           test_string,
+    'mapping':          test_mapping,
+    'number':           test_number,
+    'sequence':         test_sequence,
+    'iterable':         test_iterable,
+    'callable':         test_callable,
+    'sameas':           test_sameas,
+    'escaped':          test_escaped,
+    'in':               test_in,
+    '==':               operator.eq,
+    'eq':               operator.eq,
+    'equalto':          operator.eq,
+    '!=':               operator.ne,
+    'ne':               operator.ne,
+    '>':                operator.gt,
+    'gt':               operator.gt,
+    'greaterthan':      operator.gt,
+    'ge':               operator.ge,
+    '>=':               operator.ge,
+    '<':                operator.lt,
+    'lt':               operator.lt,
+    'lessthan':         operator.lt,
+    '<=':               operator.le,
+    'le':               operator.le,
+}
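+
+
+# A short usage sketch (illustrative, not part of the module): these
+# functions back the ``is`` operator in templates.
+#
+#   >>> from jinja2 import Environment
+#   >>> Environment().from_string('{{ 3 is odd }} {{ n is defined }}').render()
+#   'True False'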
diff --git a/lib/python3.7/site-packages/jinja2/utils.py b/lib/python3.7/site-packages/jinja2/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..502a311c08e549ac395a02650543e424a6d3e11c
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/utils.py
@@ -0,0 +1,647 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.utils
+    ~~~~~~~~~~~~
+
+    Utility functions.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+import json
+import errno
+from collections import deque
+from threading import Lock
+from jinja2._compat import text_type, string_types, implements_iterator, \
+     url_quote
+
+
+_word_split_re = re.compile(r'(\s+)')
+_punctuation_re = re.compile(
+    '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
+        '|'.join(map(re.escape, ('(', '<', '&lt;'))),
+        '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '&gt;')))
+    )
+)
+_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
+_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
+_entity_re = re.compile(r'&([^;]+);')
+_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
+_digits = '0123456789'
+
+# special singleton representing missing values for the runtime
+missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
+
+# internal code
+internal_code = set()
+
+concat = u''.join
+
+_slash_escape = '\\/' not in json.dumps('/')
+
+
+def contextfunction(f):
+    """This decorator can be used to mark a function or method context callable.
+    A context callable is passed the active :class:`Context` as first argument when
+    called from the template.  This is useful if a function wants to get access
+    to the context or functions provided on the context object.  For example
+    a function that returns a sorted list of template variables the current
+    template exports could look like this::
+
+        @contextfunction
+        def get_exported_names(context):
+            return sorted(context.exported_vars)
+    """
+    f.contextfunction = True
+    return f
+
+
+def evalcontextfunction(f):
+    """This decorator can be used to mark a function or method as an eval
+    context callable.  This is similar to the :func:`contextfunction`
+    but instead of passing the context, an evaluation context object is
+    passed.  For more information about the eval context, see
+    :ref:`eval-context`.
+
+    .. versionadded:: 2.4
+    """
+    f.evalcontextfunction = True
+    return f
+
+
+def environmentfunction(f):
+    """This decorator can be used to mark a function or method as environment
+    callable.  This decorator works exactly like the :func:`contextfunction`
+    decorator just that the first argument is the active :class:`Environment`
+    and not context.
+    """
+    f.environmentfunction = True
+    return f
+
+
+def internalcode(f):
+    """Marks the function as internally used"""
+    internal_code.add(f.__code__)
+    return f
+
+
+def is_undefined(obj):
+    """Check if the object passed is undefined.  This does nothing more than
+    performing an instance check against :class:`Undefined` but looks nicer.
+    This can be used for custom filters or tests that want to react to
+    undefined variables.  For example a custom default filter can look like
+    this::
+
+        def default(var, default=''):
+            if is_undefined(var):
+                return default
+            return var
+    """
+    from jinja2.runtime import Undefined
+    return isinstance(obj, Undefined)
+
+
+def consume(iterable):
+    """Consumes an iterable without doing anything with it."""
+    for event in iterable:
+        pass
+
+
+def clear_caches():
+    """Jinja2 keeps internal caches for environments and lexers.  These are
+    used so that Jinja2 doesn't have to recreate environments and lexers all
+    the time.  Normally you don't have to care about that but if you are
+    measuring memory consumption you may want to clean the caches.
+    """
+    from jinja2.environment import _spontaneous_environments
+    from jinja2.lexer import _lexer_cache
+    _spontaneous_environments.clear()
+    _lexer_cache.clear()
+
+
+def import_string(import_name, silent=False):
+    """Imports an object based on a string.  This is useful if you want to
+    use import paths as endpoints or something similar.  An import path can
+    be specified either in dotted notation (``xml.sax.saxutils.escape``)
+    or with a colon as object delimiter (``xml.sax.saxutils:escape``).
+
+    If `silent` is True the return value will be `None` if the import
+    fails.
+
+    :return: imported object
+    """
+    try:
+        if ':' in import_name:
+            module, obj = import_name.split(':', 1)
+        elif '.' in import_name:
+            items = import_name.split('.')
+            module = '.'.join(items[:-1])
+            obj = items[-1]
+        else:
+            return __import__(import_name)
+        return getattr(__import__(module, None, None, [obj]), obj)
+    except (ImportError, AttributeError):
+        if not silent:
+            raise
+
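+# Usage sketch (illustrative):
+#
+#   >>> import_string('xml.sax.saxutils:escape')    # colon notation
+#   <function escape at 0x...>
+#   >>> import_string('not.a.module', silent=True) is None
+#   True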
+
+def open_if_exists(filename, mode='rb'):
+    """Returns a file descriptor for the filename if that file exists,
+    otherwise `None`.
+    """
+    try:
+        return open(filename, mode)
+    except IOError as e:
+        if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
+            raise
+
+
+def object_type_repr(obj):
+    """Returns the name of the object's type.  For some recognized
+    singletons the name of the object is returned instead (for
+    example `None` and `Ellipsis`).
+    """
+    if obj is None:
+        return 'None'
+    elif obj is Ellipsis:
+        return 'Ellipsis'
+    # __builtin__ in 2.x, builtins in 3.x
+    if obj.__class__.__module__ in ('__builtin__', 'builtins'):
+        name = obj.__class__.__name__
+    else:
+        name = obj.__class__.__module__ + '.' + obj.__class__.__name__
+    return '%s object' % name
+
+
+def pformat(obj, verbose=False):
+    """Prettyprint an object.  Either use the `pretty` library or the
+    builtin `pprint`.
+    """
+    try:
+        from pretty import pretty
+        return pretty(obj, verbose=verbose)
+    except ImportError:
+        from pprint import pformat
+        return pformat(obj)
+
+
+def urlize(text, trim_url_limit=None, rel=None, target=None):
+    """Converts any URLs in text into clickable links. Works on http://,
+    https:// and www. links. Links can have trailing punctuation (periods,
+    commas, close-parens) and leading punctuation (opening parens) and
+    it'll still do the right thing.
+
+    If trim_url_limit is not None, the URLs in link text will be limited
+    to trim_url_limit characters.
+
+    If rel is not None, the URLs in link text will get a ``rel`` attribute
+    with that value (for example ``"nofollow"``).
+
+    If target is not None, a target attribute will be added to the link.
+    """
+    trim_url = lambda x, limit=trim_url_limit: limit is not None \
+                         and (x[:limit] + (len(x) >= limit and '...'
+                         or '')) or x
+    words = _word_split_re.split(text_type(escape(text)))
+    rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ''
+    target_attr = target and ' target="%s"' % escape(target) or ''
+
+    for i, word in enumerate(words):
+        match = _punctuation_re.match(word)
+        if match:
+            lead, middle, trail = match.groups()
+            if middle.startswith('www.') or (
+                '@' not in middle and
+                not middle.startswith('http://') and
+                not middle.startswith('https://') and
+                len(middle) > 0 and
+                middle[0] in _letters + _digits and (
+                    middle.endswith('.org') or
+                    middle.endswith('.net') or
+                    middle.endswith('.com')
+                )):
+                middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
+                    rel_attr, target_attr, trim_url(middle))
+            if middle.startswith('http://') or \
+               middle.startswith('https://'):
+                middle = '<a href="%s"%s%s>%s</a>' % (middle,
+                    rel_attr, target_attr, trim_url(middle))
+            if '@' in middle and not middle.startswith('www.') and \
+               ':' not in middle and _simple_email_re.match(middle):
+                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
+            if lead + middle + trail != word:
+                words[i] = lead + middle + trail
+    return u''.join(words)
+
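+# Usage sketch (illustrative):
+#
+#   >>> urlize('visit www.example.com today')
+#   'visit <a href="http://www.example.com">www.example.com</a> today'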
+
+def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
+    """Generate some lorem ipsum for the template."""
+    from jinja2.constants import LOREM_IPSUM_WORDS
+    from random import choice, randrange
+    words = LOREM_IPSUM_WORDS.split()
+    result = []
+
+    for _ in range(n):
+        next_capitalized = True
+        last_comma = last_fullstop = 0
+        word = None
+        last = None
+        p = []
+
+        # each paragraph consists of min to max words (20 to 100 by default).
+        for idx, _ in enumerate(range(randrange(min, max))):
+            while True:
+                word = choice(words)
+                if word != last:
+                    last = word
+                    break
+            if next_capitalized:
+                word = word.capitalize()
+                next_capitalized = False
+            # add commas
+            if idx - randrange(3, 8) > last_comma:
+                last_comma = idx
+                last_fullstop += 2
+                word += ','
+            # add end of sentences
+            if idx - randrange(10, 20) > last_fullstop:
+                last_comma = last_fullstop = idx
+                word += '.'
+                next_capitalized = True
+            p.append(word)
+
+        # ensure that the paragraph ends with a dot.
+        p = u' '.join(p)
+        if p.endswith(','):
+            p = p[:-1] + '.'
+        elif not p.endswith('.'):
+            p += '.'
+        result.append(p)
+
+    if not html:
+        return u'\n\n'.join(result)
+    return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
+
+
+def unicode_urlencode(obj, charset='utf-8', for_qs=False):
+    """URL escapes a single bytestring or unicode string with the
+    given charset if applicable to URL safe quoting under all rules
+    that need to be considered under all supported Python versions.
+
+    If non-strings are provided, they are converted to their unicode
+    representation first.
+    """
+    if not isinstance(obj, string_types):
+        obj = text_type(obj)
+    if isinstance(obj, text_type):
+        obj = obj.encode(charset)
+    safe = b'' if for_qs else b'/'
+    rv = text_type(url_quote(obj, safe))
+    if for_qs:
+        rv = rv.replace('%20', '+')
+    return rv
+
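+# Usage sketch (illustrative):
+#
+#   >>> unicode_urlencode('a b/c')
+#   'a%20b/c'
+#   >>> unicode_urlencode('a b/c', for_qs=True)
+#   'a+b%2Fc'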
+
+class LRUCache(object):
+    """A simple LRU Cache implementation."""
+
+    # this is fast for small capacities (something below 1000) but doesn't
+    # scale.  As long as it's only used as storage for templates, this
+    # won't do any harm.
+
+    def __init__(self, capacity):
+        self.capacity = capacity
+        self._mapping = {}
+        self._queue = deque()
+        self._postinit()
+
+    def _postinit(self):
+        # alias all queue methods for faster lookup
+        self._popleft = self._queue.popleft
+        self._pop = self._queue.pop
+        self._remove = self._queue.remove
+        self._wlock = Lock()
+        self._append = self._queue.append
+
+    def __getstate__(self):
+        return {
+            'capacity':     self.capacity,
+            '_mapping':     self._mapping,
+            '_queue':       self._queue
+        }
+
+    def __setstate__(self, d):
+        self.__dict__.update(d)
+        self._postinit()
+
+    def __getnewargs__(self):
+        return (self.capacity,)
+
+    def copy(self):
+        """Return a shallow copy of the instance."""
+        rv = self.__class__(self.capacity)
+        rv._mapping.update(self._mapping)
+        rv._queue = deque(self._queue)
+        return rv
+
+    def get(self, key, default=None):
+        """Return an item from the cache dict or `default`"""
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def setdefault(self, key, default=None):
+        """Set `default` if the key is not in the cache otherwise
+        leave unchanged. Return the value of this key.
+        """
+        self._wlock.acquire()
+        try:
+            try:
+                return self[key]
+            except KeyError:
+                self[key] = default
+                return default
+        finally:
+            self._wlock.release()
+
+    def clear(self):
+        """Clear the cache."""
+        self._wlock.acquire()
+        try:
+            self._mapping.clear()
+            self._queue.clear()
+        finally:
+            self._wlock.release()
+
+    def __contains__(self, key):
+        """Check if a key exists in this cache."""
+        return key in self._mapping
+
+    def __len__(self):
+        """Return the current size of the cache."""
+        return len(self._mapping)
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__,
+            self._mapping
+        )
+
+    def __getitem__(self, key):
+        """Get an item from the cache. Moves the item up so that it has the
+        highest priority then.
+
+        Raise a `KeyError` if it does not exist.
+        """
+        self._wlock.acquire()
+        try:
+            rv = self._mapping[key]
+            if self._queue[-1] != key:
+                try:
+                    self._remove(key)
+                except ValueError:
+                    # if another thread removed the key between our read
+                    # and this remove, ignore the ValueError we would
+                    # get otherwise.
+                    pass
+                self._append(key)
+            return rv
+        finally:
+            self._wlock.release()
+
+    def __setitem__(self, key, value):
+        """Sets the value for an item. Moves the item up so that it
+        has the highest priority then.
+        """
+        self._wlock.acquire()
+        try:
+            if key in self._mapping:
+                self._remove(key)
+            elif len(self._mapping) == self.capacity:
+                del self._mapping[self._popleft()]
+            self._append(key)
+            self._mapping[key] = value
+        finally:
+            self._wlock.release()
+
+    def __delitem__(self, key):
+        """Remove an item from the cache dict.
+        Raise a `KeyError` if it does not exist.
+        """
+        self._wlock.acquire()
+        try:
+            del self._mapping[key]
+            try:
+                self._remove(key)
+            except ValueError:
+                # __getitem__ is not locked, it might happen
+                pass
+        finally:
+            self._wlock.release()
+
+    def items(self):
+        """Return a list of items."""
+        result = [(key, self._mapping[key]) for key in list(self._queue)]
+        result.reverse()
+        return result
+
+    def iteritems(self):
+        """Iterate over all items."""
+        return iter(self.items())
+
+    def values(self):
+        """Return a list of all values."""
+        return [x[1] for x in self.items()]
+
+    def itervalues(self):
+        """Iterate over all values."""
+        return iter(self.values())
+
+    def keys(self):
+        """Return a list of all keys ordered by most recent usage."""
+        return list(self)
+
+    def iterkeys(self):
+        """Iterate over all keys in the cache dict, ordered by
+        the most recent usage.
+        """
+        return reversed(tuple(self._queue))
+
+    __iter__ = iterkeys
+
+    def __reversed__(self):
+        """Iterate over the values in the cache dict, oldest items
+        coming first.
+        """
+        return iter(tuple(self._queue))
+
+    __copy__ = copy
+
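+# Usage sketch (illustrative): the least recently used key is evicted once
+# the capacity is reached.
+#
+#   >>> cache = LRUCache(2)
+#   >>> cache['a'] = 1
+#   >>> cache['b'] = 2
+#   >>> _ = cache['a']     # touch 'a'; 'b' is now the oldest entry
+#   >>> cache['c'] = 3     # evicts 'b'
+#   >>> 'b' in cache
+#   False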
+
+# register the LRU cache as mutable mapping if possible
+try:
+    from collections.abc import MutableMapping
+except ImportError:  # Python 2
+    from collections import MutableMapping
+MutableMapping.register(LRUCache)
+
+
+def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),
+                      disabled_extensions=(),
+                      default_for_string=True,
+                      default=False):
+    """Intelligently sets the initial value of autoescaping based on the
+    filename of the template.  This is the recommended way to configure
+    autoescaping if you do not want to write a custom function yourself.
+
+    If you want to enable it for all templates created from strings or
+    for all templates with `.html` and `.xml` extensions::
+
+        from jinja2 import Environment, select_autoescape
+        env = Environment(autoescape=select_autoescape(
+            enabled_extensions=('html', 'xml'),
+            default_for_string=True,
+        ))
+
+    Example configuration to turn it on at all times except if the template
+    ends with `.txt`::
+
+        from jinja2 import Environment, select_autoescape
+        env = Environment(autoescape=select_autoescape(
+            disabled_extensions=('txt',),
+            default_for_string=True,
+            default=True,
+        ))
+
+    The `enabled_extensions` is an iterable of all the extensions that
+    autoescaping should be enabled for.  Likewise `disabled_extensions` is
+    a list of all templates it should be disabled for.  If a template is
+    loaded from a string then the default from `default_for_string` is used.
+    If nothing matches then the initial value of autoescaping is set to the
+    value of `default`.
+
+    For security reasons this function operates case-insensitively.
+
+    .. versionadded:: 2.9
+    """
+    enabled_patterns = tuple('.' + x.lstrip('.').lower()
+                             for x in enabled_extensions)
+    disabled_patterns = tuple('.' + x.lstrip('.').lower()
+                              for x in disabled_extensions)
+    def autoescape(template_name):
+        if template_name is None:
+            return default_for_string
+        template_name = template_name.lower()
+        if template_name.endswith(enabled_patterns):
+            return True
+        if template_name.endswith(disabled_patterns):
+            return False
+        return default
+    return autoescape
+
+
+def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
+    """Works exactly like :func:`dumps` but is safe for use in ``<script>``
+    tags.  It accepts the same arguments and returns a JSON string.  Note that
+    this is available in templates through the ``|tojson`` filter which will
+    also mark the result as safe.  Due to how this function escapes certain
+    characters this is safe even if used outside of ``<script>`` tags.
+
+    The following characters are escaped in strings:
+
+    -   ``<``
+    -   ``>``
+    -   ``&``
+    -   ``'``
+
+    This makes it safe to embed such strings in any place in HTML with the
+    notable exception of double quoted attributes.  In that case single
+    quote your attributes or HTML escape it in addition.
+    """
+    if dumper is None:
+        dumper = json.dumps
+    rv = dumper(obj, **kwargs) \
+        .replace(u'<', u'\\u003c') \
+        .replace(u'>', u'\\u003e') \
+        .replace(u'&', u'\\u0026') \
+        .replace(u"'", u'\\u0027')
+    return Markup(rv)
+
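+# Usage sketch (illustrative): note how ``<`` and ``>`` are escaped so the
+# result cannot close a surrounding ``<script>`` tag.
+#
+#   >>> htmlsafe_json_dumps('</script>')
+#   Markup('"\\u003c/script\\u003e"')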
+
+@implements_iterator
+class Cycler(object):
+    """A cycle helper for templates."""
+
+    def __init__(self, *items):
+        if not items:
+            raise RuntimeError('at least one item has to be provided')
+        self.items = items
+        self.reset()
+
+    def reset(self):
+        """Resets the cycle."""
+        self.pos = 0
+
+    @property
+    def current(self):
+        """Returns the current item."""
+        return self.items[self.pos]
+
+    def next(self):
+        """Goes one item ahead and returns it."""
+        rv = self.current
+        self.pos = (self.pos + 1) % len(self.items)
+        return rv
+
+    __next__ = next
+
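+# Usage sketch (illustrative):
+#
+#   >>> c = Cycler('odd', 'even')
+#   >>> c.next(), c.next(), c.next()
+#   ('odd', 'even', 'odd')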
+
+class Joiner(object):
+    """A joining helper for templates."""
+
+    def __init__(self, sep=u', '):
+        self.sep = sep
+        self.used = False
+
+    def __call__(self):
+        if not self.used:
+            self.used = True
+            return u''
+        return self.sep
+
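+# Usage sketch (illustrative): the first call returns an empty string and
+# every later call returns the separator, which makes comma-joining in
+# template loops easy.
+#
+#   >>> j = Joiner(', ')
+#   >>> j() + 'a' + j() + 'b'
+#   'a, b'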
+
+class Namespace(object):
+    """A namespace object that can hold arbitrary attributes.  It may be
+    initialized from a dictionary or with keyword arguments."""
+
+    def __init__(*args, **kwargs):
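+        # note: no explicit ``self`` parameter, presumably so that a
+        # ``self`` keyword argument can still be stored in the namespace
+        # dict instead of clashing with the instance argument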
+        self, args = args[0], args[1:]
+        self.__attrs = dict(*args, **kwargs)
+
+    def __getattribute__(self, name):
+        if name == '_Namespace__attrs':
+            return object.__getattribute__(self, name)
+        try:
+            return self.__attrs[name]
+        except KeyError:
+            raise AttributeError(name)
+
+    def __setitem__(self, name, value):
+        self.__attrs[name] = value
+
+    def __repr__(self):
+        return '<Namespace %r>' % self.__attrs
+
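+# Usage sketch (illustrative): attribute reads go through the internal
+# dict; template-side assignment uses ``__setitem__``.
+#
+#   >>> ns = Namespace(a=1)
+#   >>> ns.a
+#   1
+#   >>> ns['b'] = 2
+#   >>> ns.b
+#   2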
+
+# does this python version support async for in and async generators?
+try:
+    exec('async def _():\n async for _ in ():\n  yield _')
+    have_async_gen = True
+except SyntaxError:
+    have_async_gen = False
+
+
+# Imported here because that's where it was in the past
+from markupsafe import Markup, escape, soft_unicode
diff --git a/lib/python3.7/site-packages/jinja2/visitor.py b/lib/python3.7/site-packages/jinja2/visitor.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba526dfac9283e02593950fe58a843a3d2083bdd
--- /dev/null
+++ b/lib/python3.7/site-packages/jinja2/visitor.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+"""
+    jinja2.visitor
+    ~~~~~~~~~~~~~~
+
+    This module implements a visitor for the nodes.
+
+    :copyright: (c) 2017 by the Jinja Team.
+    :license: BSD.
+"""
+from jinja2.nodes import Node
+
+
+class NodeVisitor(object):
+    """Walks the abstract syntax tree and call visitor functions for every
+    node found.  The visitor functions may return values which will be
+    forwarded by the `visit` method.
+
+    Per default the visitor functions for the nodes are ``'visit_'`` +
+    class name of the node.  So a `TryFinally` node visit function would
+    be `visit_TryFinally`.  This behavior can be changed by overriding
+    the `get_visitor` function.  If no visitor function exists for a node
+    (return value `None`) the `generic_visit` visitor is used instead.
+    """
+
+    def get_visitor(self, node):
+        """Return the visitor function for this node or `None` if no visitor
+        exists for this node.  In that case the generic visit function is
+        used instead.
+        """
+        method = 'visit_' + node.__class__.__name__
+        return getattr(self, method, None)
+
+    def visit(self, node, *args, **kwargs):
+        """Visit a node."""
+        f = self.get_visitor(node)
+        if f is not None:
+            return f(node, *args, **kwargs)
+        return self.generic_visit(node, *args, **kwargs)
+
+    def generic_visit(self, node, *args, **kwargs):
+        """Called if no explicit visitor function exists for a node."""
+        for node in node.iter_child_nodes():
+            self.visit(node, *args, **kwargs)
+
+
+class NodeTransformer(NodeVisitor):
+    """Walks the abstract syntax tree and allows modifications of nodes.
+
+    The `NodeTransformer` will walk the AST and use the return value of the
+    visitor functions to replace or remove the old node.  If the return
+    value of the visitor function is `None` the node will be removed
+    from its previous location, otherwise it is replaced with the return
+    value.  The return value may be the original node in which case no
+    replacement takes place.
+    """
+
+    def generic_visit(self, node, *args, **kwargs):
+        for field, old_value in node.iter_fields():
+            if isinstance(old_value, list):
+                new_values = []
+                for value in old_value:
+                    if isinstance(value, Node):
+                        value = self.visit(value, *args, **kwargs)
+                        if value is None:
+                            continue
+                        elif not isinstance(value, Node):
+                            new_values.extend(value)
+                            continue
+                    new_values.append(value)
+                old_value[:] = new_values
+            elif isinstance(old_value, Node):
+                new_node = self.visit(old_value, *args, **kwargs)
+                if new_node is None:
+                    delattr(node, field)
+                else:
+                    setattr(node, field, new_node)
+        return node
+
+    def visit_list(self, node, *args, **kwargs):
+        """As transformers may return lists in some places this method
+        can be used to enforce a list as return value.
+        """
+        rv = self.visit(node, *args, **kwargs)
+        if not isinstance(rv, list):
+            rv = [rv]
+        return rv
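+
+
+# A minimal transformer sketch (illustrative, not part of the module):
+# prefix every variable name in a parsed template.
+#
+#   from jinja2 import Environment
+#
+#   class PrefixNames(NodeTransformer):
+#       def visit_Name(self, node):
+#           node.name = 'ns_' + node.name
+#           return node
+#
+#   tree = Environment().parse('{{ foo }}')
+#   PrefixNames().visit(tree)   # {{ foo }} becomes {{ ns_foo }}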
diff --git a/lib/python3.7/site-packages/markupsafe/__init__.py b/lib/python3.7/site-packages/markupsafe/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..da05ed328a476bdcea7c0bd4d335d9e721cbab77
--- /dev/null
+++ b/lib/python3.7/site-packages/markupsafe/__init__.py
@@ -0,0 +1,327 @@
+# -*- coding: utf-8 -*-
+"""
+markupsafe
+~~~~~~~~~~
+
+Implements an escape function and a Markup string to replace HTML
+special characters with safe representations.
+
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
+"""
+import re
+import string
+
+from ._compat import int_types
+from ._compat import iteritems
+from ._compat import Mapping
+from ._compat import PY2
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import unichr
+
+__version__ = "1.1.1"
+
+__all__ = ["Markup", "soft_unicode", "escape", "escape_silent"]
+
+_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)")
+_entity_re = re.compile(r"&([^& ;]+);")
+
+
+class Markup(text_type):
+    """A string that is ready to be safely inserted into an HTML or XML
+    document, either because it was escaped or because it was marked
+    safe.
+
+    Passing an object to the constructor converts it to text and wraps
+    it to mark it safe without escaping. To escape the text, use the
+    :meth:`escape` class method instead.
+
+    >>> Markup('Hello, <em>World</em>!')
+    Markup('Hello, <em>World</em>!')
+    >>> Markup(42)
+    Markup('42')
+    >>> Markup.escape('Hello, <em>World</em>!')
+    Markup('Hello &lt;em&gt;World&lt;/em&gt;!')
+
+    This implements the ``__html__()`` interface that some frameworks
+    use. Passing an object that implements ``__html__()`` will wrap the
+    output of that method, marking it safe.
+
+    >>> class Foo:
+    ...     def __html__(self):
+    ...         return '<a href="/foo">foo</a>'
+    ...
+    >>> Markup(Foo())
+    Markup('<a href="/foo">foo</a>')
+
+    This is a subclass of the text type (``str`` in Python 3,
+    ``unicode`` in Python 2). It has the same methods as that type, but
+    all methods escape their arguments and return a ``Markup`` instance.
+
+    >>> Markup('<em>%s</em>') % 'foo & bar'
+    Markup('<em>foo &amp; bar</em>')
+    >>> Markup('<em>Hello</em> ') + '<foo>'
+    Markup('<em>Hello</em> &lt;foo&gt;')
+    """
+
+    __slots__ = ()
+
+    def __new__(cls, base=u"", encoding=None, errors="strict"):
+        if hasattr(base, "__html__"):
+            base = base.__html__()
+        if encoding is None:
+            return text_type.__new__(cls, base)
+        return text_type.__new__(cls, base, encoding, errors)
+
+    def __html__(self):
+        return self
+
+    def __add__(self, other):
+        if isinstance(other, string_types) or hasattr(other, "__html__"):
+            return self.__class__(super(Markup, self).__add__(self.escape(other)))
+        return NotImplemented
+
+    def __radd__(self, other):
+        if hasattr(other, "__html__") or isinstance(other, string_types):
+            return self.escape(other).__add__(self)
+        return NotImplemented
+
+    def __mul__(self, num):
+        if isinstance(num, int_types):
+            return self.__class__(text_type.__mul__(self, num))
+        return NotImplemented
+
+    __rmul__ = __mul__
+
+    def __mod__(self, arg):
+        if isinstance(arg, tuple):
+            arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
+        else:
+            arg = _MarkupEscapeHelper(arg, self.escape)
+        return self.__class__(text_type.__mod__(self, arg))
+
+    def __repr__(self):
+        return "%s(%s)" % (self.__class__.__name__, text_type.__repr__(self))
+
+    def join(self, seq):
+        return self.__class__(text_type.join(self, map(self.escape, seq)))
+
+    join.__doc__ = text_type.join.__doc__
+
+    def split(self, *args, **kwargs):
+        return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
+
+    split.__doc__ = text_type.split.__doc__
+
+    def rsplit(self, *args, **kwargs):
+        return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
+
+    rsplit.__doc__ = text_type.rsplit.__doc__
+
+    def splitlines(self, *args, **kwargs):
+        return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
+
+    splitlines.__doc__ = text_type.splitlines.__doc__
+
+    def unescape(self):
+        """Convert escaped markup back into a text string. This replaces
+        HTML entities with the characters they represent.
+
+        >>> Markup('Main &raquo; <em>About</em>').unescape()
+        'Main » <em>About</em>'
+        """
+        from ._constants import HTML_ENTITIES
+
+        def handle_match(m):
+            name = m.group(1)
+            if name in HTML_ENTITIES:
+                return unichr(HTML_ENTITIES[name])
+            try:
+                if name[:2] in ("#x", "#X"):
+                    return unichr(int(name[2:], 16))
+                elif name.startswith("#"):
+                    return unichr(int(name[1:]))
+            except ValueError:
+                pass
+            # Don't modify unexpected input.
+            return m.group()
+
+        return _entity_re.sub(handle_match, text_type(self))
+
+    def striptags(self):
+        """:meth:`unescape` the markup, remove tags, and normalize
+        whitespace to single spaces.
+
+        >>> Markup('Main &raquo;\t<em>About</em>').striptags()
+        'Main » About'
+        """
+        stripped = u" ".join(_striptags_re.sub("", self).split())
+        return Markup(stripped).unescape()
+
+    @classmethod
+    def escape(cls, s):
+        """Escape a string. Calls :func:`escape` and ensures that for
+        subclasses the correct type is returned.
+        """
+        rv = escape(s)
+        if rv.__class__ is not cls:
+            return cls(rv)
+        return rv
+
+    def make_simple_escaping_wrapper(name):  # noqa: B902
+        orig = getattr(text_type, name)
+
+        def func(self, *args, **kwargs):
+            args = _escape_argspec(list(args), enumerate(args), self.escape)
+            _escape_argspec(kwargs, iteritems(kwargs), self.escape)
+            return self.__class__(orig(self, *args, **kwargs))
+
+        func.__name__ = orig.__name__
+        func.__doc__ = orig.__doc__
+        return func
+
+    for method in (
+        "__getitem__",
+        "capitalize",
+        "title",
+        "lower",
+        "upper",
+        "replace",
+        "ljust",
+        "rjust",
+        "lstrip",
+        "rstrip",
+        "center",
+        "strip",
+        "translate",
+        "expandtabs",
+        "swapcase",
+        "zfill",
+    ):
+        locals()[method] = make_simple_escaping_wrapper(method)
+
+    def partition(self, sep):
+        return tuple(map(self.__class__, text_type.partition(self, self.escape(sep))))
+
+    def rpartition(self, sep):
+        return tuple(map(self.__class__, text_type.rpartition(self, self.escape(sep))))
+
+    def format(self, *args, **kwargs):
+        formatter = EscapeFormatter(self.escape)
+        kwargs = _MagicFormatMapping(args, kwargs)
+        return self.__class__(formatter.vformat(self, args, kwargs))
+
+    def __html_format__(self, format_spec):
+        if format_spec:
+            raise ValueError("Unsupported format specification " "for Markup.")
+        return self
+
+    # not in python 3
+    if hasattr(text_type, "__getslice__"):
+        __getslice__ = make_simple_escaping_wrapper("__getslice__")
+
+    del method, make_simple_escaping_wrapper
+
+
+class _MagicFormatMapping(Mapping):
+    """This class implements a dummy wrapper to fix a bug in the Python
+    standard library for string formatting.
+
+    See http://bugs.python.org/issue13598 for information about why
+    this is necessary.
+    """
+
+    def __init__(self, args, kwargs):
+        self._args = args
+        self._kwargs = kwargs
+        self._last_index = 0
+
+    def __getitem__(self, key):
+        if key == "":
+            idx = self._last_index
+            self._last_index += 1
+            try:
+                return self._args[idx]
+            except LookupError:
+                pass
+            key = str(idx)
+        return self._kwargs[key]
+
+    def __iter__(self):
+        return iter(self._kwargs)
+
+    def __len__(self):
+        return len(self._kwargs)
+
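+# A short sketch of the auto-numbering workaround (illustrative): ``'{}'``
+# fields look up the key ``''``, which this wrapper maps back to the
+# positional arguments in order.
+#
+#   >>> m = _MagicFormatMapping(('a', 'b'), {})
+#   >>> m[''], m['']
+#   ('a', 'b')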
+
+if hasattr(text_type, "format"):
+
+    class EscapeFormatter(string.Formatter):
+        def __init__(self, escape):
+            self.escape = escape
+
+        def format_field(self, value, format_spec):
+            if hasattr(value, "__html_format__"):
+                rv = value.__html_format__(format_spec)
+            elif hasattr(value, "__html__"):
+                if format_spec:
+                    raise ValueError(
+                        "Format specifier {0} given, but {1} does not"
+                        " define __html_format__. A class that defines"
+                        " __html__ must define __html_format__ to work"
+                        " with format specifiers.".format(format_spec, type(value))
+                    )
+                rv = value.__html__()
+            else:
+                # We need to make sure the format spec is unicode here as
+                # otherwise the wrong callback methods are invoked.  For
+                # instance a byte string there would invoke __str__ and
+                # not __unicode__.
+                rv = string.Formatter.format_field(self, value, text_type(format_spec))
+            return text_type(self.escape(rv))
+
+
+def _escape_argspec(obj, iterable, escape):
+    """Helper for various string-wrapped functions."""
+    for key, value in iterable:
+        if hasattr(value, "__html__") or isinstance(value, string_types):
+            obj[key] = escape(value)
+    return obj
+
+
+class _MarkupEscapeHelper(object):
+    """Helper for Markup.__mod__"""
+
+    def __init__(self, obj, escape):
+        self.obj = obj
+        self.escape = escape
+
+    def __getitem__(self, item):
+        return _MarkupEscapeHelper(self.obj[item], self.escape)
+
+    def __str__(self):
+        return text_type(self.escape(self.obj))
+
+    __unicode__ = __str__
+
+    def __repr__(self):
+        return str(self.escape(repr(self.obj)))
+
+    def __int__(self):
+        return int(self.obj)
+
+    def __float__(self):
+        return float(self.obj)
+
+
+# we have to import it down here as the speedups and native
+# modules imports the markup type which is define above.
+try:
+    from ._speedups import escape, escape_silent, soft_unicode
+except ImportError:
+    from ._native import escape, escape_silent, soft_unicode
+
+if not PY2:
+    soft_str = soft_unicode
+    __all__.append("soft_str")
diff --git a/lib/python3.7/site-packages/markupsafe/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/markupsafe/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e257a32b0d75eac11fec10fb5b38eaf316906711
Binary files /dev/null and b/lib/python3.7/site-packages/markupsafe/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/markupsafe/__pycache__/_compat.cpython-37.pyc b/lib/python3.7/site-packages/markupsafe/__pycache__/_compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..892c5b3ac1c37593ad4034cab792e2eedede7aef
Binary files /dev/null and b/lib/python3.7/site-packages/markupsafe/__pycache__/_compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/markupsafe/__pycache__/_constants.cpython-37.pyc b/lib/python3.7/site-packages/markupsafe/__pycache__/_constants.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff8cec532beb0649a3c804540e39fbbe6c686a8e
Binary files /dev/null and b/lib/python3.7/site-packages/markupsafe/__pycache__/_constants.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/markupsafe/__pycache__/_native.cpython-37.pyc b/lib/python3.7/site-packages/markupsafe/__pycache__/_native.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31e06ef6c25f41313e7b16a0625506ab365b2be3
Binary files /dev/null and b/lib/python3.7/site-packages/markupsafe/__pycache__/_native.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/markupsafe/_compat.py b/lib/python3.7/site-packages/markupsafe/_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc05090f9e88da2c16a75844bcb6d5043025581b
--- /dev/null
+++ b/lib/python3.7/site-packages/markupsafe/_compat.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+"""
+markupsafe._compat
+~~~~~~~~~~~~~~~~~~
+
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
+"""
+import sys
+
+PY2 = sys.version_info[0] == 2
+
+if not PY2:
+    text_type = str
+    string_types = (str,)
+    unichr = chr
+    int_types = (int,)
+
+    def iteritems(x):
+        return iter(x.items())
+
+    from collections.abc import Mapping
+
+else:
+    text_type = unicode
+    string_types = (str, unicode)
+    unichr = unichr
+    int_types = (int, long)
+
+    def iteritems(x):
+        return x.iteritems()
+
+    from collections import Mapping
diff --git a/lib/python3.7/site-packages/markupsafe/_constants.py b/lib/python3.7/site-packages/markupsafe/_constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c57c2d29452a94c22708e1b749f0b7e8c14bf1b
--- /dev/null
+++ b/lib/python3.7/site-packages/markupsafe/_constants.py
@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+"""
+markupsafe._constants
+~~~~~~~~~~~~~~~~~~~~~
+
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
+"""
+
+HTML_ENTITIES = {
+    "AElig": 198,
+    "Aacute": 193,
+    "Acirc": 194,
+    "Agrave": 192,
+    "Alpha": 913,
+    "Aring": 197,
+    "Atilde": 195,
+    "Auml": 196,
+    "Beta": 914,
+    "Ccedil": 199,
+    "Chi": 935,
+    "Dagger": 8225,
+    "Delta": 916,
+    "ETH": 208,
+    "Eacute": 201,
+    "Ecirc": 202,
+    "Egrave": 200,
+    "Epsilon": 917,
+    "Eta": 919,
+    "Euml": 203,
+    "Gamma": 915,
+    "Iacute": 205,
+    "Icirc": 206,
+    "Igrave": 204,
+    "Iota": 921,
+    "Iuml": 207,
+    "Kappa": 922,
+    "Lambda": 923,
+    "Mu": 924,
+    "Ntilde": 209,
+    "Nu": 925,
+    "OElig": 338,
+    "Oacute": 211,
+    "Ocirc": 212,
+    "Ograve": 210,
+    "Omega": 937,
+    "Omicron": 927,
+    "Oslash": 216,
+    "Otilde": 213,
+    "Ouml": 214,
+    "Phi": 934,
+    "Pi": 928,
+    "Prime": 8243,
+    "Psi": 936,
+    "Rho": 929,
+    "Scaron": 352,
+    "Sigma": 931,
+    "THORN": 222,
+    "Tau": 932,
+    "Theta": 920,
+    "Uacute": 218,
+    "Ucirc": 219,
+    "Ugrave": 217,
+    "Upsilon": 933,
+    "Uuml": 220,
+    "Xi": 926,
+    "Yacute": 221,
+    "Yuml": 376,
+    "Zeta": 918,
+    "aacute": 225,
+    "acirc": 226,
+    "acute": 180,
+    "aelig": 230,
+    "agrave": 224,
+    "alefsym": 8501,
+    "alpha": 945,
+    "amp": 38,
+    "and": 8743,
+    "ang": 8736,
+    "apos": 39,
+    "aring": 229,
+    "asymp": 8776,
+    "atilde": 227,
+    "auml": 228,
+    "bdquo": 8222,
+    "beta": 946,
+    "brvbar": 166,
+    "bull": 8226,
+    "cap": 8745,
+    "ccedil": 231,
+    "cedil": 184,
+    "cent": 162,
+    "chi": 967,
+    "circ": 710,
+    "clubs": 9827,
+    "cong": 8773,
+    "copy": 169,
+    "crarr": 8629,
+    "cup": 8746,
+    "curren": 164,
+    "dArr": 8659,
+    "dagger": 8224,
+    "darr": 8595,
+    "deg": 176,
+    "delta": 948,
+    "diams": 9830,
+    "divide": 247,
+    "eacute": 233,
+    "ecirc": 234,
+    "egrave": 232,
+    "empty": 8709,
+    "emsp": 8195,
+    "ensp": 8194,
+    "epsilon": 949,
+    "equiv": 8801,
+    "eta": 951,
+    "eth": 240,
+    "euml": 235,
+    "euro": 8364,
+    "exist": 8707,
+    "fnof": 402,
+    "forall": 8704,
+    "frac12": 189,
+    "frac14": 188,
+    "frac34": 190,
+    "frasl": 8260,
+    "gamma": 947,
+    "ge": 8805,
+    "gt": 62,
+    "hArr": 8660,
+    "harr": 8596,
+    "hearts": 9829,
+    "hellip": 8230,
+    "iacute": 237,
+    "icirc": 238,
+    "iexcl": 161,
+    "igrave": 236,
+    "image": 8465,
+    "infin": 8734,
+    "int": 8747,
+    "iota": 953,
+    "iquest": 191,
+    "isin": 8712,
+    "iuml": 239,
+    "kappa": 954,
+    "lArr": 8656,
+    "lambda": 955,
+    "lang": 9001,
+    "laquo": 171,
+    "larr": 8592,
+    "lceil": 8968,
+    "ldquo": 8220,
+    "le": 8804,
+    "lfloor": 8970,
+    "lowast": 8727,
+    "loz": 9674,
+    "lrm": 8206,
+    "lsaquo": 8249,
+    "lsquo": 8216,
+    "lt": 60,
+    "macr": 175,
+    "mdash": 8212,
+    "micro": 181,
+    "middot": 183,
+    "minus": 8722,
+    "mu": 956,
+    "nabla": 8711,
+    "nbsp": 160,
+    "ndash": 8211,
+    "ne": 8800,
+    "ni": 8715,
+    "not": 172,
+    "notin": 8713,
+    "nsub": 8836,
+    "ntilde": 241,
+    "nu": 957,
+    "oacute": 243,
+    "ocirc": 244,
+    "oelig": 339,
+    "ograve": 242,
+    "oline": 8254,
+    "omega": 969,
+    "omicron": 959,
+    "oplus": 8853,
+    "or": 8744,
+    "ordf": 170,
+    "ordm": 186,
+    "oslash": 248,
+    "otilde": 245,
+    "otimes": 8855,
+    "ouml": 246,
+    "para": 182,
+    "part": 8706,
+    "permil": 8240,
+    "perp": 8869,
+    "phi": 966,
+    "pi": 960,
+    "piv": 982,
+    "plusmn": 177,
+    "pound": 163,
+    "prime": 8242,
+    "prod": 8719,
+    "prop": 8733,
+    "psi": 968,
+    "quot": 34,
+    "rArr": 8658,
+    "radic": 8730,
+    "rang": 9002,
+    "raquo": 187,
+    "rarr": 8594,
+    "rceil": 8969,
+    "rdquo": 8221,
+    "real": 8476,
+    "reg": 174,
+    "rfloor": 8971,
+    "rho": 961,
+    "rlm": 8207,
+    "rsaquo": 8250,
+    "rsquo": 8217,
+    "sbquo": 8218,
+    "scaron": 353,
+    "sdot": 8901,
+    "sect": 167,
+    "shy": 173,
+    "sigma": 963,
+    "sigmaf": 962,
+    "sim": 8764,
+    "spades": 9824,
+    "sub": 8834,
+    "sube": 8838,
+    "sum": 8721,
+    "sup": 8835,
+    "sup1": 185,
+    "sup2": 178,
+    "sup3": 179,
+    "supe": 8839,
+    "szlig": 223,
+    "tau": 964,
+    "there4": 8756,
+    "theta": 952,
+    "thetasym": 977,
+    "thinsp": 8201,
+    "thorn": 254,
+    "tilde": 732,
+    "times": 215,
+    "trade": 8482,
+    "uArr": 8657,
+    "uacute": 250,
+    "uarr": 8593,
+    "ucirc": 251,
+    "ugrave": 249,
+    "uml": 168,
+    "upsih": 978,
+    "upsilon": 965,
+    "uuml": 252,
+    "weierp": 8472,
+    "xi": 958,
+    "yacute": 253,
+    "yen": 165,
+    "yuml": 255,
+    "zeta": 950,
+    "zwj": 8205,
+    "zwnj": 8204,
+}
diff --git a/lib/python3.7/site-packages/markupsafe/_native.py b/lib/python3.7/site-packages/markupsafe/_native.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd08752cd8940f01f85e51a6477cf72b3e5e517e
--- /dev/null
+++ b/lib/python3.7/site-packages/markupsafe/_native.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+"""
+markupsafe._native
+~~~~~~~~~~~~~~~~~~
+
+Native Python implementation used when the C module is not compiled.
+
+:copyright: 2010 Pallets
+:license: BSD-3-Clause
+"""
+from . import Markup
+from ._compat import text_type
+
+
+def escape(s):
+    """Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in
+    the string with HTML-safe sequences. Use this if you need to display
+    text that might contain such characters in HTML.
+
+    If the object has an ``__html__`` method, it is called and the
+    return value is assumed to already be safe for HTML.
+
+    :param s: An object to be converted to a string and escaped.
+    :return: A :class:`Markup` string with the escaped text.
+    """
+    if hasattr(s, "__html__"):
+        return Markup(s.__html__())
+    return Markup(
+        text_type(s)
+        .replace("&", "&amp;")
+        .replace(">", "&gt;")
+        .replace("<", "&lt;")
+        .replace("'", "&#39;")
+        .replace('"', "&#34;")
+    )
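+
+# Illustrative only (not part of the upstream module). ``&`` is replaced
+# first so the ampersands introduced by the other replacements are not
+# themselves re-escaped:
+# >>> escape('<em>"quoted" & \'single\'</em>')
+# Markup('&lt;em&gt;&#34;quoted&#34; &amp; &#39;single&#39;&lt;/em&gt;')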
+
+
+def escape_silent(s):
+    """Like :func:`escape` but treats ``None`` as the empty string.
+    Useful with optional values, as otherwise you get the string
+    ``'None'`` when the value is ``None``.
+
+    >>> escape(None)
+    Markup('None')
+    >>> escape_silent(None)
+    Markup('')
+    """
+    if s is None:
+        return Markup()
+    return escape(s)
+
+
+def soft_unicode(s):
+    """Convert an object to a string if it isn't already. This preserves
+    a :class:`Markup` string rather than converting it back to a basic
+    string, so it will still be marked as safe and won't be escaped
+    again.
+
+    >>> value = escape('<User 1>')
+    >>> value
+    Markup('&lt;User 1&gt;')
+    >>> escape(str(value))
+    Markup('&amp;lt;User 1&amp;gt;')
+    >>> escape(soft_unicode(value))
+    Markup('&lt;User 1&gt;')
+    """
+    if not isinstance(s, text_type):
+        s = text_type(s)
+    return s
diff --git a/lib/python3.7/site-packages/markupsafe/_speedups.c b/lib/python3.7/site-packages/markupsafe/_speedups.c
new file mode 100644
index 0000000000000000000000000000000000000000..12d2c4a7d8352f6bf1deb19790453eb529827a78
--- /dev/null
+++ b/lib/python3.7/site-packages/markupsafe/_speedups.c
@@ -0,0 +1,423 @@
+/**
+ * markupsafe._speedups
+ * ~~~~~~~~~~~~~~~~~~~~
+ *
+ * C implementation of escaping for better performance. Used instead of
+ * the native Python implementation when compiled.
+ *
+ * :copyright: 2010 Pallets
+ * :license: BSD-3-Clause
+ */
+#include <Python.h>
+
+#if PY_MAJOR_VERSION < 3
+#define ESCAPED_CHARS_TABLE_SIZE 63
+#define UNICHR(x) (PyUnicode_AS_UNICODE((PyUnicodeObject*)PyUnicode_DecodeASCII(x, strlen(x), NULL)));
+
+static Py_ssize_t escaped_chars_delta_len[ESCAPED_CHARS_TABLE_SIZE];
+static Py_UNICODE *escaped_chars_repl[ESCAPED_CHARS_TABLE_SIZE];
+#endif
+
+static PyObject* markup;
+
+static int
+init_constants(void)
+{
+	PyObject *module;
+
+#if PY_MAJOR_VERSION < 3
+	/* mapping of characters to replace */
+	escaped_chars_repl['"'] = UNICHR("&#34;");
+	escaped_chars_repl['\''] = UNICHR("&#39;");
+	escaped_chars_repl['&'] = UNICHR("&amp;");
+	escaped_chars_repl['<'] = UNICHR("&lt;");
+	escaped_chars_repl['>'] = UNICHR("&gt;");
+
+	/* lengths of the replacements minus 1, i.e. the extra characters added */
+	memset(escaped_chars_delta_len, 0, sizeof (escaped_chars_delta_len));
+	escaped_chars_delta_len['"'] = escaped_chars_delta_len['\''] = \
+		escaped_chars_delta_len['&'] = 4;
+	escaped_chars_delta_len['<'] = escaped_chars_delta_len['>'] = 3;
+#endif
+
+	/* import markup type so that we can mark the return value */
+	module = PyImport_ImportModule("markupsafe");
+	if (!module)
+		return 0;
+	markup = PyObject_GetAttrString(module, "Markup");
+	Py_DECREF(module);
+
+	return 1;
+}
+
+#if PY_MAJOR_VERSION < 3
+static PyObject*
+escape_unicode(PyUnicodeObject *in)
+{
+	PyUnicodeObject *out;
+	Py_UNICODE *inp = PyUnicode_AS_UNICODE(in);
+	const Py_UNICODE *inp_end = PyUnicode_AS_UNICODE(in) + PyUnicode_GET_SIZE(in);
+	Py_UNICODE *next_escp;
+	Py_UNICODE *outp;
+	Py_ssize_t delta=0, erepl=0, delta_len=0;
+
+	/* First we need to figure out how long the escaped string will be */
+	while (*(inp) || inp < inp_end) {
+		if (*inp < ESCAPED_CHARS_TABLE_SIZE) {
+			delta += escaped_chars_delta_len[*inp];
+			erepl += !!escaped_chars_delta_len[*inp];
+		}
+		++inp;
+	}
+
+	/* Do we need to escape anything at all? */
+	if (!erepl) {
+		Py_INCREF(in);
+		return (PyObject*)in;
+	}
+
+	out = (PyUnicodeObject*)PyUnicode_FromUnicode(NULL, PyUnicode_GET_SIZE(in) + delta);
+	if (!out)
+		return NULL;
+
+	outp = PyUnicode_AS_UNICODE(out);
+	inp = PyUnicode_AS_UNICODE(in);
+	while (erepl-- > 0) {
+		/* look for the next substitution */
+		next_escp = inp;
+		while (next_escp < inp_end) {
+			if (*next_escp < ESCAPED_CHARS_TABLE_SIZE &&
+			    (delta_len = escaped_chars_delta_len[*next_escp])) {
+				++delta_len;
+				break;
+			}
+			++next_escp;
+		}
+
+		if (next_escp > inp) {
+			/* copy unescaped chars between inp and next_escp */
+			Py_UNICODE_COPY(outp, inp, next_escp-inp);
+			outp += next_escp - inp;
+		}
+
+		/* escape 'next_escp' */
+		Py_UNICODE_COPY(outp, escaped_chars_repl[*next_escp], delta_len);
+		outp += delta_len;
+
+		inp = next_escp + 1;
+	}
+	if (inp < inp_end)
+		Py_UNICODE_COPY(outp, inp, PyUnicode_GET_SIZE(in) - (inp - PyUnicode_AS_UNICODE(in)));
+
+	return (PyObject*)out;
+}
+#else /* PY_MAJOR_VERSION < 3 */
+
+#define GET_DELTA(inp, inp_end, delta) \
+	while (inp < inp_end) {	 \
+		switch (*inp++) {	   \
+		case '"':			   \
+		case '\'':			  \
+		case '&':			   \
+			delta += 4;		 \
+			break;			  \
+		case '<':			   \
+		case '>':			   \
+			delta += 3;		 \
+			break;			  \
+		}					   \
+	}
+
+#define DO_ESCAPE(inp, inp_end, outp) \
+	{  \
+		Py_ssize_t ncopy = 0;  \
+		while (inp < inp_end) {  \
+			switch (*inp) {  \
+			case '"':  \
+				memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+				outp += ncopy; ncopy = 0; \
+				*outp++ = '&';  \
+				*outp++ = '#';  \
+				*outp++ = '3';  \
+				*outp++ = '4';  \
+				*outp++ = ';';  \
+				break;  \
+			case '\'':  \
+				memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+				outp += ncopy; ncopy = 0; \
+				*outp++ = '&';  \
+				*outp++ = '#';  \
+				*outp++ = '3';  \
+				*outp++ = '9';  \
+				*outp++ = ';';  \
+				break;  \
+			case '&':  \
+				memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+				outp += ncopy; ncopy = 0; \
+				*outp++ = '&';  \
+				*outp++ = 'a';  \
+				*outp++ = 'm';  \
+				*outp++ = 'p';  \
+				*outp++ = ';';  \
+				break;  \
+			case '<':  \
+				memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+				outp += ncopy; ncopy = 0; \
+				*outp++ = '&';  \
+				*outp++ = 'l';  \
+				*outp++ = 't';  \
+				*outp++ = ';';  \
+				break;  \
+			case '>':  \
+				memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+				outp += ncopy; ncopy = 0; \
+				*outp++ = '&';  \
+				*outp++ = 'g';  \
+				*outp++ = 't';  \
+				*outp++ = ';';  \
+				break;  \
+			default:  \
+				ncopy++; \
+			}  \
+			inp++; \
+		}  \
+		memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+	}
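+
+/*
+ * The kind-specific functions below all follow the same two-pass scheme:
+ * GET_DELTA scans the input once to count how many extra code points the
+ * replacements will need (e.g. '<' -> "&lt;" adds 3), so the output string
+ * can be allocated exactly once; DO_ESCAPE then copies unescaped runs with
+ * memcpy and writes the replacement sequences inline. If GET_DELTA finds
+ * nothing to escape, the input is returned unchanged with its refcount
+ * incremented.
+ */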
+
+static PyObject*
+escape_unicode_kind1(PyUnicodeObject *in)
+{
+	Py_UCS1 *inp = PyUnicode_1BYTE_DATA(in);
+	Py_UCS1 *inp_end = inp + PyUnicode_GET_LENGTH(in);
+	Py_UCS1 *outp;
+	PyObject *out;
+	Py_ssize_t delta = 0;
+
+	GET_DELTA(inp, inp_end, delta);
+	if (!delta) {
+		Py_INCREF(in);
+		return (PyObject*)in;
+	}
+
+	out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta,
+						PyUnicode_IS_ASCII(in) ? 127 : 255);
+	if (!out)
+		return NULL;
+
+	inp = PyUnicode_1BYTE_DATA(in);
+	outp = PyUnicode_1BYTE_DATA(out);
+	DO_ESCAPE(inp, inp_end, outp);
+	return out;
+}
+
+static PyObject*
+escape_unicode_kind2(PyUnicodeObject *in)
+{
+	Py_UCS2 *inp = PyUnicode_2BYTE_DATA(in);
+	Py_UCS2 *inp_end = inp + PyUnicode_GET_LENGTH(in);
+	Py_UCS2 *outp;
+	PyObject *out;
+	Py_ssize_t delta = 0;
+
+	GET_DELTA(inp, inp_end, delta);
+	if (!delta) {
+		Py_INCREF(in);
+		return (PyObject*)in;
+	}
+
+	out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 65535);
+	if (!out)
+		return NULL;
+
+	inp = PyUnicode_2BYTE_DATA(in);
+	outp = PyUnicode_2BYTE_DATA(out);
+	DO_ESCAPE(inp, inp_end, outp);
+	return out;
+}
+
+
+static PyObject*
+escape_unicode_kind4(PyUnicodeObject *in)
+{
+	Py_UCS4 *inp = PyUnicode_4BYTE_DATA(in);
+	Py_UCS4 *inp_end = inp + PyUnicode_GET_LENGTH(in);
+	Py_UCS4 *outp;
+	PyObject *out;
+	Py_ssize_t delta = 0;
+
+	GET_DELTA(inp, inp_end, delta);
+	if (!delta) {
+		Py_INCREF(in);
+		return (PyObject*)in;
+	}
+
+	out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 1114111);
+	if (!out)
+		return NULL;
+
+	inp = PyUnicode_4BYTE_DATA(in);
+	outp = PyUnicode_4BYTE_DATA(out);
+	DO_ESCAPE(inp, inp_end, outp);
+	return out;
+}
+
+static PyObject*
+escape_unicode(PyUnicodeObject *in)
+{
+	if (PyUnicode_READY(in))
+		return NULL;
+
+	switch (PyUnicode_KIND(in)) {
+	case PyUnicode_1BYTE_KIND:
+		return escape_unicode_kind1(in);
+	case PyUnicode_2BYTE_KIND:
+		return escape_unicode_kind2(in);
+	case PyUnicode_4BYTE_KIND:
+		return escape_unicode_kind4(in);
+	}
+	assert(0);  /* shouldn't happen */
+	return NULL;
+}
+#endif /* PY_MAJOR_VERSION < 3 */
+
+static PyObject*
+escape(PyObject *self, PyObject *text)
+{
+	static PyObject *id_html;
+	PyObject *s = NULL, *rv = NULL, *html;
+
+	if (id_html == NULL) {
+#if PY_MAJOR_VERSION < 3
+		id_html = PyString_InternFromString("__html__");
+#else
+		id_html = PyUnicode_InternFromString("__html__");
+#endif
+		if (id_html == NULL) {
+			return NULL;
+		}
+	}
+
+	/* we don't have to escape integers, bools, floats or None */
+	if (PyLong_CheckExact(text) ||
+#if PY_MAJOR_VERSION < 3
+	    PyInt_CheckExact(text) ||
+#endif
+	    PyFloat_CheckExact(text) || PyBool_Check(text) ||
+	    text == Py_None)
+		return PyObject_CallFunctionObjArgs(markup, text, NULL);
+
+	/* if the object has an __html__ method that performs the escaping */
+	html = PyObject_GetAttr(text, id_html);
+	if (html) {
+		s = PyObject_CallObject(html, NULL);
+		Py_DECREF(html);
+		if (s == NULL) {
+			return NULL;
+		}
+		/* Convert to Markup object */
+		rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
+		Py_DECREF(s);
+		return rv;
+	}
+
+	/* otherwise make the object unicode if it isn't, then escape */
+	PyErr_Clear();
+	if (!PyUnicode_Check(text)) {
+#if PY_MAJOR_VERSION < 3
+		PyObject *unicode = PyObject_Unicode(text);
+#else
+		PyObject *unicode = PyObject_Str(text);
+#endif
+		if (!unicode)
+			return NULL;
+		s = escape_unicode((PyUnicodeObject*)unicode);
+		Py_DECREF(unicode);
+	}
+	else
+		s = escape_unicode((PyUnicodeObject*)text);
+
+	/* convert the unicode string into a markup object. */
+	rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
+	Py_DECREF(s);
+	return rv;
+}
+
+
+static PyObject*
+escape_silent(PyObject *self, PyObject *text)
+{
+	if (text != Py_None)
+		return escape(self, text);
+	return PyObject_CallFunctionObjArgs(markup, NULL);
+}
+
+
+static PyObject*
+soft_unicode(PyObject *self, PyObject *s)
+{
+	if (!PyUnicode_Check(s))
+#if PY_MAJOR_VERSION < 3
+		return PyObject_Unicode(s);
+#else
+		return PyObject_Str(s);
+#endif
+	Py_INCREF(s);
+	return s;
+}
+
+
+static PyMethodDef module_methods[] = {
+	{"escape", (PyCFunction)escape, METH_O,
+	 "escape(s) -> markup\n\n"
+	 "Convert the characters &, <, >, ', and \" in string s to HTML-safe\n"
+	 "sequences.  Use this if you need to display text that might contain\n"
+	 "such characters in HTML.  Marks return value as markup string."},
+	{"escape_silent", (PyCFunction)escape_silent, METH_O,
+	 "escape_silent(s) -> markup\n\n"
+	 "Like escape but converts None to an empty string."},
+	{"soft_unicode", (PyCFunction)soft_unicode, METH_O,
+	 "soft_unicode(object) -> string\n\n"
+	 "Make a string unicode if it isn't already.  That way a markup\n"
+	 "string is not converted back to unicode."},
+	{NULL, NULL, 0, NULL}		/* Sentinel */
+};
+
+
+#if PY_MAJOR_VERSION < 3
+
+#ifndef PyMODINIT_FUNC	/* declarations for DLL import/export */
+#define PyMODINIT_FUNC void
+#endif
+PyMODINIT_FUNC
+init_speedups(void)
+{
+	if (!init_constants())
+		return;
+
+	Py_InitModule3("markupsafe._speedups", module_methods, "");
+}
+
+#else /* Python 3.x module initialization */
+
+static struct PyModuleDef module_definition = {
+	PyModuleDef_HEAD_INIT,
+	"markupsafe._speedups",
+	NULL,
+	-1,
+	module_methods,
+	NULL,
+	NULL,
+	NULL,
+	NULL
+};
+
+PyMODINIT_FUNC
+PyInit__speedups(void)
+{
+	if (!init_constants())
+		return NULL;
+
+	return PyModule_Create(&module_definition);
+}
+
+#endif
diff --git a/lib/python3.7/site-packages/markupsafe/_speedups.cpython-37m-x86_64-linux-gnu.so b/lib/python3.7/site-packages/markupsafe/_speedups.cpython-37m-x86_64-linux-gnu.so
new file mode 100755
index 0000000000000000000000000000000000000000..373fd8ffd21414f7e126d2d9d070d0b71edf21b8
Binary files /dev/null and b/lib/python3.7/site-packages/markupsafe/_speedups.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/python3.7/site-packages/peewee-3.9.4.dist-info/INSTALLER b/lib/python3.7/site-packages/peewee-3.9.4.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/lib/python3.7/site-packages/peewee-3.9.4.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/python3.7/site-packages/peewee-3.9.4.dist-info/LICENSE b/lib/python3.7/site-packages/peewee-3.9.4.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..c752ab3ead2e7be324e5cafb5e1d53135c16af13
--- /dev/null
+++ b/lib/python3.7/site-packages/peewee-3.9.4.dist-info/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2010 Charles Leifer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/lib/python3.7/site-packages/peewee-3.9.4.dist-info/METADATA b/lib/python3.7/site-packages/peewee-3.9.4.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..3a03d2208c03dfa3a10e232c04837b96d1fcbb31
--- /dev/null
+++ b/lib/python3.7/site-packages/peewee-3.9.4.dist-info/METADATA
@@ -0,0 +1,160 @@
+Metadata-Version: 2.1
+Name: peewee
+Version: 3.9.4
+Summary: a little orm
+Home-page: https://github.com/coleifer/peewee/
+Author: Charles Leifer
+Author-email: coleifer@gmail.com
+License: MIT License
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+
+.. image:: http://media.charlesleifer.com/blog/photos/peewee3-logo.png
+
+peewee
+======
+
+Peewee is a simple and small ORM. It has few (but expressive) concepts, making it easy to learn and intuitive to use.
+
+* a small, expressive ORM
+* python 2.7+ and 3.4+ (developed with 3.6)
+* supports sqlite, mysql and postgresql
+* tons of `extensions <http://docs.peewee-orm.com/en/latest/peewee/playhouse.html>`_
+
+.. image:: https://travis-ci.org/coleifer/peewee.svg?branch=master
+  :target: https://travis-ci.org/coleifer/peewee
+
+New to peewee? These may help:
+
+* `Quickstart <http://docs.peewee-orm.com/en/latest/peewee/quickstart.html#quickstart>`_
+* `Example twitter app <http://docs.peewee-orm.com/en/latest/peewee/example.html>`_
+* `Using peewee interactively <http://docs.peewee-orm.com/en/latest/peewee/interactive.html>`_
+* `Models and fields <http://docs.peewee-orm.com/en/latest/peewee/models.html>`_
+* `Querying <http://docs.peewee-orm.com/en/latest/peewee/querying.html>`_
+* `Relationships and joins <http://docs.peewee-orm.com/en/latest/peewee/relationships.html>`_
+
+Examples
+--------
+
+Defining models is similar to Django or SQLAlchemy:
+
+.. code-block:: python
+
+    from peewee import *
+    import datetime
+
+
+    db = SqliteDatabase('my_database.db')
+
+    class BaseModel(Model):
+        class Meta:
+            database = db
+
+    class User(BaseModel):
+        username = CharField(unique=True)
+
+    class Tweet(BaseModel):
+        user = ForeignKeyField(User, backref='tweets')
+        message = TextField()
+        created_date = DateTimeField(default=datetime.datetime.now)
+        is_published = BooleanField(default=True)
+
+Connect to the database and create tables:
+
+.. code-block:: python
+
+    db.connect()
+    db.create_tables([User, Tweet])
+
+Create a few rows:
+
+.. code-block:: python
+
+    charlie = User.create(username='charlie')
+    huey = User(username='huey')
+    huey.save()
+
+    # No need to set `is_published` or `created_date` since they
+    # will just use the default values we specified.
+    Tweet.create(user=charlie, message='My first tweet')
+
+Queries are expressive and composable:
+
+.. code-block:: python
+
+    # A simple query selecting a user.
+    User.get(User.username == 'charlie')
+
+    # Get tweets created by one of several users.
+    usernames = ['charlie', 'huey', 'mickey']
+    users = User.select().where(User.username.in_(usernames))
+    tweets = Tweet.select().where(Tweet.user.in_(users))
+
+    # We could accomplish the same using a JOIN:
+    tweets = (Tweet
+              .select()
+              .join(User)
+              .where(User.username.in_(usernames)))
+
+    # How many tweets were published today?
+    tweets_today = (Tweet
+                    .select()
+                    .where(
+                        (Tweet.created_date >= datetime.date.today()) &
+                        (Tweet.is_published == True))
+                    .count())
+
+    # Paginate the user table and show me page 3 (users 41-60).
+    User.select().order_by(User.username).paginate(3, 20)
+
+    # Order users by the number of tweets they've created:
+    tweet_ct = fn.Count(Tweet.id)
+    users = (User
+             .select(User, tweet_ct.alias('ct'))
+             .join(Tweet, JOIN.LEFT_OUTER)
+             .group_by(User)
+             .order_by(tweet_ct.desc()))
+
+    # Do an atomic update
+    Counter.update(count=Counter.count + 1).where(Counter.url == request.url).execute()
+
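+Query results are lazy iterables of model instances. A short sketch of row
+access, assuming the models defined above:
+
+.. code-block:: python
+
+    # Each row is a Tweet instance; foreign-key access returns a User.
+    for tweet in Tweet.select().order_by(Tweet.created_date.desc()):
+        print(tweet.user.username, '->', tweet.message)
+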
+Check out the `example twitter app <http://docs.peewee-orm.com/en/latest/peewee/example.html>`_.
+
+Learning more
+-------------
+
+Check the `documentation <http://docs.peewee-orm.com/>`_ for more examples.
+
+Specific question? Come hang out in the #peewee channel on irc.freenode.net, or post to the mailing list, http://groups.google.com/group/peewee-orm . If you would like to report a bug, `create a new issue <https://github.com/coleifer/peewee/issues/new>`_ on GitHub.
+
+Still want more info?
+---------------------
+
+.. image:: http://media.charlesleifer.com/blog/photos/wat.jpg
+
+I've written a number of blog posts about building applications and web-services with peewee (and usually Flask). If you'd like to see some real-life applications that use peewee, the following resources may be useful:
+
+* `Building a note-taking app with Flask and Peewee <http://charlesleifer.com/blog/saturday-morning-hack-a-little-note-taking-app-with-flask/>`_ as well as `Part 2 <http://charlesleifer.com/blog/saturday-morning-hacks-revisiting-the-notes-app/>`_ and `Part 3 <http://charlesleifer.com/blog/saturday-morning-hacks-adding-full-text-search-to-the-flask-note-taking-app/>`_.
+* `Analytics web service built with Flask and Peewee <http://charlesleifer.com/blog/saturday-morning-hacks-building-an-analytics-app-with-flask/>`_.
+* `Personalized news digest (with a boolean query parser!) <http://charlesleifer.com/blog/saturday-morning-hack-personalized-news-digest-with-boolean-query-parser/>`_.
+* `Structuring Flask apps with Peewee <http://charlesleifer.com/blog/structuring-flask-apps-a-how-to-for-those-coming-from-django/>`_.
+* `Creating a lastpass clone with Flask and Peewee <http://charlesleifer.com/blog/creating-a-personal-password-manager/>`_.
+* `Creating a bookmarking web-service that takes screenshots of your bookmarks <http://charlesleifer.com/blog/building-bookmarking-service-python-and-phantomjs/>`_.
+* `Building a pastebin, wiki and a bookmarking service using Flask and Peewee <http://charlesleifer.com/blog/dont-sweat-small-stuff-use-flask-blueprints/>`_.
+* `Encrypted databases with Python and SQLCipher <http://charlesleifer.com/blog/encrypted-sqlite-databases-with-python-and-sqlcipher/>`_.
+* `Dear Diary: An Encrypted, Command-Line Diary with Peewee <http://charlesleifer.com/blog/dear-diary-an-encrypted-command-line-diary-with-python/>`_.
+* `Query Tree Structures in SQLite using Peewee and the Transitive Closure Extension <http://charlesleifer.com/blog/querying-tree-structures-in-sqlite-using-python-and-the-transitive-closure-extension/>`_.
+
+
diff --git a/lib/python3.7/site-packages/peewee-3.9.4.dist-info/RECORD b/lib/python3.7/site-packages/peewee-3.9.4.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..98ea5d612b614354c7f2f53e97b28e2812192cae
--- /dev/null
+++ b/lib/python3.7/site-packages/peewee-3.9.4.dist-info/RECORD
@@ -0,0 +1,54 @@
+../../../bin/__pycache__/pwiz.cpython-37.pyc,,
+../../../bin/pwiz.py,sha256=U3QNGT7oVTYtUQ6_0H387hVttuLhn4Lj0LlqWPk-b5M,7950
+__pycache__/peewee.cpython-37.pyc,,
+__pycache__/pwiz.cpython-37.pyc,,
+peewee-3.9.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+peewee-3.9.4.dist-info/LICENSE,sha256=N0AJYSWwhzWiR7jdCM2C4LqYTTvr2SIdN4V2Y35SQNo,1058
+peewee-3.9.4.dist-info/METADATA,sha256=IdGZObIOQ9H-t0pH4rLSu9iISdTaZMAa6kCimPc5lS4,6827
+peewee-3.9.4.dist-info/RECORD,,
+peewee-3.9.4.dist-info/WHEEL,sha256=ohybRue5bPR5MQUSq7c6AGl-iIAd0MXt_sfyYTZ1Rq8,104
+peewee-3.9.4.dist-info/top_level.txt,sha256=uV7RZ61bWm9zDrPVGNrGay4E4WDonEqtU2NPe5GGUWs,22
+peewee.py,sha256=StRDjJbE2ymSBbTPJl2Voroh7hATG4oXukYOKh4mO2I,243527
+playhouse/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+playhouse/__pycache__/__init__.cpython-37.pyc,,
+playhouse/__pycache__/apsw_ext.cpython-37.pyc,,
+playhouse/__pycache__/dataset.cpython-37.pyc,,
+playhouse/__pycache__/db_url.cpython-37.pyc,,
+playhouse/__pycache__/fields.cpython-37.pyc,,
+playhouse/__pycache__/flask_utils.cpython-37.pyc,,
+playhouse/__pycache__/hybrid.cpython-37.pyc,,
+playhouse/__pycache__/kv.cpython-37.pyc,,
+playhouse/__pycache__/migrate.cpython-37.pyc,,
+playhouse/__pycache__/mysql_ext.cpython-37.pyc,,
+playhouse/__pycache__/pool.cpython-37.pyc,,
+playhouse/__pycache__/postgres_ext.cpython-37.pyc,,
+playhouse/__pycache__/reflection.cpython-37.pyc,,
+playhouse/__pycache__/shortcuts.cpython-37.pyc,,
+playhouse/__pycache__/signals.cpython-37.pyc,,
+playhouse/__pycache__/sqlcipher_ext.cpython-37.pyc,,
+playhouse/__pycache__/sqlite_ext.cpython-37.pyc,,
+playhouse/__pycache__/sqlite_udf.cpython-37.pyc,,
+playhouse/__pycache__/sqliteq.cpython-37.pyc,,
+playhouse/__pycache__/test_utils.cpython-37.pyc,,
+playhouse/_sqlite_ext.cpython-37m-x86_64-linux-gnu.so,sha256=lcRv_AZ68Ev3nLBAbqlg6g3ViQX2mDjeUfzZnoJDfds,1315696
+playhouse/_sqlite_udf.cpython-37m-x86_64-linux-gnu.so,sha256=zAddXWXSSq2r-d7umkWfLxBzxHB2W3fghZji6DGyELw,482600
+playhouse/apsw_ext.py,sha256=iWj6DPQCURsqgs8QXd1IWEPg4NMOAkSTTLMs8G1gyPU,4223
+playhouse/dataset.py,sha256=G1T_tyxE4vTo7wP4TK_qVJL20RJpR3_SZt7vKWNimVM,12767
+playhouse/db_url.py,sha256=Z8II9jH9eSJ5o9UQK2YJ4LP_WzUi6SHVU1yjfNpBmPM,3976
+playhouse/fields.py,sha256=m1IkkO1FUUuaDzSIcdtg49NobhAJSJF3NlFNge0eFxY,1743
+playhouse/flask_utils.py,sha256=N0stRAzFJXUKu7gWtE46zUeAInReHjJ-F2N5lJGPAM8,6034
+playhouse/hybrid.py,sha256=qRW3mQHVerRSiqvBwwhBF8g5kHgE3iq5OYDKqthtbLk,1473
+playhouse/kv.py,sha256=S7AzM1v-G-BORNU1k_sJQm6MJettDZVBSWcRnqPoAoE,5375
+playhouse/migrate.py,sha256=0XIqjiUtuwYXMQV5U4yCaiaF8UiA14LI-ohhyHfygEA,28612
+playhouse/mysql_ext.py,sha256=-nwyF7IRaBXgbW5Nn3J7q2MXhnbt92V6ojcgHvsSpps,876
+playhouse/pool.py,sha256=5H7vGQPHepeTxSAtKFk2FFmoCNzLSMRMLHXrS8aQvM8,11437
+playhouse/postgres_ext.py,sha256=qkVh67ODd4dhBaqDBde8drWjjTBmEHwbZgpYK-K5dkg,13645
+playhouse/reflection.py,sha256=ZhfTkAUuDqnBIh1-FdxhZYHjuBNTvBmLyIB2EjF_1hM,28793
+playhouse/shortcuts.py,sha256=mgOVaXu51UKiGCEj5Z4VoLDWDVYRJVigMGaRVA9vdpc,8215
+playhouse/signals.py,sha256=vqJaBmH8jWfEPP1J-6unznC61D-dTAv2faEI2rgLL_E,2487
+playhouse/sqlcipher_ext.py,sha256=F78lwdOb9vhPHIwIXDWdLhUdW1GE1nEoKXBrZ3FNZ_4,3460
+playhouse/sqlite_ext.py,sha256=HclDAyPAqv99Jc64rnTj8qSx17wqTTI_PMwX8R_M_xo,43137
+playhouse/sqlite_udf.py,sha256=hehQVYY9pnLPQMPvYqT9fygTnhfWLjP0JI2EH6iHdWk,13291
+playhouse/sqliteq.py,sha256=8rO_mJDjZhcobVPwMdlQFJfDOeB0UOr6fFVEgW1cuCU,10075
+playhouse/test_utils.py,sha256=sJwKeBq2ebR_VFVBGjHDmfzlAqR04GPXKnD3dfA-GAA,1737
+pwiz.py,sha256=grQPlpXMzzTwp002UuclAr2llSICgbJp7ixEzFbVlaM,7963
diff --git a/lib/python3.7/site-packages/peewee-3.9.4.dist-info/WHEEL b/lib/python3.7/site-packages/peewee-3.9.4.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..af9df0058e282545f5989f8e0d636d61a0b5c43c
--- /dev/null
+++ b/lib/python3.7/site-packages/peewee-3.9.4.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.1)
+Root-Is-Purelib: false
+Tag: cp37-cp37m-linux_x86_64
+
diff --git a/lib/python3.7/site-packages/peewee-3.9.4.dist-info/top_level.txt b/lib/python3.7/site-packages/peewee-3.9.4.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1d507be0cc01a5d02c1ad00f344967b663071ce7
--- /dev/null
+++ b/lib/python3.7/site-packages/peewee-3.9.4.dist-info/top_level.txt
@@ -0,0 +1,3 @@
+peewee
+playhouse
+pwiz
diff --git a/lib/python3.7/site-packages/peewee.py b/lib/python3.7/site-packages/peewee.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5e939e589e81cd253b0783ec0c3b47c9e4f490a
--- /dev/null
+++ b/lib/python3.7/site-packages/peewee.py
@@ -0,0 +1,7204 @@
+from bisect import bisect_left
+from bisect import bisect_right
+from contextlib import contextmanager
+from copy import deepcopy
+from functools import wraps
+from inspect import isclass
+import calendar
+import collections
+import datetime
+import decimal
+import hashlib
+import itertools
+import logging
+import operator
+import re
+import socket
+import struct
+import sys
+import threading
+import time
+import uuid
+import warnings
+try:
+    from collections.abc import Mapping
+except ImportError:
+    from collections import Mapping
+
+try:
+    from pysqlite3 import dbapi2 as pysq3
+except ImportError:
+    try:
+        from pysqlite2 import dbapi2 as pysq3
+    except ImportError:
+        pysq3 = None
+try:
+    import sqlite3
+except ImportError:
+    sqlite3 = pysq3
+else:
+    if pysq3 and pysq3.sqlite_version_info >= sqlite3.sqlite_version_info:
+        sqlite3 = pysq3
+try:
+    from psycopg2cffi import compat
+    compat.register()
+except ImportError:
+    pass
+try:
+    import psycopg2
+    from psycopg2 import extensions as pg_extensions
+    try:
+        from psycopg2 import errors as pg_errors
+    except ImportError:
+        pg_errors = None
+except ImportError:
+    psycopg2 = pg_errors = None
+
+mysql_passwd = False
+try:
+    import pymysql as mysql
+except ImportError:
+    try:
+        import MySQLdb as mysql
+        mysql_passwd = True
+    except ImportError:
+        mysql = None
+
+
+__version__ = '3.9.4'
+__all__ = [
+    'AsIs',
+    'AutoField',
+    'BareField',
+    'BigAutoField',
+    'BigBitField',
+    'BigIntegerField',
+    'BinaryUUIDField',
+    'BitField',
+    'BlobField',
+    'BooleanField',
+    'Case',
+    'Cast',
+    'CharField',
+    'Check',
+    'chunked',
+    'Column',
+    'CompositeKey',
+    'Context',
+    'Database',
+    'DatabaseError',
+    'DatabaseProxy',
+    'DataError',
+    'DateField',
+    'DateTimeField',
+    'DecimalField',
+    'DeferredForeignKey',
+    'DeferredThroughModel',
+    'DJANGO_MAP',
+    'DoesNotExist',
+    'DoubleField',
+    'DQ',
+    'EXCLUDED',
+    'Field',
+    'FixedCharField',
+    'FloatField',
+    'fn',
+    'ForeignKeyField',
+    'IdentityField',
+    'ImproperlyConfigured',
+    'Index',
+    'IntegerField',
+    'IntegrityError',
+    'InterfaceError',
+    'InternalError',
+    'IPField',
+    'JOIN',
+    'ManyToManyField',
+    'Model',
+    'ModelIndex',
+    'MySQLDatabase',
+    'NotSupportedError',
+    'OP',
+    'OperationalError',
+    'PostgresqlDatabase',
+    'PrimaryKeyField',  # XXX: Deprecated, change to AutoField.
+    'prefetch',
+    'ProgrammingError',
+    'Proxy',
+    'QualifiedNames',
+    'SchemaManager',
+    'SmallIntegerField',
+    'Select',
+    'SQL',
+    'SqliteDatabase',
+    'Table',
+    'TextField',
+    'TimeField',
+    'TimestampField',
+    'Tuple',
+    'UUIDField',
+    'Value',
+    'ValuesList',
+    'Window',
+]
+
+try:  # Python 2.7+
+    from logging import NullHandler
+except ImportError:
+    class NullHandler(logging.Handler):
+        def emit(self, record):
+            pass
+
+logger = logging.getLogger('peewee')
+logger.addHandler(NullHandler())
+
+
+if sys.version_info[0] == 2:
+    text_type = unicode
+    bytes_type = str
+    buffer_type = buffer
+    izip_longest = itertools.izip_longest
+    callable_ = callable
+    exec('def reraise(tp, value, tb=None): raise tp, value, tb')
+    def print_(s):
+        sys.stdout.write(s)
+        sys.stdout.write('\n')
+else:
+    import builtins
+    try:
+        from collections.abc import Callable
+    except ImportError:
+        from collections import Callable
+    from functools import reduce
+    callable_ = lambda c: isinstance(c, Callable)
+    text_type = str
+    bytes_type = bytes
+    buffer_type = memoryview
+    basestring = str
+    long = int
+    print_ = getattr(builtins, 'print')
+    izip_longest = itertools.zip_longest
+    def reraise(tp, value, tb=None):
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+
+if sqlite3:
+    sqlite3.register_adapter(decimal.Decimal, str)
+    sqlite3.register_adapter(datetime.date, str)
+    sqlite3.register_adapter(datetime.time, str)
+    __sqlite_version__ = sqlite3.sqlite_version_info
+else:
+    __sqlite_version__ = (0, 0, 0)
+
+
+__date_parts__ = set(('year', 'month', 'day', 'hour', 'minute', 'second'))
+
+# SQLite does not support the `date_part` SQL function, so we define an
+# implementation in Python (see _sqlite_date_part below).
+__sqlite_datetime_formats__ = (
+    '%Y-%m-%d %H:%M:%S',
+    '%Y-%m-%d %H:%M:%S.%f',
+    '%Y-%m-%d',
+    '%H:%M:%S',
+    '%H:%M:%S.%f',
+    '%H:%M')
+
+__sqlite_date_trunc__ = {
+    'year': '%Y',
+    'month': '%Y-%m',
+    'day': '%Y-%m-%d',
+    'hour': '%Y-%m-%d %H',
+    'minute': '%Y-%m-%d %H:%M',
+    'second': '%Y-%m-%d %H:%M:%S'}
+
+__mysql_date_trunc__ = __sqlite_date_trunc__.copy()
+__mysql_date_trunc__['minute'] = '%Y-%m-%d %H:%i'
+__mysql_date_trunc__['second'] = '%Y-%m-%d %H:%i:%S'
+
+def _sqlite_date_part(lookup_type, datetime_string):
+    assert lookup_type in __date_parts__
+    if not datetime_string:
+        return
+    dt = format_date_time(datetime_string, __sqlite_datetime_formats__)
+    return getattr(dt, lookup_type)
+
+def _sqlite_date_trunc(lookup_type, datetime_string):
+    assert lookup_type in __sqlite_date_trunc__
+    if not datetime_string:
+        return
+    dt = format_date_time(datetime_string, __sqlite_datetime_formats__)
+    return dt.strftime(__sqlite_date_trunc__[lookup_type])
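+
+# Illustrative: _sqlite_date_trunc('month', '2019-04-07 13:45:00') -> '2019-04'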
+
+
+def __deprecated__(s):
+    warnings.warn(s, DeprecationWarning)
+
+
+class attrdict(dict):
+    def __getattr__(self, attr):
+        try:
+            return self[attr]
+        except KeyError:
+            raise AttributeError(attr)
+    def __setattr__(self, attr, value): self[attr] = value
+    def __iadd__(self, rhs): self.update(rhs); return self
+    def __add__(self, rhs): d = attrdict(self); d.update(rhs); return d
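+
+# Illustrative: attrdict supports attribute-style access and merging, e.g.
+#   d = attrdict(a=1); d.b = 2; (d + {'c': 3}).c == 3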
+
+SENTINEL = object()
+
+#: Operations for use in SQL expressions.
+OP = attrdict(
+    AND='AND',
+    OR='OR',
+    ADD='+',
+    SUB='-',
+    MUL='*',
+    DIV='/',
+    BIN_AND='&',
+    BIN_OR='|',
+    XOR='#',
+    MOD='%',
+    EQ='=',
+    LT='<',
+    LTE='<=',
+    GT='>',
+    GTE='>=',
+    NE='!=',
+    IN='IN',
+    NOT_IN='NOT IN',
+    IS='IS',
+    IS_NOT='IS NOT',
+    LIKE='LIKE',
+    ILIKE='ILIKE',
+    BETWEEN='BETWEEN',
+    REGEXP='REGEXP',
+    IREGEXP='IREGEXP',
+    CONCAT='||',
+    BITWISE_NEGATION='~')
+
+# To support "django-style" double-underscore filters, create a mapping between
+# operation name and operation code, e.g. "__eq" == OP.EQ.
+DJANGO_MAP = attrdict({
+    'eq': operator.eq,
+    'lt': operator.lt,
+    'lte': operator.le,
+    'gt': operator.gt,
+    'gte': operator.ge,
+    'ne': operator.ne,
+    'in': operator.lshift,
+    'is': lambda l, r: Expression(l, OP.IS, r),
+    'like': lambda l, r: Expression(l, OP.LIKE, r),
+    'ilike': lambda l, r: Expression(l, OP.ILIKE, r),
+    'regexp': lambda l, r: Expression(l, OP.REGEXP, r),
+})
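+
+# Illustrative (Model.filter is defined later in this module): django-style
+# keyword lookups resolve through this map, e.g.
+#   Tweet.filter(message__ilike='%peewee%')
+# Note that "in" maps to the << operator, peewee's spelling of SQL IN.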
+
+#: Mapping of field type to the data-type supported by the database. Databases
+#: may override or add to this list.
+FIELD = attrdict(
+    AUTO='INTEGER',
+    BIGAUTO='BIGINT',
+    BIGINT='BIGINT',
+    BLOB='BLOB',
+    BOOL='SMALLINT',
+    CHAR='CHAR',
+    DATE='DATE',
+    DATETIME='DATETIME',
+    DECIMAL='DECIMAL',
+    DEFAULT='',
+    DOUBLE='REAL',
+    FLOAT='REAL',
+    INT='INTEGER',
+    SMALLINT='SMALLINT',
+    TEXT='TEXT',
+    TIME='TIME',
+    UUID='TEXT',
+    UUIDB='BLOB',
+    VARCHAR='VARCHAR')
+
+#: Join helpers (for convenience) -- all join types are supported; this object
+#: just helps avoid the errors that come from scattering raw strings around.
+JOIN = attrdict(
+    INNER='INNER',
+    LEFT_OUTER='LEFT OUTER',
+    RIGHT_OUTER='RIGHT OUTER',
+    FULL='FULL',
+    FULL_OUTER='FULL OUTER',
+    CROSS='CROSS',
+    NATURAL='NATURAL')
+
+# Row representations.
+ROW = attrdict(
+    TUPLE=1,
+    DICT=2,
+    NAMED_TUPLE=3,
+    CONSTRUCTOR=4,
+    MODEL=5)
+
+SCOPE_NORMAL = 1
+SCOPE_SOURCE = 2
+SCOPE_VALUES = 4
+SCOPE_CTE = 8
+SCOPE_COLUMN = 16
+
+# Rules for parentheses around subqueries in compound select.
+CSQ_PARENTHESES_NEVER = 0
+CSQ_PARENTHESES_ALWAYS = 1
+CSQ_PARENTHESES_UNNESTED = 2
+
+# Regular expressions used to convert class names to snake-case table names.
+# The first regex handles an acronym followed by a word, or an initial
+# lowercase word followed by a capitalized word, e.g. APIResponse ->
+# API_Response and fooBar -> foo_Bar. The second handles the normal case of
+# two title-cased words.
+SNAKE_CASE_STEP1 = re.compile('(.)_*([A-Z][a-z]+)')
+SNAKE_CASE_STEP2 = re.compile('([a-z0-9])_*([A-Z])')
+
+# Helper functions that are used in various parts of the codebase.
+MODEL_BASE = '_metaclass_helper_'
+
+def with_metaclass(meta, base=object):
+    return meta(MODEL_BASE, (base,), {})
+
+def merge_dict(source, overrides):
+    merged = source.copy()
+    if overrides:
+        merged.update(overrides)
+    return merged
+
+def quote(path, quote_chars):
+    if len(path) == 1:
+        return path[0].join(quote_chars)
+    return '.'.join([part.join(quote_chars) for part in path])
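+
+# e.g. quote(('main', 'users'), '""') -> '"main"."users"'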
+
+is_model = lambda o: isclass(o) and issubclass(o, Model)
+
+def ensure_tuple(value):
+    if value is not None:
+        return value if isinstance(value, (list, tuple)) else (value,)
+
+def ensure_entity(value):
+    if value is not None:
+        return value if isinstance(value, Node) else Entity(value)
+
+def make_snake_case(s):
+    first = SNAKE_CASE_STEP1.sub(r'\1_\2', s)
+    return SNAKE_CASE_STEP2.sub(r'\1_\2', first).lower()
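+
+# e.g. make_snake_case('APIResponse') -> 'api_response',
+#      make_snake_case('fooBar') -> 'foo_bar'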
+
+def chunked(it, n):
+    marker = object()
+    for group in (list(g) for g in izip_longest(*[iter(it)] * n,
+                                                fillvalue=marker)):
+        if group[-1] is marker:
+            del group[group.index(marker):]
+        yield group
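+
+# e.g. list(chunked(range(7), 3)) -> [[0, 1, 2], [3, 4, 5], [6]]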
+
+
+class _callable_context_manager(object):
+    def __call__(self, fn):
+        @wraps(fn)
+        def inner(*args, **kwargs):
+            with self:
+                return fn(*args, **kwargs)
+        return inner
+
+
+class Proxy(object):
+    """
+    Create a proxy or placeholder for another object.
+    """
+    __slots__ = ('obj', '_callbacks')
+
+    def __init__(self):
+        self._callbacks = []
+        self.initialize(None)
+
+    def initialize(self, obj):
+        self.obj = obj
+        for callback in self._callbacks:
+            callback(obj)
+
+    def attach_callback(self, callback):
+        self._callbacks.append(callback)
+        return callback
+
+    def passthrough(method):
+        def inner(self, *args, **kwargs):
+            if self.obj is None:
+                raise AttributeError('Cannot use uninitialized Proxy.')
+            return getattr(self.obj, method)(*args, **kwargs)
+        return inner
+
+    # Allow proxy to be used as a context-manager.
+    __enter__ = passthrough('__enter__')
+    __exit__ = passthrough('__exit__')
+
+    def __getattr__(self, attr):
+        if self.obj is None:
+            raise AttributeError('Cannot use uninitialized Proxy.')
+        return getattr(self.obj, attr)
+
+    def __setattr__(self, attr, value):
+        if attr not in self.__slots__:
+            raise AttributeError('Cannot set attribute on proxy.')
+        return super(Proxy, self).__setattr__(attr, value)
+
+
+class DatabaseProxy(Proxy):
+    """
+    Proxy implementation specifically for proxying `Database` objects.
+    """
+    def connection_context(self):
+        return ConnectionContext(self)
+    def atomic(self):
+        return _atomic(self)
+    def manual_commit(self):
+        return _manual(self)
+    def transaction(self):
+        return _transaction(self)
+    def savepoint(self):
+        return _savepoint(self)
+
+
+# SQL Generation.
+
+
+class AliasManager(object):
+    __slots__ = ('_counter', '_current_index', '_mapping')
+
+    def __init__(self):
+        # A list of dictionaries containing mappings at various depths.
+        self._counter = 0
+        self._current_index = 0
+        self._mapping = []
+        self.push()
+
+    @property
+    def mapping(self):
+        return self._mapping[self._current_index - 1]
+
+    def add(self, source):
+        if source not in self.mapping:
+            self._counter += 1
+            self[source] = 't%d' % self._counter
+        return self.mapping[source]
+
+    def get(self, source, any_depth=False):
+        if any_depth:
+            for idx in reversed(range(self._current_index)):
+                if source in self._mapping[idx]:
+                    return self._mapping[idx][source]
+        return self.add(source)
+
+    def __getitem__(self, source):
+        return self.get(source)
+
+    def __setitem__(self, source, alias):
+        self.mapping[source] = alias
+
+    def push(self):
+        self._current_index += 1
+        if self._current_index > len(self._mapping):
+            self._mapping.append({})
+
+    def pop(self):
+        if self._current_index == 1:
+            raise ValueError('Cannot pop() from empty alias manager.')
+        self._current_index -= 1
+
+
+class State(collections.namedtuple('_State', ('scope', 'parentheses',
+                                              'settings'))):
+    def __new__(cls, scope=SCOPE_NORMAL, parentheses=False, **kwargs):
+        return super(State, cls).__new__(cls, scope, parentheses, kwargs)
+
+    def __call__(self, scope=None, parentheses=None, **kwargs):
+        # Scope and settings are "inherited" (the parentheses flag is not).
+        scope = self.scope if scope is None else scope
+
+        # Try to avoid unnecessary dict copying.
+        if kwargs and self.settings:
+            settings = self.settings.copy()  # Copy original settings dict.
+            settings.update(kwargs)  # Update copy with overrides.
+        elif kwargs:
+            settings = kwargs
+        else:
+            settings = self.settings
+        return State(scope, parentheses, **settings)
+
+    def __getattr__(self, attr_name):
+        return self.settings.get(attr_name)
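+
+# Illustrative: calling an existing State produces a derived one -- scope and
+# settings carry over (with overrides applied), while parentheses must be
+# passed explicitly each time, e.g. state(scope=SCOPE_CTE, param='%s').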
+
+
+def __scope_context__(scope):
+    @contextmanager
+    def inner(self, **kwargs):
+        with self(scope=scope, **kwargs):
+            yield self
+    return inner
+
+
+class Context(object):
+    __slots__ = ('stack', '_sql', '_values', 'alias_manager', 'state')
+
+    def __init__(self, **settings):
+        self.stack = []
+        self._sql = []
+        self._values = []
+        self.alias_manager = AliasManager()
+        self.state = State(**settings)
+
+    def as_new(self):
+        return Context(**self.state.settings)
+
+    def column_sort_key(self, item):
+        return item[0].get_sort_key(self)
+
+    @property
+    def scope(self):
+        return self.state.scope
+
+    @property
+    def parentheses(self):
+        return self.state.parentheses
+
+    @property
+    def subquery(self):
+        return self.state.subquery
+
+    def __call__(self, **overrides):
+        if overrides and overrides.get('scope') == self.scope:
+            del overrides['scope']
+
+        self.stack.append(self.state)
+        self.state = self.state(**overrides)
+        return self
+
+    scope_normal = __scope_context__(SCOPE_NORMAL)
+    scope_source = __scope_context__(SCOPE_SOURCE)
+    scope_values = __scope_context__(SCOPE_VALUES)
+    scope_cte = __scope_context__(SCOPE_CTE)
+    scope_column = __scope_context__(SCOPE_COLUMN)
+
+    def __enter__(self):
+        if self.parentheses:
+            self.literal('(')
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.parentheses:
+            self.literal(')')
+        self.state = self.stack.pop()
+
+    @contextmanager
+    def push_alias(self):
+        self.alias_manager.push()
+        yield
+        self.alias_manager.pop()
+
+    def sql(self, obj):
+        if isinstance(obj, (Node, Context)):
+            return obj.__sql__(self)
+        elif is_model(obj):
+            return obj._meta.table.__sql__(self)
+        else:
+            return self.sql(Value(obj))
+
+    def literal(self, keyword):
+        self._sql.append(keyword)
+        return self
+
+    def value(self, value, converter=None, add_param=True):
+        if converter:
+            value = converter(value)
+            if isinstance(value, Node):
+                return self.sql(value)
+        elif converter is None and self.state.converter:
+            # Explicitly check for None so that "False" can be used to signify
+            # that no conversion should be applied.
+            value = self.state.converter(value)
+
+        if isinstance(value, Node):
+            with self(converter=None):
+                return self.sql(value)
+
+        self._values.append(value)
+        return self.literal(self.state.param or '?') if add_param else self
+
+    def __sql__(self, ctx):
+        ctx._sql.extend(self._sql)
+        ctx._values.extend(self._values)
+        return ctx
+
+    def parse(self, node):
+        return self.sql(node).query()
+
+    def query(self):
+        return ''.join(self._sql), self._values
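+
+# Illustrative rendering pass (some_node stands in for any Node instance):
+# walk the tree with sql(), then query() yields the final SQL string plus
+# its parameter list:
+#
+#   sql, params = Context().sql(some_node).query()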
+
+
+def query_to_string(query):
+    # NOTE: this function is not exported by default as it might be misused --
+    # and this misuse could lead to sql injection vulnerabilities. This
+    # function is intended for debugging or logging purposes ONLY.
+    db = getattr(query, '_database', None)
+    if db is not None:
+        ctx = db.get_sql_context()
+    else:
+        ctx = Context()
+
+    sql, params = ctx.sql(query).query()
+    if not params:
+        return sql
+
+    param = ctx.state.param or '?'
+    if param == '?':
+        sql = sql.replace('?', '%s')
+
+    return sql % tuple(map(_query_val_transform, params))
+
+def _query_val_transform(v):
+    # Interpolate parameters.
+    if isinstance(v, (text_type, datetime.datetime, datetime.date,
+                      datetime.time)):
+        v = "'%s'" % v
+    elif isinstance(v, bytes_type):
+        try:
+            v = v.decode('utf8')
+        except UnicodeDecodeError:
+            v = v.decode('raw_unicode_escape')
+        v = "'%s'" % v
+    elif isinstance(v, int):
+        v = '%s' % int(v)  # Also handles booleans -> 1 or 0.
+    elif v is None:
+        v = 'NULL'
+    else:
+        v = str(v)
+    return v
+
+
+# AST.
+
+
+class Node(object):
+    _coerce = True
+
+    def clone(self):
+        obj = self.__class__.__new__(self.__class__)
+        obj.__dict__ = self.__dict__.copy()
+        return obj
+
+    def __sql__(self, ctx):
+        raise NotImplementedError
+
+    @staticmethod
+    def copy(method):
+        def inner(self, *args, **kwargs):
+            clone = self.clone()
+            method(clone, *args, **kwargs)
+            return clone
+        return inner
+
+    def coerce(self, _coerce=True):
+        if _coerce != self._coerce:
+            clone = self.clone()
+            clone._coerce = _coerce
+            return clone
+        return self
+
+    def is_alias(self):
+        return False
+
+    def unwrap(self):
+        return self
+
+
+class ColumnFactory(object):
+    __slots__ = ('node',)
+
+    def __init__(self, node):
+        self.node = node
+
+    def __getattr__(self, attr):
+        return Column(self.node, attr)
+
+
+class _DynamicColumn(object):
+    __slots__ = ()
+
+    def __get__(self, instance, instance_type=None):
+        if instance is not None:
+            return ColumnFactory(instance)  # Implements __getattr__().
+        return self
+
+
+class _ExplicitColumn(object):
+    __slots__ = ()
+
+    def __get__(self, instance, instance_type=None):
+        if instance is not None:
+            raise AttributeError(
+                '%s specifies columns explicitly, and does not support '
+                'dynamic column lookups.' % instance)
+        return self
+
+
+class Source(Node):
+    c = _DynamicColumn()
+
+    def __init__(self, alias=None):
+        super(Source, self).__init__()
+        self._alias = alias
+
+    @Node.copy
+    def alias(self, name):
+        self._alias = name
+
+    def select(self, *columns):
+        if not columns:
+            columns = (SQL('*'),)
+        return Select((self,), columns)
+
+    def join(self, dest, join_type='INNER', on=None):
+        return Join(self, dest, join_type, on)
+
+    def left_outer_join(self, dest, on=None):
+        return Join(self, dest, JOIN.LEFT_OUTER, on)
+
+    def get_sort_key(self, ctx):
+        if self._alias:
+            return (self._alias,)
+        return (ctx.alias_manager[self],)
+
+    def apply_alias(self, ctx):
+        # If we are defining the source, include the "AS alias" declaration. An
+        # alias is created for the source if one is not already defined.
+        if ctx.scope == SCOPE_SOURCE:
+            if self._alias:
+                ctx.alias_manager[self] = self._alias
+            ctx.literal(' AS ').sql(Entity(ctx.alias_manager[self]))
+        return ctx
+
+    def apply_column(self, ctx):
+        if self._alias:
+            ctx.alias_manager[self] = self._alias
+        return ctx.sql(Entity(ctx.alias_manager[self]))
+
+
+class _HashableSource(object):
+    def __init__(self, *args, **kwargs):
+        super(_HashableSource, self).__init__(*args, **kwargs)
+        self._update_hash()
+
+    @Node.copy
+    def alias(self, name):
+        self._alias = name
+        self._update_hash()
+
+    def _update_hash(self):
+        self._hash = self._get_hash()
+
+    def _get_hash(self):
+        return hash((self.__class__, self._path, self._alias))
+
+    def __hash__(self):
+        return self._hash
+
+    def __eq__(self, other):
+        return self._hash == other._hash
+
+    def __ne__(self, other):
+        return not (self == other)
+
+
+def __bind_database__(meth):
+    @wraps(meth)
+    def inner(self, *args, **kwargs):
+        result = meth(self, *args, **kwargs)
+        if self._database:
+            return result.bind(self._database)
+        return result
+    return inner
+
+
+def __join__(join_type='INNER', inverted=False):
+    def method(self, other):
+        if inverted:
+            self, other = other, self
+        return Join(self, other, join_type=join_type)
+    return method
+
+
+class BaseTable(Source):
+    __and__ = __join__(JOIN.INNER)
+    __add__ = __join__(JOIN.LEFT_OUTER)
+    __sub__ = __join__(JOIN.RIGHT_OUTER)
+    __or__ = __join__(JOIN.FULL_OUTER)
+    __mul__ = __join__(JOIN.CROSS)
+    __rand__ = __join__(JOIN.INNER, inverted=True)
+    __radd__ = __join__(JOIN.LEFT_OUTER, inverted=True)
+    __rsub__ = __join__(JOIN.RIGHT_OUTER, inverted=True)
+    __ror__ = __join__(JOIN.FULL_OUTER, inverted=True)
+    __rmul__ = __join__(JOIN.CROSS, inverted=True)
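+
+# Illustrative: with Table instances users and tweets, (users & tweets) builds
+# an INNER join and (users + tweets) a LEFT OUTER join; the predicate is then
+# attached with .on(...), e.g.
+#   (users & tweets).on(tweets.c.user_id == users.c.id)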
+
+
+class _BoundTableContext(_callable_context_manager):
+    def __init__(self, table, database):
+        self.table = table
+        self.database = database
+
+    def __enter__(self):
+        self._orig_database = self.table._database
+        self.table.bind(self.database)
+        if self.table._model is not None:
+            self.table._model.bind(self.database)
+        return self.table
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.table.bind(self._orig_database)
+        if self.table._model is not None:
+            self.table._model.bind(self._orig_database)
+
+
+class Table(_HashableSource, BaseTable):
+    def __init__(self, name, columns=None, primary_key=None, schema=None,
+                 alias=None, _model=None, _database=None):
+        self.__name__ = name
+        self._columns = columns
+        self._primary_key = primary_key
+        self._schema = schema
+        self._path = (schema, name) if schema else (name,)
+        self._model = _model
+        self._database = _database
+        super(Table, self).__init__(alias=alias)
+
+        # Allow tables to restrict what columns are available.
+        if columns is not None:
+            self.c = _ExplicitColumn()
+            for column in columns:
+                setattr(self, column, Column(self, column))
+
+        if primary_key:
+            col_src = self if self._columns else self.c
+            self.primary_key = getattr(col_src, primary_key)
+        else:
+            self.primary_key = None
+
+    def clone(self):
+        # Ensure a deep copy of the column instances.
+        return Table(
+            self.__name__,
+            columns=self._columns,
+            primary_key=self._primary_key,
+            schema=self._schema,
+            alias=self._alias,
+            _model=self._model,
+            _database=self._database)
+
+    def bind(self, database=None):
+        self._database = database
+        return self
+
+    def bind_ctx(self, database=None):
+        return _BoundTableContext(self, database)
+
+    def _get_hash(self):
+        return hash((self.__class__, self._path, self._alias, self._model))
+
+    @__bind_database__
+    def select(self, *columns):
+        if not columns and self._columns:
+            columns = [Column(self, column) for column in self._columns]
+        return Select((self,), columns)
+
+    @__bind_database__
+    def insert(self, insert=None, columns=None, **kwargs):
+        if kwargs:
+            insert = {} if insert is None else insert
+            src = self if self._columns else self.c
+            for key, value in kwargs.items():
+                insert[getattr(src, key)] = value
+        return Insert(self, insert=insert, columns=columns)
+
+    @__bind_database__
+    def replace(self, insert=None, columns=None, **kwargs):
+        return (self
+                .insert(insert=insert, columns=columns)
+                .on_conflict('REPLACE'))
+
+    @__bind_database__
+    def update(self, update=None, **kwargs):
+        if kwargs:
+            update = {} if update is None else update
+            for key, value in kwargs.items():
+                src = self if self._columns else self.c
+                update[getattr(src, key)] = value
+        return Update(self, update=update)
+
+    @__bind_database__
+    def delete(self):
+        return Delete(self)
+
+    def __sql__(self, ctx):
+        if ctx.scope == SCOPE_VALUES:
+            # Return the quoted table name.
+            return ctx.sql(Entity(*self._path))
+
+        if self._alias:
+            ctx.alias_manager[self] = self._alias
+
+        if ctx.scope == SCOPE_SOURCE:
+            # Define the table and its alias.
+            return self.apply_alias(ctx.sql(Entity(*self._path)))
+        else:
+            # Refer to the table using the alias.
+            return self.apply_column(ctx)
+
+
+class Join(BaseTable):
+    def __init__(self, lhs, rhs, join_type=JOIN.INNER, on=None, alias=None):
+        super(Join, self).__init__(alias=alias)
+        self.lhs = lhs
+        self.rhs = rhs
+        self.join_type = join_type
+        self._on = on
+
+    def on(self, predicate):
+        self._on = predicate
+        return self
+
+    def __sql__(self, ctx):
+        (ctx
+         .sql(self.lhs)
+         .literal(' %s JOIN ' % self.join_type)
+         .sql(self.rhs))
+        if self._on is not None:
+            ctx.literal(' ON ').sql(self._on)
+        return ctx
+
+
+class ValuesList(_HashableSource, BaseTable):
+    def __init__(self, values, columns=None, alias=None):
+        self._values = values
+        self._columns = columns
+        super(ValuesList, self).__init__(alias=alias)
+
+    def _get_hash(self):
+        return hash((self.__class__, id(self._values), self._alias))
+
+    @Node.copy
+    def columns(self, *names):
+        self._columns = names
+
+    def __sql__(self, ctx):
+        if self._alias:
+            ctx.alias_manager[self] = self._alias
+
+        if ctx.scope == SCOPE_SOURCE:
+            ctx = (ctx
+                   .literal('(VALUES ')
+                   .sql(CommaNodeList([
+                       EnclosedNodeList(row) for row in self._values]))
+                   .literal(') AS ')
+                   .sql(Entity(ctx.alias_manager[self])))
+            if self._columns:
+                ctx.sql(EnclosedNodeList([Entity(c) for c in self._columns]))
+        else:
+            ctx.sql(Entity(ctx.alias_manager[self]))
+        return ctx
+
+
+class CTE(_HashableSource, Source):
+    def __init__(self, name, query, recursive=False, columns=None):
+        self._alias = name
+        self._query = query
+        self._recursive = recursive
+        if columns is not None:
+            columns = [Entity(c) if isinstance(c, basestring) else c
+                       for c in columns]
+        self._columns = columns
+        query._cte_list = ()
+        super(CTE, self).__init__(alias=name)
+
+    def select_from(self, *columns):
+        if not columns:
+            raise ValueError('select_from() must specify one or more columns '
+                             'from the CTE to select.')
+
+        query = (Select((self,), columns)
+                 .with_cte(self)
+                 .bind(self._query._database))
+        try:
+            query = query.objects(self._query.model)
+        except AttributeError:
+            pass
+        return query
+
+    def _get_hash(self):
+        return hash((self.__class__, self._alias, id(self._query)))
+
+    def union_all(self, rhs):
+        clone = self._query.clone()
+        return CTE(self._alias, clone + rhs, self._recursive, self._columns)
+    __add__ = union_all
+
+    def __sql__(self, ctx):
+        if ctx.scope != SCOPE_CTE:
+            return ctx.sql(Entity(self._alias))
+
+        with ctx.push_alias():
+            ctx.alias_manager[self] = self._alias
+            ctx.sql(Entity(self._alias))
+
+            if self._columns:
+                ctx.literal(' ').sql(EnclosedNodeList(self._columns))
+            ctx.literal(' AS ')
+            with ctx.scope_normal(parentheses=True):
+                ctx.sql(self._query)
+        return ctx
+
+
+class ColumnBase(Node):
+    def alias(self, alias):
+        if alias:
+            return Alias(self, alias)
+        return self
+
+    def unalias(self):
+        return self
+
+    def cast(self, as_type):
+        return Cast(self, as_type)
+
+    def asc(self, collation=None, nulls=None):
+        return Asc(self, collation=collation, nulls=nulls)
+    __pos__ = asc
+
+    def desc(self, collation=None, nulls=None):
+        return Desc(self, collation=collation, nulls=nulls)
+    __neg__ = desc
+
+    def __invert__(self):
+        return Negated(self)
+
+    def _e(op, inv=False):
+        """
+        Lightweight factory which returns a method that builds an Expression
+        consisting of the left-hand and right-hand operands, using `op`.
+        """
+        def inner(self, rhs):
+            if inv:
+                return Expression(rhs, op, self)
+            return Expression(self, op, rhs)
+        return inner
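+    # The generated methods build Expression nodes; e.g. (illustrative),
+    # with t = Table('t'): t.c.price * 1.1 -> Expression(price, '*', 1.1),
+    # and 2 + t.c.price uses the inverted form -> Expression(2, '+', price).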
+    __and__ = _e(OP.AND)
+    __or__ = _e(OP.OR)
+
+    __add__ = _e(OP.ADD)
+    __sub__ = _e(OP.SUB)
+    __mul__ = _e(OP.MUL)
+    __div__ = __truediv__ = _e(OP.DIV)
+    __xor__ = _e(OP.XOR)
+    __radd__ = _e(OP.ADD, inv=True)
+    __rsub__ = _e(OP.SUB, inv=True)
+    __rmul__ = _e(OP.MUL, inv=True)
+    __rdiv__ = __rtruediv__ = _e(OP.DIV, inv=True)
+    __rand__ = _e(OP.AND, inv=True)
+    __ror__ = _e(OP.OR, inv=True)
+    __rxor__ = _e(OP.XOR, inv=True)
+
+    def __eq__(self, rhs):
+        op = OP.IS if rhs is None else OP.EQ
+        return Expression(self, op, rhs)
+    def __ne__(self, rhs):
+        op = OP.IS_NOT if rhs is None else OP.NE
+        return Expression(self, op, rhs)
+
+    __lt__ = _e(OP.LT)
+    __le__ = _e(OP.LTE)
+    __gt__ = _e(OP.GT)
+    __ge__ = _e(OP.GTE)
+    __lshift__ = _e(OP.IN)
+    __rshift__ = _e(OP.IS)
+    __mod__ = _e(OP.LIKE)
+    __pow__ = _e(OP.ILIKE)
+
+    bin_and = _e(OP.BIN_AND)
+    bin_or = _e(OP.BIN_OR)
+    in_ = _e(OP.IN)
+    not_in = _e(OP.NOT_IN)
+    regexp = _e(OP.REGEXP)
+
+    # Special expressions.
+    def is_null(self, is_null=True):
+        op = OP.IS if is_null else OP.IS_NOT
+        return Expression(self, op, None)
+    def contains(self, rhs):
+        return Expression(self, OP.ILIKE, '%%%s%%' % rhs)
+    def startswith(self, rhs):
+        return Expression(self, OP.ILIKE, '%s%%' % rhs)
+    def endswith(self, rhs):
+        return Expression(self, OP.ILIKE, '%%%s' % rhs)
+    def between(self, lo, hi):
+        return Expression(self, OP.BETWEEN, NodeList((lo, SQL('AND'), hi)))
+    def concat(self, rhs):
+        return StringExpression(self, OP.CONCAT, rhs)
+    def regexp(self, rhs):
+        return Expression(self, OP.REGEXP, rhs)
+    def iregexp(self, rhs):
+        return Expression(self, OP.IREGEXP, rhs)
+    def __getitem__(self, item):
+        if isinstance(item, slice):
+            if item.start is None or item.stop is None:
+                raise ValueError('BETWEEN range must have both a start- and '
+                                 'end-point.')
+            return self.between(item.start, item.stop)
+        return self == item
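+    # e.g. (illustrative): tbl.c.id[10:20] is equivalent to
+    # tbl.c.id.between(10, 20), and tbl.c.id[10] to tbl.c.id == 10.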
+
+    def distinct(self):
+        return NodeList((SQL('DISTINCT'), self))
+
+    def collate(self, collation):
+        return NodeList((self, SQL('COLLATE %s' % collation)))
+
+    def get_sort_key(self, ctx):
+        return ()
+
+
+class Column(ColumnBase):
+    def __init__(self, source, name):
+        self.source = source
+        self.name = name
+
+    def get_sort_key(self, ctx):
+        if ctx.scope == SCOPE_VALUES:
+            return (self.name,)
+        else:
+            return self.source.get_sort_key(ctx) + (self.name,)
+
+    def __hash__(self):
+        return hash((self.source, self.name))
+
+    def __sql__(self, ctx):
+        if ctx.scope == SCOPE_VALUES:
+            return ctx.sql(Entity(self.name))
+        else:
+            with ctx.scope_column():
+                return ctx.sql(self.source).literal('.').sql(Entity(self.name))
+
+
+class WrappedNode(ColumnBase):
+    def __init__(self, node):
+        self.node = node
+        self._coerce = getattr(node, '_coerce', True)
+
+    def is_alias(self):
+        return self.node.is_alias()
+
+    def unwrap(self):
+        return self.node.unwrap()
+
+
+class EntityFactory(object):
+    __slots__ = ('node',)
+    def __init__(self, node):
+        self.node = node
+    def __getattr__(self, attr):
+        return Entity(self.node, attr)
+
+
+class _DynamicEntity(object):
+    __slots__ = ()
+    def __get__(self, instance, instance_type=None):
+        if instance is not None:
+            return EntityFactory(instance._alias)  # Implements __getattr__().
+        return self
+
+
+class Alias(WrappedNode):
+    c = _DynamicEntity()
+
+    def __init__(self, node, alias):
+        super(Alias, self).__init__(node)
+        self._alias = alias
+
+    def alias(self, alias=None):
+        if alias is None:
+            return self.node
+        else:
+            return Alias(self.node, alias)
+
+    def unalias(self):
+        return self.node
+
+    def is_alias(self):
+        return True
+
+    def __sql__(self, ctx):
+        if ctx.scope == SCOPE_SOURCE:
+            return (ctx
+                    .sql(self.node)
+                    .literal(' AS ')
+                    .sql(Entity(self._alias)))
+        else:
+            return ctx.sql(Entity(self._alias))
+
+
+class Negated(WrappedNode):
+    def __invert__(self):
+        return self.node
+
+    def __sql__(self, ctx):
+        return ctx.literal('NOT ').sql(self.node)
+
+
+class BitwiseMixin(object):
+    def __and__(self, other):
+        return self.bin_and(other)
+
+    def __or__(self, other):
+        return self.bin_or(other)
+
+    def __sub__(self, other):
+        return self.bin_and(other.bin_negated())
+
+    def __invert__(self):
+        return BitwiseNegated(self)
+
+
+class BitwiseNegated(BitwiseMixin, WrappedNode):
+    # Default SQL operator for bitwise negation; __sql__ consults
+    # ctx.state.operations for a database-specific override. Without this
+    # attribute, the self.op lookups below would raise AttributeError.
+    op = '~'
+    def __invert__(self):
+        return self.node
+
+    def __sql__(self, ctx):
+        if ctx.state.operations:
+            op_sql = ctx.state.operations.get(self.op, self.op)
+        else:
+            op_sql = self.op
+        return ctx.literal(op_sql).sql(self.node)
+
+
+class Value(ColumnBase):
+    _multi_types = (list, tuple, frozenset, set)
+
+    def __init__(self, value, converter=None, unpack=True):
+        self.value = value
+        self.converter = converter
+        self.multi = isinstance(self.value, self._multi_types) and unpack
+        if self.multi:
+            self.values = []
+            for item in self.value:
+                if isinstance(item, Node):
+                    self.values.append(item)
+                else:
+                    self.values.append(Value(item, self.converter))
+
+    def __sql__(self, ctx):
+        if self.multi:
+            # For multi-part values (e.g. lists of IDs).
+            return ctx.sql(EnclosedNodeList(self.values))
+
+        return ctx.value(self.value, self.converter)
+
+
+def AsIs(value):
+    return Value(value, unpack=False)
+
+
+class Cast(WrappedNode):
+    def __init__(self, node, cast):
+        super(Cast, self).__init__(node)
+        self._cast = cast
+        self._coerce = False
+
+    def __sql__(self, ctx):
+        return (ctx
+                .literal('CAST(')
+                .sql(self.node)
+                .literal(' AS %s)' % self._cast))
+
+
+class Ordering(WrappedNode):
+    def __init__(self, node, direction, collation=None, nulls=None):
+        super(Ordering, self).__init__(node)
+        self.direction = direction
+        self.collation = collation
+        self.nulls = nulls
+        if nulls and nulls.lower() not in ('first', 'last'):
+            raise ValueError('Ordering nulls= parameter must be "first" or '
+                             '"last", got: %s' % nulls)
+
+    def collate(self, collation=None):
+        return Ordering(self.node, self.direction, collation)
+
+    def _null_ordering_case(self, nulls):
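+        # Emulate NULLS FIRST/LAST on engines lacking native support by
+        # sorting on a CASE expression first; e.g. nulls='last' produces
+        # (illustrative): CASE WHEN (x IS NULL) THEN 1 ELSE 0 END.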
+        if nulls.lower() == 'last':
+            ifnull, notnull = 1, 0
+        elif nulls.lower() == 'first':
+            ifnull, notnull = 0, 1
+        else:
+            raise ValueError('unsupported value for nulls= ordering.')
+        return Case(None, ((self.node.is_null(), ifnull),), notnull)
+
+    def __sql__(self, ctx):
+        if self.nulls and not ctx.state.nulls_ordering:
+            ctx.sql(self._null_ordering_case(self.nulls)).literal(', ')
+
+        ctx.sql(self.node).literal(' %s' % self.direction)
+        if self.collation:
+            ctx.literal(' COLLATE %s' % self.collation)
+        if self.nulls and ctx.state.nulls_ordering:
+            ctx.literal(' NULLS %s' % self.nulls)
+        return ctx
+
+
+def Asc(node, collation=None, nulls=None):
+    return Ordering(node, 'ASC', collation, nulls)
+
+
+def Desc(node, collation=None, nulls=None):
+    return Ordering(node, 'DESC', collation, nulls)
+
+
+class Expression(ColumnBase):
+    def __init__(self, lhs, op, rhs, flat=False):
+        self.lhs = lhs
+        self.op = op
+        self.rhs = rhs
+        self.flat = flat
+
+    def __sql__(self, ctx):
+        overrides = {'parentheses': not self.flat, 'in_expr': True}
+        if isinstance(self.lhs, Field):
+            overrides['converter'] = self.lhs.db_value
+        else:
+            overrides['converter'] = None
+
+        if ctx.state.operations:
+            op_sql = ctx.state.operations.get(self.op, self.op)
+        else:
+            op_sql = self.op
+
+        with ctx(**overrides):
+            # Postgresql reports an error for IN/NOT IN (), so convert to
+            # the equivalent boolean expression.
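+            # e.g. (illustrative): x.in_([]) renders as '0 = 1' and
+            # x.not_in([]) as '1 = 1'.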
+            op_in = self.op == OP.IN or self.op == OP.NOT_IN
+            if op_in and ctx.as_new().parse(self.rhs)[0] == '()':
+                return ctx.literal('0 = 1' if self.op == OP.IN else '1 = 1')
+
+            return (ctx
+                    .sql(self.lhs)
+                    .literal(' %s ' % op_sql)
+                    .sql(self.rhs))
+
+
+class StringExpression(Expression):
+    def __add__(self, rhs):
+        return self.concat(rhs)
+    def __radd__(self, lhs):
+        return StringExpression(lhs, OP.CONCAT, self)
+
+
+class Entity(ColumnBase):
+    def __init__(self, *path):
+        self._path = [part.replace('"', '""') for part in path if part]
+
+    def __getattr__(self, attr):
+        return Entity(*self._path + [attr])
+
+    def get_sort_key(self, ctx):
+        return tuple(self._path)
+
+    def __hash__(self):
+        return hash((self.__class__.__name__, tuple(self._path)))
+
+    def __sql__(self, ctx):
+        return ctx.literal(quote(self._path, ctx.state.quote or '""'))
+
+
+class SQL(ColumnBase):
+    def __init__(self, sql, params=None):
+        self.sql = sql
+        self.params = params
+
+    def __sql__(self, ctx):
+        ctx.literal(self.sql)
+        if self.params:
+            for param in self.params:
+                ctx.value(param, False, add_param=False)
+        return ctx
+
+
+def Check(constraint):
+    return SQL('CHECK (%s)' % constraint)
+
+
+class Function(ColumnBase):
+    def __init__(self, name, arguments, coerce=True, python_value=None):
+        self.name = name
+        self.arguments = arguments
+        self._filter = None
+        self._python_value = python_value
+        if name and name.lower() in ('sum', 'count', 'cast'):
+            self._coerce = False
+        else:
+            self._coerce = coerce
+
+    def __getattr__(self, attr):
+        def decorator(*args, **kwargs):
+            return Function(attr, args, **kwargs)
+        return decorator
+
+    @Node.copy
+    def filter(self, where=None):
+        self._filter = where
+
+    @Node.copy
+    def python_value(self, func=None):
+        self._python_value = func
+
+    def over(self, partition_by=None, order_by=None, start=None, end=None,
+             frame_type=None, window=None, exclude=None):
+        if isinstance(partition_by, Window) and window is None:
+            window = partition_by
+
+        if window is not None:
+            node = WindowAlias(window)
+        else:
+            node = Window(partition_by=partition_by, order_by=order_by,
+                          start=start, end=end, frame_type=frame_type,
+                          exclude=exclude, _inline=True)
+        return NodeList((self, SQL('OVER'), node))
+
+    def __sql__(self, ctx):
+        ctx.literal(self.name)
+        if not len(self.arguments):
+            ctx.literal('()')
+        else:
+            with ctx(in_function=True, function_arg_count=len(self.arguments)):
+                ctx.sql(EnclosedNodeList([
+                    (argument if isinstance(argument, Node)
+                     else Value(argument, False))
+                    for argument in self.arguments]))
+
+        if self._filter:
+            ctx.literal(' FILTER (WHERE ').sql(self._filter).literal(')')
+        return ctx
+
+
+fn = Function(None, None)
+
+
+class Window(Node):
+    # Frame start/end and frame exclusion.
+    CURRENT_ROW = SQL('CURRENT ROW')
+    GROUP = SQL('GROUP')
+    TIES = SQL('TIES')
+    NO_OTHERS = SQL('NO OTHERS')
+
+    # Frame types.
+    GROUPS = 'GROUPS'
+    RANGE = 'RANGE'
+    ROWS = 'ROWS'
+
+    def __init__(self, partition_by=None, order_by=None, start=None, end=None,
+                 frame_type=None, extends=None, exclude=None, alias=None,
+                 _inline=False):
+        super(Window, self).__init__()
+        if start is not None and not isinstance(start, SQL):
+            start = SQL(start)
+        if end is not None and not isinstance(end, SQL):
+            end = SQL(end)
+
+        self.partition_by = ensure_tuple(partition_by)
+        self.order_by = ensure_tuple(order_by)
+        self.start = start
+        self.end = end
+        if self.start is None and self.end is not None:
+            raise ValueError('Cannot specify WINDOW end without start.')
+        self._alias = alias or 'w'
+        self._inline = _inline
+        self.frame_type = frame_type
+        self._extends = extends
+        self._exclude = exclude
+
+    def alias(self, alias=None):
+        self._alias = alias or 'w'
+        return self
+
+    @Node.copy
+    def as_range(self):
+        self.frame_type = Window.RANGE
+
+    @Node.copy
+    def as_rows(self):
+        self.frame_type = Window.ROWS
+
+    @Node.copy
+    def as_groups(self):
+        self.frame_type = Window.GROUPS
+
+    @Node.copy
+    def extends(self, window=None):
+        self._extends = window
+
+    @Node.copy
+    def exclude(self, frame_exclusion=None):
+        if isinstance(frame_exclusion, basestring):
+            frame_exclusion = SQL(frame_exclusion)
+        self._exclude = frame_exclusion
+
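+    # Frame-boundary helpers; e.g. (illustrative):
+    #   fn.SUM(t.c.amount).over(order_by=[t.c.id],
+    #                           start=Window.preceding(2),
+    #                           end=Window.CURRENT_ROW)
+    # renders as: SUM(...) OVER (ORDER BY ... ROWS BETWEEN 2 PRECEDING
+    # AND CURRENT ROW)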
+    @staticmethod
+    def following(value=None):
+        if value is None:
+            return SQL('UNBOUNDED FOLLOWING')
+        return SQL('%d FOLLOWING' % value)
+
+    @staticmethod
+    def preceding(value=None):
+        if value is None:
+            return SQL('UNBOUNDED PRECEDING')
+        return SQL('%d PRECEDING' % value)
+
+    def __sql__(self, ctx):
+        if ctx.scope != SCOPE_SOURCE and not self._inline:
+            ctx.literal(self._alias)
+            ctx.literal(' AS ')
+
+        with ctx(parentheses=True):
+            parts = []
+            if self._extends is not None:
+                ext = self._extends
+                if isinstance(ext, Window):
+                    ext = SQL(ext._alias)
+                elif isinstance(ext, basestring):
+                    ext = SQL(ext)
+                parts.append(ext)
+            if self.partition_by:
+                parts.extend((
+                    SQL('PARTITION BY'),
+                    CommaNodeList(self.partition_by)))
+            if self.order_by:
+                parts.extend((
+                    SQL('ORDER BY'),
+                    CommaNodeList(self.order_by)))
+            if self.start is not None and self.end is not None:
+                frame = self.frame_type or 'ROWS'
+                parts.extend((
+                    SQL('%s BETWEEN' % frame),
+                    self.start,
+                    SQL('AND'),
+                    self.end))
+            elif self.start is not None:
+                parts.extend((SQL(self.frame_type or 'ROWS'), self.start))
+            elif self.frame_type is not None:
+                parts.append(SQL('%s UNBOUNDED PRECEDING' % self.frame_type))
+            if self._exclude is not None:
+                parts.extend((SQL('EXCLUDE'), self._exclude))
+            ctx.sql(NodeList(parts))
+        return ctx
+
+
+class WindowAlias(Node):
+    def __init__(self, window):
+        self.window = window
+
+    def alias(self, window_alias):
+        self.window._alias = window_alias
+        return self
+
+    def __sql__(self, ctx):
+        return ctx.literal(self.window._alias or 'w')
+
+
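+# Build a CASE expression as a NodeList; e.g. (illustrative):
+#   Case(None, ((tbl.c.active == 1, 'yes'),), 'no')
+#   -> CASE WHEN ("active" = ?) THEN ? ELSE ? END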
+def Case(predicate, expression_tuples, default=None):
+    clauses = [SQL('CASE')]
+    if predicate is not None:
+        clauses.append(predicate)
+    for expr, value in expression_tuples:
+        clauses.extend((SQL('WHEN'), expr, SQL('THEN'), value))
+    if default is not None:
+        clauses.extend((SQL('ELSE'), default))
+    clauses.append(SQL('END'))
+    return NodeList(clauses)
+
+
+class NodeList(ColumnBase):
+    def __init__(self, nodes, glue=' ', parens=False):
+        self.nodes = nodes
+        self.glue = glue
+        self.parens = parens
+        if parens and len(self.nodes) == 1:
+            if isinstance(self.nodes[0], Expression):
+                # Hack to avoid double-parentheses.
+                self.nodes[0].flat = True
+
+    def __sql__(self, ctx):
+        n_nodes = len(self.nodes)
+        if n_nodes == 0:
+            return ctx.literal('()') if self.parens else ctx
+        with ctx(parentheses=self.parens):
+            for i in range(n_nodes - 1):
+                ctx.sql(self.nodes[i])
+                ctx.literal(self.glue)
+            ctx.sql(self.nodes[n_nodes - 1])
+        return ctx
+
+
+def CommaNodeList(nodes):
+    return NodeList(nodes, ', ')
+
+
+def EnclosedNodeList(nodes):
+    return NodeList(nodes, ', ', True)
+
+
+class _Namespace(Node):
+    __slots__ = ('_name',)
+    def __init__(self, name):
+        self._name = name
+    def __getattr__(self, attr):
+        return NamespaceAttribute(self, attr)
+    __getitem__ = __getattr__
+
+class NamespaceAttribute(ColumnBase):
+    def __init__(self, namespace, attribute):
+        self._namespace = namespace
+        self._attribute = attribute
+
+    def __sql__(self, ctx):
+        return (ctx
+                .literal(self._namespace._name + '.')
+                .sql(Entity(self._attribute)))
+
+EXCLUDED = _Namespace('EXCLUDED')
+
+
+class DQ(ColumnBase):
+    def __init__(self, **query):
+        super(DQ, self).__init__()
+        self.query = query
+        self._negated = False
+
+    @Node.copy
+    def __invert__(self):
+        self._negated = not self._negated
+
+    def clone(self):
+        node = DQ(**self.query)
+        node._negated = self._negated
+        return node
+
+#: Represent a row tuple.
+Tuple = lambda *a: EnclosedNodeList(a)
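+# e.g. (illustrative): Tuple(1, 2) renders as (?, ?) with params [1, 2],
+# which is useful for row-value comparisons.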
+
+
+class QualifiedNames(WrappedNode):
+    def __sql__(self, ctx):
+        with ctx.scope_column():
+            return ctx.sql(self.node)
+
+
+def qualify_names(node):
+    # Search a node hierarchy to ensure that any column-like objects are
+    # referenced using fully-qualified names.
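+    # e.g. (illustrative): an upsert that sets count to (kv.count + 1) will
+    # render the right-hand side as ("kv"."count" + 1) rather than the
+    # ambiguous ("count" + 1).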
+    if isinstance(node, Expression):
+        return node.__class__(qualify_names(node.lhs), node.op,
+                              qualify_names(node.rhs), node.flat)
+    elif isinstance(node, ColumnBase):
+        return QualifiedNames(node)
+    return node
+
+
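+# Describes conflict-resolution behavior for INSERT; the rendered SQL is
+# database-specific. e.g. (illustrative):
+#   Insert(tbl, data).on_conflict(conflict_target=(tbl.c.key,),
+#                                 update={tbl.c.value: 'new'})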
+class OnConflict(Node):
+    def __init__(self, action=None, update=None, preserve=None, where=None,
+                 conflict_target=None, conflict_where=None,
+                 conflict_constraint=None):
+        self._action = action
+        self._update = update
+        self._preserve = ensure_tuple(preserve)
+        self._where = where
+        if conflict_target is not None and conflict_constraint is not None:
+            raise ValueError('only one of "conflict_target" and '
+                             '"conflict_constraint" may be specified.')
+        self._conflict_target = ensure_tuple(conflict_target)
+        self._conflict_where = conflict_where
+        self._conflict_constraint = conflict_constraint
+
+    def get_conflict_statement(self, ctx, query):
+        return ctx.state.conflict_statement(self, query)
+
+    def get_conflict_update(self, ctx, query):
+        return ctx.state.conflict_update(self, query)
+
+    @Node.copy
+    def preserve(self, *columns):
+        self._preserve = columns
+
+    @Node.copy
+    def update(self, _data=None, **kwargs):
+        if _data and kwargs and not isinstance(_data, dict):
+            raise ValueError('Cannot mix data with keyword arguments in the '
+                             'OnConflict update method.')
+        _data = _data or {}
+        if kwargs:
+            _data.update(kwargs)
+        self._update = _data
+
+    @Node.copy
+    def where(self, *expressions):
+        if self._where is not None:
+            expressions = (self._where,) + expressions
+        self._where = reduce(operator.and_, expressions)
+
+    @Node.copy
+    def conflict_target(self, *constraints):
+        self._conflict_constraint = None
+        self._conflict_target = constraints
+
+    @Node.copy
+    def conflict_where(self, *expressions):
+        if self._conflict_where is not None:
+            expressions = (self._conflict_where,) + expressions
+        self._conflict_where = reduce(operator.and_, expressions)
+
+    @Node.copy
+    def conflict_constraint(self, constraint):
+        self._conflict_constraint = constraint
+        self._conflict_target = None
+
+
+def database_required(method):
+    @wraps(method)
+    def inner(self, database=None, *args, **kwargs):
+        database = self._database if database is None else database
+        if not database:
+            raise InterfaceError('Query must be bound to a database in order '
+                                 'to call "%s".' % method.__name__)
+        return method(self, database, *args, **kwargs)
+    return inner
+
+# BASE QUERY INTERFACE.
+
+class BaseQuery(Node):
+    default_row_type = ROW.DICT
+
+    def __init__(self, _database=None, **kwargs):
+        self._database = _database
+        self._cursor_wrapper = None
+        self._row_type = None
+        self._constructor = None
+        super(BaseQuery, self).__init__(**kwargs)
+
+    def bind(self, database=None):
+        self._database = database
+        return self
+
+    def clone(self):
+        query = super(BaseQuery, self).clone()
+        query._cursor_wrapper = None
+        return query
+
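+    # Row-representation selectors (illustrative): dicts() -> dict rows,
+    # tuples() -> tuple rows, namedtuples() -> namedtuple rows, and
+    # objects(ctor) -> rows passed through the given constructor.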
+    @Node.copy
+    def dicts(self, as_dict=True):
+        self._row_type = ROW.DICT if as_dict else None
+        return self
+
+    @Node.copy
+    def tuples(self, as_tuple=True):
+        self._row_type = ROW.TUPLE if as_tuple else None
+        return self
+
+    @Node.copy
+    def namedtuples(self, as_namedtuple=True):
+        self._row_type = ROW.NAMED_TUPLE if as_namedtuple else None
+        return self
+
+    @Node.copy
+    def objects(self, constructor=None):
+        self._row_type = ROW.CONSTRUCTOR if constructor else None
+        self._constructor = constructor
+        return self
+
+    def _get_cursor_wrapper(self, cursor):
+        row_type = self._row_type or self.default_row_type
+
+        if row_type == ROW.DICT:
+            return DictCursorWrapper(cursor)
+        elif row_type == ROW.TUPLE:
+            return CursorWrapper(cursor)
+        elif row_type == ROW.NAMED_TUPLE:
+            return NamedTupleCursorWrapper(cursor)
+        elif row_type == ROW.CONSTRUCTOR:
+            return ObjectCursorWrapper(cursor, self._constructor)
+        else:
+            raise ValueError('Unrecognized row type: "%s".' % row_type)
+
+    def __sql__(self, ctx):
+        raise NotImplementedError
+
+    def sql(self):
+        if self._database:
+            context = self._database.get_sql_context()
+        else:
+            context = Context()
+        return context.parse(self)
+
+    @database_required
+    def execute(self, database):
+        return self._execute(database)
+
+    def _execute(self, database):
+        raise NotImplementedError
+
+    def iterator(self, database=None):
+        return iter(self.execute(database).iterator())
+
+    def _ensure_execution(self):
+        if not self._cursor_wrapper:
+            if not self._database:
+                raise ValueError('Query has not been executed.')
+            self.execute()
+
+    def __iter__(self):
+        self._ensure_execution()
+        return iter(self._cursor_wrapper)
+
+    def __getitem__(self, value):
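+        # Index or slice the cached result rows, filling the row cache as
+        # needed; e.g. query[0] is the first row, query[:3] the first three.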
+        self._ensure_execution()
+        if isinstance(value, slice):
+            index = value.stop
+        else:
+            index = value
+        if index is not None:
+            index = index + 1 if index >= 0 else 0
+        self._cursor_wrapper.fill_cache(index)
+        return self._cursor_wrapper.row_cache[value]
+
+    def __len__(self):
+        self._ensure_execution()
+        return len(self._cursor_wrapper)
+
+    def __str__(self):
+        return query_to_string(self)
+
+
+class RawQuery(BaseQuery):
+    def __init__(self, sql=None, params=None, **kwargs):
+        super(RawQuery, self).__init__(**kwargs)
+        self._sql = sql
+        self._params = params
+
+    def __sql__(self, ctx):
+        ctx.literal(self._sql)
+        if self._params:
+            for param in self._params:
+                ctx.value(param, add_param=False)
+        return ctx
+
+    def _execute(self, database):
+        if self._cursor_wrapper is None:
+            cursor = database.execute(self)
+            self._cursor_wrapper = self._get_cursor_wrapper(cursor)
+        return self._cursor_wrapper
+
+
+class Query(BaseQuery):
+    def __init__(self, where=None, order_by=None, limit=None, offset=None,
+                 **kwargs):
+        super(Query, self).__init__(**kwargs)
+        self._where = where
+        self._order_by = order_by
+        self._limit = limit
+        self._offset = offset
+
+        self._cte_list = None
+
+    @Node.copy
+    def with_cte(self, *cte_list):
+        self._cte_list = cte_list
+
+    @Node.copy
+    def where(self, *expressions):
+        if self._where is not None:
+            expressions = (self._where,) + expressions
+        self._where = reduce(operator.and_, expressions)
+
+    @Node.copy
+    def orwhere(self, *expressions):
+        if self._where is not None:
+            expressions = (self._where,) + expressions
+        self._where = reduce(operator.or_, expressions)
+
+    @Node.copy
+    def order_by(self, *values):
+        self._order_by = values
+
+    @Node.copy
+    def order_by_extend(self, *values):
+        self._order_by = ((self._order_by or ()) + values) or None
+
+    @Node.copy
+    def limit(self, value=None):
+        self._limit = value
+
+    @Node.copy
+    def offset(self, value=None):
+        self._offset = value
+
+    @Node.copy
+    def paginate(self, page, paginate_by=20):
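+        # Pages are 1-based; e.g. paginate(3, 20) -> LIMIT 20 OFFSET 40.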
+        if page > 0:
+            page -= 1
+        self._limit = paginate_by
+        self._offset = page * paginate_by
+
+    def _apply_ordering(self, ctx):
+        if self._order_by:
+            (ctx
+             .literal(' ORDER BY ')
+             .sql(CommaNodeList(self._order_by)))
+        if self._limit is not None or (self._offset is not None and
+                                       ctx.state.limit_max):
+            ctx.literal(' LIMIT ').sql(self._limit or ctx.state.limit_max)
+        if self._offset is not None:
+            ctx.literal(' OFFSET ').sql(self._offset)
+        return ctx
+
+    def __sql__(self, ctx):
+        if self._cte_list:
+            # The CTE scope is only used at the very beginning of the query,
+            # when we are describing the various CTEs we will be using.
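+            # e.g. a query with two CTEs renders as (illustrative):
+            #   WITH "cte1" AS (...), "cte2" AS (...) SELECT ...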
+            recursive = any(cte._recursive for cte in self._cte_list)
+
+            # Explicitly disable the "subquery" flag here, so as to avoid
+            # unnecessary parentheses around subsequent selects.
+            with ctx.scope_cte(subquery=False):
+                (ctx
+                 .literal('WITH RECURSIVE ' if recursive else 'WITH ')
+                 .sql(CommaNodeList(self._cte_list))
+                 .literal(' '))
+        return ctx
+
+
+def __compound_select__(operation, inverted=False):
+    def method(self, other):
+        if inverted:
+            self, other = other, self
+        return CompoundSelectQuery(self, operation, other)
+    return method
+
+
+class SelectQuery(Query):
+    union_all = __add__ = __compound_select__('UNION ALL')
+    union = __or__ = __compound_select__('UNION')
+    intersect = __and__ = __compound_select__('INTERSECT')
+    except_ = __sub__ = __compound_select__('EXCEPT')
+    __radd__ = __compound_select__('UNION ALL', inverted=True)
+    __ror__ = __compound_select__('UNION', inverted=True)
+    __rand__ = __compound_select__('INTERSECT', inverted=True)
+    __rsub__ = __compound_select__('EXCEPT', inverted=True)
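+    # e.g. (illustrative): q1 | q2 -> UNION, q1 + q2 -> UNION ALL,
+    # q1 & q2 -> INTERSECT, q1 - q2 -> EXCEPT.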
+
+    def cte(self, name, recursive=False, columns=None):
+        return CTE(name, self, recursive=recursive, columns=columns)
+
+    def select_from(self, *columns):
+        if not columns:
+            raise ValueError('select_from() must specify one or more columns.')
+
+        query = (Select((self,), columns)
+                 .bind(self._database))
+        if getattr(self, 'model', None) is not None:
+            # Bind to the sub-select's model type, if defined.
+            query = query.objects(self.model)
+        return query
+
+
+class SelectBase(_HashableSource, Source, SelectQuery):
+    def _get_hash(self):
+        return hash((self.__class__, self._alias or id(self)))
+
+    def _execute(self, database):
+        if self._cursor_wrapper is None:
+            cursor = database.execute(self)
+            self._cursor_wrapper = self._get_cursor_wrapper(cursor)
+        return self._cursor_wrapper
+
+    @database_required
+    def peek(self, database, n=1):
+        rows = self.execute(database)[:n]
+        if rows:
+            return rows[0] if n == 1 else rows
+
+    @database_required
+    def first(self, database, n=1):
+        if self._limit != n:
+            self._limit = n
+            self._cursor_wrapper = None
+        return self.peek(database, n=n)
+
+    @database_required
+    def scalar(self, database, as_tuple=False):
+        row = self.tuples().peek(database)
+        return row[0] if row and not as_tuple else row
+
+    @database_required
+    def count(self, database, clear_limit=False):
+        clone = self.order_by().alias('_wrapped')
+        if clear_limit:
+            clone._limit = clone._offset = None
+        try:
+            if clone._having is None and clone._group_by is None and \
+               clone._windows is None and clone._distinct is None and \
+               clone._simple_distinct is not True:
+                clone = clone.select(SQL('1'))
+        except AttributeError:
+            pass
+        return Select([clone], [fn.COUNT(SQL('1'))]).scalar(database)
+
+    @database_required
+    def exists(self, database):
+        clone = self.columns(SQL('1'))
+        clone._limit = 1
+        clone._offset = None
+        return bool(clone.scalar())
+
+    @database_required
+    def get(self, database):
+        self._cursor_wrapper = None
+        try:
+            return self.execute(database)[0]
+        except IndexError:
+            pass
+
+
+# QUERY IMPLEMENTATIONS.
+
+
+class CompoundSelectQuery(SelectBase):
+    def __init__(self, lhs, op, rhs):
+        super(CompoundSelectQuery, self).__init__()
+        self.lhs = lhs
+        self.op = op
+        self.rhs = rhs
+
+    @property
+    def _returning(self):
+        return self.lhs._returning
+
+    def _get_query_key(self):
+        return (self.lhs.get_query_key(), self.rhs.get_query_key())
+
+    def _wrap_parens(self, ctx, subq):
+        csq_setting = ctx.state.compound_select_parentheses
+
+        if not csq_setting or csq_setting == CSQ_PARENTHESES_NEVER:
+            return False
+        elif csq_setting == CSQ_PARENTHESES_ALWAYS:
+            return True
+        elif csq_setting == CSQ_PARENTHESES_UNNESTED:
+            return not isinstance(subq, CompoundSelectQuery)
+
+    def __sql__(self, ctx):
+        if ctx.scope == SCOPE_COLUMN:
+            return self.apply_column(ctx)
+
+        outer_parens = ctx.subquery or (ctx.scope == SCOPE_SOURCE)
+        with ctx(parentheses=outer_parens):
+            # Should the left-hand query be wrapped in parentheses?
+            lhs_parens = self._wrap_parens(ctx, self.lhs)
+            with ctx.scope_normal(parentheses=lhs_parens, subquery=False):
+                ctx.sql(self.lhs)
+            ctx.literal(' %s ' % self.op)
+            with ctx.push_alias():
+                # Should the right-hand query be wrapped in parentheses?
+                rhs_parens = self._wrap_parens(ctx, self.rhs)
+                with ctx.scope_normal(parentheses=rhs_parens, subquery=False):
+                    ctx.sql(self.rhs)
+
+            # Apply ORDER BY, LIMIT, OFFSET. We use the "values" scope so that
+            # entity names are not fully-qualified. This is a bit of a hack, as
+            # we're relying on the logic in Column.__sql__() to not fully
+            # qualify column names.
+            with ctx.scope_values():
+                self._apply_ordering(ctx)
+
+        return self.apply_alias(ctx)
+
+
+class Select(SelectBase):
+    def __init__(self, from_list=None, columns=None, group_by=None,
+                 having=None, distinct=None, windows=None, for_update=None,
+                 **kwargs):
+        super(Select, self).__init__(**kwargs)
+        self._from_list = (list(from_list) if isinstance(from_list, tuple)
+                           else from_list) or []
+        self._returning = columns
+        self._group_by = group_by
+        self._having = having
+        self._windows = None
+        self._for_update = 'FOR UPDATE' if for_update is True else for_update
+
+        self._distinct = self._simple_distinct = None
+        if distinct:
+            if isinstance(distinct, bool):
+                self._simple_distinct = distinct
+            else:
+                self._distinct = distinct
+
+        self._cursor_wrapper = None
+
+    def clone(self):
+        clone = super(Select, self).clone()
+        if clone._from_list:
+            clone._from_list = list(clone._from_list)
+        return clone
+
+    @Node.copy
+    def columns(self, *columns, **kwargs):
+        self._returning = columns
+    select = columns
+
+    @Node.copy
+    def select_extend(self, *columns):
+        self._returning = tuple(self._returning) + columns
+
+    @Node.copy
+    def from_(self, *sources):
+        self._from_list = list(sources)
+
+    @Node.copy
+    def join(self, dest, join_type='INNER', on=None):
+        if not self._from_list:
+            raise ValueError('No sources to join on.')
+        item = self._from_list.pop()
+        self._from_list.append(Join(item, dest, join_type, on))
+
+    @Node.copy
+    def group_by(self, *columns):
+        grouping = []
+        for column in columns:
+            if isinstance(column, Table):
+                if not column._columns:
+                    raise ValueError('Cannot pass a table to group_by() that '
+                                     'does not have columns explicitly '
+                                     'declared.')
+                grouping.extend([getattr(column, col_name)
+                                 for col_name in column._columns])
+            else:
+                grouping.append(column)
+        self._group_by = grouping
+
+    def group_by_extend(self, *values):
+        """Extend GROUP BY; @Node.copy is applied via the group_by() call."""
+        group_by = tuple(self._group_by or ()) + values
+        return self.group_by(*group_by)
+
+    @Node.copy
+    def having(self, *expressions):
+        if self._having is not None:
+            expressions = (self._having,) + expressions
+        self._having = reduce(operator.and_, expressions)
+
+    @Node.copy
+    def distinct(self, *columns):
+        if len(columns) == 1 and (columns[0] is True or columns[0] is False):
+            self._simple_distinct = columns[0]
+        else:
+            self._simple_distinct = False
+            self._distinct = columns
+
+    @Node.copy
+    def window(self, *windows):
+        self._windows = windows if windows else None
+
+    @Node.copy
+    def for_update(self, for_update=True):
+        self._for_update = 'FOR UPDATE' if for_update is True else for_update
+
+    def _get_query_key(self):
+        return self._alias
+
+    def __sql_selection__(self, ctx, is_subquery=False):
+        return ctx.sql(CommaNodeList(self._returning))
+
+    def __sql__(self, ctx):
+        if ctx.scope == SCOPE_COLUMN:
+            return self.apply_column(ctx)
+
+        is_subquery = ctx.subquery
+        state = {
+            'converter': None,
+            'in_function': False,
+            'parentheses': is_subquery or (ctx.scope == SCOPE_SOURCE),
+            'subquery': True,
+        }
+        if ctx.state.in_function and ctx.state.function_arg_count == 1:
+            state['parentheses'] = False
+
+        with ctx.scope_normal(**state):
+            # Defer calling parent SQL until here. This ensures that any CTEs
+            # for this query will be properly nested if this query is a
+            # sub-select or is used in an expression. See GH#1809 for example.
+            super(Select, self).__sql__(ctx)
+
+            ctx.literal('SELECT ')
+            if self._simple_distinct or self._distinct is not None:
+                ctx.literal('DISTINCT ')
+                if self._distinct:
+                    (ctx
+                     .literal('ON ')
+                     .sql(EnclosedNodeList(self._distinct))
+                     .literal(' '))
+
+            with ctx.scope_source():
+                ctx = self.__sql_selection__(ctx, is_subquery)
+
+            if self._from_list:
+                with ctx.scope_source(parentheses=False):
+                    ctx.literal(' FROM ').sql(CommaNodeList(self._from_list))
+
+            if self._where is not None:
+                ctx.literal(' WHERE ').sql(self._where)
+
+            if self._group_by:
+                ctx.literal(' GROUP BY ').sql(CommaNodeList(self._group_by))
+
+            if self._having is not None:
+                ctx.literal(' HAVING ').sql(self._having)
+
+            if self._windows is not None:
+                ctx.literal(' WINDOW ')
+                ctx.sql(CommaNodeList(self._windows))
+
+            # Apply ORDER BY, LIMIT, OFFSET.
+            self._apply_ordering(ctx)
+
+            if self._for_update:
+                if not ctx.state.for_update:
+                    raise ValueError('FOR UPDATE specified but not supported '
+                                     'by database.')
+                ctx.literal(' ')
+                ctx.sql(SQL(self._for_update))
+
+        # If the subquery is inside a function -or- we are evaluating a
+        # subquery on either side of an expression w/o an explicit alias, do
+        # not generate an alias + AS clause.
+        if ctx.state.in_function or (ctx.state.in_expr and
+                                     self._alias is None):
+            return ctx
+
+        return self.apply_alias(ctx)
+
+
+class _WriteQuery(Query):
+    def __init__(self, table, returning=None, **kwargs):
+        self.table = table
+        self._returning = returning
+        self._return_cursor = True if returning else False
+        super(_WriteQuery, self).__init__(**kwargs)
+
+    @Node.copy
+    def returning(self, *returning):
+        self._returning = returning
+        self._return_cursor = True if returning else False
+
+    def apply_returning(self, ctx):
+        if self._returning:
+            with ctx.scope_normal():
+                ctx.literal(' RETURNING ').sql(CommaNodeList(self._returning))
+        return ctx
+
+    def _execute(self, database):
+        if self._returning:
+            cursor = self.execute_returning(database)
+        else:
+            cursor = database.execute(self)
+        return self.handle_result(database, cursor)
+
+    def execute_returning(self, database):
+        if self._cursor_wrapper is None:
+            cursor = database.execute(self)
+            self._cursor_wrapper = self._get_cursor_wrapper(cursor)
+        return self._cursor_wrapper
+
+    def handle_result(self, database, cursor):
+        if self._return_cursor:
+            return cursor
+        return database.rows_affected(cursor)
+
+    def _set_table_alias(self, ctx):
+        ctx.alias_manager[self.table] = self.table.__name__
+
+    def __sql__(self, ctx):
+        super(_WriteQuery, self).__sql__(ctx)
+        # We explicitly set the table alias to the table's name, which ensures
+        # that if a sub-select references a column on the outer table, we won't
+        # assign it a new alias (e.g. t2) but will refer to it as table.column.
+        self._set_table_alias(ctx)
+        return ctx
+
+
+class Update(_WriteQuery):
+    def __init__(self, table, update=None, **kwargs):
+        super(Update, self).__init__(table, **kwargs)
+        self._update = update
+        self._from = None
+
+    @Node.copy
+    def from_(self, *sources):
+        self._from = sources
+
+    def __sql__(self, ctx):
+        super(Update, self).__sql__(ctx)
+
+        with ctx.scope_values(subquery=True):
+            ctx.literal('UPDATE ')
+
+            expressions = []
+            for k, v in sorted(self._update.items(), key=ctx.column_sort_key):
+                if not isinstance(v, Node):
+                    converter = k.db_value if isinstance(k, Field) else None
+                    v = Value(v, converter=converter, unpack=False)
+                if not isinstance(v, Value):
+                    v = qualify_names(v)
+                expressions.append(NodeList((k, SQL('='), v)))
+
+            (ctx
+             .sql(self.table)
+             .literal(' SET ')
+             .sql(CommaNodeList(expressions)))
+
+            if self._from:
+                with ctx.scope_source(parentheses=False):
+                    ctx.literal(' FROM ').sql(CommaNodeList(self._from))
+
+            if self._where:
+                with ctx.scope_normal():
+                    ctx.literal(' WHERE ').sql(self._where)
+            self._apply_ordering(ctx)
+            return self.apply_returning(ctx)
+
+
+class Insert(_WriteQuery):
+    SIMPLE = 0
+    QUERY = 1
+    MULTI = 2
+    class DefaultValuesException(Exception): pass
+
+    def __init__(self, table, insert=None, columns=None, on_conflict=None,
+                 **kwargs):
+        super(Insert, self).__init__(table, **kwargs)
+        self._insert = insert
+        self._columns = columns
+        self._on_conflict = on_conflict
+        self._query_type = None
+
+    def where(self, *expressions):
+        raise NotImplementedError('INSERT queries cannot have a WHERE clause.')
+
+    @Node.copy
+    def on_conflict_ignore(self, ignore=True):
+        self._on_conflict = OnConflict('IGNORE') if ignore else None
+
+    @Node.copy
+    def on_conflict_replace(self, replace=True):
+        self._on_conflict = OnConflict('REPLACE') if replace else None
+
+    @Node.copy
+    def on_conflict(self, *args, **kwargs):
+        self._on_conflict = (OnConflict(*args, **kwargs) if (args or kwargs)
+                             else None)
+
+    def _simple_insert(self, ctx):
+        if not self._insert:
+            raise self.DefaultValuesException('Error: no data to insert.')
+        return self._generate_insert((self._insert,), ctx)
+
+    def get_default_data(self):
+        return {}
+
+    def get_default_columns(self):
+        if self.table._columns:
+            return [getattr(self.table, col) for col in self.table._columns
+                    if col != self.table._primary_key]
+
+    def _generate_insert(self, insert, ctx):
+        rows_iter = iter(insert)
+        columns = self._columns
+
+        # Load and organize column defaults (if provided).
+        defaults = self.get_default_data()
+        value_lookups = {}
+
+        # First figure out what columns are being inserted (if they weren't
+        # specified explicitly). Resulting columns are normalized and ordered.
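+        # e.g. (illustrative): inserting [{'name': 'a'}, {'name': 'b'}] with
+        # no explicit columns infers the "name" column from the first row.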
+        if not columns:
+            try:
+                row = next(rows_iter)
+            except StopIteration:
+                raise self.DefaultValuesException('Error: no rows to insert.')
+
+            if not isinstance(row, dict):
+                columns = self.get_default_columns()
+                if columns is None:
+                    raise ValueError('Bulk insert must specify columns.')
+            else:
+                # Infer column names from the dict of data being inserted.
+                accum = []
+                uses_strings = False  # Are the dict keys strings or columns?
+                for key in row:
+                    if isinstance(key, basestring):
+                        column = getattr(self.table, key)
+                        uses_strings = True
+                    else:
+                        column = key
+                    accum.append(column)
+                    value_lookups[column] = key
+
+                # Add any columns present in the default data that are not
+                # accounted for by the dictionary of row data.
+                column_set = set(accum)
+                for col in (set(defaults) - column_set):
+                    accum.append(col)
+                    value_lookups[col] = col.name if uses_strings else col
+
+                columns = sorted(accum, key=lambda obj: obj.get_sort_key(ctx))
+            rows_iter = itertools.chain(iter((row,)), rows_iter)
+        else:
+            clean_columns = []
+            for column in columns:
+                if isinstance(column, basestring):
+                    column_obj = getattr(self.table, column)
+                else:
+                    column_obj = column
+                value_lookups[column_obj] = column
+                clean_columns.append(column_obj)
+
+            columns = clean_columns
+            for col in sorted(defaults, key=lambda obj: obj.get_sort_key(ctx)):
+                if col not in value_lookups:
+                    columns.append(col)
+                    value_lookups[col] = col
+
+        ctx.sql(EnclosedNodeList(columns)).literal(' VALUES ')
+        columns_converters = [
+            (column, column.db_value if isinstance(column, Field) else None)
+            for column in columns]
+
+        all_values = []
+        for row in rows_iter:
+            values = []
+            is_dict = isinstance(row, Mapping)
+            for i, (column, converter) in enumerate(columns_converters):
+                try:
+                    if is_dict:
+                        val = row[value_lookups[column]]
+                    else:
+                        val = row[i]
+                except (KeyError, IndexError):
+                    if column in defaults:
+                        val = defaults[column]
+                        if callable_(val):
+                            val = val()
+                    else:
+                        raise ValueError('Missing value for %s.' % column.name)
+
+                if not isinstance(val, Node):
+                    val = Value(val, converter=converter, unpack=False)
+                values.append(val)
+
+            all_values.append(EnclosedNodeList(values))
+
+        with ctx.scope_values(subquery=True):
+            return ctx.sql(CommaNodeList(all_values))
+
+    def _query_insert(self, ctx):
+        return (ctx
+                .sql(EnclosedNodeList(self._columns))
+                .literal(' ')
+                .sql(self._insert))
+
+    def _default_values(self, ctx):
+        if not self._database:
+            return ctx.literal('DEFAULT VALUES')
+        return self._database.default_values_insert(ctx)
+
+    def __sql__(self, ctx):
+        super(Insert, self).__sql__(ctx)
+        with ctx.scope_values():
+            stmt = None
+            if self._on_conflict is not None:
+                stmt = self._on_conflict.get_conflict_statement(ctx, self)
+
+            (ctx
+             .sql(stmt or SQL('INSERT'))
+             .literal(' INTO ')
+             .sql(self.table)
+             .literal(' '))
+
+            if isinstance(self._insert, dict) and not self._columns:
+                try:
+                    self._simple_insert(ctx)
+                except self.DefaultValuesException:
+                    self._default_values(ctx)
+                self._query_type = Insert.SIMPLE
+            elif isinstance(self._insert, (SelectQuery, SQL)):
+                self._query_insert(ctx)
+                self._query_type = Insert.QUERY
+            else:
+                try:
+                    self._generate_insert(self._insert, ctx)
+                except self.DefaultValuesException:
+                    return
+                self._query_type = Insert.MULTI
+
+            if self._on_conflict is not None:
+                update = self._on_conflict.get_conflict_update(ctx, self)
+                if update is not None:
+                    ctx.literal(' ').sql(update)
+
+            return self.apply_returning(ctx)
+
+    def _execute(self, database):
+        if self._returning is None and database.returning_clause \
+           and self.table._primary_key:
+            self._returning = (self.table._primary_key,)
+        return super(Insert, self)._execute(database)
+
+    def handle_result(self, database, cursor):
+        if self._return_cursor:
+            return cursor
+        return database.last_insert_id(cursor, self._query_type)
+
+
+class Delete(_WriteQuery):
+    def __sql__(self, ctx):
+        super(Delete, self).__sql__(ctx)
+
+        with ctx.scope_values(subquery=True):
+            ctx.literal('DELETE FROM ').sql(self.table)
+            if self._where is not None:
+                with ctx.scope_normal():
+                    ctx.literal(' WHERE ').sql(self._where)
+
+            self._apply_ordering(ctx)
+            return self.apply_returning(ctx)
+
+
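+# e.g. (illustrative), with kv = Table('kv'):
+#   Index('idx_kv_key', kv, (kv.c.key,), unique=True)
+#   -> CREATE UNIQUE INDEX "idx_kv_key" ON "kv" ("key")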
+class Index(Node):
+    def __init__(self, name, table, expressions, unique=False, safe=False,
+                 where=None, using=None):
+        self._name = name
+        self._table = Entity(table) if not isinstance(table, Table) else table
+        self._expressions = expressions
+        self._where = where
+        self._unique = unique
+        self._safe = safe
+        self._using = using
+
+    @Node.copy
+    def safe(self, _safe=True):
+        self._safe = _safe
+
+    @Node.copy
+    def where(self, *expressions):
+        if self._where is not None:
+            expressions = (self._where,) + expressions
+        self._where = reduce(operator.and_, expressions)
+
+    @Node.copy
+    def using(self, _using=None):
+        self._using = _using
+
+    def __sql__(self, ctx):
+        statement = 'CREATE UNIQUE INDEX ' if self._unique else 'CREATE INDEX '
+        with ctx.scope_values(subquery=True):
+            ctx.literal(statement)
+            if self._safe:
+                ctx.literal('IF NOT EXISTS ')
+
+            # Sqlite uses CREATE INDEX <schema>.<name> ON <table>, whereas most
+            # others use: CREATE INDEX <name> ON <schema>.<table>.
+            if ctx.state.index_schema_prefix and \
+               isinstance(self._table, Table) and self._table._schema:
+                index_name = Entity(self._table._schema, self._name)
+                table_name = Entity(self._table.__name__)
+            else:
+                index_name = Entity(self._name)
+                table_name = self._table
+
+            (ctx
+             .sql(index_name)
+             .literal(' ON ')
+             .sql(table_name)
+             .literal(' '))
+            if self._using is not None:
+                ctx.literal('USING %s ' % self._using)
+
+            ctx.sql(EnclosedNodeList([
+                SQL(expr) if isinstance(expr, basestring) else expr
+                for expr in self._expressions]))
+            if self._where is not None:
+                ctx.literal(' WHERE ').sql(self._where)
+
+        return ctx
+
+
+class ModelIndex(Index):
+    def __init__(self, model, fields, unique=False, safe=True, where=None,
+                 using=None, name=None):
+        self._model = model
+        if name is None:
+            name = self._generate_name_from_fields(model, fields)
+        if using is None:
+            for field in fields:
+                if isinstance(field, Field) and hasattr(field, 'index_type'):
+                    using = field.index_type
+        super(ModelIndex, self).__init__(
+            name=name,
+            table=model._meta.table,
+            expressions=fields,
+            unique=unique,
+            safe=safe,
+            where=where,
+            using=using)
+
+    def _generate_name_from_fields(self, model, fields):
+        accum = []
+        for field in fields:
+            if isinstance(field, basestring):
+                accum.append(field.split()[0])
+            else:
+                if isinstance(field, Node) and not isinstance(field, Field):
+                    field = field.unwrap()
+                if isinstance(field, Field):
+                    accum.append(field.column_name)
+
+        if not accum:
+            raise ValueError('Unable to generate a name for the index, please '
+                             'explicitly specify a name.')
+
+        clean_field_names = re.sub(r'[^\w]+', '', '_'.join(accum))
+        meta = model._meta
+        prefix = meta.name if meta.legacy_table_names else meta.table_name
+        return _truncate_constraint_name('_'.join((prefix, clean_field_names)))
+
+
+def _truncate_constraint_name(constraint, maxlen=64):
+    if len(constraint) > maxlen:
+        name_hash = hashlib.md5(constraint.encode('utf-8')).hexdigest()
+        constraint = '%s_%s' % (constraint[:(maxlen - 8)], name_hash[:7])
+    return constraint
+
+
+# DB-API 2.0 EXCEPTIONS.
+
+
+class PeeweeException(Exception): pass
+class ImproperlyConfigured(PeeweeException): pass
+class DatabaseError(PeeweeException): pass
+class DataError(DatabaseError): pass
+class IntegrityError(DatabaseError): pass
+class InterfaceError(PeeweeException): pass
+class InternalError(DatabaseError): pass
+class NotSupportedError(DatabaseError): pass
+class OperationalError(DatabaseError): pass
+class ProgrammingError(DatabaseError): pass
+
+
+class ExceptionWrapper(object):
+    __slots__ = ('exceptions',)
+    def __init__(self, exceptions):
+        self.exceptions = exceptions
+    def __enter__(self): pass
+    def __exit__(self, exc_type, exc_value, traceback):
+        if exc_type is None:
+            return
+        # psycopg2 2.8+ raises many fine-grained error classes; map unknown
+        # subclasses back to their base class so they can be matched below.
+        if pg_errors is not None and exc_type.__name__ not in self.exceptions \
+           and issubclass(exc_type, pg_errors.Error):
+            exc_type = exc_type.__bases__[0]
+        if exc_type.__name__ in self.exceptions:
+            new_type = self.exceptions[exc_type.__name__]
+            exc_args = exc_value.args
+            reraise(new_type, new_type(*exc_args), traceback)
+
+
+EXCEPTIONS = {
+    'ConstraintError': IntegrityError,
+    'DatabaseError': DatabaseError,
+    'DataError': DataError,
+    'IntegrityError': IntegrityError,
+    'InterfaceError': InterfaceError,
+    'InternalError': InternalError,
+    'NotSupportedError': NotSupportedError,
+    'OperationalError': OperationalError,
+    'ProgrammingError': ProgrammingError}
+
+__exception_wrapper__ = ExceptionWrapper(EXCEPTIONS)
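+
+# A minimal usage sketch: because execute_sql() runs inside the wrapper,
+# calling code can catch peewee's exception types regardless of which
+# driver raised the original error ("db" is assumed to be a Database):
+#
+#   try:
+#       db.execute_sql('INSERT INTO users (name) VALUES (?)', ('huey',))
+#   except IntegrityError:
+#       pass  # raised for sqlite3/psycopg2/pymysql integrity errors alike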
+
+
+# DATABASE INTERFACE AND CONNECTION MANAGEMENT.
+
+
+IndexMetadata = collections.namedtuple(
+    'IndexMetadata',
+    ('name', 'sql', 'columns', 'unique', 'table'))
+ColumnMetadata = collections.namedtuple(
+    'ColumnMetadata',
+    ('name', 'data_type', 'null', 'primary_key', 'table', 'default'))
+ForeignKeyMetadata = collections.namedtuple(
+    'ForeignKeyMetadata',
+    ('column', 'dest_table', 'dest_column', 'table'))
+ViewMetadata = collections.namedtuple('ViewMetadata', ('name', 'sql'))
+
+
+class _ConnectionState(object):
+    def __init__(self, **kwargs):
+        super(_ConnectionState, self).__init__(**kwargs)
+        self.reset()
+
+    def reset(self):
+        self.closed = True
+        self.conn = None
+        self.transactions = []
+
+    def set_connection(self, conn):
+        self.conn = conn
+        self.closed = False
+
+
+class _ConnectionLocal(_ConnectionState, threading.local): pass
+class _NoopLock(object):
+    __slots__ = ()
+    def __enter__(self): return self
+    def __exit__(self, exc_type, exc_val, exc_tb): pass
+
+
+class ConnectionContext(_callable_context_manager):
+    __slots__ = ('db',)
+    def __init__(self, db): self.db = db
+    def __enter__(self):
+        if self.db.is_closed():
+            self.db.connect()
+    def __exit__(self, exc_type, exc_val, exc_tb): self.db.close()
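+
+# A minimal usage sketch, assuming "db" is a Database instance: the context
+# manager opens a connection on entry and closes it on exit (peewee's
+# _callable_context_manager also allows use as a decorator):
+#
+#   with db.connection_context():
+#       run_queries()  # hypothetical function; connection closed afterwards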
+
+
+class Database(_callable_context_manager):
+    context_class = Context
+    field_types = {}
+    operations = {}
+    param = '?'
+    quote = '""'
+    server_version = None
+
+    # Feature toggles.
+    commit_select = False
+    compound_select_parentheses = CSQ_PARENTHESES_NEVER
+    for_update = False
+    index_schema_prefix = False
+    limit_max = None
+    nulls_ordering = False
+    returning_clause = False
+    safe_create_index = True
+    safe_drop_index = True
+    sequences = False
+    truncate_table = True
+
+    def __init__(self, database, thread_safe=True, autorollback=False,
+                 field_types=None, operations=None, autocommit=None, **kwargs):
+        self._field_types = merge_dict(FIELD, self.field_types)
+        self._operations = merge_dict(OP, self.operations)
+        if field_types:
+            self._field_types.update(field_types)
+        if operations:
+            self._operations.update(operations)
+
+        self.autorollback = autorollback
+        self.thread_safe = thread_safe
+        if thread_safe:
+            self._state = _ConnectionLocal()
+            self._lock = threading.Lock()
+        else:
+            self._state = _ConnectionState()
+            self._lock = _NoopLock()
+
+        if autocommit is not None:
+            __deprecated__('Peewee no longer uses the "autocommit" option, '
+                           'as the semantics now require it to always be '
+                           'True. Because some database drivers also accept '
+                           'an "autocommit" parameter, this warning is '
+                           'raised so you can remove the parameter from '
+                           'your code; in the future, passing it through '
+                           'could change the behavior of the underlying '
+                           'driver.')
+
+        self.connect_params = {}
+        self.init(database, **kwargs)
+
+    def init(self, database, **kwargs):
+        if not self.is_closed():
+            self.close()
+        self.database = database
+        self.connect_params.update(kwargs)
+        self.deferred = not bool(database)
+
+    def __enter__(self):
+        if self.is_closed():
+            self.connect()
+        self.transaction().__enter__()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        top = self._state.transactions[-1]
+        try:
+            top.__exit__(exc_type, exc_val, exc_tb)
+        finally:
+            self.close()
+
+    def connection_context(self):
+        return ConnectionContext(self)
+
+    def _connect(self):
+        raise NotImplementedError
+
+    def connect(self, reuse_if_open=False):
+        with self._lock:
+            if self.deferred:
+                raise InterfaceError('Error, database must be initialized '
+                                     'before opening a connection.')
+            if not self._state.closed:
+                if reuse_if_open:
+                    return False
+                raise OperationalError('Connection already opened.')
+
+            self._state.reset()
+            with __exception_wrapper__:
+                self._state.set_connection(self._connect())
+                if self.server_version is None:
+                    self._set_server_version(self._state.conn)
+                self._initialize_connection(self._state.conn)
+        return True
+
+    def _initialize_connection(self, conn):
+        pass
+
+    def _set_server_version(self, conn):
+        self.server_version = 0
+
+    def close(self):
+        with self._lock:
+            if self.deferred:
+                raise InterfaceError('Error, database must be initialized '
+                                     'before closing a connection.')
+            if self.in_transaction():
+                raise OperationalError('Attempting to close database while '
+                                       'transaction is open.')
+            is_open = not self._state.closed
+            try:
+                if is_open:
+                    with __exception_wrapper__:
+                        self._close(self._state.conn)
+            finally:
+                self._state.reset()
+            return is_open
+
+    def _close(self, conn):
+        conn.close()
+
+    def is_closed(self):
+        return self._state.closed
+
+    def connection(self):
+        if self.is_closed():
+            self.connect()
+        return self._state.conn
+
+    def cursor(self, commit=None):
+        if self.is_closed():
+            self.connect()
+        return self._state.conn.cursor()
+
+    def execute_sql(self, sql, params=None, commit=SENTINEL):
+        logger.debug((sql, params))
+        if commit is SENTINEL:
+            if self.in_transaction():
+                commit = False
+            elif self.commit_select:
+                commit = True
+            else:
+                commit = not sql[:6].lower().startswith('select')
+
+        with __exception_wrapper__:
+            cursor = self.cursor(commit)
+            try:
+                cursor.execute(sql, params or ())
+            except Exception:
+                if self.autorollback and not self.in_transaction():
+                    self.rollback()
+                raise
+            else:
+                if commit and not self.in_transaction():
+                    self.commit()
+        return cursor
+
+    def execute(self, query, commit=SENTINEL, **context_options):
+        ctx = self.get_sql_context(**context_options)
+        sql, params = ctx.sql(query).query()
+        return self.execute_sql(sql, params, commit=commit)
+
+    def get_context_options(self):
+        return {
+            'field_types': self._field_types,
+            'operations': self._operations,
+            'param': self.param,
+            'quote': self.quote,
+            'compound_select_parentheses': self.compound_select_parentheses,
+            'conflict_statement': self.conflict_statement,
+            'conflict_update': self.conflict_update,
+            'for_update': self.for_update,
+            'index_schema_prefix': self.index_schema_prefix,
+            'limit_max': self.limit_max,
+            'nulls_ordering': self.nulls_ordering,
+        }
+
+    def get_sql_context(self, **context_options):
+        context = self.get_context_options()
+        if context_options:
+            context.update(context_options)
+        return self.context_class(**context)
+
+    def conflict_statement(self, on_conflict, query):
+        raise NotImplementedError
+
+    def conflict_update(self, on_conflict, query):
+        raise NotImplementedError
+
+    def _build_on_conflict_update(self, on_conflict, query):
+        if on_conflict._conflict_target:
+            stmt = SQL('ON CONFLICT')
+            target = EnclosedNodeList([
+                Entity(col) if isinstance(col, basestring) else col
+                for col in on_conflict._conflict_target])
+            if on_conflict._conflict_where is not None:
+                target = NodeList([target, SQL('WHERE'),
+                                   on_conflict._conflict_where])
+        else:
+            stmt = SQL('ON CONFLICT ON CONSTRAINT')
+            target = on_conflict._conflict_constraint
+            if isinstance(target, basestring):
+                target = Entity(target)
+
+        updates = []
+        if on_conflict._preserve:
+            for column in on_conflict._preserve:
+                excluded = NodeList((SQL('EXCLUDED'), ensure_entity(column)),
+                                    glue='.')
+                expression = NodeList((ensure_entity(column), SQL('='),
+                                       excluded))
+                updates.append(expression)
+
+        if on_conflict._update:
+            for k, v in on_conflict._update.items():
+                if not isinstance(v, Node):
+                    # Attempt to resolve string field-names to their respective
+                    # field object, to apply data-type conversions.
+                    if isinstance(k, basestring):
+                        k = getattr(query.table, k)
+                    converter = k.db_value if isinstance(k, Field) else None
+                    v = Value(v, converter=converter, unpack=False)
+                else:
+                    v = QualifiedNames(v)
+                updates.append(NodeList((ensure_entity(k), SQL('='), v)))
+
+        parts = [stmt, target, SQL('DO UPDATE SET'), CommaNodeList(updates)]
+        if on_conflict._where:
+            parts.extend((SQL('WHERE'), QualifiedNames(on_conflict._where)))
+
+        return NodeList(parts)
+
+    def last_insert_id(self, cursor, query_type=None):
+        return cursor.lastrowid
+
+    def rows_affected(self, cursor):
+        return cursor.rowcount
+
+    def default_values_insert(self, ctx):
+        return ctx.literal('DEFAULT VALUES')
+
+    def in_transaction(self):
+        return bool(self._state.transactions)
+
+    def push_transaction(self, transaction):
+        self._state.transactions.append(transaction)
+
+    def pop_transaction(self):
+        return self._state.transactions.pop()
+
+    def transaction_depth(self):
+        return len(self._state.transactions)
+
+    def top_transaction(self):
+        if self._state.transactions:
+            return self._state.transactions[-1]
+
+    def atomic(self):
+        return _atomic(self)
+
+    def manual_commit(self):
+        return _manual(self)
+
+    def transaction(self):
+        return _transaction(self)
+
+    def savepoint(self):
+        return _savepoint(self)
+
+    def begin(self):
+        if self.is_closed():
+            self.connect()
+
+    def commit(self):
+        return self._state.conn.commit()
+
+    def rollback(self):
+        return self._state.conn.rollback()
+
+    def batch_commit(self, it, n):
+        for group in chunked(it, n):
+            with self.atomic():
+                for obj in group:
+                    yield obj
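+
+    # A minimal usage sketch: each chunk of n objects is wrapped in its own
+    # atomic block, so large imports commit incrementally rather than in a
+    # single huge transaction ("SomeModel" and "row_dicts" are hypothetical):
+    #
+    #   for row in db.batch_commit(row_dicts, 100):
+    #       SomeModel.create(**row)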
+
+    def table_exists(self, table_name, schema=None):
+        return table_name in self.get_tables(schema=schema)
+
+    def get_tables(self, schema=None):
+        raise NotImplementedError
+
+    def get_indexes(self, table, schema=None):
+        raise NotImplementedError
+
+    def get_columns(self, table, schema=None):
+        raise NotImplementedError
+
+    def get_primary_keys(self, table, schema=None):
+        raise NotImplementedError
+
+    def get_foreign_keys(self, table, schema=None):
+        raise NotImplementedError
+
+    def sequence_exists(self, seq):
+        raise NotImplementedError
+
+    def create_tables(self, models, **options):
+        for model in sort_models(models):
+            model.create_table(**options)
+
+    def drop_tables(self, models, **kwargs):
+        for model in reversed(sort_models(models)):
+            model.drop_table(**kwargs)
+
+    def extract_date(self, date_part, date_field):
+        raise NotImplementedError
+
+    def truncate_date(self, date_part, date_field):
+        raise NotImplementedError
+
+    def bind(self, models, bind_refs=True, bind_backrefs=True):
+        for model in models:
+            model.bind(self, bind_refs=bind_refs, bind_backrefs=bind_backrefs)
+
+    def bind_ctx(self, models, bind_refs=True, bind_backrefs=True):
+        return _BoundModelsContext(models, self, bind_refs, bind_backrefs)
+
+    def get_noop_select(self, ctx):
+        return ctx.sql(Select().columns(SQL('0')).where(SQL('0')))
+
+
+def __pragma__(name):
+    def __get__(self):
+        return self.pragma(name)
+    def __set__(self, value):
+        return self.pragma(name, value)
+    return property(__get__, __set__)
+
+
+class SqliteDatabase(Database):
+    field_types = {
+        'BIGAUTO': FIELD.AUTO,
+        'BIGINT': FIELD.INT,
+        'BOOL': FIELD.INT,
+        'DOUBLE': FIELD.FLOAT,
+        'SMALLINT': FIELD.INT,
+        'UUID': FIELD.TEXT}
+    operations = {
+        'LIKE': 'GLOB',
+        'ILIKE': 'LIKE'}
+    index_schema_prefix = True
+    limit_max = -1
+    server_version = __sqlite_version__
+    truncate_table = False
+
+    def __init__(self, database, *args, **kwargs):
+        self._pragmas = kwargs.pop('pragmas', ())
+        super(SqliteDatabase, self).__init__(database, *args, **kwargs)
+        self._aggregates = {}
+        self._collations = {}
+        self._functions = {}
+        self._window_functions = {}
+        self._table_functions = []
+        self._extensions = set()
+        self._attached = {}
+        self.register_function(_sqlite_date_part, 'date_part', 2)
+        self.register_function(_sqlite_date_trunc, 'date_trunc', 2)
+
+    def init(self, database, pragmas=None, timeout=5, **kwargs):
+        if pragmas is not None:
+            self._pragmas = pragmas
+        if isinstance(self._pragmas, dict):
+            self._pragmas = list(self._pragmas.items())
+        self._timeout = timeout
+        super(SqliteDatabase, self).init(database, **kwargs)
+
+    def _set_server_version(self, conn):
+        pass
+
+    def _connect(self):
+        if sqlite3 is None:
+            raise ImproperlyConfigured('SQLite driver not installed!')
+        conn = sqlite3.connect(self.database, timeout=self._timeout,
+                               **self.connect_params)
+        conn.isolation_level = None
+        try:
+            self._add_conn_hooks(conn)
+        except:
+            conn.close()
+            raise
+        return conn
+
+    def _add_conn_hooks(self, conn):
+        if self._attached:
+            self._attach_databases(conn)
+        if self._pragmas:
+            self._set_pragmas(conn)
+        self._load_aggregates(conn)
+        self._load_collations(conn)
+        self._load_functions(conn)
+        if self.server_version >= (3, 25, 0):
+            self._load_window_functions(conn)
+        if self._table_functions:
+            for table_function in self._table_functions:
+                table_function.register(conn)
+        if self._extensions:
+            self._load_extensions(conn)
+
+    def _set_pragmas(self, conn):
+        cursor = conn.cursor()
+        for pragma, value in self._pragmas:
+            cursor.execute('PRAGMA %s = %s;' % (pragma, value))
+        cursor.close()
+
+    def _attach_databases(self, conn):
+        cursor = conn.cursor()
+        for name, db in self._attached.items():
+            cursor.execute('ATTACH DATABASE "%s" AS "%s"' % (db, name))
+        cursor.close()
+
+    def pragma(self, key, value=SENTINEL, permanent=False, schema=None):
+        if schema is not None:
+            key = '"%s".%s' % (schema, key)
+        sql = 'PRAGMA %s' % key
+        if value is not SENTINEL:
+            sql += ' = %s' % (value or 0)
+            if permanent:
+                pragmas = dict(self._pragmas or ())
+                pragmas[key] = value
+                self._pragmas = list(pragmas.items())
+        elif permanent:
+            raise ValueError('Cannot specify a permanent pragma without a '
+                             'value.')
+        row = self.execute_sql(sql).fetchone()
+        if row:
+            return row[0]
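+
+    # A minimal usage sketch: pragma() reads or writes a PRAGMA on the
+    # current connection; permanent=True re-applies it whenever a new
+    # connection is opened. The properties below expose common pragmas:
+    #
+    #   db.pragma('journal_mode', 'wal', permanent=True)
+    #   db.cache_size         # same as db.pragma('cache_size')
+    #   db.foreign_keys = 1   # same as db.pragma('foreign_keys', 1)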
+
+    cache_size = __pragma__('cache_size')
+    foreign_keys = __pragma__('foreign_keys')
+    journal_mode = __pragma__('journal_mode')
+    journal_size_limit = __pragma__('journal_size_limit')
+    mmap_size = __pragma__('mmap_size')
+    page_size = __pragma__('page_size')
+    read_uncommitted = __pragma__('read_uncommitted')
+    synchronous = __pragma__('synchronous')
+    wal_autocheckpoint = __pragma__('wal_autocheckpoint')
+
+    @property
+    def timeout(self):
+        return self._timeout
+
+    @timeout.setter
+    def timeout(self, seconds):
+        if self._timeout == seconds:
+            return
+
+        self._timeout = seconds
+        if not self.is_closed():
+            # sqlite3.connect() takes the timeout in seconds, but the
+            # busy_timeout PRAGMA is expressed in milliseconds, so convert.
+            self.execute_sql('PRAGMA busy_timeout=%d;' % (seconds * 1000))
+
+    def _load_aggregates(self, conn):
+        for name, (klass, num_params) in self._aggregates.items():
+            conn.create_aggregate(name, num_params, klass)
+
+    def _load_collations(self, conn):
+        for name, fn in self._collations.items():
+            conn.create_collation(name, fn)
+
+    def _load_functions(self, conn):
+        for name, (fn, num_params) in self._functions.items():
+            conn.create_function(name, num_params, fn)
+
+    def _load_window_functions(self, conn):
+        for name, (klass, num_params) in self._window_functions.items():
+            conn.create_window_function(name, num_params, klass)
+
+    def register_aggregate(self, klass, name=None, num_params=-1):
+        self._aggregates[name or klass.__name__.lower()] = (klass, num_params)
+        if not self.is_closed():
+            self._load_aggregates(self.connection())
+
+    def aggregate(self, name=None, num_params=-1):
+        def decorator(klass):
+            self.register_aggregate(klass, name, num_params)
+            return klass
+        return decorator
+
+    def register_collation(self, fn, name=None):
+        name = name or fn.__name__
+        def _collation(*args):
+            expressions = args + (SQL('collate %s' % name),)
+            return NodeList(expressions)
+        fn.collation = _collation
+        self._collations[name] = fn
+        if not self.is_closed():
+            self._load_collations(self.connection())
+
+    def collation(self, name=None):
+        def decorator(fn):
+            self.register_collation(fn, name)
+            return fn
+        return decorator
+
+    def register_function(self, fn, name=None, num_params=-1):
+        self._functions[name or fn.__name__] = (fn, num_params)
+        if not self.is_closed():
+            self._load_functions(self.connection())
+
+    def func(self, name=None, num_params=-1):
+        def decorator(fn):
+            self.register_function(fn, name, num_params)
+            return fn
+        return decorator
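+
+    # A minimal usage sketch: func() registers a Python callable as a SQL
+    # scalar function on every connection ("title_case" is hypothetical):
+    #
+    #   @db.func('title_case', num_params=1)
+    #   def title_case(s):
+    #       return s.title() if s else s
+    #
+    #   # ...then usable in queries via fn.title_case(Model.name)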
+
+    def register_window_function(self, klass, name=None, num_params=-1):
+        name = name or klass.__name__.lower()
+        self._window_functions[name] = (klass, num_params)
+        if not self.is_closed():
+            self._load_window_functions(self.connection())
+
+    def window_function(self, name=None, num_params=-1):
+        def decorator(klass):
+            self.register_window_function(klass, name, num_params)
+            return klass
+        return decorator
+
+    def register_table_function(self, klass, name=None):
+        if name is not None:
+            klass.name = name
+        self._table_functions.append(klass)
+        if not self.is_closed():
+            klass.register(self.connection())
+
+    def table_function(self, name=None):
+        def decorator(klass):
+            self.register_table_function(klass, name)
+            return klass
+        return decorator
+
+    def unregister_aggregate(self, name):
+        del self._aggregates[name]
+
+    def unregister_collation(self, name):
+        del self._collations[name]
+
+    def unregister_function(self, name):
+        del self._functions[name]
+
+    def unregister_window_function(self, name):
+        del self._window_functions[name]
+
+    def unregister_table_function(self, name):
+        for idx, klass in enumerate(self._table_functions):
+            if klass.name == name:
+                break
+        else:
+            return False
+        self._table_functions.pop(idx)
+        return True
+
+    def _load_extensions(self, conn):
+        conn.enable_load_extension(True)
+        for extension in self._extensions:
+            conn.load_extension(extension)
+
+    def load_extension(self, extension):
+        self._extensions.add(extension)
+        if not self.is_closed():
+            conn = self.connection()
+            conn.enable_load_extension(True)
+            conn.load_extension(extension)
+
+    def unload_extension(self, extension):
+        self._extensions.remove(extension)
+
+    def attach(self, filename, name):
+        if name in self._attached:
+            if self._attached[name] == filename:
+                return False
+            raise OperationalError('schema "%s" already attached.' % name)
+
+        self._attached[name] = filename
+        if not self.is_closed():
+            self.execute_sql('ATTACH DATABASE "%s" AS "%s"' % (filename, name))
+        return True
+
+    def detach(self, name):
+        if name not in self._attached:
+            return False
+
+        del self._attached[name]
+        if not self.is_closed():
+            self.execute_sql('DETACH DATABASE "%s"' % name)
+        return True
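+
+    # A minimal usage sketch: attach()/detach() manage extra database files
+    # under a schema alias; attachments are remembered and re-applied when
+    # new connections are opened ("logs.db" is a hypothetical file):
+    #
+    #   db.attach('logs.db', 'logs')  # ATTACH DATABASE "logs.db" AS "logs"
+    #   db.get_tables(schema='logs')
+    #   db.detach('logs')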
+
+    def atomic(self, lock_type=None):
+        return _atomic(self, lock_type=lock_type)
+
+    def transaction(self, lock_type=None):
+        return _transaction(self, lock_type=lock_type)
+
+    def begin(self, lock_type=None):
+        statement = 'BEGIN %s' % lock_type if lock_type else 'BEGIN'
+        self.execute_sql(statement, commit=False)
+
+    def get_tables(self, schema=None):
+        schema = schema or 'main'
+        cursor = self.execute_sql('SELECT name FROM "%s".sqlite_master WHERE '
+                                  'type=? ORDER BY name' % schema, ('table',))
+        return [row for row, in cursor.fetchall()]
+
+    def get_views(self, schema=None):
+        sql = ('SELECT name, sql FROM "%s".sqlite_master WHERE type=? '
+               'ORDER BY name') % (schema or 'main')
+        return [ViewMetadata(*row) for row in self.execute_sql(sql, ('view',))]
+
+    def get_indexes(self, table, schema=None):
+        schema = schema or 'main'
+        query = ('SELECT name, sql FROM "%s".sqlite_master '
+                 'WHERE tbl_name = ? AND type = ? ORDER BY name') % schema
+        cursor = self.execute_sql(query, (table, 'index'))
+        index_to_sql = dict(cursor.fetchall())
+
+        # Determine which indexes have a unique constraint.
+        unique_indexes = set()
+        cursor = self.execute_sql('PRAGMA "%s".index_list("%s")' %
+                                  (schema, table))
+        for row in cursor.fetchall():
+            name = row[1]
+            is_unique = int(row[2]) == 1
+            if is_unique:
+                unique_indexes.add(name)
+
+        # Retrieve the indexed columns.
+        index_columns = {}
+        for index_name in sorted(index_to_sql):
+            cursor = self.execute_sql('PRAGMA "%s".index_info("%s")' %
+                                      (schema, index_name))
+            index_columns[index_name] = [row[2] for row in cursor.fetchall()]
+
+        return [
+            IndexMetadata(
+                name,
+                index_to_sql[name],
+                index_columns[name],
+                name in unique_indexes,
+                table)
+            for name in sorted(index_to_sql)]
+
+    def get_columns(self, table, schema=None):
+        cursor = self.execute_sql('PRAGMA "%s".table_info("%s")' %
+                                  (schema or 'main', table))
+        return [ColumnMetadata(r[1], r[2], not r[3], bool(r[5]), table, r[4])
+                for r in cursor.fetchall()]
+
+    def get_primary_keys(self, table, schema=None):
+        cursor = self.execute_sql('PRAGMA "%s".table_info("%s")' %
+                                  (schema or 'main', table))
+        return [row[1] for row in filter(lambda r: r[-1], cursor.fetchall())]
+
+    def get_foreign_keys(self, table, schema=None):
+        cursor = self.execute_sql('PRAGMA "%s".foreign_key_list("%s")' %
+                                  (schema or 'main', table))
+        return [ForeignKeyMetadata(row[3], row[2], row[4], table)
+                for row in cursor.fetchall()]
+
+    def get_binary_type(self):
+        return sqlite3.Binary
+
+    def conflict_statement(self, on_conflict, query):
+        action = on_conflict._action.lower() if on_conflict._action else ''
+        if action and action not in ('nothing', 'update'):
+            return SQL('INSERT OR %s' % on_conflict._action.upper())
+
+    def conflict_update(self, oc, query):
+        # Sqlite prior to 3.24.0 does not support Postgres-style upsert.
+        if self.server_version < (3, 24, 0) and \
+           any((oc._preserve, oc._update, oc._where, oc._conflict_target,
+                oc._conflict_constraint)):
+            raise ValueError('SQLite does not support specifying which values '
+                             'to preserve or update.')
+
+        action = oc._action.lower() if oc._action else ''
+        if action and action not in ('nothing', 'update'):
+            return
+
+        if action == 'nothing':
+            return SQL('ON CONFLICT DO NOTHING')
+        elif not oc._update and not oc._preserve:
+            raise ValueError('If you are not performing any updates (or '
+                             'preserving any INSERTed values), then the '
+                             'conflict resolution action should be set to '
+                             '"NOTHING".')
+        elif oc._conflict_constraint:
+            raise ValueError('SQLite does not support specifying named '
+                             'constraints for conflict resolution.')
+        elif not oc._conflict_target:
+            raise ValueError('SQLite requires that a conflict target be '
+                             'specified when doing an upsert.')
+
+        return self._build_on_conflict_update(oc, query)
+
+    def extract_date(self, date_part, date_field):
+        return fn.date_part(date_part, date_field)
+
+    def truncate_date(self, date_part, date_field):
+        return fn.date_trunc(date_part, date_field)
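+
+# A minimal upsert sketch for the conflict_update() hook above, assuming
+# SQLite 3.24+ and a hypothetical "User" model with a unique username:
+#
+#   (User
+#    .insert(username='alice', count=1)
+#    .on_conflict(conflict_target=[User.username],
+#                 update={User.count: User.count + 1})
+#    .execute())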
+
+
+class PostgresqlDatabase(Database):
+    field_types = {
+        'AUTO': 'SERIAL',
+        'BIGAUTO': 'BIGSERIAL',
+        'BLOB': 'BYTEA',
+        'BOOL': 'BOOLEAN',
+        'DATETIME': 'TIMESTAMP',
+        'DECIMAL': 'NUMERIC',
+        'DOUBLE': 'DOUBLE PRECISION',
+        'UUID': 'UUID',
+        'UUIDB': 'BYTEA'}
+    operations = {'REGEXP': '~', 'IREGEXP': '~*'}
+    param = '%s'
+
+    commit_select = True
+    compound_select_parentheses = CSQ_PARENTHESES_ALWAYS
+    for_update = True
+    nulls_ordering = True
+    returning_clause = True
+    safe_create_index = False
+    sequences = True
+
+    def init(self, database, register_unicode=True, encoding=None, **kwargs):
+        self._register_unicode = register_unicode
+        self._encoding = encoding
+        super(PostgresqlDatabase, self).init(database, **kwargs)
+
+    def _connect(self):
+        if psycopg2 is None:
+            raise ImproperlyConfigured('Postgres driver not installed!')
+        conn = psycopg2.connect(database=self.database, **self.connect_params)
+        if self._register_unicode:
+            pg_extensions.register_type(pg_extensions.UNICODE, conn)
+            pg_extensions.register_type(pg_extensions.UNICODEARRAY, conn)
+        if self._encoding:
+            conn.set_client_encoding(self._encoding)
+        return conn
+
+    def _set_server_version(self, conn):
+        self.server_version = conn.server_version
+        if self.server_version >= 90600:
+            self.safe_create_index = True
+
+    def last_insert_id(self, cursor, query_type=None):
+        try:
+            return cursor if query_type else cursor[0][0]
+        except (IndexError, KeyError, TypeError):
+            pass
+
+    def get_tables(self, schema=None):
+        query = ('SELECT tablename FROM pg_catalog.pg_tables '
+                 'WHERE schemaname = %s ORDER BY tablename')
+        cursor = self.execute_sql(query, (schema or 'public',))
+        return [table for table, in cursor.fetchall()]
+
+    def get_views(self, schema=None):
+        query = ('SELECT viewname, definition FROM pg_catalog.pg_views '
+                 'WHERE schemaname = %s ORDER BY viewname')
+        cursor = self.execute_sql(query, (schema or 'public',))
+        return [ViewMetadata(v, sql.strip()) for (v, sql) in cursor.fetchall()]
+
+    def get_indexes(self, table, schema=None):
+        query = """
+            SELECT
+                i.relname, idxs.indexdef, idx.indisunique,
+                array_to_string(array_agg(cols.attname), ',')
+            FROM pg_catalog.pg_class AS t
+            INNER JOIN pg_catalog.pg_index AS idx ON t.oid = idx.indrelid
+            INNER JOIN pg_catalog.pg_class AS i ON idx.indexrelid = i.oid
+            INNER JOIN pg_catalog.pg_indexes AS idxs ON
+                (idxs.tablename = t.relname AND idxs.indexname = i.relname)
+            LEFT OUTER JOIN pg_catalog.pg_attribute AS cols ON
+                (cols.attrelid = t.oid AND cols.attnum = ANY(idx.indkey))
+            WHERE t.relname = %s AND t.relkind = %s AND idxs.schemaname = %s
+            GROUP BY i.relname, idxs.indexdef, idx.indisunique
+            ORDER BY idx.indisunique DESC, i.relname;"""
+        cursor = self.execute_sql(query, (table, 'r', schema or 'public'))
+        return [IndexMetadata(row[0], row[1], row[3].split(','), row[2], table)
+                for row in cursor.fetchall()]
+
+    def get_columns(self, table, schema=None):
+        query = """
+            SELECT column_name, is_nullable, data_type, column_default
+            FROM information_schema.columns
+            WHERE table_name = %s AND table_schema = %s
+            ORDER BY ordinal_position"""
+        cursor = self.execute_sql(query, (table, schema or 'public'))
+        pks = set(self.get_primary_keys(table, schema))
+        return [ColumnMetadata(name, dt, null == 'YES', name in pks, table, df)
+                for name, null, dt, df in cursor.fetchall()]
+
+    def get_primary_keys(self, table, schema=None):
+        query = """
+            SELECT kc.column_name
+            FROM information_schema.table_constraints AS tc
+            INNER JOIN information_schema.key_column_usage AS kc ON (
+                tc.table_name = kc.table_name AND
+                tc.table_schema = kc.table_schema AND
+                tc.constraint_name = kc.constraint_name)
+            WHERE
+                tc.constraint_type = %s AND
+                tc.table_name = %s AND
+                tc.table_schema = %s"""
+        ctype = 'PRIMARY KEY'
+        cursor = self.execute_sql(query, (ctype, table, schema or 'public'))
+        return [pk for pk, in cursor.fetchall()]
+
+    def get_foreign_keys(self, table, schema=None):
+        sql = """
+            SELECT
+                kcu.column_name, ccu.table_name, ccu.column_name
+            FROM information_schema.table_constraints AS tc
+            JOIN information_schema.key_column_usage AS kcu
+                ON (tc.constraint_name = kcu.constraint_name AND
+                    tc.constraint_schema = kcu.constraint_schema)
+            JOIN information_schema.constraint_column_usage AS ccu
+                ON (ccu.constraint_name = tc.constraint_name AND
+                    ccu.constraint_schema = tc.constraint_schema)
+            WHERE
+                tc.constraint_type = 'FOREIGN KEY' AND
+                tc.table_name = %s AND
+                tc.table_schema = %s"""
+        cursor = self.execute_sql(sql, (table, schema or 'public'))
+        return [ForeignKeyMetadata(row[0], row[1], row[2], table)
+                for row in cursor.fetchall()]
+
+    def sequence_exists(self, sequence):
+        res = self.execute_sql("""
+            SELECT COUNT(*) FROM pg_class, pg_namespace
+            WHERE relkind='S'
+                AND pg_class.relnamespace = pg_namespace.oid
+                AND relname=%s""", (sequence,))
+        return bool(res.fetchone()[0])
+
+    def get_binary_type(self):
+        return psycopg2.Binary
+
+    def conflict_statement(self, on_conflict, query):
+        return
+
+    def conflict_update(self, oc, query):
+        action = oc._action.lower() if oc._action else ''
+        if action in ('ignore', 'nothing'):
+            return SQL('ON CONFLICT DO NOTHING')
+        elif action and action != 'update':
+            raise ValueError('The only supported actions for conflict '
+                             'resolution with Postgresql are "ignore" or '
+                             '"update".')
+        elif not oc._update and not oc._preserve:
+            raise ValueError('If you are not performing any updates (or '
+                             'preserving any INSERTed values), then the '
+                             'conflict resolution action should be set to '
+                             '"IGNORE".')
+        elif not (oc._conflict_target or oc._conflict_constraint):
+            raise ValueError('Postgres requires that a conflict target be '
+                             'specified when doing an upsert.')
+
+        return self._build_on_conflict_update(oc, query)
+
+    def extract_date(self, date_part, date_field):
+        return fn.EXTRACT(NodeList((date_part, SQL('FROM'), date_field)))
+
+    def truncate_date(self, date_part, date_field):
+        return fn.DATE_TRUNC(date_part, date_field)
+
+    def get_noop_select(self, ctx):
+        return ctx.sql(Select().columns(SQL('0')).where(SQL('false')))
+
+
+class MySQLDatabase(Database):
+    field_types = {
+        'AUTO': 'INTEGER AUTO_INCREMENT',
+        'BIGAUTO': 'BIGINT AUTO_INCREMENT',
+        'BOOL': 'BOOL',
+        'DECIMAL': 'NUMERIC',
+        'DOUBLE': 'DOUBLE PRECISION',
+        'FLOAT': 'FLOAT',
+        'UUID': 'VARCHAR(40)',
+        'UUIDB': 'VARBINARY(16)'}
+    operations = {
+        'LIKE': 'LIKE BINARY',
+        'ILIKE': 'LIKE',
+        'REGEXP': 'REGEXP BINARY',
+        'IREGEXP': 'REGEXP',
+        'XOR': 'XOR'}
+    param = '%s'
+    quote = '``'
+
+    commit_select = True
+    compound_select_parentheses = CSQ_PARENTHESES_UNNESTED
+    for_update = True
+    limit_max = 2 ** 64 - 1
+    safe_create_index = False
+    safe_drop_index = False
+
+    def init(self, database, **kwargs):
+        params = {'charset': 'utf8', 'use_unicode': True}
+        params.update(kwargs)
+        if 'password' in params and mysql_passwd:
+            params['passwd'] = params.pop('password')
+        super(MySQLDatabase, self).init(database, **params)
+
+    def _connect(self):
+        if mysql is None:
+            raise ImproperlyConfigured('MySQL driver not installed!')
+        conn = mysql.connect(db=self.database, **self.connect_params)
+        return conn
+
+    def _set_server_version(self, conn):
+        try:
+            version_raw = conn.server_version
+        except AttributeError:
+            version_raw = conn.get_server_info()
+        self.server_version = self._extract_server_version(version_raw)
+
+    def _extract_server_version(self, version):
+        version = version.lower()
+        if 'maria' in version:
+            match_obj = re.search(r'(1\d\.\d+\.\d+)', version)
+        else:
+            match_obj = re.search(r'(\d\.\d+\.\d+)', version)
+        if match_obj is not None:
+            return tuple(int(num) for num in match_obj.groups()[0].split('.'))
+
+        warnings.warn('Unable to determine MySQL version: "%s"' % version)
+        return (0, 0, 0)  # Unable to determine version!
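+
+    # Example inputs and parsed results, assuming typical version strings
+    # reported by the drivers:
+    #
+    #   '5.7.33-log'                     -> (5, 7, 33)
+    #   '10.5.8-MariaDB-1:10.5.8+maria'  -> (10, 5, 8)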
+
+    def default_values_insert(self, ctx):
+        return ctx.literal('() VALUES ()')
+
+    def get_tables(self, schema=None):
+        query = ('SELECT table_name FROM information_schema.tables '
+                 'WHERE table_schema = DATABASE() AND table_type != %s '
+                 'ORDER BY table_name')
+        return [table for table, in self.execute_sql(query, ('VIEW',))]
+
+    def get_views(self, schema=None):
+        query = ('SELECT table_name, view_definition '
+                 'FROM information_schema.views '
+                 'WHERE table_schema = DATABASE() ORDER BY table_name')
+        cursor = self.execute_sql(query)
+        return [ViewMetadata(*row) for row in cursor.fetchall()]
+
+    def get_indexes(self, table, schema=None):
+        cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
+        unique = set()
+        indexes = {}
+        for row in cursor.fetchall():
+            if not row[1]:
+                unique.add(row[2])
+            indexes.setdefault(row[2], [])
+            indexes[row[2]].append(row[4])
+        return [IndexMetadata(name, None, indexes[name], name in unique, table)
+                for name in indexes]
+
+    def get_columns(self, table, schema=None):
+        sql = """
+            SELECT column_name, is_nullable, data_type, column_default
+            FROM information_schema.columns
+            WHERE table_name = %s AND table_schema = DATABASE()"""
+        cursor = self.execute_sql(sql, (table,))
+        pks = set(self.get_primary_keys(table))
+        return [ColumnMetadata(name, dt, null == 'YES', name in pks, table, df)
+                for name, null, dt, df in cursor.fetchall()]
+
+    def get_primary_keys(self, table, schema=None):
+        cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table)
+        return [row[4] for row in
+                filter(lambda row: row[2] == 'PRIMARY', cursor.fetchall())]
+
+    def get_foreign_keys(self, table, schema=None):
+        query = """
+            SELECT column_name, referenced_table_name, referenced_column_name
+            FROM information_schema.key_column_usage
+            WHERE table_name = %s
+                AND table_schema = DATABASE()
+                AND referenced_table_name IS NOT NULL
+                AND referenced_column_name IS NOT NULL"""
+        cursor = self.execute_sql(query, (table,))
+        return [
+            ForeignKeyMetadata(column, dest_table, dest_column, table)
+            for column, dest_table, dest_column in cursor.fetchall()]
+
+    def get_binary_type(self):
+        return mysql.Binary
+
+    def conflict_statement(self, on_conflict, query):
+        if not on_conflict._action: return
+
+        action = on_conflict._action.lower()
+        if action == 'replace':
+            return SQL('REPLACE')
+        elif action == 'ignore':
+            return SQL('INSERT IGNORE')
+        elif action != 'update':
+            raise ValueError('Unsupported action for conflict resolution. '
+                             'MySQL supports REPLACE, IGNORE, and UPDATE.')
+
+    def conflict_update(self, on_conflict, query):
+        if on_conflict._where or on_conflict._conflict_target or \
+           on_conflict._conflict_constraint:
+            raise ValueError('MySQL does not support the specification of '
+                             'where clauses or conflict targets for conflict '
+                             'resolution.')
+
+        updates = []
+        if on_conflict._preserve:
+            # Determine which function to use, which varies by server:
+            # MySQL and MariaDB prior to 10.3.3 use VALUES(), while
+            # MariaDB 10.3.3+ uses VALUE().
+            version = self.server_version or (0,)
+            if version[0] == 10 and version >= (10, 3, 3):
+                VALUE_FN = fn.VALUE
+            else:
+                VALUE_FN = fn.VALUES
+
+            for column in on_conflict._preserve:
+                entity = ensure_entity(column)
+                expression = NodeList((
+                    ensure_entity(column),
+                    SQL('='),
+                    VALUE_FN(entity)))
+                updates.append(expression)
+
+        if on_conflict._update:
+            for k, v in on_conflict._update.items():
+                if not isinstance(v, Node):
+                    # Attempt to resolve string field-names to their respective
+                    # field object, to apply data-type conversions.
+                    if isinstance(k, basestring):
+                        k = getattr(query.table, k)
+                    converter = k.db_value if isinstance(k, Field) else None
+                    v = Value(v, converter=converter, unpack=False)
+                updates.append(NodeList((ensure_entity(k), SQL('='), v)))
+
+        if updates:
+            return NodeList((SQL('ON DUPLICATE KEY UPDATE'),
+                             CommaNodeList(updates)))
+
+    def extract_date(self, date_part, date_field):
+        return fn.EXTRACT(NodeList((SQL(date_part), SQL('FROM'), date_field)))
+
+    def truncate_date(self, date_part, date_field):
+        return fn.DATE_FORMAT(date_field, __mysql_date_trunc__[date_part])
+
+    def get_noop_select(self, ctx):
+        return ctx.literal('DO 0')
+
+
+# TRANSACTION CONTROL.
+
+
+class _manual(_callable_context_manager):
+    def __init__(self, db):
+        self.db = db
+
+    def __enter__(self):
+        top = self.db.top_transaction()
+        if top and not isinstance(self.db.top_transaction(), _manual):
+            raise ValueError('Cannot enter manual commit block while a '
+                             'transaction is active.')
+        self.db.push_transaction(self)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if self.db.pop_transaction() is not self:
+            raise ValueError('Transaction stack corrupted while exiting '
+                             'manual commit block.')
+
+
+class _atomic(_callable_context_manager):
+    def __init__(self, db, lock_type=None):
+        self.db = db
+        self._lock_type = lock_type
+        self._transaction_args = (lock_type,) if lock_type is not None else ()
+
+    def __enter__(self):
+        if self.db.transaction_depth() == 0:
+            self._helper = self.db.transaction(*self._transaction_args)
+        else:
+            self._helper = self.db.savepoint()
+            if isinstance(self.db.top_transaction(), _manual):
+                raise ValueError('Cannot enter atomic commit block while in '
+                                 'manual commit mode.')
+        return self._helper.__enter__()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        return self._helper.__exit__(exc_type, exc_val, exc_tb)
+
+
+class _transaction(_callable_context_manager):
+    def __init__(self, db, lock_type=None):
+        self.db = db
+        self._lock_type = lock_type
+
+    def _begin(self):
+        if self._lock_type:
+            self.db.begin(self._lock_type)
+        else:
+            self.db.begin()
+
+    def commit(self, begin=True):
+        self.db.commit()
+        if begin:
+            self._begin()
+
+    def rollback(self, begin=True):
+        self.db.rollback()
+        if begin:
+            self._begin()
+
+    def __enter__(self):
+        if self.db.transaction_depth() == 0:
+            self._begin()
+        self.db.push_transaction(self)
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        try:
+            if exc_type:
+                self.rollback(False)
+            elif self.db.transaction_depth() == 1:
+                try:
+                    self.commit(False)
+                except:
+                    self.rollback(False)
+                    raise
+        finally:
+            self.db.pop_transaction()
+
+
+class _savepoint(_callable_context_manager):
+    def __init__(self, db, sid=None):
+        self.db = db
+        self.sid = sid or 's' + uuid.uuid4().hex
+        self.quoted_sid = self.sid.join(self.db.quote)
+
+    def _begin(self):
+        self.db.execute_sql('SAVEPOINT %s;' % self.quoted_sid)
+
+    def commit(self, begin=True):
+        self.db.execute_sql('RELEASE SAVEPOINT %s;' % self.quoted_sid)
+        if begin: self._begin()
+
+    def rollback(self):
+        self.db.execute_sql('ROLLBACK TO SAVEPOINT %s;' % self.quoted_sid)
+
+    def __enter__(self):
+        self._begin()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type:
+            self.rollback()
+        else:
+            try:
+                self.commit(begin=False)
+            except:
+                self.rollback()
+                raise
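+
+# A minimal sketch of how these compose: the outermost atomic() opens a
+# transaction and nested atomic() blocks become savepoints ("account" and
+# "entry" are hypothetical model instances):
+#
+#   with db.atomic():          # BEGIN
+#       account.save()
+#       with db.atomic():      # SAVEPOINT s<uuid>
+#           entry.save()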
+
+
+# CURSOR REPRESENTATIONS.
+
+
+class CursorWrapper(object):
+    def __init__(self, cursor):
+        self.cursor = cursor
+        self.count = 0
+        self.index = 0
+        self.initialized = False
+        self.populated = False
+        self.row_cache = []
+
+    def __iter__(self):
+        if self.populated:
+            return iter(self.row_cache)
+        return ResultIterator(self)
+
+    def __getitem__(self, item):
+        if isinstance(item, slice):
+            stop = item.stop
+            if stop is None or stop < 0:
+                self.fill_cache()
+            else:
+                self.fill_cache(stop)
+            return self.row_cache[item]
+        elif isinstance(item, int):
+            # Fill through index `item` (fill_cache(n) caches the first n
+            # rows, so n must be item + 1); negative indexes fill fully.
+            self.fill_cache(item + 1 if item >= 0 else 0)
+            return self.row_cache[item]
+        else:
+            raise ValueError('CursorWrapper only supports integer and slice '
+                             'indexes.')
+
+    def __len__(self):
+        self.fill_cache()
+        return self.count
+
+    def initialize(self):
+        pass
+
+    def iterate(self, cache=True):
+        row = self.cursor.fetchone()
+        if row is None:
+            self.populated = True
+            self.cursor.close()
+            raise StopIteration
+        elif not self.initialized:
+            self.initialize()  # Lazy initialization.
+            self.initialized = True
+        self.count += 1
+        result = self.process_row(row)
+        if cache:
+            self.row_cache.append(result)
+        return result
+
+    def process_row(self, row):
+        return row
+
+    def iterator(self):
+        """Efficient one-pass iteration over the result set."""
+        while True:
+            try:
+                yield self.iterate(False)
+            except StopIteration:
+                return
+
+    def fill_cache(self, n=0):
+        n = n or float('Inf')
+        if n < 0:
+            raise ValueError('Negative values are not supported.')
+
+        iterator = ResultIterator(self)
+        iterator.index = self.count
+        while not self.populated and (n > self.count):
+            try:
+                iterator.next()
+            except StopIteration:
+                break
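+
+    # A sketch of the caching semantics, assuming "wrapper" is a populated
+    # CursorWrapper: rows are fetched lazily and cached, so iteration and
+    # indexing reuse previously-fetched rows:
+    #
+    #   wrapper[3]          # fetches rows 0..3, filling the cache as needed
+    #   len(wrapper)        # exhausts the cursor; returns the total count
+    #   wrapper.iterator()  # one-pass generator that bypasses the cache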
+
+
+class DictCursorWrapper(CursorWrapper):
+    def _initialize_columns(self):
+        description = self.cursor.description
+        self.columns = [t[0][t[0].find('.') + 1:].strip('"')
+                        for t in description]
+        self.ncols = len(description)
+
+    initialize = _initialize_columns
+
+    def _row_to_dict(self, row):
+        result = {}
+        for i in range(self.ncols):
+            result.setdefault(self.columns[i], row[i])  # Do not overwrite.
+        return result
+
+    process_row = _row_to_dict
+
+
+class NamedTupleCursorWrapper(CursorWrapper):
+    def initialize(self):
+        description = self.cursor.description
+        self.tuple_class = collections.namedtuple(
+            'Row',
+            [col[0][col[0].find('.') + 1:].strip('"') for col in description])
+
+    def process_row(self, row):
+        return self.tuple_class(*row)
+
+
+class ObjectCursorWrapper(DictCursorWrapper):
+    def __init__(self, cursor, constructor):
+        super(ObjectCursorWrapper, self).__init__(cursor)
+        self.constructor = constructor
+
+    def process_row(self, row):
+        row_dict = self._row_to_dict(row)
+        return self.constructor(**row_dict)
+
+
+class ResultIterator(object):
+    def __init__(self, cursor_wrapper):
+        self.cursor_wrapper = cursor_wrapper
+        self.index = 0
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        if self.index < self.cursor_wrapper.count:
+            obj = self.cursor_wrapper.row_cache[self.index]
+        elif not self.cursor_wrapper.populated:
+            self.cursor_wrapper.iterate()
+            obj = self.cursor_wrapper.row_cache[self.index]
+        else:
+            raise StopIteration
+        self.index += 1
+        return obj
+
+    __next__ = next
+
+
+# FIELDS.
+
+
+class FieldAccessor(object):
+    def __init__(self, model, field, name):
+        self.model = model
+        self.field = field
+        self.name = name
+
+    def __get__(self, instance, instance_type=None):
+        if instance is not None:
+            return instance.__data__.get(self.name)
+        return self.field
+
+    def __set__(self, instance, value):
+        instance.__data__[self.name] = value
+        instance._dirty.add(self.name)
+
+
+class ForeignKeyAccessor(FieldAccessor):
+    def __init__(self, model, field, name):
+        super(ForeignKeyAccessor, self).__init__(model, field, name)
+        self.rel_model = field.rel_model
+
+    def get_rel_instance(self, instance):
+        value = instance.__data__.get(self.name)
+        if value is not None or self.name in instance.__rel__:
+            if self.name not in instance.__rel__:
+                obj = self.rel_model.get(self.field.rel_field == value)
+                instance.__rel__[self.name] = obj
+            return instance.__rel__[self.name]
+        elif not self.field.null:
+            raise self.rel_model.DoesNotExist
+        return value
+
+    def __get__(self, instance, instance_type=None):
+        if instance is not None:
+            return self.get_rel_instance(instance)
+        return self.field
+
+    def __set__(self, instance, obj):
+        if isinstance(obj, self.rel_model):
+            instance.__data__[self.name] = getattr(obj, self.field.rel_field.name)
+            instance.__rel__[self.name] = obj
+        else:
+            fk_value = instance.__data__.get(self.name)
+            instance.__data__[self.name] = obj
+            if obj != fk_value and self.name in instance.__rel__:
+                del instance.__rel__[self.name]
+        instance._dirty.add(self.name)
+
+
+class NoQueryForeignKeyAccessor(ForeignKeyAccessor):
+    def get_rel_instance(self, instance):
+        value = instance.__data__.get(self.name)
+        if value is not None:
+            return instance.__rel__.get(self.name, value)
+        elif not self.field.null:
+            raise self.rel_model.DoesNotExist
+
+
+class BackrefAccessor(object):
+    def __init__(self, field):
+        self.field = field
+        self.model = field.rel_model
+        self.rel_model = field.model
+
+    def __get__(self, instance, instance_type=None):
+        if instance is not None:
+            dest = self.field.rel_field.name
+            return (self.rel_model
+                    .select()
+                    .where(self.field == getattr(instance, dest)))
+        return self
+
+
+class ObjectIdAccessor(object):
+    """Gives direct access to the underlying id"""
+    def __init__(self, field):
+        self.field = field
+
+    def __get__(self, instance, instance_type=None):
+        if instance is not None:
+            return instance.__data__.get(self.field.name)
+        return self.field
+
+    def __set__(self, instance, value):
+        setattr(instance, self.field.name, value)
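+
+# A short sketch of how these descriptors cooperate, assuming a hypothetical
+# "Tweet" model with a foreign key named "user":
+#
+#   tweet.user      # ForeignKeyAccessor: fetches (and caches) the User row
+#   tweet.user_id   # ObjectIdAccessor: the raw stored id; no query issued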
+
+
+class Field(ColumnBase):
+    _field_counter = 0
+    _order = 0
+    accessor_class = FieldAccessor
+    auto_increment = False
+    default_index_type = None
+    field_type = 'DEFAULT'
+
+    def __init__(self, null=False, index=False, unique=False, column_name=None,
+                 default=None, primary_key=False, constraints=None,
+                 sequence=None, collation=None, unindexed=False, choices=None,
+                 help_text=None, verbose_name=None, index_type=None,
+                 db_column=None, _hidden=False):
+        if db_column is not None:
+            __deprecated__('"db_column" has been deprecated in favor of '
+                           '"column_name" for Field objects.')
+            column_name = db_column
+
+        self.null = null
+        self.index = index
+        self.unique = unique
+        self.column_name = column_name
+        self.default = default
+        self.primary_key = primary_key
+        self.constraints = constraints  # List of column constraints.
+        self.sequence = sequence  # Name of sequence, e.g. foo_id_seq.
+        self.collation = collation
+        self.unindexed = unindexed
+        self.choices = choices
+        self.help_text = help_text
+        self.verbose_name = verbose_name
+        self.index_type = index_type or self.default_index_type
+        self._hidden = _hidden
+
+        # Used internally for recovering the order in which Fields were defined
+        # on the Model class.
+        Field._field_counter += 1
+        self._order = Field._field_counter
+        self._sort_key = (self.primary_key and 1 or 2), self._order
+
+    def __hash__(self):
+        return hash(self.name + '.' + self.model.__name__)
+
+    def __repr__(self):
+        if hasattr(self, 'model') and getattr(self, 'name', None):
+            return '<%s: %s.%s>' % (type(self).__name__,
+                                    self.model.__name__,
+                                    self.name)
+        return '<%s: (unbound)>' % type(self).__name__
+
+    def bind(self, model, name, set_attribute=True):
+        self.model = model
+        self.name = name
+        self.column_name = self.column_name or name
+        if set_attribute:
+            setattr(model, name, self.accessor_class(model, self, name))
+
+    @property
+    def column(self):
+        return Column(self.model._meta.table, self.column_name)
+
+    def adapt(self, value):
+        return value
+
+    def db_value(self, value):
+        return value if value is None else self.adapt(value)
+
+    def python_value(self, value):
+        return value if value is None else self.adapt(value)
+
+    def get_sort_key(self, ctx):
+        return self._sort_key
+
+    def __sql__(self, ctx):
+        return ctx.sql(self.column)
+
+    def get_modifiers(self):
+        return
+
+    def ddl_datatype(self, ctx):
+        if ctx and ctx.state.field_types:
+            column_type = ctx.state.field_types.get(self.field_type,
+                                                    self.field_type)
+        else:
+            column_type = self.field_type
+
+        modifiers = self.get_modifiers()
+        if column_type and modifiers:
+            modifier_literal = ', '.join([str(m) for m in modifiers])
+            return SQL('%s(%s)' % (column_type, modifier_literal))
+        else:
+            return SQL(column_type)
+
+    def ddl(self, ctx):
+        accum = [Entity(self.column_name)]
+        data_type = self.ddl_datatype(ctx)
+        if data_type:
+            accum.append(data_type)
+        if self.unindexed:
+            accum.append(SQL('UNINDEXED'))
+        if not self.null:
+            accum.append(SQL('NOT NULL'))
+        if self.primary_key:
+            accum.append(SQL('PRIMARY KEY'))
+        if self.sequence:
+            accum.append(SQL("DEFAULT NEXTVAL('%s')" % self.sequence))
+        if self.constraints:
+            accum.extend(self.constraints)
+        if self.collation:
+            accum.append(SQL('COLLATE %s' % self.collation))
+        return NodeList(accum)
+
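+# Illustrative example (not part of the library): the DDL machinery above
+# composes per-column SQL from field attributes. A hypothetical
+#
+#     name = CharField(max_length=100)
+#
+# renders roughly as:  "name" VARCHAR(100) NOT NULL
+# (VARCHAR(100) comes from ddl_datatype() plus get_modifiers(); NOT NULL is
+# appended because null defaults to False).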
+
+class IntegerField(Field):
+    field_type = 'INT'
+    adapt = int
+
+
+class BigIntegerField(IntegerField):
+    field_type = 'BIGINT'
+
+
+class SmallIntegerField(IntegerField):
+    field_type = 'SMALLINT'
+
+
+class AutoField(IntegerField):
+    auto_increment = True
+    field_type = 'AUTO'
+
+    def __init__(self, *args, **kwargs):
+        if kwargs.get('primary_key') is False:
+            raise ValueError('%s must always be a primary key.' % type(self))
+        kwargs['primary_key'] = True
+        super(AutoField, self).__init__(*args, **kwargs)
+
+
+class BigAutoField(AutoField):
+    field_type = 'BIGAUTO'
+
+
+class IdentityField(AutoField):
+    field_type = 'INT GENERATED BY DEFAULT AS IDENTITY'
+
+
+class PrimaryKeyField(AutoField):
+    def __init__(self, *args, **kwargs):
+        __deprecated__('"PrimaryKeyField" has been renamed to "AutoField". '
+                       'Please update your code accordingly as this will be '
+                       'completely removed in a subsequent release.')
+        super(PrimaryKeyField, self).__init__(*args, **kwargs)
+
+
+class FloatField(Field):
+    field_type = 'FLOAT'
+    adapt = float
+
+
+class DoubleField(FloatField):
+    field_type = 'DOUBLE'
+
+
+class DecimalField(Field):
+    field_type = 'DECIMAL'
+
+    def __init__(self, max_digits=10, decimal_places=5, auto_round=False,
+                 rounding=None, *args, **kwargs):
+        self.max_digits = max_digits
+        self.decimal_places = decimal_places
+        self.auto_round = auto_round
+        self.rounding = rounding or decimal.DefaultContext.rounding
+        super(DecimalField, self).__init__(*args, **kwargs)
+
+    def get_modifiers(self):
+        return [self.max_digits, self.decimal_places]
+
+    def db_value(self, value):
+        D = decimal.Decimal
+        if not value:
+            return value if value is None else D(0)
+        if self.auto_round:
+            exp = D(10) ** (-self.decimal_places)
+            rounding = self.rounding
+            return D(text_type(value)).quantize(exp, rounding=rounding)
+        return value
+
+    def python_value(self, value):
+        if value is not None:
+            if isinstance(value, decimal.Decimal):
+                return value
+            return decimal.Decimal(text_type(value))
+
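+# Illustrative example (values hypothetical): with auto_round enabled,
+# db_value() quantizes to the declared precision, e.g.
+#
+#     price = DecimalField(max_digits=10, decimal_places=2, auto_round=True)
+#     # db_value(decimal.Decimal('1.2345')) -> Decimal('1.23'),
+#     # using exp = Decimal(10) ** -2 and the default banker's rounding.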
+
+class _StringField(Field):
+    def adapt(self, value):
+        if isinstance(value, text_type):
+            return value
+        elif isinstance(value, bytes_type):
+            return value.decode('utf-8')
+        return text_type(value)
+
+    def __add__(self, other): return self.concat(other)
+    def __radd__(self, other): return other.concat(self)
+
+
+class CharField(_StringField):
+    field_type = 'VARCHAR'
+
+    def __init__(self, max_length=255, *args, **kwargs):
+        self.max_length = max_length
+        super(CharField, self).__init__(*args, **kwargs)
+
+    def get_modifiers(self):
+        return [self.max_length] if self.max_length else None
+
+
+class FixedCharField(CharField):
+    field_type = 'CHAR'
+
+    def python_value(self, value):
+        value = super(FixedCharField, self).python_value(value)
+        if value:
+            value = value.strip()
+        return value
+
+
+class TextField(_StringField):
+    field_type = 'TEXT'
+
+
+class BlobField(Field):
+    field_type = 'BLOB'
+
+    def _db_hook(self, database):
+        self._constructor = database.get_binary_type()
+
+    def bind(self, model, name, set_attribute=True):
+        self._constructor = bytearray
+        if model._meta.database:
+            if isinstance(model._meta.database, Proxy):
+                model._meta.database.attach_callback(self._db_hook)
+            else:
+                self._db_hook(model._meta.database)
+
+        # Attach a hook to the model metadata; in the event the database is
+        # changed or set at run-time, we will be sure to apply our callback and
+        # use the proper data-type for our database driver.
+        model._meta._db_hooks.append(self._db_hook)
+        return super(BlobField, self).bind(model, name, set_attribute)
+
+    def db_value(self, value):
+        if isinstance(value, text_type):
+            value = value.encode('raw_unicode_escape')
+        if isinstance(value, bytes_type):
+            return self._constructor(value)
+        return value
+
+
+class BitField(BitwiseMixin, BigIntegerField):
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault('default', 0)
+        super(BitField, self).__init__(*args, **kwargs)
+        self.__current_flag = 1
+
+    def flag(self, value=None):
+        if value is None:
+            value = self.__current_flag
+            self.__current_flag <<= 1
+        else:
+            self.__current_flag = value << 1
+
+        class FlagDescriptor(object):
+            def __init__(self, field, value):
+                self._field = field
+                self._value = value
+            def __get__(self, instance, instance_type=None):
+                if instance is None:
+                    return self._field.bin_and(self._value) != 0
+                value = getattr(instance, self._field.name) or 0
+                return (value & self._value) != 0
+            def __set__(self, instance, is_set):
+                if is_set not in (True, False):
+                    raise ValueError('Value must be either True or False')
+                value = getattr(instance, self._field.name) or 0
+                if is_set:
+                    value |= self._value
+                else:
+                    value &= ~self._value
+                setattr(instance, self._field.name, value)
+        return FlagDescriptor(self, value)
+
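+# Illustrative usage sketch (model names are hypothetical):
+#
+#     class Post(Model):
+#         flags = BitField()              # stored as a single BIGINT
+#         is_sticky = flags.flag(1)
+#         is_favorite = flags.flag(2)
+#
+#     post.is_favorite = True             # ORs bit 2 into post.flags
+#     Post.select().where(Post.is_sticky) # class access -> bin_and(1) != 0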
+
+class BigBitFieldData(object):
+    def __init__(self, instance, name):
+        self.instance = instance
+        self.name = name
+        value = self.instance.__data__.get(self.name)
+        if not value:
+            value = bytearray()
+        elif not isinstance(value, bytearray):
+            value = bytearray(value)
+        self._buffer = self.instance.__data__[self.name] = value
+
+    def _ensure_length(self, idx):
+        byte_num, byte_offset = divmod(idx, 8)
+        cur_size = len(self._buffer)
+        if cur_size <= byte_num:
+            self._buffer.extend(b'\x00' * ((byte_num + 1) - cur_size))
+        return byte_num, byte_offset
+
+    def set_bit(self, idx):
+        byte_num, byte_offset = self._ensure_length(idx)
+        self._buffer[byte_num] |= (1 << byte_offset)
+
+    def clear_bit(self, idx):
+        byte_num, byte_offset = self._ensure_length(idx)
+        self._buffer[byte_num] &= ~(1 << byte_offset)
+
+    def toggle_bit(self, idx):
+        byte_num, byte_offset = self._ensure_length(idx)
+        self._buffer[byte_num] ^= (1 << byte_offset)
+        return bool(self._buffer[byte_num] & (1 << byte_offset))
+
+    def is_set(self, idx):
+        byte_num, byte_offset = self._ensure_length(idx)
+        return bool(self._buffer[byte_num] & (1 << byte_offset))
+
+    def __repr__(self):
+        return repr(self._buffer)
+
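+# Illustrative usage sketch: BigBitFieldData wraps a growable bytearray, so a
+# hypothetical "data = BigBitField()" behaves like a bitmap:
+#
+#     bitmap.data.set_bit(63)   # _ensure_length() grows the buffer to 8 bytes
+#     bitmap.data.is_set(63)    # -> True
+#     bitmap.data.clear_bit(63)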
+
+class BigBitFieldAccessor(FieldAccessor):
+    def __get__(self, instance, instance_type=None):
+        if instance is None:
+            return self.field
+        return BigBitFieldData(instance, self.name)
+    def __set__(self, instance, value):
+        if isinstance(value, memoryview):
+            value = value.tobytes()
+        elif isinstance(value, buffer_type):
+            value = bytes(value)
+        elif isinstance(value, bytearray):
+            value = bytes_type(value)
+        elif isinstance(value, BigBitFieldData):
+            value = bytes_type(value._buffer)
+        elif isinstance(value, text_type):
+            value = value.encode('utf-8')
+        elif not isinstance(value, bytes_type):
+            raise ValueError('Value must be a bytes, bytearray, memoryview, '
+                             'string, or BigBitFieldData instance.')
+        super(BigBitFieldAccessor, self).__set__(instance, value)
+
+
+class BigBitField(BlobField):
+    accessor_class = BigBitFieldAccessor
+
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault('default', bytes_type)
+        super(BigBitField, self).__init__(*args, **kwargs)
+
+    def db_value(self, value):
+        return bytes_type(value) if value is not None else value
+
+
+class UUIDField(Field):
+    field_type = 'UUID'
+
+    def db_value(self, value):
+        if isinstance(value, basestring) and len(value) == 32:
+            # Hex string. No transformation is necessary.
+            return value
+        elif isinstance(value, bytes) and len(value) == 16:
+            # Allow raw binary representation.
+            value = uuid.UUID(bytes=value)
+        if isinstance(value, uuid.UUID):
+            return value.hex
+        try:
+            return uuid.UUID(value).hex
+        except Exception:
+            # Unparseable values are passed through unchanged.
+            return value
+
+    def python_value(self, value):
+        if isinstance(value, uuid.UUID):
+            return value
+        return uuid.UUID(value) if value is not None else None
+
+
+class BinaryUUIDField(BlobField):
+    field_type = 'UUIDB'
+
+    def db_value(self, value):
+        if isinstance(value, bytes) and len(value) == 16:
+            # Raw binary value. No transformation is necessary.
+            return self._constructor(value)
+        elif isinstance(value, basestring) and len(value) == 32:
+            # Allow hex string representation.
+            value = uuid.UUID(hex=value)
+        if isinstance(value, uuid.UUID):
+            return self._constructor(value.bytes)
+        elif value is not None:
+            raise ValueError('value for binary UUID field must be UUID(), '
+                             'a hexadecimal string, or a bytes object.')
+
+    def python_value(self, value):
+        if isinstance(value, uuid.UUID):
+            return value
+        elif isinstance(value, memoryview):
+            value = value.tobytes()
+        elif value and not isinstance(value, bytes):
+            value = bytes(value)
+        return uuid.UUID(bytes=value) if value is not None else None
+
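+# Illustrative round-trip (values hypothetical): UUIDField stores the
+# 32-character hex form, BinaryUUIDField the 16-byte form:
+#
+#     u = uuid.uuid4()
+#     UUIDField().db_value(u)          # -> u.hex (32-char string)
+#     UUIDField().python_value(u.hex)  # -> uuid.UUID instance
+#     # BinaryUUIDField.db_value(u) wraps u.bytes in the db binary type.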
+
+def _date_part(date_part):
+    def dec(self):
+        return self.model._meta.database.extract_date(date_part, self)
+    return dec
+
+def format_date_time(value, formats, post_process=None):
+    post_process = post_process or (lambda x: x)
+    for fmt in formats:
+        try:
+            return post_process(datetime.datetime.strptime(value, fmt))
+        except ValueError:
+            pass
+    return value
+
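+# Illustrative behavior of format_date_time (formats are examples):
+#
+#     format_date_time('2020-01-31', ['%Y-%m-%d'])
+#     # -> datetime.datetime(2020, 1, 31, 0, 0)
+#     format_date_time('not-a-date', ['%Y-%m-%d'])
+#     # -> 'not-a-date'  (unparseable values pass through unchanged)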
+
+class _BaseFormattedField(Field):
+    formats = None
+
+    def __init__(self, formats=None, *args, **kwargs):
+        if formats is not None:
+            self.formats = formats
+        super(_BaseFormattedField, self).__init__(*args, **kwargs)
+
+
+class DateTimeField(_BaseFormattedField):
+    field_type = 'DATETIME'
+    formats = [
+        '%Y-%m-%d %H:%M:%S.%f',
+        '%Y-%m-%d %H:%M:%S',
+        '%Y-%m-%d',
+    ]
+
+    def adapt(self, value):
+        if value and isinstance(value, basestring):
+            return format_date_time(value, self.formats)
+        return value
+
+    year = property(_date_part('year'))
+    month = property(_date_part('month'))
+    day = property(_date_part('day'))
+    hour = property(_date_part('hour'))
+    minute = property(_date_part('minute'))
+    second = property(_date_part('second'))
+
+
+class DateField(_BaseFormattedField):
+    field_type = 'DATE'
+    formats = [
+        '%Y-%m-%d',
+        '%Y-%m-%d %H:%M:%S',
+        '%Y-%m-%d %H:%M:%S.%f',
+    ]
+
+    def adapt(self, value):
+        if value and isinstance(value, basestring):
+            pp = lambda x: x.date()
+            return format_date_time(value, self.formats, pp)
+        elif value and isinstance(value, datetime.datetime):
+            return value.date()
+        return value
+
+    year = property(_date_part('year'))
+    month = property(_date_part('month'))
+    day = property(_date_part('day'))
+
+
+class TimeField(_BaseFormattedField):
+    field_type = 'TIME'
+    formats = [
+        '%H:%M:%S.%f',
+        '%H:%M:%S',
+        '%H:%M',
+        '%Y-%m-%d %H:%M:%S.%f',
+        '%Y-%m-%d %H:%M:%S',
+    ]
+
+    def adapt(self, value):
+        if value:
+            if isinstance(value, basestring):
+                pp = lambda x: x.time()
+                return format_date_time(value, self.formats, pp)
+            elif isinstance(value, datetime.datetime):
+                return value.time()
+        if isinstance(value, datetime.timedelta):
+            return (datetime.datetime.min + value).time()
+        return value
+
+    hour = property(_date_part('hour'))
+    minute = property(_date_part('minute'))
+    second = property(_date_part('second'))
+
+
+class TimestampField(BigIntegerField):
+    # Support second -> microsecond resolution.
+    valid_resolutions = [10**i for i in range(7)]
+
+    def __init__(self, *args, **kwargs):
+        self.resolution = kwargs.pop('resolution', 1) or 1
+        if self.resolution not in self.valid_resolutions:
+            raise ValueError('TimestampField resolution must be one of: %s' %
+                             ', '.join(str(i) for i in self.valid_resolutions))
+
+        self.utc = kwargs.pop('utc', False) or False
+        _dt = datetime.datetime
+        self._conv = _dt.utcfromtimestamp if self.utc else _dt.fromtimestamp
+        _default = _dt.utcnow if self.utc else _dt.now
+        kwargs.setdefault('default', _default)
+        super(TimestampField, self).__init__(*args, **kwargs)
+
+    def db_value(self, value):
+        if value is None:
+            return
+
+        if isinstance(value, datetime.datetime):
+            pass
+        elif isinstance(value, datetime.date):
+            value = datetime.datetime(value.year, value.month, value.day)
+        else:
+            return int(round(value * self.resolution))
+
+        if self.utc:
+            timestamp = calendar.timegm(value.utctimetuple())
+        else:
+            timestamp = time.mktime(value.timetuple())
+        timestamp += (value.microsecond * .000001)
+        if self.resolution > 1:
+            timestamp *= self.resolution
+        return int(round(timestamp))
+
+    def python_value(self, value):
+        if isinstance(value, (int, float, long)):
+            if self.resolution > 1:
+                ticks_to_microsecond = 1000000 // self.resolution
+                value, ticks = divmod(value, self.resolution)
+                microseconds = int(ticks * ticks_to_microsecond)
+                return self._conv(value).replace(microsecond=microseconds)
+            else:
+                return self._conv(value)
+        return value
+
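+# Illustrative resolution math (values hypothetical): with resolution=1000 the
+# column stores millisecond ticks, e.g. in UTC mode a timestamp 1.5 seconds
+# past the epoch is stored as 1500, and python_value(1500) divmods back into
+# datetime(1970, 1, 1, 0, 0, 1, 500000) via utcfromtimestamp.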
+
+class IPField(BigIntegerField):
+    def db_value(self, val):
+        if val is not None:
+            return struct.unpack('!I', socket.inet_aton(val))[0]
+
+    def python_value(self, val):
+        if val is not None:
+            return socket.inet_ntoa(struct.pack('!I', val))
+
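+# Illustrative: IPField packs dotted-quad IPv4 strings into a single integer,
+# e.g. struct.unpack('!I', socket.inet_aton('127.0.0.1'))[0] == 2130706433,
+# and python_value() reverses the conversion. IPv6 is not supported here.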
+
+class BooleanField(Field):
+    field_type = 'BOOL'
+    adapt = bool
+
+
+class BareField(Field):
+    def __init__(self, adapt=None, *args, **kwargs):
+        super(BareField, self).__init__(*args, **kwargs)
+        if adapt is not None:
+            self.adapt = adapt
+
+    def ddl_datatype(self, ctx):
+        return
+
+
+class ForeignKeyField(Field):
+    accessor_class = ForeignKeyAccessor
+
+    def __init__(self, model, field=None, backref=None, on_delete=None,
+                 on_update=None, deferrable=None, _deferred=None,
+                 rel_model=None, to_field=None, object_id_name=None,
+                 lazy_load=True, related_name=None, *args, **kwargs):
+        kwargs.setdefault('index', True)
+
+        # If lazy_load is disabled, we use a different descriptor/accessor
+        # that will ensure we don't accidentally perform a query.
+        if not lazy_load:
+            self.accessor_class = NoQueryForeignKeyAccessor
+
+        super(ForeignKeyField, self).__init__(*args, **kwargs)
+
+        if rel_model is not None:
+            __deprecated__('"rel_model" has been deprecated in favor of '
+                           '"model" for ForeignKeyField objects.')
+            model = rel_model
+        if to_field is not None:
+            __deprecated__('"to_field" has been deprecated in favor of '
+                           '"field" for ForeignKeyField objects.')
+            field = to_field
+        if related_name is not None:
+            __deprecated__('"related_name" has been deprecated in favor of '
+                           '"backref" for Field objects.')
+            backref = related_name
+
+        self.rel_model = model
+        self.rel_field = field
+        self.declared_backref = backref
+        self.backref = None
+        self.on_delete = on_delete
+        self.on_update = on_update
+        self.deferrable = deferrable
+        self.deferred = _deferred
+        self.object_id_name = object_id_name
+        self.lazy_load = lazy_load
+
+    @property
+    def field_type(self):
+        if not isinstance(self.rel_field, AutoField):
+            return self.rel_field.field_type
+        elif isinstance(self.rel_field, BigAutoField):
+            return BigIntegerField.field_type
+        return IntegerField.field_type
+
+    def get_modifiers(self):
+        if not isinstance(self.rel_field, AutoField):
+            return self.rel_field.get_modifiers()
+        return super(ForeignKeyField, self).get_modifiers()
+
+    def adapt(self, value):
+        return self.rel_field.adapt(value)
+
+    def db_value(self, value):
+        if isinstance(value, self.rel_model):
+            value = value.get_id()
+        return self.rel_field.db_value(value)
+
+    def python_value(self, value):
+        if isinstance(value, self.rel_model):
+            return value
+        return self.rel_field.python_value(value)
+
+    def bind(self, model, name, set_attribute=True):
+        if not self.column_name:
+            self.column_name = name if name.endswith('_id') else name + '_id'
+        if not self.object_id_name:
+            self.object_id_name = self.column_name
+            if self.object_id_name == name:
+                self.object_id_name += '_id'
+        elif self.object_id_name == name:
+            raise ValueError('ForeignKeyField "%s"."%s" specifies an '
+                             'object_id_name that conflicts with its field '
+                             'name.' % (model._meta.name, name))
+        if self.rel_model == 'self':
+            self.rel_model = model
+        if isinstance(self.rel_field, basestring):
+            self.rel_field = getattr(self.rel_model, self.rel_field)
+        elif self.rel_field is None:
+            self.rel_field = self.rel_model._meta.primary_key
+
+        # Bind field before assigning backref, so field is bound when
+        # calling declared_backref() (if callable).
+        super(ForeignKeyField, self).bind(model, name, set_attribute)
+
+        if callable_(self.declared_backref):
+            self.backref = self.declared_backref(self)
+        else:
+            self.backref, self.declared_backref = self.declared_backref, None
+        if not self.backref:
+            self.backref = '%s_set' % model._meta.name
+
+        if set_attribute:
+            setattr(model, self.object_id_name, ObjectIdAccessor(self))
+            if self.backref not in '!+':
+                setattr(self.rel_model, self.backref, BackrefAccessor(self))
+
+    def foreign_key_constraint(self):
+        parts = [
+            SQL('FOREIGN KEY'),
+            EnclosedNodeList((self,)),
+            SQL('REFERENCES'),
+            self.rel_model,
+            EnclosedNodeList((self.rel_field,))]
+        if self.on_delete:
+            parts.append(SQL('ON DELETE %s' % self.on_delete))
+        if self.on_update:
+            parts.append(SQL('ON UPDATE %s' % self.on_update))
+        if self.deferrable:
+            parts.append(SQL('DEFERRABLE %s' % self.deferrable))
+        return NodeList(parts)
+
+    def __getattr__(self, attr):
+        if attr.startswith('__'):
+            # Prevent recursion error when deep-copying.
+            raise AttributeError('Cannot look up non-existent "__" methods.')
+        if attr in self.rel_model._meta.fields:
+            return self.rel_model._meta.fields[attr]
+        raise AttributeError('Foreign-key has no attribute %s, nor is it a '
+                             'valid field on the related model.' % attr)
+
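+# Illustrative example of bind() above (model names hypothetical):
+#
+#     class Tweet(Model):
+#         user = ForeignKeyField(User, backref='tweets')
+#
+# binds column_name 'user_id', exposes the raw id as Tweet.user_id via
+# ObjectIdAccessor, and installs User.tweets as a BackrefAccessor. With no
+# explicit backref, the default would be 'tweet_set'.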
+
+class DeferredForeignKey(Field):
+    _unresolved = set()
+
+    def __init__(self, rel_model_name, **kwargs):
+        self.field_kwargs = kwargs
+        self.rel_model_name = rel_model_name.lower()
+        DeferredForeignKey._unresolved.add(self)
+        super(DeferredForeignKey, self).__init__(
+            column_name=kwargs.get('column_name'),
+            null=kwargs.get('null'))
+
+    __hash__ = object.__hash__
+
+    def __deepcopy__(self, memo=None):
+        return DeferredForeignKey(self.rel_model_name, **self.field_kwargs)
+
+    def set_model(self, rel_model):
+        field = ForeignKeyField(rel_model, _deferred=True, **self.field_kwargs)
+        self.model._meta.add_field(self.name, field)
+
+    @staticmethod
+    def resolve(model_cls):
+        unresolved = sorted(DeferredForeignKey._unresolved,
+                            key=operator.attrgetter('_order'))
+        for dr in unresolved:
+            if dr.rel_model_name == model_cls.__name__.lower():
+                dr.set_model(model_cls)
+                DeferredForeignKey._unresolved.discard(dr)
+
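+# Illustrative use for circular references (model names hypothetical):
+#
+#     class User(Model):
+#         favorite_tweet = DeferredForeignKey('Tweet', null=True)
+#
+#     class Tweet(Model):
+#         user = ForeignKeyField(User, backref='tweets')
+#
+# When the Tweet class is constructed, ModelBase calls
+# DeferredForeignKey.resolve(Tweet), which swaps the placeholder for a real
+# ForeignKeyField via set_model().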
+
+class DeferredThroughModel(object):
+    def __init__(self):
+        self._refs = []
+
+    def set_field(self, model, field, name):
+        self._refs.append((model, field, name))
+
+    def set_model(self, through_model):
+        for src_model, m2mfield, name in self._refs:
+            m2mfield.through_model = through_model
+            src_model._meta.add_field(name, m2mfield)
+
+
+class MetaField(Field):
+    column_name = default = model = name = None
+    primary_key = False
+
+
+class ManyToManyFieldAccessor(FieldAccessor):
+    def __init__(self, model, field, name):
+        super(ManyToManyFieldAccessor, self).__init__(model, field, name)
+        self.model = field.model
+        self.rel_model = field.rel_model
+        self.through_model = field.through_model
+        src_fks = self.through_model._meta.model_refs[self.model]
+        dest_fks = self.through_model._meta.model_refs[self.rel_model]
+        if not src_fks:
+            raise ValueError('Cannot find foreign-key to "%s" on "%s" model.' %
+                             (self.model, self.through_model))
+        elif not dest_fks:
+            raise ValueError('Cannot find foreign-key to "%s" on "%s" model.' %
+                             (self.rel_model, self.through_model))
+        self.src_fk = src_fks[0]
+        self.dest_fk = dest_fks[0]
+
+    def __get__(self, instance, instance_type=None, force_query=False):
+        if instance is not None:
+            if not force_query and self.src_fk.backref != '+':
+                backref = getattr(instance, self.src_fk.backref)
+                if isinstance(backref, list):
+                    return [getattr(obj, self.dest_fk.name) for obj in backref]
+
+            src_id = getattr(instance, self.src_fk.rel_field.name)
+            return (ManyToManyQuery(instance, self, self.rel_model)
+                    .join(self.through_model)
+                    .join(self.model)
+                    .where(self.src_fk == src_id))
+
+        return self.field
+
+    def __set__(self, instance, value):
+        query = self.__get__(instance, force_query=True)
+        query.add(value, clear_existing=True)
+
+
+class ManyToManyField(MetaField):
+    accessor_class = ManyToManyFieldAccessor
+
+    def __init__(self, model, backref=None, through_model=None, on_delete=None,
+                 on_update=None, _is_backref=False):
+        if through_model is not None:
+            if not (isinstance(through_model, DeferredThroughModel) or
+                    is_model(through_model)):
+                raise TypeError('Unexpected value for through_model. Expected '
+                                'Model or DeferredThroughModel.')
+            if not _is_backref and (on_delete is not None or on_update is not None):
+                raise ValueError('Cannot specify on_delete or on_update when '
+                                 'through_model is specified.')
+        self.rel_model = model
+        self.backref = backref
+        self._through_model = through_model
+        self._on_delete = on_delete
+        self._on_update = on_update
+        self._is_backref = _is_backref
+
+    def _get_descriptor(self):
+        return ManyToManyFieldAccessor(self)
+
+    def bind(self, model, name, set_attribute=True):
+        if isinstance(self._through_model, DeferredThroughModel):
+            self._through_model.set_field(model, self, name)
+            return
+
+        super(ManyToManyField, self).bind(model, name, set_attribute)
+
+        if not self._is_backref:
+            many_to_many_field = ManyToManyField(
+                self.model,
+                backref=name,
+                through_model=self.through_model,
+                on_delete=self._on_delete,
+                on_update=self._on_update,
+                _is_backref=True)
+            self.backref = self.backref or model._meta.name + 's'
+            self.rel_model._meta.add_field(self.backref, many_to_many_field)
+
+    def get_models(self):
+        return [model for _, model in sorted((
+            (self._is_backref, self.model),
+            (not self._is_backref, self.rel_model)))]
+
+    @property
+    def through_model(self):
+        if self._through_model is None:
+            self._through_model = self._create_through_model()
+        return self._through_model
+
+    @through_model.setter
+    def through_model(self, value):
+        self._through_model = value
+
+    def _create_through_model(self):
+        lhs, rhs = self.get_models()
+        tables = [model._meta.table_name for model in (lhs, rhs)]
+
+        class Meta:
+            database = self.model._meta.database
+            schema = self.model._meta.schema
+            table_name = '%s_%s_through' % tuple(tables)
+            indexes = (
+                ((lhs._meta.name, rhs._meta.name),
+                 True),)
+
+        params = {'on_delete': self._on_delete, 'on_update': self._on_update}
+        attrs = {
+            lhs._meta.name: ForeignKeyField(lhs, **params),
+            rhs._meta.name: ForeignKeyField(rhs, **params),
+            'Meta': Meta}
+
+        klass_name = '%s%sThrough' % (lhs.__name__, rhs.__name__)
+        return type(klass_name, (Model,), attrs)
+
+    def get_through_model(self):
+        # XXX: Deprecated. Just use the "through_model" property.
+        return self.through_model
+
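+# Illustrative example (model names hypothetical): declaring
+#
+#     class Course(Model):
+#         students = ManyToManyField(Student, backref='courses')
+#
+# auto-creates a through model named CourseStudentThrough with a
+# 'course_student_through' table, one ForeignKeyField per side, and a unique
+# index over the pair, per _create_through_model() above.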
+
+class VirtualField(MetaField):
+    field_class = None
+
+    def __init__(self, field_class=None, *args, **kwargs):
+        Field = field_class if field_class is not None else self.field_class
+        self.field_instance = Field() if Field is not None else None
+        super(VirtualField, self).__init__(*args, **kwargs)
+
+    def db_value(self, value):
+        if self.field_instance is not None:
+            return self.field_instance.db_value(value)
+        return value
+
+    def python_value(self, value):
+        if self.field_instance is not None:
+            return self.field_instance.python_value(value)
+        return value
+
+    def bind(self, model, name, set_attribute=True):
+        self.model = model
+        self.column_name = self.name = name
+        setattr(model, name, self.accessor_class(model, self, name))
+
+
+class CompositeKey(MetaField):
+    sequence = None
+
+    def __init__(self, *field_names):
+        self.field_names = field_names
+
+    def __get__(self, instance, instance_type=None):
+        if instance is not None:
+            return tuple([getattr(instance, field_name)
+                          for field_name in self.field_names])
+        return self
+
+    def __set__(self, instance, value):
+        if not isinstance(value, (list, tuple)):
+            raise TypeError('A list or tuple must be used to set the value of '
+                            'a composite primary key.')
+        if len(value) != len(self.field_names):
+            raise ValueError('The length of the value must equal the number '
+                             'of columns of the composite primary key.')
+        for idx, field_value in enumerate(value):
+            setattr(instance, self.field_names[idx], field_value)
+
+    def __eq__(self, other):
+        expressions = [(self.model._meta.fields[field] == value)
+                       for field, value in zip(self.field_names, other)]
+        return reduce(operator.and_, expressions)
+
+    def __ne__(self, other):
+        return ~(self == other)
+
+    def __hash__(self):
+        return hash((self.model.__name__, self.field_names))
+
+    def __sql__(self, ctx):
+        return ctx.sql(CommaNodeList([self.model._meta.fields[field]
+                                      for field in self.field_names]))
+
+    def bind(self, model, name, set_attribute=True):
+        self.model = model
+        self.column_name = self.name = name
+        setattr(model, self.name, self)
+
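+# Illustrative composite-key usage (model names hypothetical):
+#
+#     class Membership(Model):
+#         user = ForeignKeyField(User)
+#         group = ForeignKeyField(Group)
+#         class Meta:
+#             primary_key = CompositeKey('user', 'group')
+#
+# Reading the composite key on an instance yields a tuple of the component
+# values; assigning requires a matching-length list or tuple, per the
+# __get__/__set__ methods above.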
+
+class _SortedFieldList(object):
+    __slots__ = ('_keys', '_items')
+
+    def __init__(self):
+        self._keys = []
+        self._items = []
+
+    def __getitem__(self, i):
+        return self._items[i]
+
+    def __iter__(self):
+        return iter(self._items)
+
+    def __contains__(self, item):
+        k = item._sort_key
+        i = bisect_left(self._keys, k)
+        j = bisect_right(self._keys, k)
+        return item in self._items[i:j]
+
+    def index(self, field):
+        return self._keys.index(field._sort_key)
+
+    def insert(self, item):
+        k = item._sort_key
+        i = bisect_left(self._keys, k)
+        self._keys.insert(i, k)
+        self._items.insert(i, item)
+
+    def remove(self, item):
+        idx = self.index(item)
+        del self._items[idx]
+        del self._keys[idx]
+
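+# Illustrative ordering note: _SortedFieldList keeps fields sorted by
+# Field._sort_key, i.e. (1, declaration-order) for primary keys and
+# (2, declaration-order) otherwise. Declaring fields b, a, then an AutoField
+# primary key therefore iterates as: pk, b, a.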
+
+# MODELS
+
+
+class SchemaManager(object):
+    def __init__(self, model, database=None, **context_options):
+        self.model = model
+        self._database = database
+        context_options.setdefault('scope', SCOPE_VALUES)
+        self.context_options = context_options
+
+    @property
+    def database(self):
+        db = self._database or self.model._meta.database
+        if db is None:
+            raise ImproperlyConfigured('database attribute does not appear to '
+                                       'be set on the model: %s' % self.model)
+        return db
+
+    @database.setter
+    def database(self, value):
+        self._database = value
+
+    def _create_context(self):
+        return self.database.get_sql_context(**self.context_options)
+
+    def _create_table(self, safe=True, **options):
+        is_temp = options.pop('temporary', False)
+        ctx = self._create_context()
+        ctx.literal('CREATE TEMPORARY TABLE ' if is_temp else 'CREATE TABLE ')
+        if safe:
+            ctx.literal('IF NOT EXISTS ')
+        ctx.sql(self.model).literal(' ')
+
+        columns = []
+        constraints = []
+        meta = self.model._meta
+        if meta.composite_key:
+            pk_columns = [meta.fields[field_name].column
+                          for field_name in meta.primary_key.field_names]
+            constraints.append(NodeList((SQL('PRIMARY KEY'),
+                                         EnclosedNodeList(pk_columns))))
+
+        for field in meta.sorted_fields:
+            columns.append(field.ddl(ctx))
+            if isinstance(field, ForeignKeyField) and not field.deferred:
+                constraints.append(field.foreign_key_constraint())
+
+        if meta.constraints:
+            constraints.extend(meta.constraints)
+
+        constraints.extend(self._create_table_option_sql(options))
+        ctx.sql(EnclosedNodeList(columns + constraints))
+
+        if meta.table_settings is not None:
+            table_settings = ensure_tuple(meta.table_settings)
+            for setting in table_settings:
+                if not isinstance(setting, basestring):
+                    raise ValueError('table_settings must be strings')
+                ctx.literal(' ').literal(setting)
+
+        if meta.without_rowid:
+            ctx.literal(' WITHOUT ROWID')
+        return ctx
+
+    def _create_table_option_sql(self, options):
+        accum = []
+        options = merge_dict(self.model._meta.options or {}, options)
+        if not options:
+            return accum
+
+        for key, value in sorted(options.items()):
+            if not isinstance(value, Node):
+                if is_model(value):
+                    value = value._meta.table
+                else:
+                    value = SQL(str(value))
+            accum.append(NodeList((SQL(key), value), glue='='))
+        return accum
+
+    def create_table(self, safe=True, **options):
+        self.database.execute(self._create_table(safe=safe, **options))
+
+    def _drop_table(self, safe=True, **options):
+        ctx = (self._create_context()
+               .literal('DROP TABLE IF EXISTS ' if safe else 'DROP TABLE ')
+               .sql(self.model))
+        if options.get('cascade'):
+            ctx = ctx.literal(' CASCADE')
+        elif options.get('restrict'):
+            ctx = ctx.literal(' RESTRICT')
+        return ctx
+
+    def drop_table(self, safe=True, **options):
+        self.database.execute(self._drop_table(safe=safe, **options))
+
+    def _truncate_table(self, restart_identity=False, cascade=False):
+        db = self.database
+        if not db.truncate_table:
+            return (self._create_context()
+                    .literal('DELETE FROM ').sql(self.model))
+
+        ctx = self._create_context().literal('TRUNCATE TABLE ').sql(self.model)
+        if restart_identity:
+            ctx = ctx.literal(' RESTART IDENTITY')
+        if cascade:
+            ctx = ctx.literal(' CASCADE')
+        return ctx
+
+    def truncate_table(self, restart_identity=False, cascade=False):
+        self.database.execute(self._truncate_table(restart_identity, cascade))
+
+    def _create_indexes(self, safe=True):
+        return [self._create_index(index, safe)
+                for index in self.model._meta.fields_to_index()]
+
+    def _create_index(self, index, safe=True):
+        if isinstance(index, Index):
+            if not self.database.safe_create_index:
+                index = index.safe(False)
+            elif index._safe != safe:
+                index = index.safe(safe)
+        return self._create_context().sql(index)
+
+    def create_indexes(self, safe=True):
+        for query in self._create_indexes(safe=safe):
+            self.database.execute(query)
+
+    def _drop_indexes(self, safe=True):
+        return [self._drop_index(index, safe)
+                for index in self.model._meta.fields_to_index()
+                if isinstance(index, Index)]
+
+    def _drop_index(self, index, safe):
+        statement = 'DROP INDEX '
+        if safe and self.database.safe_drop_index:
+            statement += 'IF EXISTS '
+        if isinstance(index._table, Table) and index._table._schema:
+            index_name = Entity(index._table._schema, index._name)
+        else:
+            index_name = Entity(index._name)
+        return (self
+                ._create_context()
+                .literal(statement)
+                .sql(index_name))
+
+    def drop_indexes(self, safe=True):
+        for query in self._drop_indexes(safe=safe):
+            self.database.execute(query)
+
+    def _check_sequences(self, field):
+        if not field.sequence or not self.database.sequences:
+            raise ValueError('Sequences are either not supported, or are not '
+                             'defined for "%s".' % field.name)
+
+    def _sequence_for_field(self, field):
+        if field.model._meta.schema:
+            return Entity(field.model._meta.schema, field.sequence)
+        else:
+            return Entity(field.sequence)
+
+    def _create_sequence(self, field):
+        self._check_sequences(field)
+        if not self.database.sequence_exists(field.sequence):
+            return (self
+                    ._create_context()
+                    .literal('CREATE SEQUENCE ')
+                    .sql(self._sequence_for_field(field)))
+
+    def create_sequence(self, field):
+        seq_ctx = self._create_sequence(field)
+        if seq_ctx is not None:
+            self.database.execute(seq_ctx)
+
+    def _drop_sequence(self, field):
+        self._check_sequences(field)
+        if self.database.sequence_exists(field.sequence):
+            return (self
+                    ._create_context()
+                    .literal('DROP SEQUENCE ')
+                    .sql(self._sequence_for_field(field)))
+
+    def drop_sequence(self, field):
+        seq_ctx = self._drop_sequence(field)
+        if seq_ctx is not None:
+            self.database.execute(seq_ctx)
+
+    def _create_foreign_key(self, field):
+        name = 'fk_%s_%s_refs_%s' % (field.model._meta.table_name,
+                                     field.column_name,
+                                     field.rel_model._meta.table_name)
+        return (self
+                ._create_context()
+                .literal('ALTER TABLE ')
+                .sql(field.model)
+                .literal(' ADD CONSTRAINT ')
+                .sql(Entity(_truncate_constraint_name(name)))
+                .literal(' ')
+                .sql(field.foreign_key_constraint()))
+
+    def create_foreign_key(self, field):
+        self.database.execute(self._create_foreign_key(field))
+
+    def create_sequences(self):
+        if self.database.sequences:
+            for field in self.model._meta.sorted_fields:
+                if field.sequence:
+                    self.create_sequence(field)
+
+    def create_all(self, safe=True, **table_options):
+        self.create_sequences()
+        self.create_table(safe, **table_options)
+        self.create_indexes(safe=safe)
+
+    def drop_sequences(self):
+        if self.database.sequences:
+            for field in self.model._meta.sorted_fields:
+                if field.sequence:
+                    self.drop_sequence(field)
+
+    def drop_all(self, safe=True, drop_sequences=True, **options):
+        self.drop_table(safe, **options)
+        if drop_sequences:
+            self.drop_sequences()
+
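+# Illustrative output (dialect-dependent sketch): for a model with an implicit
+# AutoField pk and a CharField "name", _create_table() renders something like:
+#
+#     CREATE TABLE IF NOT EXISTS "person" (
+#         "id" INTEGER NOT NULL PRIMARY KEY,
+#         "name" VARCHAR(255) NOT NULL)
+#
+# with the exact column types resolved through ctx.state.field_types.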
+
+class Metadata(object):
+    def __init__(self, model, database=None, table_name=None, indexes=None,
+                 primary_key=None, constraints=None, schema=None,
+                 only_save_dirty=False, depends_on=None, options=None,
+                 db_table=None, table_function=None, table_settings=None,
+                 without_rowid=False, temporary=False, legacy_table_names=True,
+                 **kwargs):
+        if db_table is not None:
+            __deprecated__('"db_table" has been deprecated in favor of '
+                           '"table_name" for Models.')
+            table_name = db_table
+        self.model = model
+        self.database = database
+
+        self.fields = {}
+        self.columns = {}
+        self.combined = {}
+
+        self._sorted_field_list = _SortedFieldList()
+        self.sorted_fields = []
+        self.sorted_field_names = []
+
+        self.defaults = {}
+        self._default_by_name = {}
+        self._default_dict = {}
+        self._default_callables = {}
+        self._default_callable_list = []
+
+        self.name = model.__name__.lower()
+        self.table_function = table_function
+        self.legacy_table_names = legacy_table_names
+        if not table_name:
+            table_name = (self.table_function(model)
+                          if self.table_function
+                          else self.make_table_name())
+        self.table_name = table_name
+        self._table = None
+
+        self.indexes = list(indexes) if indexes else []
+        self.constraints = constraints
+        self._schema = schema
+        self.primary_key = primary_key
+        self.composite_key = self.auto_increment = None
+        self.only_save_dirty = only_save_dirty
+        self.depends_on = depends_on
+        self.table_settings = table_settings
+        self.without_rowid = without_rowid
+        self.temporary = temporary
+
+        self.refs = {}
+        self.backrefs = {}
+        self.model_refs = collections.defaultdict(list)
+        self.model_backrefs = collections.defaultdict(list)
+        self.manytomany = {}
+
+        self.options = options or {}
+        for key, value in kwargs.items():
+            setattr(self, key, value)
+        self._additional_keys = set(kwargs.keys())
+
+        # Allow objects to register hooks that are called if the model is bound
+        # to a different database. For example, BlobField uses a different
+        # Python data-type depending on the db driver / python version. When
+        # the database changes, we need to update any BlobField so they can use
+        # the appropriate data-type.
+        self._db_hooks = []
+
+    def make_table_name(self):
+        if self.legacy_table_names:
+            return re.sub(r'[^\w]+', '_', self.name)
+        return make_snake_case(self.model.__name__)
+
+    def model_graph(self, refs=True, backrefs=True, depth_first=True):
+        if not refs and not backrefs:
+            raise ValueError('One of `refs` or `backrefs` must be True.')
+
+        accum = [(None, self.model, None)]
+        seen = set()
+        queue = collections.deque((self,))
+        method = queue.pop if depth_first else queue.popleft
+
+        while queue:
+            curr = method()
+            if curr in seen: continue
+            seen.add(curr)
+
+            if refs:
+                for fk, model in curr.refs.items():
+                    accum.append((fk, model, False))
+                    queue.append(model._meta)
+            if backrefs:
+                for fk, model in curr.backrefs.items():
+                    accum.append((fk, model, True))
+                    queue.append(model._meta)
+
+        return accum
+
+    def add_ref(self, field):
+        rel = field.rel_model
+        self.refs[field] = rel
+        self.model_refs[rel].append(field)
+        rel._meta.backrefs[field] = self.model
+        rel._meta.model_backrefs[self.model].append(field)
+
+    def remove_ref(self, field):
+        rel = field.rel_model
+        del self.refs[field]
+        self.model_refs[rel].remove(field)
+        del rel._meta.backrefs[field]
+        rel._meta.model_backrefs[self.model].remove(field)
+
+    def add_manytomany(self, field):
+        self.manytomany[field.name] = field
+
+    def remove_manytomany(self, field):
+        del self.manytomany[field.name]
+
+    @property
+    def table(self):
+        if self._table is None:
+            self._table = Table(
+                self.table_name,
+                [field.column_name for field in self.sorted_fields],
+                schema=self.schema,
+                _model=self.model,
+                _database=self.database)
+        return self._table
+
+    @table.setter
+    def table(self, value):
+        raise AttributeError('Cannot set the "table".')
+
+    @table.deleter
+    def table(self):
+        self._table = None
+
+    @property
+    def schema(self):
+        return self._schema
+
+    @schema.setter
+    def schema(self, value):
+        self._schema = value
+        del self.table
+
+    @property
+    def entity(self):
+        if self._schema:
+            return Entity(self._schema, self.table_name)
+        else:
+            return Entity(self.table_name)
+
+    def _update_sorted_fields(self):
+        self.sorted_fields = list(self._sorted_field_list)
+        self.sorted_field_names = [f.name for f in self.sorted_fields]
+
+    def get_rel_for_model(self, model):
+        if isinstance(model, ModelAlias):
+            model = model.model
+        forwardrefs = self.model_refs.get(model, [])
+        backrefs = self.model_backrefs.get(model, [])
+        return (forwardrefs, backrefs)
+
+    def add_field(self, field_name, field, set_attribute=True):
+        if field_name in self.fields:
+            self.remove_field(field_name)
+        elif field_name in self.manytomany:
+            self.remove_manytomany(self.manytomany[field_name])
+
+        if not isinstance(field, MetaField):
+            del self.table
+            field.bind(self.model, field_name, set_attribute)
+            self.fields[field.name] = field
+            self.columns[field.column_name] = field
+            self.combined[field.name] = field
+            self.combined[field.column_name] = field
+
+            self._sorted_field_list.insert(field)
+            self._update_sorted_fields()
+
+            if field.default is not None:
+                # This optimization helps speed up model instance construction.
+                self.defaults[field] = field.default
+                if callable_(field.default):
+                    self._default_callables[field] = field.default
+                    self._default_callable_list.append((field.name,
+                                                        field.default))
+                else:
+                    self._default_dict[field] = field.default
+                    self._default_by_name[field.name] = field.default
+        else:
+            field.bind(self.model, field_name, set_attribute)
+
+        if isinstance(field, ForeignKeyField):
+            self.add_ref(field)
+        elif isinstance(field, ManyToManyField) and field.name:
+            self.add_manytomany(field)
+
+    def remove_field(self, field_name):
+        if field_name not in self.fields:
+            return
+
+        del self.table
+        original = self.fields.pop(field_name)
+        del self.columns[original.column_name]
+        del self.combined[field_name]
+        try:
+            del self.combined[original.column_name]
+        except KeyError:
+            pass
+        self._sorted_field_list.remove(original)
+        self._update_sorted_fields()
+
+        if original.default is not None:
+            del self.defaults[original]
+            if self._default_callables.pop(original, None):
+                for i, (name, _) in enumerate(self._default_callable_list):
+                    if name == field_name:
+                        self._default_callable_list.pop(i)
+                        break
+            else:
+                self._default_dict.pop(original, None)
+                self._default_by_name.pop(original.name, None)
+
+        if isinstance(original, ForeignKeyField):
+            self.remove_ref(original)
+
+    def set_primary_key(self, name, field):
+        self.composite_key = isinstance(field, CompositeKey)
+        self.add_field(name, field)
+        self.primary_key = field
+        self.auto_increment = (
+            field.auto_increment or
+            bool(field.sequence))
+
+    def get_primary_keys(self):
+        if self.composite_key:
+            return tuple([self.fields[field_name]
+                          for field_name in self.primary_key.field_names])
+        else:
+            return (self.primary_key,) if self.primary_key is not False else ()
+
+    def get_default_dict(self):
+        dd = self._default_by_name.copy()
+        for field_name, default in self._default_callable_list:
+            dd[field_name] = default()
+        return dd
+
+    def fields_to_index(self):
+        indexes = []
+        for f in self.sorted_fields:
+            if f.primary_key:
+                continue
+            if f.index or f.unique:
+                indexes.append(ModelIndex(self.model, (f,), unique=f.unique,
+                                          using=f.index_type))
+
+        for index_obj in self.indexes:
+            if isinstance(index_obj, Node):
+                indexes.append(index_obj)
+            elif isinstance(index_obj, (list, tuple)):
+                index_parts, unique = index_obj
+                fields = []
+                for part in index_parts:
+                    if isinstance(part, basestring):
+                        fields.append(self.combined[part])
+                    elif isinstance(part, Node):
+                        fields.append(part)
+                    else:
+                        raise ValueError('Expected either a field name or a '
+                                         'subclass of Node. Got: %s' % part)
+                indexes.append(ModelIndex(self.model, fields, unique=unique))
+
+        return indexes
+
+    def set_database(self, database):
+        self.database = database
+        self.model._schema._database = database
+        del self.table
+
+        # Apply any hooks that have been registered.
+        for hook in self._db_hooks:
+            hook(database)
+
+    def set_table_name(self, table_name):
+        self.table_name = table_name
+        del self.table
+
+
+class SubclassAwareMetadata(Metadata):
+    models = []
+
+    def __init__(self, model, *args, **kwargs):
+        super(SubclassAwareMetadata, self).__init__(model, *args, **kwargs)
+        self.models.append(model)
+
+    def map_models(self, fn):
+        for model in self.models:
+            fn(model)
+
+
+class DoesNotExist(Exception): pass
+
+
+class ModelBase(type):
+    inheritable = set(['constraints', 'database', 'indexes', 'primary_key',
+                       'options', 'schema', 'table_function', 'temporary',
+                       'only_save_dirty', 'legacy_table_names',
+                       'table_settings'])
+
+    def __new__(cls, name, bases, attrs):
+        if name == MODEL_BASE or bases[0].__name__ == MODEL_BASE:
+            return super(ModelBase, cls).__new__(cls, name, bases, attrs)
+
+        meta_options = {}
+        meta = attrs.pop('Meta', None)
+        if meta:
+            for k, v in meta.__dict__.items():
+                if not k.startswith('_'):
+                    meta_options[k] = v
+
+        pk = getattr(meta, 'primary_key', None)
+        pk_name = parent_pk = None
+
+        # Inherit any field descriptors by deep copying the underlying field
+        # into the attrs of the new model. Additionally, see if the bases
+        # define inheritable model options and swipe them.
+        for b in bases:
+            if not hasattr(b, '_meta'):
+                continue
+
+            base_meta = b._meta
+            if parent_pk is None:
+                parent_pk = deepcopy(base_meta.primary_key)
+            all_inheritable = cls.inheritable | base_meta._additional_keys
+            for k in base_meta.__dict__:
+                if k in all_inheritable and k not in meta_options:
+                    meta_options[k] = base_meta.__dict__[k]
+            meta_options.setdefault('schema', base_meta.schema)
+
+            for (k, v) in b.__dict__.items():
+                if k in attrs: continue
+
+                if isinstance(v, FieldAccessor) and not v.field.primary_key:
+                    attrs[k] = deepcopy(v.field)
+
+        sopts = meta_options.pop('schema_options', None) or {}
+        Meta = meta_options.get('model_metadata_class', Metadata)
+        Schema = meta_options.get('schema_manager_class', SchemaManager)
+
+        # Construct the new class.
+        cls = super(ModelBase, cls).__new__(cls, name, bases, attrs)
+        cls.__data__ = cls.__rel__ = None
+
+        cls._meta = Meta(cls, **meta_options)
+        cls._schema = Schema(cls, **sopts)
+
+        fields = []
+        for key, value in cls.__dict__.items():
+            if isinstance(value, Field):
+                if value.primary_key and pk:
+                    raise ValueError('over-determined primary key %s.' % name)
+                elif value.primary_key:
+                    pk, pk_name = value, key
+                else:
+                    fields.append((key, value))
+
+        if pk is None:
+            if parent_pk is not False:
+                pk, pk_name = ((parent_pk, parent_pk.name)
+                               if parent_pk is not None else
+                               (AutoField(), 'id'))
+            else:
+                pk = False
+        elif isinstance(pk, CompositeKey):
+            pk_name = '__composite_key__'
+            cls._meta.composite_key = True
+
+        if pk is not False:
+            cls._meta.set_primary_key(pk_name, pk)
+
+        for name, field in fields:
+            cls._meta.add_field(name, field)
+
+        # Create a repr and error class before finalizing.
+        if hasattr(cls, '__str__') and '__repr__' not in attrs:
+            setattr(cls, '__repr__', lambda self: '<%s: %s>' % (
+                cls.__name__, self.__str__()))
+
+        exc_name = '%sDoesNotExist' % cls.__name__
+        exc_attrs = {'__module__': cls.__module__}
+        exception_class = type(exc_name, (DoesNotExist,), exc_attrs)
+        cls.DoesNotExist = exception_class
+
+        # Call validation hook, allowing additional model validation.
+        cls.validate_model()
+        DeferredForeignKey.resolve(cls)
+        return cls
+
+    def __repr__(self):
+        return '<Model: %s>' % self.__name__
+
+    def __iter__(self):
+        return iter(self.select())
+
+    def __getitem__(self, key):
+        return self.get_by_id(key)
+
+    def __setitem__(self, key, value):
+        self.set_by_id(key, value)
+
+    def __delitem__(self, key):
+        self.delete_by_id(key)
+
+    def __contains__(self, key):
+        try:
+            self.get_by_id(key)
+        except self.DoesNotExist:
+            return False
+        else:
+            return True
+
+    def __len__(self):
+        return self.select().count()
+    def __bool__(self): return True
+    __nonzero__ = __bool__  # Python 2.
+
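+# Illustrative effect of the metaclass (names hypothetical):
+#
+#     class BaseModel(Model):
+#         class Meta:
+#             database = db      # 'database' is in ModelBase.inheritable
+#
+#     class Person(BaseModel):
+#         name = CharField()
+#
+# Person inherits the database, receives an implicit AutoField 'id' primary
+# key, and gets its own Person.DoesNotExist exception subclass.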
+
+class _BoundModelsContext(_callable_context_manager):
+    def __init__(self, models, database, bind_refs, bind_backrefs):
+        self.models = models
+        self.database = database
+        self.bind_refs = bind_refs
+        self.bind_backrefs = bind_backrefs
+
+    def __enter__(self):
+        self._orig_database = []
+        for model in self.models:
+            self._orig_database.append(model._meta.database)
+            model.bind(self.database, self.bind_refs, self.bind_backrefs)
+        return self.models
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        for model, db in zip(self.models, self._orig_database):
+            model.bind(db, self.bind_refs, self.bind_backrefs)
+
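+# Illustrative use: this context manager backs APIs such as
+# Database.bind_ctx(), e.g. temporarily pointing models at a test database:
+#
+#     with test_db.bind_ctx([User, Tweet]):
+#         ...  # models use test_db here
+#     # the original databases are restored on __exit__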
+
+class Model(with_metaclass(ModelBase, Node)):
+    def __init__(self, *args, **kwargs):
+        if kwargs.pop('__no_default__', None):
+            self.__data__ = {}
+        else:
+            self.__data__ = self._meta.get_default_dict()
+        self._dirty = set(self.__data__)
+        self.__rel__ = {}
+
+        for k in kwargs:
+            setattr(self, k, kwargs[k])
+
+    def __str__(self):
+        return str(self._pk) if self._meta.primary_key is not False else 'n/a'
+
+    @classmethod
+    def validate_model(cls):
+        pass
+
+    @classmethod
+    def alias(cls, alias=None):
+        return ModelAlias(cls, alias)
+
+    @classmethod
+    def select(cls, *fields):
+        is_default = not fields
+        if not fields:
+            fields = cls._meta.sorted_fields
+        return ModelSelect(cls, fields, is_default=is_default)
+
+    @classmethod
+    def _normalize_data(cls, data, kwargs):
+        normalized = {}
+        if data:
+            if not isinstance(data, dict):
+                if kwargs:
+                    raise ValueError('Data cannot be mixed with keyword '
+                                     'arguments: %s' % data)
+                return data
+            for key in data:
+                try:
+                    field = (key if isinstance(key, Field)
+                             else cls._meta.combined[key])
+                except KeyError:
+                    raise ValueError('Unrecognized field name: "%s" in %s.' %
+                                     (key, data))
+                normalized[field] = data[key]
+        if kwargs:
+            for key in kwargs:
+                try:
+                    normalized[cls._meta.combined[key]] = kwargs[key]
+                except KeyError:
+                    normalized[getattr(cls, key)] = kwargs[key]
+        return normalized
+
+    @classmethod
+    def update(cls, __data=None, **update):
+        return ModelUpdate(cls, cls._normalize_data(__data, update))
+
+    @classmethod
+    def insert(cls, __data=None, **insert):
+        return ModelInsert(cls, cls._normalize_data(__data, insert))
+
+    @classmethod
+    def insert_many(cls, rows, fields=None):
+        return ModelInsert(cls, insert=rows, columns=fields)
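+
+    # Sketch of insert_many() (hypothetical model): rows may be dicts keyed
+    # by field name, or tuples paired with an explicit list of fields.
+    #
+    #     User.insert_many([{'username': 'alice'},
+    #                       {'username': 'bob'}]).execute()
+    #     User.insert_many([('alice',), ('bob',)],
+    #                      fields=[User.username]).execute()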
+
+    @classmethod
+    def insert_from(cls, query, fields):
+        columns = [getattr(cls, field) if isinstance(field, basestring)
+                   else field for field in fields]
+        return ModelInsert(cls, insert=query, columns=columns)
+
+    @classmethod
+    def replace(cls, __data=None, **insert):
+        return cls.insert(__data, **insert).on_conflict('REPLACE')
+
+    @classmethod
+    def replace_many(cls, rows, fields=None):
+        return (cls
+                .insert_many(rows=rows, fields=fields)
+                .on_conflict('REPLACE'))
+
+    @classmethod
+    def raw(cls, sql, *params):
+        return ModelRaw(cls, sql, params)
+
+    @classmethod
+    def delete(cls):
+        return ModelDelete(cls)
+
+    @classmethod
+    def create(cls, **query):
+        inst = cls(**query)
+        inst.save(force_insert=True)
+        return inst
+
+    @classmethod
+    def bulk_create(cls, model_list, batch_size=None):
+        if batch_size is not None:
+            batches = chunked(model_list, batch_size)
+        else:
+            batches = [model_list]
+
+        field_names = list(cls._meta.sorted_field_names)
+        if cls._meta.auto_increment:
+            pk_name = cls._meta.primary_key.name
+            field_names.remove(pk_name)
+            ids_returned = cls._meta.database.returning_clause
+        else:
+            ids_returned = False
+
+        fields = [cls._meta.fields[field_name] for field_name in field_names]
+        for batch in batches:
+            accum = ([getattr(model, f) for f in field_names]
+                     for model in batch)
+            res = cls.insert_many(accum, fields=fields).execute()
+            if ids_returned:
+                for (obj_id,), model in zip(res, batch):
+                    setattr(model, pk_name, obj_id)
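+
+    # Sketch of bulk_create() (hypothetical model). The instances must be
+    # unsaved; primary keys are only back-filled on databases that support
+    # a RETURNING clause (e.g. Postgres).
+    #
+    #     users = [User(username='u%s' % i) for i in range(100)]
+    #     User.bulk_create(users, batch_size=50)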
+
+    @classmethod
+    def bulk_update(cls, model_list, fields, batch_size=None):
+        # First normalize list of fields so all are field instances.
+        fields = [cls._meta.fields[f] if isinstance(f, basestring) else f
+                  for f in fields]
+        # Now collect list of attribute names to use for values.
+        attrs = [field.object_id_name if isinstance(field, ForeignKeyField)
+                 else field.name for field in fields]
+
+        if batch_size is not None:
+            batches = chunked(model_list, batch_size)
+        else:
+            batches = [model_list]
+
+        n = 0
+        for batch in batches:
+            id_list = [model._pk for model in batch]
+            update = {}
+            for field, attr in zip(fields, attrs):
+                accum = []
+                for model in batch:
+                    value = getattr(model, attr)
+                    if not isinstance(value, Node):
+                        value = Value(value, converter=field.db_value)
+                    accum.append((model._pk, value))
+                case = Case(cls._meta.primary_key, accum)
+                update[field] = case
+
+            n += (cls.update(update)
+                  .where(cls._meta.primary_key.in_(id_list))
+                  .execute())
+        return n
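+
+    # bulk_update() emits one UPDATE per batch, using a CASE expression
+    # keyed on the primary key. Sketch (hypothetical model; every instance
+    # must already have a primary key):
+    #
+    #     for user in users:
+    #         user.active = False
+    #     User.bulk_update(users, fields=[User.active], batch_size=50)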
+
+    @classmethod
+    def noop(cls):
+        return NoopModelSelect(cls, ())
+
+    @classmethod
+    def get(cls, *query, **filters):
+        sq = cls.select()
+        if query:
+            # Handle the simple case of a lookup by integer primary key.
+            if len(query) == 1 and isinstance(query[0], int):
+                sq = sq.where(cls._meta.primary_key == query[0])
+            else:
+                sq = sq.where(*query)
+        if filters:
+            sq = sq.filter(**filters)
+        return sq.get()
+
+    @classmethod
+    def get_or_none(cls, *query, **filters):
+        try:
+            return cls.get(*query, **filters)
+        except DoesNotExist:
+            pass
+
+    @classmethod
+    def get_by_id(cls, pk):
+        return cls.get(cls._meta.primary_key == pk)
+
+    @classmethod
+    def set_by_id(cls, key, value):
+        if key is None:
+            return cls.insert(value).execute()
+        else:
+            return (cls.update(value)
+                    .where(cls._meta.primary_key == key).execute())
+
+    @classmethod
+    def delete_by_id(cls, pk):
+        return cls.delete().where(cls._meta.primary_key == pk).execute()
+
+    @classmethod
+    def get_or_create(cls, **kwargs):
+        defaults = kwargs.pop('defaults', {})
+        query = cls.select()
+        for field, value in kwargs.items():
+            query = query.where(getattr(cls, field) == value)
+
+        try:
+            return query.get(), False
+        except cls.DoesNotExist:
+            try:
+                if defaults:
+                    kwargs.update(defaults)
+                with cls._meta.database.atomic():
+                    return cls.create(**kwargs), True
+            except IntegrityError as exc:
+                try:
+                    return query.get(), False
+                except cls.DoesNotExist:
+                    raise exc
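+
+    # Sketch of get_or_create() (hypothetical model): the keyword arguments
+    # filter the lookup, while "defaults" apply only when creating.
+    #
+    #     user, created = User.get_or_create(
+    #         username='alice',
+    #         defaults={'active': True})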
+
+    @classmethod
+    def filter(cls, *dq_nodes, **filters):
+        return cls.select().filter(*dq_nodes, **filters)
+
+    def get_id(self):
+        return getattr(self, self._meta.primary_key.name)
+
+    _pk = property(get_id)
+
+    @_pk.setter
+    def _pk(self, value):
+        setattr(self, self._meta.primary_key.name, value)
+
+    def _pk_expr(self):
+        return self._meta.primary_key == self._pk
+
+    def _prune_fields(self, field_dict, only):
+        new_data = {}
+        for field in only:
+            if isinstance(field, basestring):
+                field = self._meta.combined[field]
+            if field.name in field_dict:
+                new_data[field.name] = field_dict[field.name]
+        return new_data
+
+    def _populate_unsaved_relations(self, field_dict):
+        for foreign_key_field in self._meta.refs:
+            foreign_key = foreign_key_field.name
+            conditions = (
+                foreign_key in field_dict and
+                field_dict[foreign_key] is None and
+                self.__rel__.get(foreign_key) is not None)
+            if conditions:
+                setattr(self, foreign_key, getattr(self, foreign_key))
+                field_dict[foreign_key] = self.__data__[foreign_key]
+
+    def save(self, force_insert=False, only=None):
+        field_dict = self.__data__.copy()
+        if self._meta.primary_key is not False:
+            pk_field = self._meta.primary_key
+            pk_value = self._pk
+        else:
+            pk_field = pk_value = None
+        if only:
+            field_dict = self._prune_fields(field_dict, only)
+        elif self._meta.only_save_dirty and not force_insert:
+            field_dict = self._prune_fields(field_dict, self.dirty_fields)
+            if not field_dict:
+                self._dirty.clear()
+                return False
+
+        self._populate_unsaved_relations(field_dict)
+        if pk_value is not None and not force_insert:
+            if self._meta.composite_key:
+                for pk_part_name in pk_field.field_names:
+                    field_dict.pop(pk_part_name, None)
+            else:
+                field_dict.pop(pk_field.name, None)
+            rows = self.update(**field_dict).where(self._pk_expr()).execute()
+        elif pk_field is None or not self._meta.auto_increment:
+            self.insert(**field_dict).execute()
+            rows = 1
+        else:
+            pk_from_cursor = self.insert(**field_dict).execute()
+            if pk_from_cursor is not None:
+                pk_value = pk_from_cursor
+            self._pk = pk_value
+            rows = 1
+        self._dirty.clear()
+        return rows
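+
+    # save() issues an UPDATE when the instance already has a primary key,
+    # and an INSERT otherwise (force_insert=True always inserts). Sketch
+    # with a hypothetical model:
+    #
+    #     user = User(username='alice')
+    #     user.save()   # INSERT; an auto-generated pk is set on "user".
+    #     user.username = 'alice2'
+    #     user.save(only=[User.username])   # UPDATE just one column.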
+
+    def is_dirty(self):
+        return bool(self._dirty)
+
+    @property
+    def dirty_fields(self):
+        return [f for f in self._meta.sorted_fields if f.name in self._dirty]
+
+    def dependencies(self, search_nullable=False):
+        model_class = type(self)
+        stack = [(type(self), None)]
+        seen = set()
+
+        while stack:
+            klass, query = stack.pop()
+            if klass in seen:
+                continue
+            seen.add(klass)
+            for fk, rel_model in klass._meta.backrefs.items():
+                if rel_model is model_class or query is None:
+                    node = (fk == self.__data__[fk.rel_field.name])
+                else:
+                    node = fk << query
+                subquery = (rel_model.select(rel_model._meta.primary_key)
+                            .where(node))
+                if not fk.null or search_nullable:
+                    stack.append((rel_model, subquery))
+                yield (node, fk)
+
+    def delete_instance(self, recursive=False, delete_nullable=False):
+        if recursive:
+            dependencies = self.dependencies(delete_nullable)
+            for query, fk in reversed(list(dependencies)):
+                model = fk.model
+                if fk.null and not delete_nullable:
+                    model.update(**{fk.name: None}).where(query).execute()
+                else:
+                    model.delete().where(query).execute()
+        return self.delete().where(self._pk_expr()).execute()
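+
+    # Sketch (hypothetical instance): with recursive=True, dependent rows
+    # are deleted first, while nullable foreign keys are set to NULL unless
+    # delete_nullable=True.
+    #
+    #     user.delete_instance(recursive=True)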
+
+    def __hash__(self):
+        return hash((self.__class__, self._pk))
+
+    def __eq__(self, other):
+        return (
+            other.__class__ == self.__class__ and
+            self._pk is not None and
+            other._pk == self._pk)
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __sql__(self, ctx):
+        return ctx.sql(getattr(self, self._meta.primary_key.name))
+
+    @classmethod
+    def bind(cls, database, bind_refs=True, bind_backrefs=True):
+        is_different = cls._meta.database is not database
+        cls._meta.set_database(database)
+        if bind_refs or bind_backrefs:
+            G = cls._meta.model_graph(refs=bind_refs, backrefs=bind_backrefs)
+            for _, model, is_backref in G:
+                model._meta.set_database(database)
+        return is_different
+
+    @classmethod
+    def bind_ctx(cls, database, bind_refs=True, bind_backrefs=True):
+        return _BoundModelsContext((cls,), database, bind_refs, bind_backrefs)
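+
+    # bind() / bind_ctx() swap the database a model is bound to at runtime,
+    # e.g. to run tests against an in-memory SQLite database. Sketch
+    # (hypothetical model and database):
+    #
+    #     test_db = SqliteDatabase(':memory:')
+    #     with User.bind_ctx(test_db):
+    #         User.create_table()
+    #         ...  # Queries in this block hit test_db.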
+
+    @classmethod
+    def table_exists(cls):
+        M = cls._meta
+        return cls._schema.database.table_exists(M.table.__name__, M.schema)
+
+    @classmethod
+    def create_table(cls, safe=True, **options):
+        if 'fail_silently' in options:
+            __deprecated__('"fail_silently" has been deprecated in favor of '
+                           '"safe" for the create_table() method.')
+            safe = options.pop('fail_silently')
+
+        if safe and not cls._schema.database.safe_create_index \
+           and cls.table_exists():
+            return
+        if cls._meta.temporary:
+            options.setdefault('temporary', cls._meta.temporary)
+        cls._schema.create_all(safe, **options)
+
+    @classmethod
+    def drop_table(cls, safe=True, drop_sequences=True, **options):
+        if safe and not cls._schema.database.safe_drop_index \
+           and not cls.table_exists():
+            return
+        if cls._meta.temporary:
+            options.setdefault('temporary', cls._meta.temporary)
+        cls._schema.drop_all(safe, drop_sequences, **options)
+
+    @classmethod
+    def truncate_table(cls, **options):
+        cls._schema.truncate_table(**options)
+
+    @classmethod
+    def index(cls, *fields, **kwargs):
+        return ModelIndex(cls, fields, **kwargs)
+
+    @classmethod
+    def add_index(cls, *fields, **kwargs):
+        if len(fields) == 1 and isinstance(fields[0], (SQL, Index)):
+            cls._meta.indexes.append(fields[0])
+        else:
+            cls._meta.indexes.append(ModelIndex(cls, fields, **kwargs))
+
+
+class ModelAlias(Node):
+    """Provide a separate reference to a model in a query."""
+    def __init__(self, model, alias=None):
+        self.__dict__['model'] = model
+        self.__dict__['alias'] = alias
+
+    def __getattr__(self, attr):
+        model_attr = getattr(self.model, attr)
+        if isinstance(model_attr, Field):
+            self.__dict__[attr] = FieldAlias.create(self, model_attr)
+            return self.__dict__[attr]
+        return model_attr
+
+    def __setattr__(self, attr, value):
+        raise AttributeError('Cannot set attributes on model aliases.')
+
+    def get_field_aliases(self):
+        return [getattr(self, n) for n in self.model._meta.sorted_field_names]
+
+    def select(self, *selection):
+        if not selection:
+            selection = self.get_field_aliases()
+        return ModelSelect(self, selection)
+
+    def __call__(self, **kwargs):
+        return self.model(**kwargs)
+
+    def __sql__(self, ctx):
+        if ctx.scope == SCOPE_VALUES:
+            # Return the quoted table name.
+            return ctx.sql(self.model)
+
+        if self.alias:
+            ctx.alias_manager[self] = self.alias
+
+        if ctx.scope == SCOPE_SOURCE:
+            # Define the table and its alias.
+            return (ctx
+                    .sql(self.model._meta.entity)
+                    .literal(' AS ')
+                    .sql(Entity(ctx.alias_manager[self])))
+        else:
+            # Refer to the table using the alias.
+            return ctx.sql(Entity(ctx.alias_manager[self]))
+
+
+class FieldAlias(Field):
+    def __init__(self, source, field):
+        self.source = source
+        self.model = source.model
+        self.field = field
+
+    @classmethod
+    def create(cls, source, field):
+        class _FieldAlias(cls, type(field)):
+            pass
+        return _FieldAlias(source, field)
+
+    def clone(self):
+        return FieldAlias(self.source, self.field)
+
+    def adapt(self, value):
+        return self.field.adapt(value)
+
+    def python_value(self, value):
+        return self.field.python_value(value)
+
+    def db_value(self, value):
+        return self.field.db_value(value)
+
+    def __getattr__(self, attr):
+        return self.source if attr == 'model' else getattr(self.field, attr)
+
+    def __sql__(self, ctx):
+        return ctx.sql(Column(self.source, self.field.column_name))
+
+
+def sort_models(models):
+    models = set(models)
+    seen = set()
+    ordering = []
+    def dfs(model):
+        if model in models and model not in seen:
+            seen.add(model)
+            for foreign_key, rel_model in model._meta.refs.items():
+                # Do not depth-first search deferred foreign-keys as this can
+                # cause tables to be created in the incorrect order.
+                if not foreign_key.deferred:
+                    dfs(rel_model)
+            if model._meta.depends_on:
+                for dependency in model._meta.depends_on:
+                    dfs(dependency)
+            ordering.append(model)
+
+    names = lambda m: (m._meta.name, m._meta.table_name)
+    for m in sorted(models, key=names):
+        dfs(m)
+    return ordering
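+
+# sort_models() returns a topological ordering in which foreign-key targets
+# precede their referrers, e.g. for creating tables. Sketch (hypothetical
+# models where Tweet has a foreign key to User):
+#
+#     sort_models([Tweet, User])   # -> [User, Tweet]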
+
+
+class _ModelQueryHelper(object):
+    default_row_type = ROW.MODEL
+
+    def __init__(self, *args, **kwargs):
+        super(_ModelQueryHelper, self).__init__(*args, **kwargs)
+        if not self._database:
+            self._database = self.model._meta.database
+
+    @Node.copy
+    def objects(self, constructor=None):
+        self._row_type = ROW.CONSTRUCTOR
+        self._constructor = self.model if constructor is None else constructor
+
+    def _get_cursor_wrapper(self, cursor):
+        row_type = self._row_type or self.default_row_type
+        if row_type == ROW.MODEL:
+            return self._get_model_cursor_wrapper(cursor)
+        elif row_type == ROW.DICT:
+            return ModelDictCursorWrapper(cursor, self.model, self._returning)
+        elif row_type == ROW.TUPLE:
+            return ModelTupleCursorWrapper(cursor, self.model, self._returning)
+        elif row_type == ROW.NAMED_TUPLE:
+            return ModelNamedTupleCursorWrapper(cursor, self.model,
+                                                self._returning)
+        elif row_type == ROW.CONSTRUCTOR:
+            return ModelObjectCursorWrapper(cursor, self.model,
+                                            self._returning, self._constructor)
+        else:
+            raise ValueError('Unrecognized row type: "%s".' % row_type)
+
+    def _get_model_cursor_wrapper(self, cursor):
+        return ModelObjectCursorWrapper(cursor, self.model, [], self.model)
+
+
+class ModelRaw(_ModelQueryHelper, RawQuery):
+    def __init__(self, model, sql, params, **kwargs):
+        self.model = model
+        self._returning = ()
+        super(ModelRaw, self).__init__(sql=sql, params=params, **kwargs)
+
+    def get(self):
+        try:
+            return self.execute()[0]
+        except IndexError:
+            sql, params = self.sql()
+            raise self.model.DoesNotExist('%s instance matching query does '
+                                          'not exist:\nSQL: %s\nParams: %s' %
+                                          (self.model, sql, params))
+
+
+class BaseModelSelect(_ModelQueryHelper):
+    def union_all(self, rhs):
+        return ModelCompoundSelectQuery(self.model, self, 'UNION ALL', rhs)
+    __add__ = union_all
+
+    def union(self, rhs):
+        return ModelCompoundSelectQuery(self.model, self, 'UNION', rhs)
+    __or__ = union
+
+    def intersect(self, rhs):
+        return ModelCompoundSelectQuery(self.model, self, 'INTERSECT', rhs)
+    __and__ = intersect
+
+    def except_(self, rhs):
+        return ModelCompoundSelectQuery(self.model, self, 'EXCEPT', rhs)
+    __sub__ = except_
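+
+    # The operator aliases above let selects be combined set-wise. Sketch
+    # (hypothetical model):
+    #
+    #     lhs = User.select().where(User.id < 10)
+    #     rhs = User.select().where(User.username == 'alice')
+    #     lhs | rhs   # UNION
+    #     lhs + rhs   # UNION ALL
+    #     lhs & rhs   # INTERSECT
+    #     lhs - rhs   # EXCEPT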
+
+    def __iter__(self):
+        if not self._cursor_wrapper:
+            self.execute()
+        return iter(self._cursor_wrapper)
+
+    def prefetch(self, *subqueries):
+        return prefetch(self, *subqueries)
+
+    def get(self, database=None):
+        clone = self.paginate(1, 1)
+        clone._cursor_wrapper = None
+        try:
+            return clone.execute(database)[0]
+        except IndexError:
+            sql, params = clone.sql()
+            raise self.model.DoesNotExist('%s instance matching query does '
+                                          'not exist:\nSQL: %s\nParams: %s' %
+                                          (clone.model, sql, params))
+
+    @Node.copy
+    def group_by(self, *columns):
+        grouping = []
+        for column in columns:
+            if is_model(column):
+                grouping.extend(column._meta.sorted_fields)
+            elif isinstance(column, Table):
+                if not column._columns:
+                    raise ValueError('Cannot pass a table to group_by() that '
+                                     'does not have columns explicitly '
+                                     'declared.')
+                grouping.extend([getattr(column, col_name)
+                                 for col_name in column._columns])
+            else:
+                grouping.append(column)
+        self._group_by = grouping
+
+
+class ModelCompoundSelectQuery(BaseModelSelect, CompoundSelectQuery):
+    def __init__(self, model, *args, **kwargs):
+        self.model = model
+        super(ModelCompoundSelectQuery, self).__init__(*args, **kwargs)
+
+    def _get_model_cursor_wrapper(self, cursor):
+        return self.lhs._get_model_cursor_wrapper(cursor)
+
+
+class ModelSelect(BaseModelSelect, Select):
+    def __init__(self, model, fields_or_models, is_default=False):
+        self.model = self._join_ctx = model
+        self._joins = {}
+        self._is_default = is_default
+        fields = []
+        for fm in fields_or_models:
+            if is_model(fm):
+                fields.extend(fm._meta.sorted_fields)
+            elif isinstance(fm, ModelAlias):
+                fields.extend(fm.get_field_aliases())
+            elif isinstance(fm, Table) and fm._columns:
+                fields.extend([getattr(fm, col) for col in fm._columns])
+            else:
+                fields.append(fm)
+        super(ModelSelect, self).__init__([model], fields)
+
+    def clone(self):
+        clone = super(ModelSelect, self).clone()
+        if clone._joins:
+            clone._joins = dict(clone._joins)
+        return clone
+
+    def select(self, *fields):
+        if fields or not self._is_default:
+            return super(ModelSelect, self).select(*fields)
+        return self
+
+    def switch(self, ctx=None):
+        self._join_ctx = self.model if ctx is None else ctx
+        return self
+
+    def _get_model(self, src):
+        if is_model(src):
+            return src, True
+        elif isinstance(src, Table) and src._model:
+            return src._model, False
+        elif isinstance(src, ModelAlias):
+            return src.model, False
+        elif isinstance(src, ModelSelect):
+            return src.model, False
+        return None, False
+
+    def _normalize_join(self, src, dest, on, attr):
+        # Allow "on" expression to have an alias that determines the
+        # destination attribute for the joined data.
+        on_alias = isinstance(on, Alias)
+        if on_alias:
+            attr = attr or on._alias
+            on = on.alias()
+
+        # Obtain references to the source and destination models being joined.
+        src_model, src_is_model = self._get_model(src)
+        dest_model, dest_is_model = self._get_model(dest)
+
+        if src_model and dest_model:
+            self._join_ctx = dest
+            constructor = dest_model
+
+            # In the case where the "on" clause is a Column or Field, we will
+            # convert that field into the appropriate predicate expression.
+            if not (src_is_model and dest_is_model) and isinstance(on, Column):
+                if on.source is src:
+                    to_field = src_model._meta.columns[on.name]
+                elif on.source is dest:
+                    to_field = dest_model._meta.columns[on.name]
+                else:
+                    raise AttributeError('"on" clause Column %s does not '
+                                         'belong to %s or %s.' %
+                                         (on, src_model, dest_model))
+                on = None
+            elif isinstance(on, Field):
+                to_field = on
+                on = None
+            else:
+                to_field = None
+
+            fk_field, is_backref = self._generate_on_clause(
+                src_model, dest_model, to_field, on)
+
+            if on is None:
+                src_attr = 'name' if src_is_model else 'column_name'
+                dest_attr = 'name' if dest_is_model else 'column_name'
+                if is_backref:
+                    lhs = getattr(dest, getattr(fk_field, dest_attr))
+                    rhs = getattr(src, getattr(fk_field.rel_field, src_attr))
+                else:
+                    lhs = getattr(src, getattr(fk_field, src_attr))
+                    rhs = getattr(dest, getattr(fk_field.rel_field, dest_attr))
+                on = (lhs == rhs)
+
+            if not attr:
+                if fk_field is not None and not is_backref:
+                    attr = fk_field.name
+                else:
+                    attr = dest_model._meta.name
+            elif on_alias and fk_field is not None and \
+                    attr == fk_field.object_id_name and not is_backref:
+                raise ValueError('Cannot assign join alias to "%s", as this '
+                                 'attribute is the object_id_name for the '
+                                 'foreign-key field "%s"' % (attr, fk_field))
+
+        elif isinstance(dest, Source):
+            constructor = dict
+            attr = attr or dest._alias
+            if not attr and isinstance(dest, Table):
+                attr = attr or dest.__name__
+
+        return (on, attr, constructor)
+
+    def _generate_on_clause(self, src, dest, to_field=None, on=None):
+        meta = src._meta
+        is_backref = fk_fields = False
+
+        # Get all the foreign keys between source and dest, and determine if
+        # the join is via a back-reference.
+        if dest in meta.model_refs:
+            fk_fields = meta.model_refs[dest]
+        elif dest in meta.model_backrefs:
+            fk_fields = meta.model_backrefs[dest]
+            is_backref = True
+
+        if not fk_fields:
+            if on is not None:
+                return None, False
+            raise ValueError('Unable to find foreign key between %s and %s. '
+                             'Please specify an explicit join condition.' %
+                             (src, dest))
+        elif to_field is not None:
+            # If the foreign-key field was specified explicitly, remove all
+            # other foreign-key fields from the list.
+            target = (to_field.field if isinstance(to_field, FieldAlias)
+                      else to_field)
+            fk_fields = [f for f in fk_fields if (
+                         (f is target) or
+                         (is_backref and f.rel_field is to_field))]
+
+        if len(fk_fields) > 1:
+            if on is None:
+                raise ValueError('More than one foreign key between %s and %s.'
+                                 ' Please specify which you are joining on.' %
+                                 (src, dest))
+
+            # If there are multiple foreign-keys to choose from and the join
+            # predicate is an expression, we'll try to figure out which
+            # foreign-key field we're joining on so that we can assign to the
+            # correct attribute when resolving the model graph.
+            to_field = None
+            if isinstance(on, Expression):
+                lhs, rhs = on.lhs, on.rhs
+                lhs_f = lhs.field if isinstance(lhs, FieldAlias) else lhs
+                rhs_f = rhs.field if isinstance(rhs, FieldAlias) else rhs
+                if lhs_f in fk_fields:
+                    to_field = lhs_f
+                elif rhs_f in fk_fields:
+                    to_field = rhs_f
+
+            return to_field, False
+        else:
+            return fk_fields[0], is_backref
+
+    @Node.copy
+    def join(self, dest, join_type='INNER', on=None, src=None, attr=None):
+        src = self._join_ctx if src is None else src
+
+        if join_type != JOIN.CROSS:
+            on, attr, constructor = self._normalize_join(src, dest, on, attr)
+            if attr:
+                self._joins.setdefault(src, [])
+                self._joins[src].append((dest, attr, constructor))
+        elif on is not None:
+            raise ValueError('Cannot specify on clause with cross join.')
+
+        if not self._from_list:
+            raise ValueError('No sources to join on.')
+
+        item = self._from_list.pop()
+        self._from_list.append(Join(item, dest, join_type, on))
+
+    def join_from(self, src, dest, join_type='INNER', on=None, attr=None):
+        return self.join(dest, join_type, on, src, attr)
+
+    def _get_model_cursor_wrapper(self, cursor):
+        if len(self._from_list) == 1 and not self._joins:
+            return ModelObjectCursorWrapper(cursor, self.model,
+                                            self._returning, self.model)
+        return ModelCursorWrapper(cursor, self.model, self._returning,
+                                  self._from_list, self._joins)
+
+    def ensure_join(self, lm, rm, on=None, **join_kwargs):
+        join_ctx = self._join_ctx
+        for dest, attr, constructor in self._joins.get(lm, []):
+            if dest == rm:
+                return self
+        return self.switch(lm).join(rm, on=on, **join_kwargs).switch(join_ctx)
+
+    def convert_dict_to_node(self, qdict):
+        accum = []
+        joins = []
+        fks = (ForeignKeyField, BackrefAccessor)
+        for key, value in sorted(qdict.items()):
+            curr = self.model
+            if '__' in key and key.rsplit('__', 1)[1] in DJANGO_MAP:
+                key, op = key.rsplit('__', 1)
+                op = DJANGO_MAP[op]
+            elif value is None:
+                op = DJANGO_MAP['is']
+            else:
+                op = DJANGO_MAP['eq']
+
+            if '__' not in key:
+                # Handle simplest case. This avoids joining over-eagerly when a
+                # direct FK lookup is all that is required.
+                model_attr = getattr(curr, key)
+            else:
+                for piece in key.split('__'):
+                    for dest, attr, _ in self._joins.get(curr, ()):
+                        if attr == piece or (isinstance(dest, ModelAlias) and
+                                             dest.alias == piece):
+                            curr = dest
+                            break
+                    else:
+                        model_attr = getattr(curr, piece)
+                        if value is not None and isinstance(model_attr, fks):
+                            curr = model_attr.rel_model
+                            joins.append(model_attr)
+            accum.append(op(model_attr, value))
+        return accum, joins
+
+    def filter(self, *args, **kwargs):
+        # Normalize args and kwargs into one expression, seeded with an
+        # empty ColumnBase() that is stripped off again after the loop.
+        if args and kwargs:
+            dq_node = ColumnBase() & (
+                reduce(operator.and_, [a.clone() for a in args]) &
+                DQ(**kwargs))
+        elif args:
+            dq_node = ColumnBase() & reduce(
+                operator.and_, [a.clone() for a in args])
+        elif kwargs:
+            dq_node = ColumnBase() & DQ(**kwargs)
+        else:
+            return self.clone()
+
+        # dq_node is now an Expression whose lhs is the ColumnBase() seed
+        # and whose rhs holds the combined filter expression.
+        q = collections.deque([dq_node])
+        dq_joins = set()
+        while q:
+            curr = q.popleft()
+            if not isinstance(curr, Expression):
+                continue
+            for side, piece in (('lhs', curr.lhs), ('rhs', curr.rhs)):
+                if isinstance(piece, DQ):
+                    query, joins = self.convert_dict_to_node(piece.query)
+                    dq_joins.update(joins)
+                    expression = reduce(operator.and_, query)
+                    # Apply values from the DQ object.
+                    if piece._negated:
+                        expression = Negated(expression)
+                    #expression._alias = piece._alias
+                    setattr(curr, side, expression)
+                else:
+                    q.append(piece)
+
+        dq_node = dq_node.rhs  # Strip off the ColumnBase() seed.
+
+        query = self.clone()
+        for field in dq_joins:
+            if isinstance(field, ForeignKeyField):
+                lm, rm = field.model, field.rel_model
+                field_obj = field
+            elif isinstance(field, BackrefAccessor):
+                lm, rm = field.model, field.rel_model
+                field_obj = field.field
+            query = query.ensure_join(lm, rm, field_obj)
+        return query.where(dq_node)
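+
+    # filter() accepts Django-style double-underscore lookups, which
+    # convert_dict_to_node() resolves into expressions and implicit
+    # foreign-key joins. Sketch (hypothetical models):
+    #
+    #     Tweet.filter(user__username='alice', retweet_count__gte=10)
+    #     # Roughly: Tweet.select().join(User).where(
+    #     #     (User.username == 'alice') & (Tweet.retweet_count >= 10))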
+
+    def __sql_selection__(self, ctx, is_subquery=False):
+        if self._is_default and is_subquery and len(self._returning) > 1 and \
+           self.model._meta.primary_key is not False:
+            return ctx.sql(self.model._meta.primary_key)
+
+        return ctx.sql(CommaNodeList(self._returning))
+
+
+class NoopModelSelect(ModelSelect):
+    def __sql__(self, ctx):
+        return self.model._meta.database.get_noop_select(ctx)
+
+    def _get_cursor_wrapper(self, cursor):
+        return CursorWrapper(cursor)
+
+
+class _ModelWriteQueryHelper(_ModelQueryHelper):
+    def __init__(self, model, *args, **kwargs):
+        self.model = model
+        super(_ModelWriteQueryHelper, self).__init__(model, *args, **kwargs)
+
+    def returning(self, *returning):
+        accum = []
+        for item in returning:
+            if is_model(item):
+                accum.extend(item._meta.sorted_fields)
+            else:
+                accum.append(item)
+        return super(_ModelWriteQueryHelper, self).returning(*accum)
+
+    def _set_table_alias(self, ctx):
+        table = self.model._meta.table
+        ctx.alias_manager[table] = table.__name__
+
+
+class ModelUpdate(_ModelWriteQueryHelper, Update):
+    pass
+
+
+class ModelInsert(_ModelWriteQueryHelper, Insert):
+    default_row_type = ROW.TUPLE
+
+    def __init__(self, *args, **kwargs):
+        super(ModelInsert, self).__init__(*args, **kwargs)
+        if self._returning is None and self.model._meta.database is not None:
+            if self.model._meta.database.returning_clause:
+                self._returning = self.model._meta.get_primary_keys()
+
+    def returning(self, *returning):
+        # By default ModelInsert will yield a `tuple` containing the
+        # primary-key of the newly inserted row. But if we are explicitly
+        # specifying a returning clause and have not set a row type, we will
+        # default to returning model instances instead.
+        if returning and self._row_type is None:
+            self._row_type = ROW.MODEL
+        return super(ModelInsert, self).returning(*returning)
+
+    def get_default_data(self):
+        return self.model._meta.defaults
+
+    def get_default_columns(self):
+        fields = self.model._meta.sorted_fields
+        return fields[1:] if self.model._meta.auto_increment else fields
+
+
+class ModelDelete(_ModelWriteQueryHelper, Delete):
+    pass
+
+
+class ManyToManyQuery(ModelSelect):
+    def __init__(self, instance, accessor, rel, *args, **kwargs):
+        self._instance = instance
+        self._accessor = accessor
+        self._src_attr = accessor.src_fk.rel_field.name
+        self._dest_attr = accessor.dest_fk.rel_field.name
+        super(ManyToManyQuery, self).__init__(rel, (rel,), *args, **kwargs)
+
+    def _id_list(self, model_or_id_list):
+        if isinstance(model_or_id_list[0], Model):
+            return [getattr(obj, self._dest_attr) for obj in model_or_id_list]
+        return model_or_id_list
+
+    def add(self, value, clear_existing=False):
+        if clear_existing:
+            self.clear()
+
+        accessor = self._accessor
+        src_id = getattr(self._instance, self._src_attr)
+        if isinstance(value, SelectQuery):
+            query = value.columns(
+                Value(src_id),
+                accessor.dest_fk.rel_field)
+            accessor.through_model.insert_from(
+                fields=[accessor.src_fk, accessor.dest_fk],
+                query=query).execute()
+        else:
+            value = ensure_tuple(value)
+            if not value:
+                return
+
+            inserts = [{
+                accessor.src_fk.name: src_id,
+                accessor.dest_fk.name: rel_id}
+                for rel_id in self._id_list(value)]
+            accessor.through_model.insert_many(inserts).execute()
+
+    def remove(self, value):
+        src_id = getattr(self._instance, self._src_attr)
+        if isinstance(value, SelectQuery):
+            column = getattr(value.model, self._dest_attr)
+            subquery = value.columns(column)
+            return (self._accessor.through_model
+                    .delete()
+                    .where(
+                        (self._accessor.dest_fk << subquery) &
+                        (self._accessor.src_fk == src_id))
+                    .execute())
+        else:
+            value = ensure_tuple(value)
+            if not value:
+                return
+            return (self._accessor.through_model
+                    .delete()
+                    .where(
+                        (self._accessor.dest_fk << self._id_list(value)) &
+                        (self._accessor.src_fk == src_id))
+                    .execute())
+
+    def clear(self):
+        src_id = getattr(self._instance, self._src_attr)
+        return (self._accessor.through_model
+                .delete()
+                .where(self._accessor.src_fk == src_id)
+                .execute())
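+
+    # ManyToManyQuery backs the accessor created by a many-to-many field;
+    # add(), remove() and clear() manipulate rows of the through-model.
+    # Sketch (hypothetical Student/Course models joined by such a field):
+    #
+    #     huey.courses.add([math, physics])
+    #     huey.courses.remove(math)
+    #     huey.courses.clear()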
+
+
+def safe_python_value(conv_func):
+    def validate(value):
+        try:
+            return conv_func(value)
+        except (TypeError, ValueError):
+            return value
+    return validate
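+
+# Sketch: wrap a converter so values the database hands back in an
+# unexpected shape fall through unconverted instead of raising.
+#
+#     to_int = safe_python_value(int)
+#     to_int('3')     # -> 3
+#     to_int('n/a')   # -> 'n/a' (the ValueError is swallowed)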
+
+
+class BaseModelCursorWrapper(DictCursorWrapper):
+    def __init__(self, cursor, model, columns):
+        super(BaseModelCursorWrapper, self).__init__(cursor)
+        self.model = model
+        self.select = columns or []
+
+    def _initialize_columns(self):
+        combined = self.model._meta.combined
+        table = self.model._meta.table
+        description = self.cursor.description
+
+        self.ncols = len(self.cursor.description)
+        self.columns = []
+        self.converters = converters = [None] * self.ncols
+        self.fields = fields = [None] * self.ncols
+
+        for idx, description_item in enumerate(description):
+            column = description_item[0]
+            dot_index = column.find('.')
+            if dot_index != -1:
+                column = column[dot_index + 1:]
+
+            column = column.strip('"')
+            self.columns.append(column)
+            try:
+                raw_node = self.select[idx]
+            except IndexError:
+                if column in combined:
+                    raw_node = node = combined[column]
+                else:
+                    continue
+            else:
+                node = raw_node.unwrap()
+
+            # Heuristics used to attempt to get the field associated with a
+            # given SELECT column, so that we can accurately convert the value
+            # returned by the database-cursor into a Python object.
+            if isinstance(node, Field):
+                if raw_node._coerce:
+                    converters[idx] = node.python_value
+                fields[idx] = node
+                if (column == node.name or column == node.column_name) and \
+                   not raw_node.is_alias():
+                    self.columns[idx] = node.name
+            elif isinstance(node, Function) and node._coerce:
+                if node._python_value is not None:
+                    converters[idx] = node._python_value
+                elif node.arguments and isinstance(node.arguments[0], Node):
+                    # If the first argument is a field or references a column
+                    # on a Model, try using that field's conversion function.
+                    # This usually works, but we use "safe_python_value()" so
+                    # that if a TypeError or ValueError occurs during
+                    # conversion we can just fall-back to the raw cursor value.
+                    first = node.arguments[0].unwrap()
+                    if isinstance(first, Entity):
+                        path = first._path[-1]  # Try to look-up by name.
+                        first = combined.get(path)
+                    if isinstance(first, Field):
+                        converters[idx] = safe_python_value(first.python_value)
+            elif column in combined:
+                if node._coerce:
+                    converters[idx] = combined[column].python_value
+                if isinstance(node, Column) and node.source == table:
+                    fields[idx] = combined[column]
+
+    initialize = _initialize_columns
+
+    def process_row(self, row):
+        raise NotImplementedError
+
+
+class ModelDictCursorWrapper(BaseModelCursorWrapper):
+    def process_row(self, row):
+        result = {}
+        columns, converters = self.columns, self.converters
+        fields = self.fields
+
+        for i in range(self.ncols):
+            attr = columns[i]
+            if attr in result:
+                continue  # Don't overwrite if we have dupes.
+            if converters[i] is not None:
+                result[attr] = converters[i](row[i])
+            else:
+                result[attr] = row[i]
+
+        return result
+
+
+class ModelTupleCursorWrapper(ModelDictCursorWrapper):
+    constructor = tuple
+
+    def process_row(self, row):
+        columns, converters = self.columns, self.converters
+        return self.constructor([
+            (converters[i](row[i]) if converters[i] is not None else row[i])
+            for i in range(self.ncols)])
+
+
+class ModelNamedTupleCursorWrapper(ModelTupleCursorWrapper):
+    def initialize(self):
+        self._initialize_columns()
+        attributes = []
+        for i in range(self.ncols):
+            attributes.append(self.columns[i])
+        self.tuple_class = collections.namedtuple('Row', attributes)
+        self.constructor = lambda row: self.tuple_class(*row)
+
+
+class ModelObjectCursorWrapper(ModelDictCursorWrapper):
+    def __init__(self, cursor, model, select, constructor):
+        self.constructor = constructor
+        self.is_model = is_model(constructor)
+        super(ModelObjectCursorWrapper, self).__init__(cursor, model, select)
+
+    def process_row(self, row):
+        data = super(ModelObjectCursorWrapper, self).process_row(row)
+        if self.is_model:
+            # Clear out any dirty fields before returning to the user.
+            obj = self.constructor(__no_default__=1, **data)
+            obj._dirty.clear()
+            return obj
+        else:
+            return self.constructor(**data)
+
+
+class ModelCursorWrapper(BaseModelCursorWrapper):
+    def __init__(self, cursor, model, select, from_list, joins):
+        super(ModelCursorWrapper, self).__init__(cursor, model, select)
+        self.from_list = from_list
+        self.joins = joins
+
+    def initialize(self):
+        self._initialize_columns()
+        selected_src = set([field.model for field in self.fields
+                            if field is not None])
+        select, columns = self.select, self.columns
+
+        self.key_to_constructor = {self.model: self.model}
+        self.src_is_dest = {}
+        self.src_to_dest = []
+        accum = collections.deque(self.from_list)
+        dests = set()
+        while accum:
+            curr = accum.popleft()
+            if isinstance(curr, Join):
+                accum.append(curr.lhs)
+                accum.append(curr.rhs)
+                continue
+
+            if curr not in self.joins:
+                continue
+
+            for key, attr, constructor in self.joins[curr]:
+                if key not in self.key_to_constructor:
+                    self.key_to_constructor[key] = constructor
+                    self.src_to_dest.append((curr, attr, key,
+                                             isinstance(curr, dict)))
+                    dests.add(key)
+                    accum.append(key)
+
+        for src, _, dest, _ in self.src_to_dest:
+            self.src_is_dest[src] = src in dests and (dest in selected_src
+                                                      or src in selected_src)
+
+        self.column_keys = []
+        for idx, node in enumerate(select):
+            key = self.model
+            field = self.fields[idx]
+            if field is not None:
+                if isinstance(field, FieldAlias):
+                    key = field.source
+                else:
+                    key = field.model
+            else:
+                if isinstance(node, Node):
+                    node = node.unwrap()
+                if isinstance(node, Column):
+                    key = node.source
+
+            self.column_keys.append(key)
+
+    def process_row(self, row):
+        objects = {}
+        object_list = []
+        for key, constructor in self.key_to_constructor.items():
+            objects[key] = constructor(__no_default__=True)
+            object_list.append(objects[key])
+
+        set_keys = set()
+        for idx, key in enumerate(self.column_keys):
+            instance = objects[key]
+            column = self.columns[idx]
+            value = row[idx]
+            if value is not None:
+                set_keys.add(key)
+            if self.converters[idx]:
+                value = self.converters[idx](value)
+
+            if isinstance(instance, dict):
+                instance[column] = value
+            else:
+                setattr(instance, column, value)
+
+        # Wire each joined instance onto its parent, following the
+        # src -> dest attribute mapping computed in initialize().
+        for (src, attr, dest, is_dict) in self.src_to_dest:
+            instance = objects[src]
+            try:
+                joined_instance = objects[dest]
+            except KeyError:
+                continue
+
+            # If no fields were set on the destination instance then do not
+            # assign an "empty" instance.
+            if instance is None or dest is None or \
+               (dest not in set_keys and not self.src_is_dest.get(dest)):
+                continue
+
+            if is_dict:
+                instance[attr] = joined_instance
+            else:
+                setattr(instance, attr, joined_instance)
+
+        # When instantiating models from a cursor, we clear the dirty fields.
+        for instance in object_list:
+            if isinstance(instance, Model):
+                instance._dirty.clear()
+
+        return objects[self.model]
+
+
+class PrefetchQuery(collections.namedtuple('_PrefetchQuery', (
+    'query', 'fields', 'is_backref', 'rel_models', 'field_to_name', 'model'))):
+    def __new__(cls, query, fields=None, is_backref=None, rel_models=None,
+                field_to_name=None, model=None):
+        if fields:
+            if is_backref:
+                if rel_models is None:
+                    rel_models = [field.model for field in fields]
+                foreign_key_attrs = [field.rel_field.name for field in fields]
+            else:
+                if rel_models is None:
+                    rel_models = [field.rel_model for field in fields]
+                foreign_key_attrs = [field.name for field in fields]
+            field_to_name = list(zip(fields, foreign_key_attrs))
+        model = query.model
+        return super(PrefetchQuery, cls).__new__(
+            cls, query, fields, is_backref, rel_models, field_to_name, model)
+
+    def populate_instance(self, instance, id_map):
+        if self.is_backref:
+            for field in self.fields:
+                identifier = instance.__data__[field.name]
+                key = (field, identifier)
+                if key in id_map:
+                    setattr(instance, field.name, id_map[key])
+        else:
+            for field, attname in self.field_to_name:
+                identifier = instance.__data__[field.rel_field.name]
+                key = (field, identifier)
+                rel_instances = id_map.get(key, [])
+                for inst in rel_instances:
+                    setattr(inst, attname, instance)
+                setattr(instance, field.backref, rel_instances)
+
+    def store_instance(self, instance, id_map):
+        for field, attname in self.field_to_name:
+            identity = field.rel_field.python_value(instance.__data__[attname])
+            key = (field, identity)
+            if self.is_backref:
+                id_map[key] = instance
+            else:
+                id_map.setdefault(key, [])
+                id_map[key].append(instance)
+
+
+def prefetch_add_subquery(sq, subqueries):
+    fixed_queries = [PrefetchQuery(sq)]
+    for i, subquery in enumerate(subqueries):
+        if isinstance(subquery, tuple):
+            subquery, target_model = subquery
+        else:
+            target_model = None
+        if (not isinstance(subquery, Query) and is_model(subquery)) or \
+           isinstance(subquery, ModelAlias):
+            subquery = subquery.select()
+        subquery_model = subquery.model
+        fks = backrefs = None
+        for j in reversed(range(i + 1)):
+            fixed = fixed_queries[j]
+            last_query = fixed.query
+            last_model = last_obj = fixed.model
+            if isinstance(last_model, ModelAlias):
+                last_model = last_model.model
+            rels = subquery_model._meta.model_refs.get(last_model, [])
+            if rels:
+                fks = [getattr(subquery_model, fk.name) for fk in rels]
+                pks = [getattr(last_obj, fk.rel_field.name) for fk in rels]
+            else:
+                backrefs = subquery_model._meta.model_backrefs.get(last_model)
+            if (fks or backrefs) and ((target_model is last_obj) or
+                                      (target_model is None)):
+                break
+
+        if not fks and not backrefs:
+            tgt_err = ' using %s' % target_model if target_model else ''
+            raise AttributeError('Error: unable to find foreign key for '
+                                 'query: %s%s' % (subquery, tgt_err))
+
+        dest = (target_model,) if target_model else None
+
+        if fks:
+            expr = reduce(operator.or_, [
+                (fk << last_query.select(pk))
+                for (fk, pk) in zip(fks, pks)])
+            subquery = subquery.where(expr)
+            fixed_queries.append(PrefetchQuery(subquery, fks, False, dest))
+        elif backrefs:
+            expressions = []
+            for backref in backrefs:
+                rel_field = getattr(subquery_model, backref.rel_field.name)
+                fk_field = getattr(last_obj, backref.name)
+                expressions.append(rel_field << last_query.select(fk_field))
+            subquery = subquery.where(reduce(operator.or_, expressions))
+            fixed_queries.append(PrefetchQuery(subquery, backrefs, True, dest))
+
+    return fixed_queries
+
+
+def prefetch(sq, *subqueries):
+    if not subqueries:
+        return sq
+
+    fixed_queries = prefetch_add_subquery(sq, subqueries)
+    deps = {}
+    rel_map = {}
+    for pq in reversed(fixed_queries):
+        query_model = pq.model
+        if pq.fields:
+            for rel_model in pq.rel_models:
+                rel_map.setdefault(rel_model, [])
+                rel_map[rel_model].append(pq)
+
+        deps[query_model] = {}
+        id_map = deps[query_model]
+        has_relations = bool(rel_map.get(query_model))
+
+        for instance in pq.query:
+            if pq.fields:
+                pq.store_instance(instance, id_map)
+            if has_relations:
+                for rel in rel_map[query_model]:
+                    rel.populate_instance(instance, deps[rel.model])
+
+    return list(pq.query)
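+
+# prefetch() avoids the N+1 query problem: it runs one query per model and
+# stitches the related rows together in Python. Sketch (hypothetical models
+# where Tweet.user is a foreign key with backref='tweets'):
+#
+#     for user in prefetch(User.select(), Tweet.select()):
+#         for tweet in user.tweets:   # Already populated; no extra query.
+#             print(user.username, tweet.message)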
diff --git a/lib/python3.7/site-packages/pip-19.1.dist-info/INSTALLER b/lib/python3.7/site-packages/pip-19.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/lib/python3.7/site-packages/pip-19.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/python3.7/site-packages/pip-19.1.dist-info/LICENSE.txt b/lib/python3.7/site-packages/pip-19.1.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..737fec5c5352af3d9a6a47a0670da4bdb52c5725
--- /dev/null
+++ b/lib/python3.7/site-packages/pip-19.1.dist-info/LICENSE.txt
@@ -0,0 +1,20 @@
+Copyright (c) 2008-2019 The pip developers (see AUTHORS.txt file)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lib/python3.7/site-packages/pip-19.1.dist-info/METADATA b/lib/python3.7/site-packages/pip-19.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..5f48a62f7d4022cc67d9e7880ecf3c6dedcf3757
--- /dev/null
+++ b/lib/python3.7/site-packages/pip-19.1.dist-info/METADATA
@@ -0,0 +1,75 @@
+Metadata-Version: 2.1
+Name: pip
+Version: 19.1
+Summary: The PyPA recommended tool for installing Python packages.
+Home-page: https://pip.pypa.io/
+Author: The pip developers
+Author-email: pypa-dev@groups.google.com
+License: MIT
+Keywords: distutils easy_install egg setuptools wheel virtualenv
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Topic :: Software Development :: Build Tools
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*
+
+pip - The Python Package Installer
+==================================
+
+.. image:: https://img.shields.io/pypi/v/pip.svg
+   :target: https://pypi.org/project/pip/
+
+.. image:: https://readthedocs.org/projects/pip/badge/?version=latest
+   :target: https://pip.pypa.io/en/latest
+
+pip is the `package installer`_ for Python. You can use pip to install packages from the `Python Package Index`_ and other indexes.
+
+Please take a look at our documentation for how to install and use pip:
+
+* `Installation`_
+* `Usage`_
+* `Release notes`_
+
+If you find bugs, need help, or want to talk to the developers please use our mailing lists or chat rooms:
+
+* `Issue tracking`_
+* `Discourse channel`_
+* `User IRC`_
+
+If you want to get involved head over to GitHub to get the source code and feel free to jump on the developer mailing lists and chat rooms:
+
+* `GitHub page`_
+* `Dev mailing list`_
+* `Dev IRC`_
+
+Code of Conduct
+---------------
+
+Everyone interacting in the pip project's codebases, issue trackers, chat
+rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_.
+
+.. _package installer: https://packaging.python.org/en/latest/current/
+.. _Python Package Index: https://pypi.org
+.. _Installation: https://pip.pypa.io/en/stable/installing.html
+.. _Usage: https://pip.pypa.io/en/stable/
+.. _Release notes: https://pip.pypa.io/en/stable/news.html
+.. _GitHub page: https://github.com/pypa/pip
+.. _Issue tracking: https://github.com/pypa/pip/issues
+.. _Discourse channel: https://discuss.python.org/c/packaging
+.. _Dev mailing list: https://groups.google.com/forum/#!forum/pypa-dev
+.. _User IRC: https://webchat.freenode.net/?channels=%23pypa
+.. _Dev IRC: https://webchat.freenode.net/?channels=%23pypa-dev
+.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/
+
+
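For reference, the Requires-Python header above is an ordinary PEP 440 specifier set, and installers evaluate it like any other version specifier. A minimal sketch using the packaging module that pip vendors in this same tree (a standalone `packaging` install behaves identically):

    from pip._vendor.packaging.specifiers import SpecifierSet

    # The header string from the METADATA above.
    spec = SpecifierSet(">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*")
    print("3.7.3" in spec)  # True  (3.7 is allowed)
    print("3.2.1" in spec)  # False (excluded by !=3.2.*)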
diff --git a/lib/python3.7/site-packages/pip-19.1.dist-info/RECORD b/lib/python3.7/site-packages/pip-19.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..f20ded8ee1164e389d201f33c149ef82001a93ca
--- /dev/null
+++ b/lib/python3.7/site-packages/pip-19.1.dist-info/RECORD
@@ -0,0 +1,618 @@
+../../../bin/pip,sha256=yfduDi9OGe76sEh01_aSBrQC4O5OAMRxsNjIVw-gWR0,283
+../../../bin/pip3,sha256=yfduDi9OGe76sEh01_aSBrQC4O5OAMRxsNjIVw-gWR0,283
+../../../bin/pip3.7,sha256=yfduDi9OGe76sEh01_aSBrQC4O5OAMRxsNjIVw-gWR0,283
+pip-19.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pip-19.1.dist-info/LICENSE.txt,sha256=W6Ifuwlk-TatfRU2LR7W1JMcyMj5_y1NkRkOEJvnRDE,1090
+pip-19.1.dist-info/METADATA,sha256=beBgNX95MIue6GG4pjR72SBngkDbdZydSE1mLZJQngw,2890
+pip-19.1.dist-info/RECORD,,
+pip-19.1.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110
+pip-19.1.dist-info/entry_points.txt,sha256=S_zfxY25QtQDVY1BiLAmOKSkkI5llzCKPLiYOSEupsY,98
+pip-19.1.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pip/__init__.py,sha256=FmpL3q9nmKqFLOCMcYN8LoGCA7mrESG-HiX5Ksm8Kls,21
+pip/__main__.py,sha256=L3IHqBeasELUHvwy5CT_izVEMhM12tve289qut49DvU,623
+pip/__pycache__/__init__.cpython-37.pyc,,
+pip/__pycache__/__main__.cpython-37.pyc,,
+pip/_internal/__init__.py,sha256=Di4rpoQhA2Kx0Dm0THczf61CDOXcPokR2FIpyKkRm1Y,2870
+pip/_internal/__pycache__/__init__.cpython-37.pyc,,
+pip/_internal/__pycache__/build_env.cpython-37.pyc,,
+pip/_internal/__pycache__/cache.cpython-37.pyc,,
+pip/_internal/__pycache__/configuration.cpython-37.pyc,,
+pip/_internal/__pycache__/download.cpython-37.pyc,,
+pip/_internal/__pycache__/exceptions.cpython-37.pyc,,
+pip/_internal/__pycache__/index.cpython-37.pyc,,
+pip/_internal/__pycache__/locations.cpython-37.pyc,,
+pip/_internal/__pycache__/pep425tags.cpython-37.pyc,,
+pip/_internal/__pycache__/pyproject.cpython-37.pyc,,
+pip/_internal/__pycache__/resolve.cpython-37.pyc,,
+pip/_internal/__pycache__/wheel.cpython-37.pyc,,
+pip/_internal/build_env.py,sha256=ZUpR-00pxQc-Mw750jyBVA-oWb4WHtpK63pfiTHd8qU,7392
+pip/_internal/cache.py,sha256=PQesJm1JrjgAya3VyV6qkbnfACG23c1wQ7PaSw_0F-Y,7656
+pip/_internal/cli/__init__.py,sha256=FkHBgpxxb-_gd6r1FjnNhfMOzAUYyXoXKJ6abijfcFU,132
+pip/_internal/cli/__pycache__/__init__.cpython-37.pyc,,
+pip/_internal/cli/__pycache__/autocompletion.cpython-37.pyc,,
+pip/_internal/cli/__pycache__/base_command.cpython-37.pyc,,
+pip/_internal/cli/__pycache__/cmdoptions.cpython-37.pyc,,
+pip/_internal/cli/__pycache__/main_parser.cpython-37.pyc,,
+pip/_internal/cli/__pycache__/parser.cpython-37.pyc,,
+pip/_internal/cli/__pycache__/status_codes.cpython-37.pyc,,
+pip/_internal/cli/autocompletion.py,sha256=ptvsMdGjq42pzoY4skABVF43u2xAtLJlXAulPi-A10Y,6083
+pip/_internal/cli/base_command.py,sha256=2SbVZ2o3KmVdeJlK4l1lP6y_ZMFZ8VkDp9ns1ByZ-xM,12629
+pip/_internal/cli/cmdoptions.py,sha256=IA1XAgTJ1oWXp1oF3CkE8mpUx8vqIysyd7hlEZk7wWE,23813
+pip/_internal/cli/main_parser.py,sha256=YH_w_hApq2pyS_G5tZlbIe7g_HUIcKE820jhnv0piVA,3002
+pip/_internal/cli/parser.py,sha256=VZKUKJPbU6I2cHPLDOikin-aCx7OvLcZ3fzYp3xytd8,9378
+pip/_internal/cli/status_codes.py,sha256=F6uDG6Gj7RNKQJUDnd87QKqI16Us-t-B0wPF_4QMpWc,156
+pip/_internal/commands/__init__.py,sha256=7fscm9bUGJInZwWY7VjH2pPC7HUWQWyjYMEIB33L9CY,2223
+pip/_internal/commands/__pycache__/__init__.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/check.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/completion.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/configuration.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/download.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/freeze.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/hash.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/help.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/install.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/list.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/search.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/show.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/uninstall.cpython-37.pyc,,
+pip/_internal/commands/__pycache__/wheel.cpython-37.pyc,,
+pip/_internal/commands/check.py,sha256=liigNVif0iz2mBfhvsajrLZT5zM5KIvgmKvhAW91EzA,1430
+pip/_internal/commands/completion.py,sha256=hqvCvoxsIHjysiD7olHKTqK2lzE1_lS6LWn69kN5qyI,2929
+pip/_internal/commands/configuration.py,sha256=Il76e-ERfEHtW2GbV1aTR6o-xurh-LVv2NCea7erP4s,8020
+pip/_internal/commands/download.py,sha256=XPe3Kuj9iZfXwOiJq70mYVYNZD5lJCLnGT_C61cOsKw,6623
+pip/_internal/commands/freeze.py,sha256=VvS3G0wrm_9BH3B7Ex5msLL_1UQTtCq5G8dDI63Iemo,3259
+pip/_internal/commands/hash.py,sha256=K1JycsD-rpjqrRcL_ijacY9UKmI82pQcLYq4kCM4Pv0,1681
+pip/_internal/commands/help.py,sha256=MwBhPJpW1Dt3GfJV3V8V6kgAy_pXT0jGrZJB1wCTW-E,1090
+pip/_internal/commands/install.py,sha256=vSb8TfUHwz3HJ5cA-JnPP3X84ONQ0wkCztw0pHFbmsc,22841
+pip/_internal/commands/list.py,sha256=EtKEUotPbLCzzcWrZqEiEXi4ic00dgUOKUeNU0y7Pfg,10166
+pip/_internal/commands/search.py,sha256=sLZ9icKMEEGekHvzRRZMiTd1zCFIZeDptyyU1mQCYzk,4728
+pip/_internal/commands/show.py,sha256=bE-ucu8fAjTTENpRRKhwD3QSWR8Rss7YgKAbMJoxock,6273
+pip/_internal/commands/uninstall.py,sha256=h0gfPF5jylDESx_IHgF6bZME7QAEOHzQHdn65GP-jrE,2963
+pip/_internal/commands/wheel.py,sha256=7MNPZqK9WWxZC3TgzvMBH-RPRlOFLpwq927lkzUiUjI,7167
+pip/_internal/configuration.py,sha256=xit1f8ZdnHz-AHzWAKTUa_gZzlIIM-5AgK30glB71ik,13115
+pip/_internal/download.py,sha256=0_8jUtL4SLZe4qo8Fe66qkbO69gUG-IPbp932WOHyFs,35147
+pip/_internal/exceptions.py,sha256=ryXWmEJpSHr3yXXk1efWHSzQwAjBdBJiKR5kCQvjgPA,9117
+pip/_internal/index.py,sha256=jVzHqVuP2CJh9YCyp2Zbx1NSmE_VBxhc26fYKF8aer8,42130
+pip/_internal/locations.py,sha256=-N3f1I24gcPTdW52onPnySDoQUIn_TtPYV99WwsjxRE,6932
+pip/_internal/models/__init__.py,sha256=3DHUd_qxpPozfzouoqa9g9ts1Czr5qaHfFxbnxriepM,63
+pip/_internal/models/__pycache__/__init__.cpython-37.pyc,,
+pip/_internal/models/__pycache__/candidate.cpython-37.pyc,,
+pip/_internal/models/__pycache__/format_control.cpython-37.pyc,,
+pip/_internal/models/__pycache__/index.cpython-37.pyc,,
+pip/_internal/models/__pycache__/link.cpython-37.pyc,,
+pip/_internal/models/candidate.py,sha256=_IzS-yw0h2UHfZALsZ2mxkeGNSNuNGspjSn2JfZ-ZHM,1045
+pip/_internal/models/format_control.py,sha256=ap8Swa26ocSXBxIuCvaDBRZjxdKUFuwC-bfqXQHWtKw,2250
+pip/_internal/models/index.py,sha256=K59A8-hVhBM20Xkahr4dTwP7OjkJyEqXH11UwHFVgqM,1060
+pip/_internal/models/link.py,sha256=Hqu72UgH266njr2z2kTnIG-sQiPtjENDbPqXFzT-84s,4783
+pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/operations/__pycache__/__init__.cpython-37.pyc,,
+pip/_internal/operations/__pycache__/check.cpython-37.pyc,,
+pip/_internal/operations/__pycache__/freeze.cpython-37.pyc,,
+pip/_internal/operations/__pycache__/prepare.cpython-37.pyc,,
+pip/_internal/operations/check.py,sha256=CZHaksHKUMo58wGHGhhKlPrpcRMHObouzzyd9pMRFt8,5109
+pip/_internal/operations/freeze.py,sha256=_Ffl2HpoWLLHaiB1XyTjKIA3QWC8H-E1ib6xJgX_mRE,9279
+pip/_internal/operations/prepare.py,sha256=hqzdPNAJLZVEpJQATn_0aSMJqy8xPvW36j9mjFKw54s,17102
+pip/_internal/pep425tags.py,sha256=jyBorYxepVJeHNo69TE-3UxJXsGO2XvGxveSWQDi_Wo,13128
+pip/_internal/pyproject.py,sha256=I1S69bAIy0kwKZSkP3Mz5cdbXXlBSfuQHmdCCm9bwJ4,10788
+pip/_internal/req/__init__.py,sha256=Y2SjAuMFsSt3dkiK8kkiQAfv8sHrjl0PAT63FKFT0tM,2364
+pip/_internal/req/__pycache__/__init__.cpython-37.pyc,,
+pip/_internal/req/__pycache__/constructors.cpython-37.pyc,,
+pip/_internal/req/__pycache__/req_file.cpython-37.pyc,,
+pip/_internal/req/__pycache__/req_install.cpython-37.pyc,,
+pip/_internal/req/__pycache__/req_set.cpython-37.pyc,,
+pip/_internal/req/__pycache__/req_tracker.cpython-37.pyc,,
+pip/_internal/req/__pycache__/req_uninstall.cpython-37.pyc,,
+pip/_internal/req/constructors.py,sha256=4KlO_-G89XYZW1VnzHwV34oCvR6ilmxFqSSGLgv4PEw,11399
+pip/_internal/req/req_file.py,sha256=mZoQvhHZJwgljrXDH8CLXaNoUK4sM8_bRfK9Po2fM80,13714
+pip/_internal/req/req_install.py,sha256=sLu2Z3CKEtnKUI0j0QsDuPidlU2PJihxm7K_O-5Jm3A,40068
+pip/_internal/req/req_set.py,sha256=PaDc5EswLQhxBMFbuKbJ0frZbMNKocmA8OGqIWT-9EY,7860
+pip/_internal/req/req_tracker.py,sha256=wBpDzSDSYwpUfW4K43NrEOCCp1r6stuubfLc65Y95EM,3129
+pip/_internal/req/req_uninstall.py,sha256=rVOk8BRM_L9rsUUr9lmkV6Lm9N1Os7TEIDir6tT1Q7U,23105
+pip/_internal/resolve.py,sha256=rZDMkyhUIbaytY9KsYQVez8nl21xLXgaDt8xuHvyGMI,15128
+pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/utils/__pycache__/__init__.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/appdirs.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/compat.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/deprecation.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/encoding.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/filesystem.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/glibc.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/hashes.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/logging.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/misc.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/models.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/outdated.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/packaging.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/setuptools_build.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/temp_dir.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/typing.cpython-37.pyc,,
+pip/_internal/utils/__pycache__/ui.cpython-37.pyc,,
+pip/_internal/utils/appdirs.py,sha256=r9i0BZLK9KcvrzI5tqlw8ehRTtSehWGERFLy7YppG3g,9398
+pip/_internal/utils/compat.py,sha256=Q0cmYGBaKB4tV5E0CG11339YLxuCaqdUCSJd9vinMc8,8551
+pip/_internal/utils/deprecation.py,sha256=HYUvfCZUxwLIFlYHeZM3VmuFhXTdlw0rk4-cC14bKw4,3080
+pip/_internal/utils/encoding.py,sha256=tudXCoAPe9fZvNK4cmWQs2frREZ-QuGCwF_SlTyz6cI,1218
+pip/_internal/utils/filesystem.py,sha256=ojaIDvOFOtkpKme5se6X2N8ARmQxu8cxvaaI-NFqVtk,990
+pip/_internal/utils/glibc.py,sha256=9_1wY9Lmca4xzOQDF1A6ITW_N80j_BXwN7Sx7vBqP4k,3282
+pip/_internal/utils/hashes.py,sha256=_6l8M_nqmLZjs-o2lP2rB7ZleAT05WOuqgwBa8uFYR8,3527
+pip/_internal/utils/logging.py,sha256=qSYK7Wf_oiCr1VR4qvAq_TtpY3SIKSlVDgnAeQqSjHc,11671
+pip/_internal/utils/misc.py,sha256=QsYl2j5rU5Mh_CEsSf1W8GB6HvPOfioj30Emqz7mg7s,32434
+pip/_internal/utils/models.py,sha256=DQYZSRhjvSdDTAaJLLCpDtxAn1S_-v_8nlNjv4T2jwY,1042
+pip/_internal/utils/outdated.py,sha256=sEe1l902jiOl3tArf6omqVd44s4BT9DOk3a3CXtJNNI,5868
+pip/_internal/utils/packaging.py,sha256=frr5M-ORe30rRKbH6pwqp_jkX1T8rx-UGDlj8vG5HII,2743
+pip/_internal/utils/setuptools_build.py,sha256=0blfscmNJW_iZ5DcswJeDB_PbtTEjfK9RL1R1WEDW2E,278
+pip/_internal/utils/temp_dir.py,sha256=0Xq5ZlOd2OOeHwKM6hGy66gnMGAbyhio7DtjLHd7DFg,5339
+pip/_internal/utils/typing.py,sha256=bF73ImJzIaxLLEVwfEaSJzFGqV9LaxkQBvDULIyr1jI,1125
+pip/_internal/utils/ui.py,sha256=yRqmi2V4OeTYP9SnXhuXKlyRx8xJ79AjLRgvLCZ4E1E,13812
+pip/_internal/vcs/__init__.py,sha256=kgwOhkt6ddbKosZytktLydMUb_e-JpXMstJF9YpSOwM,19205
+pip/_internal/vcs/__pycache__/__init__.cpython-37.pyc,,
+pip/_internal/vcs/__pycache__/bazaar.cpython-37.pyc,,
+pip/_internal/vcs/__pycache__/git.cpython-37.pyc,,
+pip/_internal/vcs/__pycache__/mercurial.cpython-37.pyc,,
+pip/_internal/vcs/__pycache__/subversion.cpython-37.pyc,,
+pip/_internal/vcs/bazaar.py,sha256=3lA6CKyrYpoJuxjTa8oTozszNcqdYILqEhKLJ41RMug,3231
+pip/_internal/vcs/git.py,sha256=x0PQDgzWho544gfqtE9CXoUkN34zE4YkpgrbTcWEte4,12960
+pip/_internal/vcs/mercurial.py,sha256=AbH4IWdLCsjUxFnIHRvijV-7SNhfjDZJJFzQ1e_b48Q,3355
+pip/_internal/vcs/subversion.py,sha256=cMH4MEobSj68gaSiBD53KXVJnEbhtuUJjL8U1Kk_v70,8167
+pip/_internal/wheel.py,sha256=-MijhN1BIuaQkb3909smA63wkbvdRrdtr1CUDxnQTqA,41189
+pip/_vendor/__init__.py,sha256=iip2nWwH_riYqnDnM0q4BJFrWE-XWjYfxCejJKct0WM,4654
+pip/_vendor/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/__pycache__/appdirs.cpython-37.pyc,,
+pip/_vendor/__pycache__/distro.cpython-37.pyc,,
+pip/_vendor/__pycache__/ipaddress.cpython-37.pyc,,
+pip/_vendor/__pycache__/pyparsing.cpython-37.pyc,,
+pip/_vendor/__pycache__/retrying.cpython-37.pyc,,
+pip/_vendor/__pycache__/six.cpython-37.pyc,,
+pip/_vendor/appdirs.py,sha256=BENKsvcA08IpccD9345-rMrg3aXWFA1q6BFEglnHg6I,24547
+pip/_vendor/cachecontrol/__init__.py,sha256=6cRPchVqkAkeUtYTSW8qCetjSqJo-GxP-n4VMVDbvmc,302
+pip/_vendor/cachecontrol/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-37.pyc,,
+pip/_vendor/cachecontrol/__pycache__/adapter.cpython-37.pyc,,
+pip/_vendor/cachecontrol/__pycache__/cache.cpython-37.pyc,,
+pip/_vendor/cachecontrol/__pycache__/compat.cpython-37.pyc,,
+pip/_vendor/cachecontrol/__pycache__/controller.cpython-37.pyc,,
+pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-37.pyc,,
+pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-37.pyc,,
+pip/_vendor/cachecontrol/__pycache__/serialize.cpython-37.pyc,,
+pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-37.pyc,,
+pip/_vendor/cachecontrol/_cmd.py,sha256=URGE0KrA87QekCG3SGPatlSPT571dZTDjNa-ZXX3pDc,1295
+pip/_vendor/cachecontrol/adapter.py,sha256=eBGAtVNRZgtl_Kj5JV54miqL9YND-D0JZPahwY8kFtY,4863
+pip/_vendor/cachecontrol/cache.py,sha256=1fc4wJP8HYt1ycnJXeEw5pCpeBL2Cqxx6g9Fb0AYDWQ,805
+pip/_vendor/cachecontrol/caches/__init__.py,sha256=-gHNKYvaeD0kOk5M74eOrsSgIKUtC6i6GfbmugGweEo,86
+pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-37.pyc,,
+pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-37.pyc,,
+pip/_vendor/cachecontrol/caches/file_cache.py,sha256=8vrSzzGcdfEfICago1uSFbkumNJMGLbCdEkXsmUIExw,4177
+pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=HxelMpNCo-dYr2fiJDwM3hhhRmxUYtB5tXm1GpAAT4Y,856
+pip/_vendor/cachecontrol/compat.py,sha256=kHNvMRdt6s_Xwqq_9qJmr9ou3wYMOMUMxPPcwNxT8Mc,695
+pip/_vendor/cachecontrol/controller.py,sha256=U7g-YwizQ2O5NRgK_MZreF1ntM4E49C3PuF3od-Vwz4,13698
+pip/_vendor/cachecontrol/filewrapper.py,sha256=vACKO8Llzu_ZWyjV1Fxn1MA4TGU60N5N3GSrAFdAY2Q,2533
+pip/_vendor/cachecontrol/heuristics.py,sha256=BFGHJ3yQcxvZizfo90LLZ04T_Z5XSCXvFotrp7Us0sc,4070
+pip/_vendor/cachecontrol/serialize.py,sha256=GebE34fgToyWwAsRPguh8hEPN6CqoG-5hRMXRsjVABQ,6954
+pip/_vendor/cachecontrol/wrapper.py,sha256=sfr9YHWx-5TwNz1H5rT6QOo8ggII6v3vbEDjQFwR6wc,671
+pip/_vendor/certifi/__init__.py,sha256=-M1moep9D6jZnOAT0qnM1dpoEHMPage7Osv5xLRLyFg,52
+pip/_vendor/certifi/__main__.py,sha256=NaCn6WtWME-zzVWQ2j4zFyl8cY4knDa9CwtHNIeFPhM,53
+pip/_vendor/certifi/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/certifi/__pycache__/__main__.cpython-37.pyc,,
+pip/_vendor/certifi/__pycache__/core.cpython-37.pyc,,
+pip/_vendor/certifi/cacert.pem,sha256=OXuDPlrPiaJwm5ZEAamspo0ktiNJtyu-OGhOWGqgeic,284518
+pip/_vendor/certifi/core.py,sha256=EuFc2BsToG5O1-qsx4BSjQ1r1-7WRtH87b1WflZOWhI,218
+pip/_vendor/chardet/__init__.py,sha256=YsP5wQlsHJ2auF1RZJfypiSrCA7_bQiRm3ES_NI76-Y,1559
+pip/_vendor/chardet/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/big5freq.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/big5prober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/chardistribution.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/charsetgroupprober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/charsetprober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/codingstatemachine.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/compat.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/cp949prober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/enums.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/escprober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/escsm.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/eucjpprober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/euckrfreq.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/euckrprober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/euctwfreq.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/euctwprober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/gb2312freq.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/gb2312prober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/hebrewprober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/jisfreq.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/jpcntx.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/langbulgarianmodel.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/langcyrillicmodel.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/langgreekmodel.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/langhebrewmodel.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/langhungarianmodel.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/langthaimodel.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/langturkishmodel.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/latin1prober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/mbcharsetprober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/mbcsgroupprober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/mbcssm.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/sbcharsetprober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/sbcsgroupprober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/sjisprober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/universaldetector.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/utf8prober.cpython-37.pyc,,
+pip/_vendor/chardet/__pycache__/version.cpython-37.pyc,,
+pip/_vendor/chardet/big5freq.py,sha256=D_zK5GyzoVsRes0HkLJziltFQX0bKCLOrFe9_xDvO_8,31254
+pip/_vendor/chardet/big5prober.py,sha256=kBxHbdetBpPe7xrlb-e990iot64g_eGSLd32lB7_h3M,1757
+pip/_vendor/chardet/chardistribution.py,sha256=3woWS62KrGooKyqz4zQSnjFbJpa6V7g02daAibTwcl8,9411
+pip/_vendor/chardet/charsetgroupprober.py,sha256=6bDu8YIiRuScX4ca9Igb0U69TA2PGXXDej6Cc4_9kO4,3787
+pip/_vendor/chardet/charsetprober.py,sha256=KSmwJErjypyj0bRZmC5F5eM7c8YQgLYIjZXintZNstg,5110
+pip/_vendor/chardet/cli/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+pip/_vendor/chardet/cli/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/chardet/cli/__pycache__/chardetect.cpython-37.pyc,,
+pip/_vendor/chardet/cli/chardetect.py,sha256=DI8dlV3FBD0c0XA_y3sQ78z754DUv1J8n34RtDjOXNw,2774
+pip/_vendor/chardet/codingstatemachine.py,sha256=VYp_6cyyki5sHgXDSZnXW4q1oelHc3cu9AyQTX7uug8,3590
+pip/_vendor/chardet/compat.py,sha256=PKTzHkSbtbHDqS9PyujMbX74q1a8mMpeQTDVsQhZMRw,1134
+pip/_vendor/chardet/cp949prober.py,sha256=TZ434QX8zzBsnUvL_8wm4AQVTZ2ZkqEEQL_lNw9f9ow,1855
+pip/_vendor/chardet/enums.py,sha256=Aimwdb9as1dJKZaFNUH2OhWIVBVd6ZkJJ_WK5sNY8cU,1661
+pip/_vendor/chardet/escprober.py,sha256=kkyqVg1Yw3DIOAMJ2bdlyQgUFQhuHAW8dUGskToNWSc,3950
+pip/_vendor/chardet/escsm.py,sha256=RuXlgNvTIDarndvllNCk5WZBIpdCxQ0kcd9EAuxUh84,10510
+pip/_vendor/chardet/eucjpprober.py,sha256=iD8Jdp0ISRjgjiVN7f0e8xGeQJ5GM2oeZ1dA8nbSeUw,3749
+pip/_vendor/chardet/euckrfreq.py,sha256=-7GdmvgWez4-eO4SuXpa7tBiDi5vRXQ8WvdFAzVaSfo,13546
+pip/_vendor/chardet/euckrprober.py,sha256=MqFMTQXxW4HbzIpZ9lKDHB3GN8SP4yiHenTmf8g_PxY,1748
+pip/_vendor/chardet/euctwfreq.py,sha256=No1WyduFOgB5VITUA7PLyC5oJRNzRyMbBxaKI1l16MA,31621
+pip/_vendor/chardet/euctwprober.py,sha256=13p6EP4yRaxqnP4iHtxHOJ6R2zxHq1_m8hTRjzVZ95c,1747
+pip/_vendor/chardet/gb2312freq.py,sha256=JX8lsweKLmnCwmk8UHEQsLgkr_rP_kEbvivC4qPOrlc,20715
+pip/_vendor/chardet/gb2312prober.py,sha256=gGvIWi9WhDjE-xQXHvNIyrnLvEbMAYgyUSZ65HUfylw,1754
+pip/_vendor/chardet/hebrewprober.py,sha256=c3SZ-K7hvyzGY6JRAZxJgwJ_sUS9k0WYkvMY00YBYFo,13838
+pip/_vendor/chardet/jisfreq.py,sha256=vpmJv2Bu0J8gnMVRPHMFefTRvo_ha1mryLig8CBwgOg,25777
+pip/_vendor/chardet/jpcntx.py,sha256=PYlNqRUQT8LM3cT5FmHGP0iiscFlTWED92MALvBungo,19643
+pip/_vendor/chardet/langbulgarianmodel.py,sha256=1HqQS9Pbtnj1xQgxitJMvw8X6kKr5OockNCZWfEQrPE,12839
+pip/_vendor/chardet/langcyrillicmodel.py,sha256=LODajvsetH87yYDDQKA2CULXUH87tI223dhfjh9Zx9c,17948
+pip/_vendor/chardet/langgreekmodel.py,sha256=8YAW7bU8YwSJap0kIJSbPMw1BEqzGjWzqcqf0WgUKAA,12688
+pip/_vendor/chardet/langhebrewmodel.py,sha256=JSnqmE5E62tDLTPTvLpQsg5gOMO4PbdWRvV7Avkc0HA,11345
+pip/_vendor/chardet/langhungarianmodel.py,sha256=RhapYSG5l0ZaO-VV4Fan5sW0WRGQqhwBM61yx3yxyOA,12592
+pip/_vendor/chardet/langthaimodel.py,sha256=8l0173Gu_W6G8mxmQOTEF4ls2YdE7FxWf3QkSxEGXJQ,11290
+pip/_vendor/chardet/langturkishmodel.py,sha256=W22eRNJsqI6uWAfwXSKVWWnCerYqrI8dZQTm_M0lRFk,11102
+pip/_vendor/chardet/latin1prober.py,sha256=S2IoORhFk39FEFOlSFWtgVybRiP6h7BlLldHVclNkU8,5370
+pip/_vendor/chardet/mbcharsetprober.py,sha256=AR95eFH9vuqSfvLQZN-L5ijea25NOBCoXqw8s5O9xLQ,3413
+pip/_vendor/chardet/mbcsgroupprober.py,sha256=h6TRnnYq2OxG1WdD5JOyxcdVpn7dG0q-vB8nWr5mbh4,2012
+pip/_vendor/chardet/mbcssm.py,sha256=SY32wVIF3HzcjY3BaEspy9metbNSKxIIB0RKPn7tjpI,25481
+pip/_vendor/chardet/sbcharsetprober.py,sha256=LDSpCldDCFlYwUkGkwD2oFxLlPWIWXT09akH_2PiY74,5657
+pip/_vendor/chardet/sbcsgroupprober.py,sha256=1IprcCB_k1qfmnxGC6MBbxELlKqD3scW6S8YIwdeyXA,3546
+pip/_vendor/chardet/sjisprober.py,sha256=IIt-lZj0WJqK4rmUZzKZP4GJlE8KUEtFYVuY96ek5MQ,3774
+pip/_vendor/chardet/universaldetector.py,sha256=qL0174lSZE442eB21nnktT9_VcAye07laFWUeUrjttY,12485
+pip/_vendor/chardet/utf8prober.py,sha256=IdD8v3zWOsB8OLiyPi-y_fqwipRFxV9Nc1eKBLSuIEw,2766
+pip/_vendor/chardet/version.py,sha256=sp3B08mrDXB-pf3K9fqJ_zeDHOCLC8RrngQyDFap_7g,242
+pip/_vendor/colorama/__init__.py,sha256=lJdY6COz9uM_pXwuk9oLr0fp8H8q2RrUqN16GKabvq4,239
+pip/_vendor/colorama/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/colorama/__pycache__/ansi.cpython-37.pyc,,
+pip/_vendor/colorama/__pycache__/ansitowin32.cpython-37.pyc,,
+pip/_vendor/colorama/__pycache__/initialise.cpython-37.pyc,,
+pip/_vendor/colorama/__pycache__/win32.cpython-37.pyc,,
+pip/_vendor/colorama/__pycache__/winterm.cpython-37.pyc,,
+pip/_vendor/colorama/ansi.py,sha256=Fi0un-QLqRm-v7o_nKiOqyC8PapBJK7DLV_q9LKtTO0,2524
+pip/_vendor/colorama/ansitowin32.py,sha256=u8QaqdqS_xYSfNkPM1eRJLHz6JMWPodaJaP0mxgHCDc,10462
+pip/_vendor/colorama/initialise.py,sha256=PprovDNxMTrvoNHFcL2NZjpH2XzDc8BLxLxiErfUl4k,1915
+pip/_vendor/colorama/win32.py,sha256=bJ8Il9jwaBN5BJ8bmN6FoYZ1QYuMKv2j8fGrXh7TJjw,5404
+pip/_vendor/colorama/winterm.py,sha256=2y_2b7Zsv34feAsP67mLOVc-Bgq51mdYGo571VprlrM,6438
+pip/_vendor/distlib/__init__.py,sha256=7uthK6m96pTekk8hjlT-MybcwYmmxwP8gEOxXVg1f2s,581
+pip/_vendor/distlib/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/distlib/__pycache__/compat.cpython-37.pyc,,
+pip/_vendor/distlib/__pycache__/database.cpython-37.pyc,,
+pip/_vendor/distlib/__pycache__/index.cpython-37.pyc,,
+pip/_vendor/distlib/__pycache__/locators.cpython-37.pyc,,
+pip/_vendor/distlib/__pycache__/manifest.cpython-37.pyc,,
+pip/_vendor/distlib/__pycache__/markers.cpython-37.pyc,,
+pip/_vendor/distlib/__pycache__/metadata.cpython-37.pyc,,
+pip/_vendor/distlib/__pycache__/resources.cpython-37.pyc,,
+pip/_vendor/distlib/__pycache__/scripts.cpython-37.pyc,,
+pip/_vendor/distlib/__pycache__/util.cpython-37.pyc,,
+pip/_vendor/distlib/__pycache__/version.cpython-37.pyc,,
+pip/_vendor/distlib/__pycache__/wheel.cpython-37.pyc,,
+pip/_vendor/distlib/_backport/__init__.py,sha256=bqS_dTOH6uW9iGgd0uzfpPjo6vZ4xpPZ7kyfZJ2vNaw,274
+pip/_vendor/distlib/_backport/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/distlib/_backport/__pycache__/misc.cpython-37.pyc,,
+pip/_vendor/distlib/_backport/__pycache__/shutil.cpython-37.pyc,,
+pip/_vendor/distlib/_backport/__pycache__/sysconfig.cpython-37.pyc,,
+pip/_vendor/distlib/_backport/__pycache__/tarfile.cpython-37.pyc,,
+pip/_vendor/distlib/_backport/misc.py,sha256=KWecINdbFNOxSOP1fGF680CJnaC6S4fBRgEtaYTw0ig,971
+pip/_vendor/distlib/_backport/shutil.py,sha256=VW1t3uYqUjWZH7jV-6QiimLhnldoV5uIpH4EuiT1jfw,25647
+pip/_vendor/distlib/_backport/sysconfig.cfg,sha256=swZKxq9RY5e9r3PXCrlvQPMsvOdiWZBTHLEbqS8LJLU,2617
+pip/_vendor/distlib/_backport/sysconfig.py,sha256=JdJ9ztRy4Hc-b5-VS74x3nUtdEIVr_OBvMsIb8O2sjc,26964
+pip/_vendor/distlib/_backport/tarfile.py,sha256=Ihp7rXRcjbIKw8COm9wSePV9ARGXbSF9gGXAMn2Q-KU,92628
+pip/_vendor/distlib/compat.py,sha256=xdNZmqFN5HwF30HjRn5M415pcC2kgXRBXn767xS8v-M,41404
+pip/_vendor/distlib/database.py,sha256=-KJH63AJ7hqjLtGCwOTrionhKr2Vsytdwkjyo8UdEco,51029
+pip/_vendor/distlib/index.py,sha256=Dd1kIV06XIdynNpKxHMMRRIKsXuoUsG7QIzntfVtZCI,21073
+pip/_vendor/distlib/locators.py,sha256=S9G2IsZp0RnMMbXGrT-gu7892pNpy1XMlUEuUHX3OI8,51828
+pip/_vendor/distlib/manifest.py,sha256=nQEhYmgoreaBZzyFzwYsXxJARu3fo4EkunU163U16iE,14811
+pip/_vendor/distlib/markers.py,sha256=6Ac3cCfFBERexiESWIOXmg-apIP8l2esafNSX3KMy-8,4387
+pip/_vendor/distlib/metadata.py,sha256=BNCnpRfFVslyZcosr4vnE_YbkRb3TNxXtk7TrDszJdc,40172
+pip/_vendor/distlib/resources.py,sha256=2FGv0ZHF14KXjLIlL0R991lyQQGcewOS4mJ-5n-JVnc,10766
+pip/_vendor/distlib/scripts.py,sha256=NYqRJ2uuEuJwr_NNLzWH0m_s_YsobDFQb6HqxuQ2Sew,16638
+pip/_vendor/distlib/t32.exe,sha256=ftub1bsSPUCOnBn-eCtcarKTk0N0CBEP53BumkIxWJE,92672
+pip/_vendor/distlib/t64.exe,sha256=iChOG627LWTHY8-jzSwlo9SYU5a-0JHwQu4AqDz8I68,102400
+pip/_vendor/distlib/util.py,sha256=gwKL5geJKmtR4GeIUnoMAWjsPPG3tVP_mFxw_Sx-isc,59681
+pip/_vendor/distlib/version.py,sha256=_n7F6juvQGAcn769E_SHa7fOcf5ERlEVymJ_EjPRwGw,23391
+pip/_vendor/distlib/w32.exe,sha256=NPYPpt7PIjVqABEu1CzabbDyHHkJpuw-_qZq_48H0j0,89088
+pip/_vendor/distlib/w64.exe,sha256=Yb-qr1OQEzL8KRGTk-XHUZDwMSljfQeZnVoTk-K4e7E,99328
+pip/_vendor/distlib/wheel.py,sha256=gV53KDG7BgbxsdeKjnATbP47gTEJRNylcIeE1TFin1o,39880
+pip/_vendor/distro.py,sha256=X2So5kjrRKyMbQJ90Xgy93HU5eFtujCzKaYNeoy1k1c,43251
+pip/_vendor/html5lib/__init__.py,sha256=Ztrn7UvF-wIFAgRBBa0ML-Gu5AffH3BPX_INJx4SaBI,1162
+pip/_vendor/html5lib/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-37.pyc,,
+pip/_vendor/html5lib/__pycache__/_inputstream.cpython-37.pyc,,
+pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-37.pyc,,
+pip/_vendor/html5lib/__pycache__/_utils.cpython-37.pyc,,
+pip/_vendor/html5lib/__pycache__/constants.cpython-37.pyc,,
+pip/_vendor/html5lib/__pycache__/html5parser.cpython-37.pyc,,
+pip/_vendor/html5lib/__pycache__/serializer.cpython-37.pyc,,
+pip/_vendor/html5lib/_ihatexml.py,sha256=3LBtJMlzgwM8vpQiU1TvGmEEmNH72sV0yD8yS53y07A,16705
+pip/_vendor/html5lib/_inputstream.py,sha256=bPUWcAfJScK4xkjQQaG_HsI2BvEVbFvI0AsodDYPQj0,32552
+pip/_vendor/html5lib/_tokenizer.py,sha256=YAaOEBD6qc5ISq9Xt9Nif1OFgcybTTfMdwqBkZhpAq4,76580
+pip/_vendor/html5lib/_trie/__init__.py,sha256=8VR1bcgD2OpeS2XExpu5yBhP_Q1K-lwKbBKICBPf1kU,289
+pip/_vendor/html5lib/_trie/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/html5lib/_trie/__pycache__/_base.cpython-37.pyc,,
+pip/_vendor/html5lib/_trie/__pycache__/datrie.cpython-37.pyc,,
+pip/_vendor/html5lib/_trie/__pycache__/py.cpython-37.pyc,,
+pip/_vendor/html5lib/_trie/_base.py,sha256=uJHVhzif9S0MJXgy9F98iEev5evi_rgUk5BmEbUSp8c,930
+pip/_vendor/html5lib/_trie/datrie.py,sha256=EQpqSfkZRuTbE-DuhW7xMdVDxdZNZ0CfmnYfHA_3zxM,1178
+pip/_vendor/html5lib/_trie/py.py,sha256=wXmQLrZRf4MyWNyg0m3h81m9InhLR7GJ002mIIZh-8o,1775
+pip/_vendor/html5lib/_utils.py,sha256=ismpASeqa2jqEPQjHUj8vReAf7yIoKnvLN5fuOw6nv0,4015
+pip/_vendor/html5lib/constants.py,sha256=4lmZWLtEPRLnl8NzftOoYTJdo6jpeMtP6dqQC0g_bWQ,83518
+pip/_vendor/html5lib/filters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/html5lib/filters/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/html5lib/filters/__pycache__/alphabeticalattributes.cpython-37.pyc,,
+pip/_vendor/html5lib/filters/__pycache__/base.cpython-37.pyc,,
+pip/_vendor/html5lib/filters/__pycache__/inject_meta_charset.cpython-37.pyc,,
+pip/_vendor/html5lib/filters/__pycache__/lint.cpython-37.pyc,,
+pip/_vendor/html5lib/filters/__pycache__/optionaltags.cpython-37.pyc,,
+pip/_vendor/html5lib/filters/__pycache__/sanitizer.cpython-37.pyc,,
+pip/_vendor/html5lib/filters/__pycache__/whitespace.cpython-37.pyc,,
+pip/_vendor/html5lib/filters/alphabeticalattributes.py,sha256=lViZc2JMCclXi_5gduvmdzrRxtO5Xo9ONnbHBVCsykU,919
+pip/_vendor/html5lib/filters/base.py,sha256=z-IU9ZAYjpsVsqmVt7kuWC63jR11hDMr6CVrvuao8W0,286
+pip/_vendor/html5lib/filters/inject_meta_charset.py,sha256=egDXUEHXmAG9504xz0K6ALDgYkvUrC2q15YUVeNlVQg,2945
+pip/_vendor/html5lib/filters/lint.py,sha256=jk6q56xY0ojiYfvpdP-OZSm9eTqcAdRqhCoPItemPYA,3643
+pip/_vendor/html5lib/filters/optionaltags.py,sha256=8lWT75J0aBOHmPgfmqTHSfPpPMp01T84NKu0CRedxcE,10588
+pip/_vendor/html5lib/filters/sanitizer.py,sha256=4ON02KNjuqda1lCw5_JCUZxb0BzWR5M7ON84dtJ7dm0,26248
+pip/_vendor/html5lib/filters/whitespace.py,sha256=8eWqZxd4UC4zlFGW6iyY6f-2uuT8pOCSALc3IZt7_t4,1214
+pip/_vendor/html5lib/html5parser.py,sha256=g5g2ezkusHxhi7b23vK_-d6K6BfIJRbqIQmvQ9z4EgI,118963
+pip/_vendor/html5lib/serializer.py,sha256=yfcfBHse2wDs6ojxn-kieJjLT5s1ipilQJ0gL3-rJis,15758
+pip/_vendor/html5lib/treeadapters/__init__.py,sha256=A0rY5gXIe4bJOiSGRO_j_tFhngRBO8QZPzPtPw5dFzo,679
+pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-37.pyc,,
+pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-37.pyc,,
+pip/_vendor/html5lib/treeadapters/genshi.py,sha256=CH27pAsDKmu4ZGkAUrwty7u0KauGLCZRLPMzaO3M5vo,1715
+pip/_vendor/html5lib/treeadapters/sax.py,sha256=BKS8woQTnKiqeffHsxChUqL4q2ZR_wb5fc9MJ3zQC8s,1776
+pip/_vendor/html5lib/treebuilders/__init__.py,sha256=AysSJyvPfikCMMsTVvaxwkgDieELD5dfR8FJIAuq7hY,3592
+pip/_vendor/html5lib/treebuilders/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/html5lib/treebuilders/__pycache__/base.cpython-37.pyc,,
+pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-37.pyc,,
+pip/_vendor/html5lib/treebuilders/__pycache__/etree.cpython-37.pyc,,
+pip/_vendor/html5lib/treebuilders/__pycache__/etree_lxml.cpython-37.pyc,,
+pip/_vendor/html5lib/treebuilders/base.py,sha256=wQGp5yy22TNG8tJ6aREe4UUeTR7A99dEz0BXVaedWb4,14579
+pip/_vendor/html5lib/treebuilders/dom.py,sha256=SY3MsijXyzdNPc8aK5IQsupBoM8J67y56DgNtGvsb9g,8835
+pip/_vendor/html5lib/treebuilders/etree.py,sha256=aqIBOGj_dFYqBURIcTegGNBhAIJOw5iFDHb4jrkYH-8,12764
+pip/_vendor/html5lib/treebuilders/etree_lxml.py,sha256=9V0dXxbJYYq-Skgb5-_OL2NkVYpjioEb4CHajo0e9yI,14122
+pip/_vendor/html5lib/treewalkers/__init__.py,sha256=yhXxHpjlSqfQyUag3v8-vWjMPriFBU8YRAPNpDgBTn8,5714
+pip/_vendor/html5lib/treewalkers/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/html5lib/treewalkers/__pycache__/base.cpython-37.pyc,,
+pip/_vendor/html5lib/treewalkers/__pycache__/dom.cpython-37.pyc,,
+pip/_vendor/html5lib/treewalkers/__pycache__/etree.cpython-37.pyc,,
+pip/_vendor/html5lib/treewalkers/__pycache__/etree_lxml.cpython-37.pyc,,
+pip/_vendor/html5lib/treewalkers/__pycache__/genshi.cpython-37.pyc,,
+pip/_vendor/html5lib/treewalkers/base.py,sha256=ouiOsuSzvI0KgzdWP8PlxIaSNs9falhbiinAEc_UIJY,7476
+pip/_vendor/html5lib/treewalkers/dom.py,sha256=EHyFR8D8lYNnyDU9lx_IKigVJRyecUGua0mOi7HBukc,1413
+pip/_vendor/html5lib/treewalkers/etree.py,sha256=sz1o6mmE93NQ53qJFDO7HKyDtuwgK-Ay3qSFZPC6u00,4550
+pip/_vendor/html5lib/treewalkers/etree_lxml.py,sha256=sY6wfRshWTllu6n48TPWpKsQRPp-0CQrT0hj_AdzHSU,6309
+pip/_vendor/html5lib/treewalkers/genshi.py,sha256=4D2PECZ5n3ZN3qu3jMl9yY7B81jnQApBQSVlfaIuYbA,2309
+pip/_vendor/idna/__init__.py,sha256=9Nt7xpyet3DmOrPUGooDdAwmHZZu1qUAy2EaJ93kGiQ,58
+pip/_vendor/idna/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/idna/__pycache__/codec.cpython-37.pyc,,
+pip/_vendor/idna/__pycache__/compat.cpython-37.pyc,,
+pip/_vendor/idna/__pycache__/core.cpython-37.pyc,,
+pip/_vendor/idna/__pycache__/idnadata.cpython-37.pyc,,
+pip/_vendor/idna/__pycache__/intranges.cpython-37.pyc,,
+pip/_vendor/idna/__pycache__/package_data.cpython-37.pyc,,
+pip/_vendor/idna/__pycache__/uts46data.cpython-37.pyc,,
+pip/_vendor/idna/codec.py,sha256=lvYb7yu7PhAqFaAIAdWcwgaWI2UmgseUua-1c0AsG0A,3299
+pip/_vendor/idna/compat.py,sha256=R-h29D-6mrnJzbXxymrWUW7iZUvy-26TQwZ0ij57i4U,232
+pip/_vendor/idna/core.py,sha256=JDCZZ_PLESqIgEbU8mPyoEufWwoOiIqygA17-QZIe3s,11733
+pip/_vendor/idna/idnadata.py,sha256=HXaPFw6_YAJ0qppACPu0YLAULtRs3QovRM_CCZHGdY0,40899
+pip/_vendor/idna/intranges.py,sha256=TY1lpxZIQWEP6tNqjZkFA5hgoMWOj1OBmnUG8ihT87E,1749
+pip/_vendor/idna/package_data.py,sha256=kIzeKKXEouXLR4srqwf9Q3zv-NffKSOz5aSDOJARPB0,21
+pip/_vendor/idna/uts46data.py,sha256=oLyNZ1pBaiBlj9zFzLFRd_P7J8MkRcgDisjExZR_4MY,198292
+pip/_vendor/ipaddress.py,sha256=2OgbkeAD2rLkcXqbcvof3J5R7lRwjNLoBySyTkBtKnc,79852
+pip/_vendor/lockfile/__init__.py,sha256=Tqpz90DwKYfhPsfzVOJl84TL87pdFE5ePNHdXAxs4Tk,9371
+pip/_vendor/lockfile/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/lockfile/__pycache__/linklockfile.cpython-37.pyc,,
+pip/_vendor/lockfile/__pycache__/mkdirlockfile.cpython-37.pyc,,
+pip/_vendor/lockfile/__pycache__/pidlockfile.cpython-37.pyc,,
+pip/_vendor/lockfile/__pycache__/sqlitelockfile.cpython-37.pyc,,
+pip/_vendor/lockfile/__pycache__/symlinklockfile.cpython-37.pyc,,
+pip/_vendor/lockfile/linklockfile.py,sha256=C7OH3H4GdK68u4FQgp8fkP2kO4fyUTSyj3X6blgfobc,2652
+pip/_vendor/lockfile/mkdirlockfile.py,sha256=e3qgIL-etZMLsS-3ft19iW_8IQ360HNkGOqE3yBKsUw,3096
+pip/_vendor/lockfile/pidlockfile.py,sha256=ukH9uk6NFuxyVmG5QiWw4iKq3fT7MjqUguX95avYPIY,6090
+pip/_vendor/lockfile/sqlitelockfile.py,sha256=o2TMkMRY0iwn-iL1XMRRIFStMUkS4i3ajceeYNntKFg,5506
+pip/_vendor/lockfile/symlinklockfile.py,sha256=ABwXXmvTHvCl5viPblShL3PG-gGsLiT1roAMfDRwhi8,2616
+pip/_vendor/msgpack/__init__.py,sha256=y0bk2YbzK6J2e0J_dyreN6nD7yM2IezT6m_tU2h-Mdg,1677
+pip/_vendor/msgpack/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/msgpack/__pycache__/_version.cpython-37.pyc,,
+pip/_vendor/msgpack/__pycache__/exceptions.cpython-37.pyc,,
+pip/_vendor/msgpack/__pycache__/fallback.cpython-37.pyc,,
+pip/_vendor/msgpack/_version.py,sha256=dN7wVIjbyuQIJ35B2o6gymQNDLPlj_7-uTfgCv7KErM,20
+pip/_vendor/msgpack/exceptions.py,sha256=lPkAi_u12NlFajDz4FELSHEdfU8hrR3zeTvKX8aQuz4,1056
+pip/_vendor/msgpack/fallback.py,sha256=h0ll8xnq12mI9PuQ9Qd_Ihtt08Sp8L0JqhG9KY8Vyjk,36411
+pip/_vendor/packaging/__about__.py,sha256=Wg0-hNgTU2_lBZcGBh5pm1R9yroQ3rv-X0rig8KjA6o,744
+pip/_vendor/packaging/__init__.py,sha256=6enbp5XgRfjBjsI9-bn00HjHf5TH21PDMOKkJW8xw-w,562
+pip/_vendor/packaging/__pycache__/__about__.cpython-37.pyc,,
+pip/_vendor/packaging/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/packaging/__pycache__/_compat.cpython-37.pyc,,
+pip/_vendor/packaging/__pycache__/_structures.cpython-37.pyc,,
+pip/_vendor/packaging/__pycache__/markers.cpython-37.pyc,,
+pip/_vendor/packaging/__pycache__/requirements.cpython-37.pyc,,
+pip/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc,,
+pip/_vendor/packaging/__pycache__/utils.cpython-37.pyc,,
+pip/_vendor/packaging/__pycache__/version.cpython-37.pyc,,
+pip/_vendor/packaging/_compat.py,sha256=Ugdm-qcneSchW25JrtMIKgUxfEEBcCAz6WrEeXeqz9o,865
+pip/_vendor/packaging/_structures.py,sha256=pVd90XcXRGwpZRB_qdFuVEibhCHpX_bL5zYr9-N0mc8,1416
+pip/_vendor/packaging/markers.py,sha256=-QjvJkhSJBxBogO9J_EpPQudHaaLV3rgVYsBDqn-ZLc,8234
+pip/_vendor/packaging/requirements.py,sha256=grcnFU8x7KD230JaFLXtWl3VClLuOmsOy4c-m55tOWs,4700
+pip/_vendor/packaging/specifiers.py,sha256=0ZzQpcUnvrQ6LjR-mQRLzMr8G6hdRv-mY0VSf_amFtI,27778
+pip/_vendor/packaging/utils.py,sha256=VaTC0Ei7zO2xl9ARiWmz2YFLFt89PuuhLbAlXMyAGms,1520
+pip/_vendor/packaging/version.py,sha256=Npdwnb8OHedj_2L86yiUqscujb7w_i5gmSK1PhOAFzg,11978
+pip/_vendor/pep517/__init__.py,sha256=nOY747zTld3oTdEetBG6DWxEcZXTeOQk0aHvbR-sa5w,84
+pip/_vendor/pep517/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/pep517/__pycache__/_in_process.cpython-37.pyc,,
+pip/_vendor/pep517/__pycache__/build.cpython-37.pyc,,
+pip/_vendor/pep517/__pycache__/check.cpython-37.pyc,,
+pip/_vendor/pep517/__pycache__/colorlog.cpython-37.pyc,,
+pip/_vendor/pep517/__pycache__/compat.cpython-37.pyc,,
+pip/_vendor/pep517/__pycache__/envbuild.cpython-37.pyc,,
+pip/_vendor/pep517/__pycache__/wrappers.cpython-37.pyc,,
+pip/_vendor/pep517/_in_process.py,sha256=xMY2kLutkjCti5WqTmKOLRRL3o8Ds_k-fObFyuMv1tk,6061
+pip/_vendor/pep517/build.py,sha256=-n8PT-ugS1TdqoTUY1vatDQjrLtx48K_-Quu2MuQBiA,2699
+pip/_vendor/pep517/check.py,sha256=Lu7nMdYu1JVV58fE3hv-d_avTy5h0yO9LsIzAt82Clk,5885
+pip/_vendor/pep517/colorlog.py,sha256=Tk9AuYm_cLF3BKTBoSTJt9bRryn0aFojIQOwbfVUTxQ,4098
+pip/_vendor/pep517/compat.py,sha256=4SFG4QN-cNj8ebSa0wV0HUtEEQWwmbok2a0uk1gYEOM,631
+pip/_vendor/pep517/envbuild.py,sha256=9-u4KffexPMEm52rTaIjEOxsCAd2DMByxzv5H566QLw,5763
+pip/_vendor/pep517/wrappers.py,sha256=9dZn-q7F5KyQKUJMie2uKwur2FG0CLXz_kLZzkJOhZc,5912
+pip/_vendor/pkg_resources/__init__.py,sha256=ZVHzk7ZiFIIgE2RTJj8F7wwjdMGrAngMWtQo-rGNsm4,107910
+pip/_vendor/pkg_resources/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/pkg_resources/__pycache__/py31compat.cpython-37.pyc,,
+pip/_vendor/pkg_resources/py31compat.py,sha256=CRk8fkiPRDLsbi5pZcKsHI__Pbmh_94L8mr9Qy9Ab2U,562
+pip/_vendor/progress/__init__.py,sha256=fcbQQXo5np2CoQyhSH5XprkicwLZNLePR3uIahznSO0,4857
+pip/_vendor/progress/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/progress/__pycache__/bar.cpython-37.pyc,,
+pip/_vendor/progress/__pycache__/counter.cpython-37.pyc,,
+pip/_vendor/progress/__pycache__/spinner.cpython-37.pyc,,
+pip/_vendor/progress/bar.py,sha256=QuDuVNcmXgpxtNtxO0Fq72xKigxABaVmxYGBw4J3Z_E,2854
+pip/_vendor/progress/counter.py,sha256=MznyBrvPWrOlGe4MZAlGUb9q3aODe6_aNYeAE_VNoYA,1372
+pip/_vendor/progress/spinner.py,sha256=k8JbDW94T0-WXuXfxZIFhdoNPYp3jfnpXqBnfRv5fGs,1380
+pip/_vendor/pyparsing.py,sha256=sxGUe_YcWBB5ZoHec0m1iJtgcj4iKv_SGfdA_zVCYII,245385
+pip/_vendor/pytoml/__init__.py,sha256=W_SKx36Hsew-Fty36BOpreLm4uF4V_Tgkm_z9rIoOE8,127
+pip/_vendor/pytoml/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/pytoml/__pycache__/core.cpython-37.pyc,,
+pip/_vendor/pytoml/__pycache__/parser.cpython-37.pyc,,
+pip/_vendor/pytoml/__pycache__/test.cpython-37.pyc,,
+pip/_vendor/pytoml/__pycache__/utils.cpython-37.pyc,,
+pip/_vendor/pytoml/__pycache__/writer.cpython-37.pyc,,
+pip/_vendor/pytoml/core.py,sha256=9CrLLTs1PdWjEwRnYzt_i4dhHcZvGxs_GsMlYAX3iY4,509
+pip/_vendor/pytoml/parser.py,sha256=2tDXkldqPQJhyadXzL2rGhVbjUyBNeXXhaEfncHl2iQ,10326
+pip/_vendor/pytoml/test.py,sha256=2nQs4aX3XQEaaQCx6x_OJTS2Hb0_IiTZRqNOeDmLCzo,1021
+pip/_vendor/pytoml/utils.py,sha256=JCLHx77Hu1R3F-bRgiROIiKyCzLwyebnp5P35cRJxWs,1665
+pip/_vendor/pytoml/writer.py,sha256=WbNNQg3sh_V-s3kt88LkNNbxEq6pPDdhRE-daJzArcI,3198
+pip/_vendor/requests/__init__.py,sha256=ZI8kbaEzLAxsqex3MmMPr-v24d1RfZbNAOY8fUxg2Xw,4074
+pip/_vendor/requests/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/__version__.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/_internal_utils.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/adapters.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/api.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/auth.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/certs.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/compat.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/cookies.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/exceptions.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/help.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/hooks.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/models.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/packages.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/sessions.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/status_codes.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/structures.cpython-37.pyc,,
+pip/_vendor/requests/__pycache__/utils.cpython-37.pyc,,
+pip/_vendor/requests/__version__.py,sha256=8KG3anaNCi-PEclPPOHJ_cv1udY_L1_njVr84gRZ9HM,436
+pip/_vendor/requests/_internal_utils.py,sha256=Zx3PnEUccyfsB-ie11nZVAW8qClJy0gx1qNME7rgT18,1096
+pip/_vendor/requests/adapters.py,sha256=e-bmKEApNVqFdylxuMJJfiaHdlmS_zhWhIMEzlHvGuc,21548
+pip/_vendor/requests/api.py,sha256=hWZgfD7OriCZFOnpeq0bv2pbXDl8YXfxDwAcU036qDs,6253
+pip/_vendor/requests/auth.py,sha256=QB2-cSUj1jrvWZfPXttsZpyAacQgtKLVk14vQW9TpSE,10206
+pip/_vendor/requests/certs.py,sha256=nXRVq9DtGmv_1AYbwjTu9UrgAcdJv05ZvkNeaoLOZxY,465
+pip/_vendor/requests/compat.py,sha256=FZX4Q_EMKiMnhZpZ3g_gOsT-j2ca9ij2gehDx1cwYeo,1941
+pip/_vendor/requests/cookies.py,sha256=Y-bKX6TvW3FnYlE6Au0SXtVVWcaNdFvuAwQxw-G0iTI,18430
+pip/_vendor/requests/exceptions.py,sha256=-mLam3TAx80V09EaH3H-ZxR61eAVuLRZ8zgBBSLjK44,3197
+pip/_vendor/requests/help.py,sha256=SJPVcoXeo7KfK4AxJN5eFVQCjr0im87tU2n7ubLsksU,3578
+pip/_vendor/requests/hooks.py,sha256=QReGyy0bRcr5rkwCuObNakbYsc7EkiKeBwG4qHekr2Q,757
+pip/_vendor/requests/models.py,sha256=6s-37iAqXVptq8z7U_LoH_pbIPrCQUm_Z8QuIGE29Q0,34275
+pip/_vendor/requests/packages.py,sha256=njJmVifY4aSctuW3PP5EFRCxjEwMRDO6J_feG2dKWsI,695
+pip/_vendor/requests/sessions.py,sha256=DjbCotDW6xSAaBsjbW-L8l4N0UcwmrxVNgSrZgIjGWM,29332
+pip/_vendor/requests/status_codes.py,sha256=XWlcpBjbCtq9sSqpH9_KKxgnLTf9Z__wCWolq21ySlg,4129
+pip/_vendor/requests/structures.py,sha256=zoP8qly2Jak5e89HwpqjN1z2diztI-_gaqts1raJJBc,2981
+pip/_vendor/requests/utils.py,sha256=LtPJ1db6mJff2TJSJWKi7rBpzjPS3mSOrjC9zRhoD3A,30049
+pip/_vendor/retrying.py,sha256=k3fflf5_Mm0XcIJYhB7Tj34bqCCPhUDkYbx1NvW2FPE,9972
+pip/_vendor/six.py,sha256=h9jch2pS86y4R36pKRS3LOYUCVFNIJMRwjZ4fJDtJ44,32452
+pip/_vendor/urllib3/__init__.py,sha256=EZviRQA_iuL_94EeJHY4JAArRXbRCkAzA0HH9iXZ15s,2722
+pip/_vendor/urllib3/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/urllib3/__pycache__/_collections.cpython-37.pyc,,
+pip/_vendor/urllib3/__pycache__/connection.cpython-37.pyc,,
+pip/_vendor/urllib3/__pycache__/connectionpool.cpython-37.pyc,,
+pip/_vendor/urllib3/__pycache__/exceptions.cpython-37.pyc,,
+pip/_vendor/urllib3/__pycache__/fields.cpython-37.pyc,,
+pip/_vendor/urllib3/__pycache__/filepost.cpython-37.pyc,,
+pip/_vendor/urllib3/__pycache__/poolmanager.cpython-37.pyc,,
+pip/_vendor/urllib3/__pycache__/request.cpython-37.pyc,,
+pip/_vendor/urllib3/__pycache__/response.cpython-37.pyc,,
+pip/_vendor/urllib3/_collections.py,sha256=-CAKsDE-WdubAjlBSZLx7b0e7WKenaNGwWvGLDEF1TM,10746
+pip/_vendor/urllib3/connection.py,sha256=KLFvknLgllcMkgJ-zUsFjCzOt9P03fDoIpTPz_vqXCw,13839
+pip/_vendor/urllib3/connectionpool.py,sha256=rgc_3D0VsD5VDxr4KzzA8Plee0Rmerm5WKb71FcxWu8,35097
+pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-37.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-37.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-37.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-37.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-37.pyc,,
+pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-37.pyc,,
+pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=lhYXvB5_oGKSeurX7za3XhcGyERvNjXRQ3eJp2GmQ3M,717
+pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-37.pyc,,
+pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-37.pyc,,
+pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=x2kLSh-ASZKsun0FxtraBuLVe3oHuth4YW6yZ5Vof-w,17560
+pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=Umy5u-3Z957GirdapnicXVOpHaM4xdOZABJuJxfaeJA,12162
+pip/_vendor/urllib3/contrib/appengine.py,sha256=VvDpkc5gf9dTXNxXmyG1mPdON_3DrYG_eW4uOqN98oQ,10938
+pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=5ZpMF7N9B6NEjVU-r-xjDOV_-hkNvsDoNc84J2yqauI,4459
+pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=-kI_9y99Iwybv6Wy8IF8PugVl61BeMBEEqGwrDYNCuc,15823
+pip/_vendor/urllib3/contrib/securetransport.py,sha256=BqXSlChN9_hjCWgyN6JdcgvBUdc37QCCX4u3_8zE_9o,30309
+pip/_vendor/urllib3/contrib/socks.py,sha256=Iom0snbHkCuZbZ7Sle2Kueha1W0jYAJ0SyCOtePLaio,6391
+pip/_vendor/urllib3/exceptions.py,sha256=rFeIfBNKC8KJ61ux-MtJyJlEC9G9ggkmCeF751JwVR4,6604
+pip/_vendor/urllib3/fields.py,sha256=D_TE_SK15YatdbhWDMN0OE3X6UCJn1RTkANINCYOobE,5943
+pip/_vendor/urllib3/filepost.py,sha256=40CROlpRKVBpFUkD0R6wJf_PpvbcRQRFUu0OOQlFkKM,2436
+pip/_vendor/urllib3/packages/__init__.py,sha256=nlChrGzkjCkmhCX9HrF_qHPUgosfsPQkVIJxiiLhk9g,109
+pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/urllib3/packages/__pycache__/six.cpython-37.pyc,,
+pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-37.pyc,,
+pip/_vendor/urllib3/packages/backports/makefile.py,sha256=so2z9BiNM8kh38Ve5tomQP_mp2_ubEqzdlCpLZKzzCI,1456
+pip/_vendor/urllib3/packages/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098
+pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py,sha256=WBVbxQBojNAxfZwNavkox3BgJiMA9BJmm-_fwd0jD_o,688
+pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-37.pyc,,
+pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py,sha256=E-9J-kAaUn76WMZ4PpzKUxM4C3yjY7mopOpbPIy3Dso,5700
+pip/_vendor/urllib3/poolmanager.py,sha256=csE6Bh6L0FJ3iNOHk2z8KhMT8Eiq976b6pk8I6vrOC8,16853
+pip/_vendor/urllib3/request.py,sha256=OfelFYzPnxGlU3amEz9uBLjCBOriwgJh4QC_aW9SF3U,5991
+pip/_vendor/urllib3/response.py,sha256=ta1jp4B5PGBWzoAV1s48WLuHCRICQnK7F9m_kyK4Z8g,25609
+pip/_vendor/urllib3/util/__init__.py,sha256=6Ran4oAVIy40Cu_oEPWnNV9bwF5rXx6G1DUZ7oehjPY,1044
+pip/_vendor/urllib3/util/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/urllib3/util/__pycache__/connection.cpython-37.pyc,,
+pip/_vendor/urllib3/util/__pycache__/queue.cpython-37.pyc,,
+pip/_vendor/urllib3/util/__pycache__/request.cpython-37.pyc,,
+pip/_vendor/urllib3/util/__pycache__/response.cpython-37.pyc,,
+pip/_vendor/urllib3/util/__pycache__/retry.cpython-37.pyc,,
+pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-37.pyc,,
+pip/_vendor/urllib3/util/__pycache__/timeout.cpython-37.pyc,,
+pip/_vendor/urllib3/util/__pycache__/url.cpython-37.pyc,,
+pip/_vendor/urllib3/util/__pycache__/wait.cpython-37.pyc,,
+pip/_vendor/urllib3/util/connection.py,sha256=-AyqcRTuNUHuo5ndtsU0Og_nMyCGATC-kYqOUdBHwIQ,4639
+pip/_vendor/urllib3/util/queue.py,sha256=myTX3JDHntglKQNBf3b6dasHH-uF-W59vzGSQiFdAfI,497
+pip/_vendor/urllib3/util/request.py,sha256=H5_lrHvtwl2U2BbT1UYN9HpruNc1gsNFlz2njQmhPrQ,3705
+pip/_vendor/urllib3/util/response.py,sha256=028PNXDZhwBtnm2uXvnAHi_l9_AAGrAMH2Igh2AbgWg,2586
+pip/_vendor/urllib3/util/retry.py,sha256=kFQTesNiwPp6ZeQo9VHeUO7b8qA-_l3BnErCAOEPo4Q,15105
+pip/_vendor/urllib3/util/ssl_.py,sha256=4qqBDM82bufhqqEd0b-99sObz95XmEVEXDVi5iAyCeE,13172
+pip/_vendor/urllib3/util/timeout.py,sha256=7lHNrgL5YH2cI1j-yZnzV_J8jBlRVdmFhQaNyM1_2b8,9757
+pip/_vendor/urllib3/util/url.py,sha256=qCY_HHUXvo05wAsEERALgExtlgxLnAHSQ7ce1b-g3SM,6487
+pip/_vendor/urllib3/util/wait.py,sha256=p4BZo_Ukp5JF0Dn6jro7cUfqIjnU6WFtuoA6poaV5Jk,5403
+pip/_vendor/webencodings/__init__.py,sha256=qOBJIuPy_4ByYH6W_bNgJF-qYQ2DoU-dKsDu5yRWCXg,10579
+pip/_vendor/webencodings/__pycache__/__init__.cpython-37.pyc,,
+pip/_vendor/webencodings/__pycache__/labels.cpython-37.pyc,,
+pip/_vendor/webencodings/__pycache__/mklabels.cpython-37.pyc,,
+pip/_vendor/webencodings/__pycache__/tests.cpython-37.pyc,,
+pip/_vendor/webencodings/__pycache__/x_user_defined.cpython-37.pyc,,
+pip/_vendor/webencodings/labels.py,sha256=4AO_KxTddqGtrL9ns7kAPjb0CcN6xsCIxbK37HY9r3E,8979
+pip/_vendor/webencodings/mklabels.py,sha256=GYIeywnpaLnP0GSic8LFWgd0UVvO_l1Nc6YoF-87R_4,1305
+pip/_vendor/webencodings/tests.py,sha256=OtGLyjhNY1fvkW1GvLJ_FV9ZoqC9Anyjr7q3kxTbzNs,6563
+pip/_vendor/webencodings/x_user_defined.py,sha256=yOqWSdmpytGfUgh_Z6JYgDNhoc-BAHyyeeT15Fr42tM,4307
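Each row in RECORD is "path,sha256=<digest>,<size>", where the digest is an unpadded urlsafe base64 encoding of the file's SHA-256 hash. A small sketch that reproduces the format; the sample input assumes top_level.txt holds "pip" plus a trailing newline, which matches that row's recorded size of 4 bytes:

    import base64
    import hashlib

    def record_hash(data: bytes) -> str:
        # Unpadded urlsafe base64 of the SHA-256 digest, as RECORD expects.
        digest = hashlib.sha256(data).digest()
        return base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    print(record_hash(b"pip\n"))  # should match the top_level.txt row above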
diff --git a/lib/python3.7/site-packages/pip-19.1.dist-info/WHEEL b/lib/python3.7/site-packages/pip-19.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..c8240f03e87fdc556c4cb9a1f721a73138177cc5
--- /dev/null
+++ b/lib/python3.7/site-packages/pip-19.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
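The two Tag lines mark this as a universal wheel: pure Python ("Root-Is-Purelib: true"), no ABI, any platform, installable on both Python 2 and 3. In the wheel's filename the two interpreter tags are compressed into a single dotted field, which installers split back apart; a quick illustration (the filename is the one this wheel would carry):

    # Wheel filenames are name-version-interpreter-abi-platform.whl.
    filename = "pip-19.1-py2.py3-none-any.whl"
    name, version, interp, abi, plat = filename[:-4].split("-")
    print(interp.split("."))  # ['py2', 'py3']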
diff --git a/lib/python3.7/site-packages/pip-19.1.dist-info/entry_points.txt b/lib/python3.7/site-packages/pip-19.1.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f5809cb4a334b4dbdec8a926299b2a655d7299ad
--- /dev/null
+++ b/lib/python3.7/site-packages/pip-19.1.dist-info/entry_points.txt
@@ -0,0 +1,5 @@
+[console_scripts]
+pip = pip._internal:main
+pip3 = pip._internal:main
+pip3.7 = pip._internal:main
+
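Each console_scripts line above maps a command name to an importable callable; at install time the installer writes a small wrapper script into bin/ that imports and calls it. A rough sketch of such a generated wrapper (real wrappers hard-code the environment's interpreter path in the shebang and may normalize sys.argv[0]):

    #!/usr/bin/env python
    # Hypothetical wrapper for the "pip = pip._internal:main" entry point.
    import sys

    from pip._internal import main

    if __name__ == '__main__':
        sys.exit(main())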
diff --git a/lib/python3.7/site-packages/pip-19.1.dist-info/top_level.txt b/lib/python3.7/site-packages/pip-19.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/lib/python3.7/site-packages/pip-19.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+pip
diff --git a/lib/python3.7/site-packages/pip/__init__.py b/lib/python3.7/site-packages/pip/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..75cd896892517daa7e3f9cb7f463d40c7ed5cb6b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/__init__.py
@@ -0,0 +1 @@
+__version__ = "19.1"
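Since the package's top-level module consists of nothing but this version string, checking which copy of pip is being imported is trivial (assuming this tree is first on sys.path):

    import pip

    print(pip.__version__)  # 19.1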
diff --git a/lib/python3.7/site-packages/pip/__main__.py b/lib/python3.7/site-packages/pip/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c223f8c18783929659b3794ad714153c8c78223
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/__main__.py
@@ -0,0 +1,19 @@
+from __future__ import absolute_import
+
+import os
+import sys
+
+# If we are running from a wheel, add the wheel to sys.path
+# This allows usage like: python pip-*.whl/pip install pip-*.whl
+if __package__ == '':
+    # __file__ is pip-*.whl/pip/__main__.py
+    # The first dirname call strips off '/__main__.py', the second strips off '/pip'.
+    # The resulting path is the wheel file itself.
+    # Add that to sys.path so we can import pip
+    path = os.path.dirname(os.path.dirname(__file__))
+    sys.path.insert(0, path)
+
+from pip._internal import main as _main  # isort:skip # noqa
+
+if __name__ == '__main__':
+    sys.exit(_main())
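The double dirname is what makes the run-from-wheel trick work: when the module is executed out of a zip archive, __file__ is a path inside the wheel, and its grandparent is the wheel file itself, which zipimport can then resolve from sys.path. A self-contained check of the path arithmetic (the wheel name is illustrative):

    import os

    # Hypothetical __file__ when executed from inside a wheel.
    member = "pip-19.1-py2.py3-none-any.whl/pip/__main__.py"
    print(os.path.dirname(os.path.dirname(member)))
    # -> pip-19.1-py2.py3-none-any.whl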
diff --git a/lib/python3.7/site-packages/pip/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a923cd56acc0c06eeb9a4915e4c165ce2fa61da
Binary files /dev/null and b/lib/python3.7/site-packages/pip/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/__pycache__/__main__.cpython-37.pyc b/lib/python3.7/site-packages/pip/__pycache__/__main__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bdf5113757e00c0584733123b7e9748e79aaedfc
Binary files /dev/null and b/lib/python3.7/site-packages/pip/__pycache__/__main__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/__init__.py b/lib/python3.7/site-packages/pip/_internal/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c1637cfc11949aaae726543eff11b58159b0b1a
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/__init__.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+from __future__ import absolute_import
+
+import locale
+import logging
+import os
+import sys
+import warnings
+
+# 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks,
+# but if invoked (i.e. imported), it will issue a warning to stderr if socks
+# isn't available.  requests unconditionally imports urllib3's socks contrib
+# module, triggering this warning.  The warning breaks DEP-8 tests (because of
+# the stderr output) and is just plain annoying in normal usage.  I don't want
+# to add socks as yet another dependency for pip, nor do I want to allow-stderr
+# in the DEP-8 tests, so just suppress the warning.  pdb tells me this has to
+# be done before the import of pip.vcs.
+from pip._vendor.urllib3.exceptions import DependencyWarning
+warnings.filterwarnings("ignore", category=DependencyWarning)  # noqa
+
+# We want to inject the use of SecureTransport as early as possible so that
+# any references or sessions created later are guaranteed to use it. However,
+# we only want to do this when running on macOS with a linked OpenSSL that is
+# too old to handle TLSv1.2.
+try:
+    import ssl
+except ImportError:
+    pass
+else:
+    # On macOS, check whether the linked OpenSSL is older than 1.0.1.
+    if sys.platform == "darwin" and ssl.OPENSSL_VERSION_NUMBER < 0x1000100f:
+        try:
+            from pip._vendor.urllib3.contrib import securetransport
+        except (ImportError, OSError):
+            pass
+        else:
+            securetransport.inject_into_urllib3()
+
+from pip._internal.cli.autocompletion import autocomplete
+from pip._internal.cli.main_parser import parse_command
+from pip._internal.commands import commands_dict
+from pip._internal.exceptions import PipError
+from pip._internal.utils import deprecation
+from pip._internal.vcs import git, mercurial, subversion, bazaar  # noqa
+from pip._vendor.urllib3.exceptions import InsecureRequestWarning
+
+logger = logging.getLogger(__name__)
+
+# Hide the InsecureRequestWarning from urllib3
+warnings.filterwarnings("ignore", category=InsecureRequestWarning)
+
+
+def main(args=None):
+    if args is None:
+        args = sys.argv[1:]
+
+    # Configure our deprecation warnings to be sent through loggers
+    deprecation.install_warning_logger()
+
+    autocomplete()
+
+    try:
+        cmd_name, cmd_args = parse_command(args)
+    except PipError as exc:
+        sys.stderr.write("ERROR: %s" % exc)
+        sys.stderr.write(os.linesep)
+        sys.exit(1)
+
+    # Needed for locale.getpreferredencoding(False) to work
+    # in pip._internal.utils.encoding.auto_decode
+    try:
+        locale.setlocale(locale.LC_ALL, '')
+    except locale.Error as e:
+        # setlocale can apparently crash if locales are uninitialized
+        logger.debug("Ignoring error %s when setting locale", e)
+    command = commands_dict[cmd_name](isolated=("--isolated" in cmd_args))
+    return command.main(cmd_args)
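Because main() returns the selected command's status code (and only calls sys.exit() itself on a parse error), it can in principle be driven in-process. A hedged sketch, assuming this pip 19.1 tree is importable; calling pip's internals this way has never been a supported API:

    from pip._internal import main

    # Run `pip list` in-process; 0 means success.
    status = main(["list"])
    print("pip exited with", status)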
diff --git a/lib/python3.7/site-packages/pip/_internal/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..261437829af4050268c309d97c772a0cd9fdda49
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/__pycache__/build_env.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/__pycache__/build_env.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bfef32b012aa808ce8fbee5882a5cc389b528a85
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/__pycache__/build_env.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/__pycache__/cache.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/__pycache__/cache.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4fb9756b45c15562e0d1adf60eb88873c3c8c758
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/__pycache__/cache.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/__pycache__/configuration.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/__pycache__/configuration.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be19fbbb015460de15d6aebdc7b54ec44b30c1d3
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/__pycache__/configuration.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/__pycache__/download.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/__pycache__/download.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2e30990a752096904a896663847863617613ebe3
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/__pycache__/download.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/__pycache__/exceptions.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/__pycache__/exceptions.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b12b1f64955cc3189fa6c7730bcdb016da153a12
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/__pycache__/exceptions.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/__pycache__/index.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/__pycache__/index.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0209d97d676c36ca0ef9069722da97071ba44a40
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/__pycache__/index.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/__pycache__/locations.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/__pycache__/locations.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20a9f35ee826cdeb6de8a4175e1c2fc430b57bb1
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/__pycache__/locations.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/__pycache__/pep425tags.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/__pycache__/pep425tags.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..04aea13fa35be778fe87813d44b56442f2c08585
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/__pycache__/pep425tags.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/__pycache__/pyproject.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/__pycache__/pyproject.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f882e7ec9598029987c87ec3cad73f5420b063c5
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/__pycache__/pyproject.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/__pycache__/resolve.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/__pycache__/resolve.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..db53f0b3b984ebc2ddf71ee0ab295cb60905b353
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/__pycache__/resolve.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/__pycache__/wheel.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/__pycache__/wheel.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..97eec3023b0a0ea30f33c0b6173135c0ab71a1e3
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/__pycache__/wheel.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/build_env.py b/lib/python3.7/site-packages/pip/_internal/build_env.py
new file mode 100644
index 0000000000000000000000000000000000000000..89830e72ba4c4c8fe518e3ec2a5ef49821593521
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/build_env.py
@@ -0,0 +1,215 @@
+"""Build Environment used for isolation during sdist building
+"""
+
+import logging
+import os
+import sys
+import textwrap
+from collections import OrderedDict
+from distutils.sysconfig import get_python_lib
+from sysconfig import get_paths
+
+from pip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet
+
+from pip import __file__ as pip_location
+from pip._internal.utils.misc import call_subprocess
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.utils.ui import open_spinner
+
+if MYPY_CHECK_RUNNING:
+    from typing import Tuple, Set, Iterable, Optional, List
+    from pip._internal.index import PackageFinder
+
+logger = logging.getLogger(__name__)
+
+
+class _Prefix:
+
+    def __init__(self, path):
+        # type: (str) -> None
+        self.path = path
+        self.setup = False
+        self.bin_dir = get_paths(
+            'nt' if os.name == 'nt' else 'posix_prefix',
+            vars={'base': path, 'platbase': path}
+        )['scripts']
+        # Note: prefer distutils' sysconfig to get the
+        # library paths so PyPy is correctly supported.
+        purelib = get_python_lib(plat_specific=False, prefix=path)
+        platlib = get_python_lib(plat_specific=True, prefix=path)
+        if purelib == platlib:
+            self.lib_dirs = [purelib]
+        else:
+            self.lib_dirs = [purelib, platlib]
+
+
+class BuildEnvironment(object):
+    """Creates and manages an isolated environment to install build deps
+    """
+
+    def __init__(self):
+        # type: () -> None
+        self._temp_dir = TempDirectory(kind="build-env")
+        self._temp_dir.create()
+
+        self._prefixes = OrderedDict((
+            (name, _Prefix(os.path.join(self._temp_dir.path, name)))
+            for name in ('normal', 'overlay')
+        ))
+
+        self._bin_dirs = []  # type: List[str]
+        self._lib_dirs = []  # type: List[str]
+        for prefix in reversed(list(self._prefixes.values())):
+            self._bin_dirs.append(prefix.bin_dir)
+            self._lib_dirs.extend(prefix.lib_dirs)
+
+        # Customize site to:
+        # - ensure .pth files are honored
+        # - prevent access to system site packages
+        system_sites = {
+            os.path.normcase(site) for site in (
+                get_python_lib(plat_specific=False),
+                get_python_lib(plat_specific=True),
+            )
+        }
+        self._site_dir = os.path.join(self._temp_dir.path, 'site')
+        if not os.path.exists(self._site_dir):
+            os.mkdir(self._site_dir)
+        with open(os.path.join(self._site_dir, 'sitecustomize.py'), 'w') as fp:
+            fp.write(textwrap.dedent(
+                '''
+                import os, site, sys
+
+                # First, drop system-sites related paths.
+                original_sys_path = sys.path[:]
+                known_paths = set()
+                for path in {system_sites!r}:
+                    site.addsitedir(path, known_paths=known_paths)
+                system_paths = set(
+                    os.path.normcase(path)
+                    for path in sys.path[len(original_sys_path):]
+                )
+                original_sys_path = [
+                    path for path in original_sys_path
+                    if os.path.normcase(path) not in system_paths
+                ]
+                sys.path = original_sys_path
+
+                # Second, add lib directories,
+                # ensuring .pth files are processed.
+                for path in {lib_dirs!r}:
+                    assert path not in sys.path
+                    site.addsitedir(path)
+                '''
+            ).format(system_sites=system_sites, lib_dirs=self._lib_dirs))
+
+    def __enter__(self):
+        self._save_env = {
+            name: os.environ.get(name, None)
+            for name in ('PATH', 'PYTHONNOUSERSITE', 'PYTHONPATH')
+        }
+
+        path = self._bin_dirs[:]
+        old_path = self._save_env['PATH']
+        if old_path:
+            path.extend(old_path.split(os.pathsep))
+
+        pythonpath = [self._site_dir]
+
+        os.environ.update({
+            'PATH': os.pathsep.join(path),
+            'PYTHONNOUSERSITE': '1',
+            'PYTHONPATH': os.pathsep.join(pythonpath),
+        })
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        for varname, old_value in self._save_env.items():
+            if old_value is None:
+                os.environ.pop(varname, None)
+            else:
+                os.environ[varname] = old_value
+
+    def cleanup(self):
+        # type: () -> None
+        self._temp_dir.cleanup()
+
+    def check_requirements(self, reqs):
+        # type: (Iterable[str]) -> Tuple[Set[Tuple[str, str]], Set[str]]
+        """Return 2 sets:
+            - conflicting requirements: set of (installed, wanted) reqs tuples
+            - missing requirements: set of reqs
+        """
+        missing = set()
+        conflicting = set()
+        if reqs:
+            ws = WorkingSet(self._lib_dirs)
+            for req in reqs:
+                try:
+                    if ws.find(Requirement.parse(req)) is None:
+                        missing.add(req)
+                except VersionConflict as e:
+                    conflicting.add((str(e.args[0].as_requirement()),
+                                     str(e.args[1])))
+        return conflicting, missing
+
+    def install_requirements(
+        self,
+        finder,  # type: PackageFinder
+        requirements,  # type: Iterable[str]
+        prefix_as_string,  # type: str
+        message  # type: Optional[str]
+    ):
+        # type: (...) -> None
+        prefix = self._prefixes[prefix_as_string]
+        assert not prefix.setup
+        prefix.setup = True
+        if not requirements:
+            return
+        args = [
+            sys.executable, os.path.dirname(pip_location), 'install',
+            '--ignore-installed', '--no-user', '--prefix', prefix.path,
+            '--no-warn-script-location',
+        ]  # type: List[str]
+        if logger.getEffectiveLevel() <= logging.DEBUG:
+            args.append('-v')
+        for format_control in ('no_binary', 'only_binary'):
+            formats = getattr(finder.format_control, format_control)
+            args.extend(('--' + format_control.replace('_', '-'),
+                         ','.join(sorted(formats or {':none:'}))))
+        if finder.index_urls:
+            args.extend(['-i', finder.index_urls[0]])
+            for extra_index in finder.index_urls[1:]:
+                args.extend(['--extra-index-url', extra_index])
+        else:
+            args.append('--no-index')
+        for link in finder.find_links:
+            args.extend(['--find-links', link])
+        for _, host, _ in finder.secure_origins:
+            args.extend(['--trusted-host', host])
+        if finder.allow_all_prereleases:
+            args.append('--pre')
+        args.append('--')
+        args.extend(requirements)
+        with open_spinner(message) as spinner:
+            call_subprocess(args, spinner=spinner)
+
+
+class NoOpBuildEnvironment(BuildEnvironment):
+    """A no-op drop-in replacement for BuildEnvironment
+    """
+
+    def __init__(self):
+        pass
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        pass
+
+    def cleanup(self):
+        pass
+
+    def install_requirements(self, finder, requirements, prefix, message):
+        raise NotImplementedError()
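To make the lifecycle above concrete, here is a hedged sketch of how a PEP 518 build might use `BuildEnvironment`; the finder configuration mirrors `_build_package_finder` further down in this diff, and the index URL is an assumption:

```python
# Illustrative sketch, not part of the patch. install_requirements()
# shells out to the vendored pip to populate the 'normal' prefix.
from pip._internal.build_env import BuildEnvironment
from pip._internal.download import PipSession
from pip._internal.index import PackageFinder

session = PipSession()
finder = PackageFinder(
    find_links=[],
    index_urls=["https://pypi.org/simple"],  # assumed index
    session=session,
)

build_env = BuildEnvironment()
try:
    build_env.install_requirements(
        finder, ["setuptools>=40.8.0", "wheel"], "normal",
        "Installing build dependencies",
    )
    # __enter__ prepends the prefix bin/ dirs to PATH and points
    # PYTHONPATH at the generated sitecustomize.py, so the build sees
    # only the isolated environment.
    with build_env:
        conflicting, missing = build_env.check_requirements(["wheel"])
        assert not conflicting and not missing
finally:
    build_env.cleanup()
```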
diff --git a/lib/python3.7/site-packages/pip/_internal/cache.py b/lib/python3.7/site-packages/pip/_internal/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..9379343c2fae69706d92462db2a91d6c10eb0f5d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/cache.py
@@ -0,0 +1,224 @@
+"""Cache Management
+"""
+
+import errno
+import hashlib
+import logging
+import os
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.download import path_to_url
+from pip._internal.models.link import Link
+from pip._internal.utils.compat import expanduser
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.wheel import InvalidWheelFilename, Wheel
+
+if MYPY_CHECK_RUNNING:
+    from typing import Optional, Set, List, Any
+    from pip._internal.index import FormatControl
+
+logger = logging.getLogger(__name__)
+
+
+class Cache(object):
+    """An abstract class - provides cache directories for data from links
+
+
+        :param cache_dir: The root of the cache.
+        :param format_control: An object of FormatControl class to limit
+            binaries being read from the cache.
+        :param allowed_formats: which formats of files the cache should store.
+            ('binary' and 'source' are the only allowed values)
+    """
+
+    def __init__(self, cache_dir, format_control, allowed_formats):
+        # type: (str, FormatControl, Set[str]) -> None
+        super(Cache, self).__init__()
+        self.cache_dir = expanduser(cache_dir) if cache_dir else None
+        self.format_control = format_control
+        self.allowed_formats = allowed_formats
+
+        _valid_formats = {"source", "binary"}
+        assert self.allowed_formats.union(_valid_formats) == _valid_formats
+
+    def _get_cache_path_parts(self, link):
+        # type: (Link) -> List[str]
+        """Get parts of part that must be os.path.joined with cache_dir
+        """
+
+        # We want to generate a URL to use as our cache key; we don't want to
+        # just re-use the URL, because it might have other items in the
+        # fragment that we don't care about.
+        key_parts = [link.url_without_fragment]
+        if link.hash_name is not None and link.hash is not None:
+            key_parts.append("=".join([link.hash_name, link.hash]))
+        key_url = "#".join(key_parts)
+
+        # Encode our key URL with sha224; it has similar security properties
+        # to sha256, but a shorter total output (and is thus less secure).
+        # That difference doesn't matter for our use case here.
+        hashed = hashlib.sha224(key_url.encode()).hexdigest()
+
+        # We want to nest the directories some to prevent having a ton of
+        # top-level directories, where we might run out of subdirectories
+        # on some filesystems.
+        parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
+
+        return parts
+
+    def _get_candidates(self, link, package_name):
+        # type: (Link, Optional[str]) -> List[Any]
+        can_not_cache = (
+            not self.cache_dir or
+            not package_name or
+            not link
+        )
+        if can_not_cache:
+            return []
+
+        canonical_name = canonicalize_name(package_name)
+        formats = self.format_control.get_allowed_formats(
+            canonical_name
+        )
+        if not self.allowed_formats.intersection(formats):
+            return []
+
+        root = self.get_path_for_link(link)
+        try:
+            return os.listdir(root)
+        except OSError as err:
+            if err.errno in {errno.ENOENT, errno.ENOTDIR}:
+                return []
+            raise
+
+    def get_path_for_link(self, link):
+        # type: (Link) -> str
+        """Return a directory to store cached items in for link.
+        """
+        raise NotImplementedError()
+
+    def get(self, link, package_name):
+        # type: (Link, Optional[str]) -> Link
+        """Returns a link to a cached item if it exists, otherwise returns the
+        passed link.
+        """
+        raise NotImplementedError()
+
+    def _link_for_candidate(self, link, candidate):
+        # type: (Link, str) -> Link
+        root = self.get_path_for_link(link)
+        path = os.path.join(root, candidate)
+
+        return Link(path_to_url(path))
+
+    def cleanup(self):
+        # type: () -> None
+        pass
+
+
+class SimpleWheelCache(Cache):
+    """A cache of wheels for future installs.
+    """
+
+    def __init__(self, cache_dir, format_control):
+        # type: (str, FormatControl) -> None
+        super(SimpleWheelCache, self).__init__(
+            cache_dir, format_control, {"binary"}
+        )
+
+    def get_path_for_link(self, link):
+        # type: (Link) -> str
+        """Return a directory to store cached wheels for link
+
+        Because there can be many wheels for any one sdist, we provide a
+        directory
+        to cache them in, and then consult that directory when looking up
+        cache hits.
+
+        We only insert things into the cache if they have plausible version
+        numbers, so that we don't contaminate the cache with things that were
+        not unique. E.g. ./package might have dozens of installs done for it
+        and always build version 0.0; if we built and cached a wheel, we'd
+        end up using the same wheel even after the source had been edited.
+
+        :param link: The link of the sdist for which this will cache wheels.
+        """
+        parts = self._get_cache_path_parts(link)
+
+        # Store wheels within the root cache_dir
+        return os.path.join(self.cache_dir, "wheels", *parts)
+
+    def get(self, link, package_name):
+        # type: (Link, Optional[str]) -> Link
+        candidates = []
+
+        for wheel_name in self._get_candidates(link, package_name):
+            try:
+                wheel = Wheel(wheel_name)
+            except InvalidWheelFilename:
+                continue
+            if not wheel.supported():
+                # Built for a different python/arch/etc
+                continue
+            candidates.append((wheel.support_index_min(), wheel_name))
+
+        if not candidates:
+            return link
+
+        return self._link_for_candidate(link, min(candidates)[1])
+
+
+class EphemWheelCache(SimpleWheelCache):
+    """A SimpleWheelCache that creates it's own temporary cache directory
+    """
+
+    def __init__(self, format_control):
+        # type: (FormatControl) -> None
+        self._temp_dir = TempDirectory(kind="ephem-wheel-cache")
+        self._temp_dir.create()
+
+        super(EphemWheelCache, self).__init__(
+            self._temp_dir.path, format_control
+        )
+
+    def cleanup(self):
+        # type: () -> None
+        self._temp_dir.cleanup()
+
+
+class WheelCache(Cache):
+    """Wraps EphemWheelCache and SimpleWheelCache into a single Cache
+
+    This Cache allows for graceful degradation: the simple wheel cache is
+    consulted first, and the ephem wheel cache is used when a link is not
+    found there.
+    """
+
+    def __init__(self, cache_dir, format_control):
+        # type: (str, FormatControl) -> None
+        super(WheelCache, self).__init__(
+            cache_dir, format_control, {'binary'}
+        )
+        self._wheel_cache = SimpleWheelCache(cache_dir, format_control)
+        self._ephem_cache = EphemWheelCache(format_control)
+
+    def get_path_for_link(self, link):
+        # type: (Link) -> str
+        return self._wheel_cache.get_path_for_link(link)
+
+    def get_ephem_path_for_link(self, link):
+        # type: (Link) -> str
+        return self._ephem_cache.get_path_for_link(link)
+
+    def get(self, link, package_name):
+        # type: (Link, Optional[str]) -> Link
+        retval = self._wheel_cache.get(link, package_name)
+        if retval is link:
+            retval = self._ephem_cache.get(link, package_name)
+        return retval
+
+    def cleanup(self):
+        # type: () -> None
+        self._wheel_cache.cleanup()
+        self._ephem_cache.cleanup()
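A short sketch of the fallback behaviour described in the docstring above; the cache directory and link are assumptions made for illustration (the real cache dir would normally come from `--cache-dir` / `USER_CACHE_DIR`):

```python
# Illustrative sketch, not part of the patch.
from pip._internal.cache import WheelCache
from pip._internal.models.format_control import FormatControl
from pip._internal.models.link import Link

wheel_cache = WheelCache("~/.cache/pip", FormatControl(set(), set()))
link = Link("https://files.example.org/demo-1.0.tar.gz")  # hypothetical sdist

# get() consults the persistent cache, then the ephemeral one; on a
# miss it returns the original link, so callers compare identity.
resolved = wheel_cache.get(link, "demo")
print("cache hit" if resolved is not link else "cache miss")
wheel_cache.cleanup()  # removes only the ephemeral temp directory
```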
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/__init__.py b/lib/python3.7/site-packages/pip/_internal/cli/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e589bb917e23823e25f9fff7e0849c4d6d4a62bc
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/cli/__init__.py
@@ -0,0 +1,4 @@
+"""Subpackage containing all of pip's command line interface related code
+"""
+
+# This file intentionally does not import submodules
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a716b30bd3fb76986b25a3a17d3e82606e5f07b5
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0885de3e67d56de59bf20fdb6a28205667d1aeff
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/autocompletion.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0fd00be3d17e01ff3a7f4664dbc478d536f80295
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/base_command.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..54551108ffdb9b5cdd160226bfd50348d287767c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/cmdoptions.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d03718be7d50224de9b6ca770281cbc3f02dd1c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/main_parser.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/parser.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/parser.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..75575480eeb55a8718289194243948092c439fae
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/parser.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e0783459f3365b1f395fafd414c0bce282eace8b
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/cli/__pycache__/status_codes.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/autocompletion.py b/lib/python3.7/site-packages/pip/_internal/cli/autocompletion.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a04199e6dc06f71c57a0bb14c175476a8410b42
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/cli/autocompletion.py
@@ -0,0 +1,152 @@
+"""Logic that powers autocompletion installed by ``pip completion``.
+"""
+
+import optparse
+import os
+import sys
+
+from pip._internal.cli.main_parser import create_main_parser
+from pip._internal.commands import commands_dict, get_summaries
+from pip._internal.utils.misc import get_installed_distributions
+
+
+def autocomplete():
+    """Entry Point for completion of main and subcommand options.
+    """
+    # Don't complete if user hasn't sourced bash_completion file.
+    if 'PIP_AUTO_COMPLETE' not in os.environ:
+        return
+    cwords = os.environ['COMP_WORDS'].split()[1:]
+    cword = int(os.environ['COMP_CWORD'])
+    try:
+        current = cwords[cword - 1]
+    except IndexError:
+        current = ''
+
+    subcommands = [cmd for cmd, summary in get_summaries()]
+    options = []
+    # subcommand
+    try:
+        subcommand_name = [w for w in cwords if w in subcommands][0]
+    except IndexError:
+        subcommand_name = None
+
+    parser = create_main_parser()
+    # subcommand options
+    if subcommand_name:
+        # special case: 'help' subcommand has no options
+        if subcommand_name == 'help':
+            sys.exit(1)
+        # special case: list locally installed dists for show and uninstall
+        should_list_installed = (
+            subcommand_name in ['show', 'uninstall'] and
+            not current.startswith('-')
+        )
+        if should_list_installed:
+            installed = []
+            lc = current.lower()
+            for dist in get_installed_distributions(local_only=True):
+                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
+                    installed.append(dist.key)
+            # if there are no dists installed, fall back to option completion
+            if installed:
+                for dist in installed:
+                    print(dist)
+                sys.exit(1)
+
+        subcommand = commands_dict[subcommand_name]()
+
+        for opt in subcommand.parser.option_list_all:
+            if opt.help != optparse.SUPPRESS_HELP:
+                for opt_str in opt._long_opts + opt._short_opts:
+                    options.append((opt_str, opt.nargs))
+
+        # filter out previously specified options from available options
+        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
+        options = [(x, v) for (x, v) in options if x not in prev_opts]
+        # filter options by current input
+        options = [(k, v) for k, v in options if k.startswith(current)]
+        # get completion type given cwords and available subcommand options
+        completion_type = get_path_completion_type(
+            cwords, cword, subcommand.parser.option_list_all,
+        )
+        # get completion files and directories if ``completion_type`` is
+        # ``<file>``, ``<dir>`` or ``<path>``
+        if completion_type:
+            options = auto_complete_paths(current, completion_type)
+            options = ((opt, 0) for opt in options)
+        for option in options:
+            opt_label = option[0]
+            # append '=' to options which require args
+            if option[1] and option[0][:2] == "--":
+                opt_label += '='
+            print(opt_label)
+    else:
+        # show main parser options only when necessary
+
+        opts = [i.option_list for i in parser.option_groups]
+        opts.append(parser.option_list)
+        opts = (o for it in opts for o in it)
+        if current.startswith('-'):
+            for opt in opts:
+                if opt.help != optparse.SUPPRESS_HELP:
+                    subcommands += opt._long_opts + opt._short_opts
+        else:
+            # get completion type given cwords and all available options
+            completion_type = get_path_completion_type(cwords, cword, opts)
+            if completion_type:
+                subcommands = auto_complete_paths(current, completion_type)
+
+        print(' '.join([x for x in subcommands if x.startswith(current)]))
+    sys.exit(1)
+
+
+def get_path_completion_type(cwords, cword, opts):
+    """Get the type of path completion (``file``, ``dir``, ``path`` or None)
+
+    :param cwords: same as the environment variable ``COMP_WORDS``
+    :param cword: same as the environment variable ``COMP_CWORD``
+    :param opts: The available options to check
+    :return: path completion type (``file``, ``dir``, ``path`` or None)
+    """
+    if cword < 2 or not cwords[cword - 2].startswith('-'):
+        return
+    for opt in opts:
+        if opt.help == optparse.SUPPRESS_HELP:
+            continue
+        for o in str(opt).split('/'):
+            if cwords[cword - 2].split('=')[0] == o:
+                if not opt.metavar or any(
+                        x in ('path', 'file', 'dir')
+                        for x in opt.metavar.split('/')):
+                    return opt.metavar
+
+
+def auto_complete_paths(current, completion_type):
+    """If ``completion_type`` is ``file`` or ``path``, list all regular files
+    and directories starting with ``current``; otherwise only list directories
+    starting with ``current``.
+
+    :param current: The word to be completed
+    :param completion_type: path completion type (``file``, ``path`` or ``dir``)
+    :return: A generator of regular files and/or directories
+    """
+    directory, filename = os.path.split(current)
+    current_path = os.path.abspath(directory)
+    # Don't complete paths if they can't be accessed
+    if not os.access(current_path, os.R_OK):
+        return
+    filename = os.path.normcase(filename)
+    # list all files that start with ``filename``
+    file_list = (x for x in os.listdir(current_path)
+                 if os.path.normcase(x).startswith(filename))
+    for f in file_list:
+        opt = os.path.join(current_path, f)
+        comp_file = os.path.normcase(os.path.join(directory, f))
+        # complete regular files when there is not ``<dir>`` after the option
+        # complete directories when there is ``<file>``, ``<path>`` or
+        # ``<dir>`` after the option
+        if completion_type != 'dir' and os.path.isfile(opt):
+            yield comp_file
+        elif os.path.isdir(opt):
+            yield os.path.join(comp_file, '')
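The completion protocol is easiest to see end to end: the script installed by `pip completion` exports the three variables below and re-invokes pip with them set. A hedged simulation (the values are made up for the demo):

```python
# Illustrative sketch, not part of the patch. Note that autocomplete()
# prints its candidates and then calls sys.exit(1), so nothing after
# it runs; that exit status is what the shell-side function expects.
import os
from pip._internal.cli.autocompletion import autocomplete

os.environ["PIP_AUTO_COMPLETE"] = "1"
os.environ["COMP_WORDS"] = "pip inst"  # the partial command line
os.environ["COMP_CWORD"] = "1"         # index of the word being completed

autocomplete()  # prints "install" (and any other matching subcommands)
```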
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/base_command.py b/lib/python3.7/site-packages/pip/_internal/cli/base_command.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6108c96eb5bf6c896437cb62771759d1593be7d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/cli/base_command.py
@@ -0,0 +1,340 @@
+"""Base Command class, and related routines"""
+from __future__ import absolute_import, print_function
+
+import logging
+import logging.config
+import optparse
+import os
+import platform
+import sys
+import traceback
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.parser import (
+    ConfigOptionParser, UpdatingDefaultsHelpFormatter,
+)
+from pip._internal.cli.status_codes import (
+    ERROR, PREVIOUS_BUILD_DIR_ERROR, SUCCESS, UNKNOWN_ERROR,
+    VIRTUALENV_NOT_FOUND,
+)
+from pip._internal.download import PipSession
+from pip._internal.exceptions import (
+    BadCommand, CommandError, InstallationError, PreviousBuildDirError,
+    UninstallationError,
+)
+from pip._internal.index import PackageFinder
+from pip._internal.locations import running_under_virtualenv
+from pip._internal.req.constructors import (
+    install_req_from_editable, install_req_from_line,
+)
+from pip._internal.req.req_file import parse_requirements
+from pip._internal.utils.deprecation import deprecated
+from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging
+from pip._internal.utils.misc import (
+    get_prog, normalize_path, redact_password_from_url,
+)
+from pip._internal.utils.outdated import pip_version_check
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Optional, List, Tuple, Any
+    from optparse import Values
+    from pip._internal.cache import WheelCache
+    from pip._internal.req.req_set import RequirementSet
+
+__all__ = ['Command']
+
+logger = logging.getLogger(__name__)
+
+
+class Command(object):
+    name = None  # type: Optional[str]
+    usage = None  # type: Optional[str]
+    ignore_require_venv = False  # type: bool
+
+    def __init__(self, isolated=False):
+        # type: (bool) -> None
+        parser_kw = {
+            'usage': self.usage,
+            'prog': '%s %s' % (get_prog(), self.name),
+            'formatter': UpdatingDefaultsHelpFormatter(),
+            'add_help_option': False,
+            'name': self.name,
+            'description': self.__doc__,
+            'isolated': isolated,
+        }
+
+        self.parser = ConfigOptionParser(**parser_kw)
+
+        # Commands should add options to this option group
+        optgroup_name = '%s Options' % self.name.capitalize()
+        self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
+
+        # Add the general options
+        gen_opts = cmdoptions.make_option_group(
+            cmdoptions.general_group,
+            self.parser,
+        )
+        self.parser.add_option_group(gen_opts)
+
+    def run(self, options, args):
+        # type: (Values, List[Any]) -> Any
+        raise NotImplementedError
+
+    def _build_session(self, options, retries=None, timeout=None):
+        # type: (Values, Optional[int], Optional[int]) -> PipSession
+        session = PipSession(
+            cache=(
+                normalize_path(os.path.join(options.cache_dir, "http"))
+                if options.cache_dir else None
+            ),
+            retries=retries if retries is not None else options.retries,
+            insecure_hosts=options.trusted_hosts,
+        )
+
+        # Handle custom ca-bundles from the user
+        if options.cert:
+            session.verify = options.cert
+
+        # Handle SSL client certificate
+        if options.client_cert:
+            session.cert = options.client_cert
+
+        # Handle timeouts
+        if options.timeout or timeout:
+            session.timeout = (
+                timeout if timeout is not None else options.timeout
+            )
+
+        # Handle configured proxies
+        if options.proxy:
+            session.proxies = {
+                "http": options.proxy,
+                "https": options.proxy,
+            }
+
+        # Determine if we can prompt the user for authentication or not
+        session.auth.prompting = not options.no_input
+
+        return session
+
+    def parse_args(self, args):
+        # type: (List[str]) -> Tuple
+        # factored out for testability
+        return self.parser.parse_args(args)
+
+    def main(self, args):
+        # type: (List[str]) -> int
+        options, args = self.parse_args(args)
+
+        # Set verbosity so that it can be used elsewhere.
+        self.verbosity = options.verbose - options.quiet
+
+        level_number = setup_logging(
+            verbosity=self.verbosity,
+            no_color=options.no_color,
+            user_log_file=options.log,
+        )
+
+        if sys.version_info[:2] == (3, 4):
+            deprecated(
+                "Python 3.4 support has been deprecated. pip 19.1 will be the "
+                "last one supporting it. Please upgrade your Python as Python "
+                "3.4 won't be maintained after March 2019 (cf PEP 429).",
+                replacement=None,
+                gone_in='19.2',
+            )
+        elif sys.version_info[:2] == (2, 7):
+            message = (
+                "A future version of pip will drop support for Python 2.7."
+            )
+            if platform.python_implementation() == "CPython":
+                message = (
+                    "Python 2.7 will reach the end of its life on January "
+                    "1st, 2020. Please upgrade your Python as Python 2.7 "
+                    "won't be maintained after that date. "
+                ) + message
+            deprecated(message, replacement=None, gone_in=None)
+
+        # TODO: Try to get these passing down from the command?
+        #       without resorting to os.environ to hold these.
+        #       This also affects isolated builds and it should.
+
+        if options.no_input:
+            os.environ['PIP_NO_INPUT'] = '1'
+
+        if options.exists_action:
+            os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
+
+        if options.require_venv and not self.ignore_require_venv:
+            # If a venv is required check if it can really be found
+            if not running_under_virtualenv():
+                logger.critical(
+                    'Could not find an activated virtualenv (required).'
+                )
+                sys.exit(VIRTUALENV_NOT_FOUND)
+
+        try:
+            status = self.run(options, args)
+            # FIXME: all commands should return an exit status
+            # and when it is done, isinstance is not needed anymore
+            if isinstance(status, int):
+                return status
+        except PreviousBuildDirError as exc:
+            logger.critical(str(exc))
+            logger.debug('Exception information:', exc_info=True)
+
+            return PREVIOUS_BUILD_DIR_ERROR
+        except (InstallationError, UninstallationError, BadCommand) as exc:
+            logger.critical(str(exc))
+            logger.debug('Exception information:', exc_info=True)
+
+            return ERROR
+        except CommandError as exc:
+            logger.critical('%s', exc)
+            logger.debug('Exception information:', exc_info=True)
+
+            return ERROR
+        except BrokenStdoutLoggingError:
+            # Bypass our logger and write any remaining messages to stderr
+            # because stdout no longer works.
+            print('ERROR: Pipe to stdout was broken', file=sys.stderr)
+            if level_number <= logging.DEBUG:
+                traceback.print_exc(file=sys.stderr)
+
+            return ERROR
+        except KeyboardInterrupt:
+            logger.critical('Operation cancelled by user')
+            logger.debug('Exception information:', exc_info=True)
+
+            return ERROR
+        except BaseException:
+            logger.critical('Exception:', exc_info=True)
+
+            return UNKNOWN_ERROR
+        finally:
+            allow_version_check = (
+                # Does this command have the index_group options?
+                hasattr(options, "no_index") and
+                # Is this command allowed to perform this check?
+                not (options.disable_pip_version_check or options.no_index)
+            )
+            # Check if we're using the latest version of pip available
+            if allow_version_check:
+                session = self._build_session(
+                    options,
+                    retries=0,
+                    timeout=min(5, options.timeout)
+                )
+                with session:
+                    pip_version_check(session, options)
+
+            # Shutdown the logging module
+            logging.shutdown()
+
+        return SUCCESS
+
+
+class RequirementCommand(Command):
+
+    @staticmethod
+    def populate_requirement_set(requirement_set,  # type: RequirementSet
+                                 args,             # type: List[str]
+                                 options,          # type: Values
+                                 finder,           # type: PackageFinder
+                                 session,          # type: PipSession
+                                 name,             # type: str
+                                 wheel_cache       # type: Optional[WheelCache]
+                                 ):
+        # type: (...) -> None
+        """
+        Marshal cmd line args into a requirement set.
+        """
+        # NOTE: As a side-effect, options.require_hashes and
+        #       requirement_set.require_hashes may be updated
+
+        for filename in options.constraints:
+            for req_to_add in parse_requirements(
+                    filename,
+                    constraint=True, finder=finder, options=options,
+                    session=session, wheel_cache=wheel_cache):
+                req_to_add.is_direct = True
+                requirement_set.add_requirement(req_to_add)
+
+        for req in args:
+            req_to_add = install_req_from_line(
+                req, None, isolated=options.isolated_mode,
+                use_pep517=options.use_pep517,
+                wheel_cache=wheel_cache
+            )
+            req_to_add.is_direct = True
+            requirement_set.add_requirement(req_to_add)
+
+        for req in options.editables:
+            req_to_add = install_req_from_editable(
+                req,
+                isolated=options.isolated_mode,
+                use_pep517=options.use_pep517,
+                wheel_cache=wheel_cache
+            )
+            req_to_add.is_direct = True
+            requirement_set.add_requirement(req_to_add)
+
+        for filename in options.requirements:
+            for req_to_add in parse_requirements(
+                    filename,
+                    finder=finder, options=options, session=session,
+                    wheel_cache=wheel_cache,
+                    use_pep517=options.use_pep517):
+                req_to_add.is_direct = True
+                requirement_set.add_requirement(req_to_add)
+        # If --require-hashes was a line in a requirements file, tell
+        # RequirementSet about it:
+        requirement_set.require_hashes = options.require_hashes
+
+        if not (args or options.editables or options.requirements):
+            opts = {'name': name}
+            if options.find_links:
+                raise CommandError(
+                    'You must give at least one requirement to %(name)s '
+                    '(maybe you meant "pip %(name)s %(links)s"?)' %
+                    dict(opts, links=' '.join(options.find_links)))
+            else:
+                raise CommandError(
+                    'You must give at least one requirement to %(name)s '
+                    '(see "pip help %(name)s")' % opts)
+
+    def _build_package_finder(
+        self,
+        options,               # type: Values
+        session,               # type: PipSession
+        platform=None,         # type: Optional[str]
+        python_versions=None,  # type: Optional[List[str]]
+        abi=None,              # type: Optional[str]
+        implementation=None    # type: Optional[str]
+    ):
+        # type: (...) -> PackageFinder
+        """
+        Create a package finder appropriate to this requirement command.
+        """
+        index_urls = [options.index_url] + options.extra_index_urls
+        if options.no_index:
+            logger.debug(
+                'Ignoring indexes: %s',
+                ','.join(redact_password_from_url(url) for url in index_urls),
+            )
+            index_urls = []
+
+        return PackageFinder(
+            find_links=options.find_links,
+            format_control=options.format_control,
+            index_urls=index_urls,
+            trusted_hosts=options.trusted_hosts,
+            allow_all_prereleases=options.pre,
+            session=session,
+            platform=platform,
+            versions=python_versions,
+            abi=abi,
+            implementation=implementation,
+            prefer_binary=options.prefer_binary,
+        )
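For orientation, a minimal hypothetical subclass showing the `Command` contract (class attributes plus a `run()` that returns a status code); it is not registered in `commands_dict` and exists only to illustrate the flow through `main()`:

```python
# Illustrative sketch, not part of the patch.
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import SUCCESS


class HelloCommand(Command):
    """Print a greeting (demo only)."""
    name = "hello"
    usage = "%prog [options]"
    ignore_require_venv = True  # don't insist on a virtualenv

    def run(self, options, args):
        print("hello from a pip-style command")
        return SUCCESS


# main() parses options, configures logging from -v/-q/--log, runs the
# command, and maps any raised pip exceptions to the status codes above.
status = HelloCommand().main([])
assert status == SUCCESS
```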
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/cmdoptions.py b/lib/python3.7/site-packages/pip/_internal/cli/cmdoptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..a09e38f708282379e756701243c24eb3e70b14e2
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/cli/cmdoptions.py
@@ -0,0 +1,809 @@
+"""
+shared options and groups
+
+The principle here is to define options once, but *not* instantiate them
+globally. One reason is that options with action='append' can carry state
+between parses. pip parses general options twice internally, and shouldn't
+pass on state. To be consistent, all options will follow this design.
+
+"""
+from __future__ import absolute_import
+
+import textwrap
+import warnings
+from distutils.util import strtobool
+from functools import partial
+from optparse import SUPPRESS_HELP, Option, OptionGroup
+
+from pip._internal.exceptions import CommandError
+from pip._internal.locations import USER_CACHE_DIR, src_prefix
+from pip._internal.models.format_control import FormatControl
+from pip._internal.models.index import PyPI
+from pip._internal.utils.hashes import STRONG_HASHES
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.utils.ui import BAR_TYPES
+
+if MYPY_CHECK_RUNNING:
+    from typing import Any, Callable, Dict, Optional
+    from optparse import OptionParser, Values
+    from pip._internal.cli.parser import ConfigOptionParser
+
+
+def raise_option_error(parser, option, msg):
+    """
+    Raise an option parsing error using parser.error().
+
+    Args:
+      parser: an OptionParser instance.
+      option: an Option instance.
+      msg: the error text.
+    """
+    msg = '{} error: {}'.format(option, msg)
+    msg = textwrap.fill(' '.join(msg.split()))
+    parser.error(msg)
+
+
+def make_option_group(group, parser):
+    # type: (Dict[str, Any], ConfigOptionParser) -> OptionGroup
+    """
+    Return an OptionGroup object
+    group  -- assumed to be dict with 'name' and 'options' keys
+    parser -- an optparse Parser
+    """
+    option_group = OptionGroup(parser, group['name'])
+    for option in group['options']:
+        option_group.add_option(option())
+    return option_group
+
+
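+# Usage sketch: how a command consumes these option factories. For brevity
+# a plain optparse.OptionParser stands in for pip's ConfigOptionParser here,
+# and ``general_group`` is the dict defined near the bottom of this module:
+#
+#     import optparse
+#     from pip._internal.cli import cmdoptions
+#
+#     parser = optparse.OptionParser()
+#     group = cmdoptions.make_option_group(cmdoptions.general_group, parser)
+#     parser.add_option_group(group)
+#     opts, _ = parser.parse_args(["-q", "--retries", "3"])
+#     assert opts.quiet == 1 and opts.retries == 3
+#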
+def check_install_build_global(options, check_options=None):
+    # type: (Values, Optional[Values]) -> None
+    """Disable wheels if per-setup.py call options are set.
+
+    :param options: The OptionParser options to update.
+    :param check_options: The options to check; if not supplied, defaults to
+        options.
+    """
+    if check_options is None:
+        check_options = options
+
+    def getname(n):
+        return getattr(check_options, n, None)
+    names = ["build_options", "global_options", "install_options"]
+    if any(map(getname, names)):
+        control = options.format_control
+        control.disallow_binaries()
+        warnings.warn(
+            'Disabling all use of wheels due to the use of --build-options '
+            '/ --global-options / --install-options.', stacklevel=2,
+        )
+
+
+def check_dist_restriction(options, check_target=False):
+    # type: (Values, bool) -> None
+    """Function for determining if custom platform options are allowed.
+
+    :param options: The OptionParser options.
+    :param check_target: Whether or not to check if --target is being used.
+    """
+    dist_restriction_set = any([
+        options.python_version,
+        options.platform,
+        options.abi,
+        options.implementation,
+    ])
+
+    binary_only = FormatControl(set(), {':all:'})
+    sdist_dependencies_allowed = (
+        options.format_control != binary_only and
+        not options.ignore_dependencies
+    )
+
+    # Installations or downloads using dist restrictions must not combine
+    # source distributions and dist-specific wheels, as they are not
+    # guaranteed to be locally compatible.
+    if dist_restriction_set and sdist_dependencies_allowed:
+        raise CommandError(
+            "When restricting platform and interpreter constraints using "
+            "--python-version, --platform, --abi, or --implementation, "
+            "either --no-deps must be set, or --only-binary=:all: must be "
+            "set and --no-binary must not be set (or must be set to "
+            ":none:)."
+        )
+
+    if check_target:
+        if dist_restriction_set and not options.target_dir:
+            raise CommandError(
+                "Can not use any platform or abi specific options unless "
+                "installing via '--target'"
+            )
+
+
+###########
+# options #
+###########
+
+help_ = partial(
+    Option,
+    '-h', '--help',
+    dest='help',
+    action='help',
+    help='Show help.',
+)  # type: Callable[..., Option]
+
+isolated_mode = partial(
+    Option,
+    "--isolated",
+    dest="isolated_mode",
+    action="store_true",
+    default=False,
+    help=(
+        "Run pip in an isolated mode, ignoring environment variables and user "
+        "configuration."
+    ),
+)  # type: Callable[..., Option]
+
+require_virtualenv = partial(
+    Option,
+    # Run only if inside a virtualenv, bail if not.
+    '--require-virtualenv', '--require-venv',
+    dest='require_venv',
+    action='store_true',
+    default=False,
+    help=SUPPRESS_HELP
+)  # type: Callable[..., Option]
+
+verbose = partial(
+    Option,
+    '-v', '--verbose',
+    dest='verbose',
+    action='count',
+    default=0,
+    help='Give more output. Option is additive, and can be used up to 3 times.'
+)  # type: Callable[..., Option]
+
+no_color = partial(
+    Option,
+    '--no-color',
+    dest='no_color',
+    action='store_true',
+    default=False,
+    help="Suppress colored output",
+)  # type: Callable[..., Option]
+
+version = partial(
+    Option,
+    '-V', '--version',
+    dest='version',
+    action='store_true',
+    help='Show version and exit.',
+)  # type: Callable[..., Option]
+
+quiet = partial(
+    Option,
+    '-q', '--quiet',
+    dest='quiet',
+    action='count',
+    default=0,
+    help=(
+        'Give less output. Option is additive, and can be used up to 3'
+        ' times (corresponding to WARNING, ERROR, and CRITICAL logging'
+        ' levels).'
+    ),
+)  # type: Callable[..., Option]
+
+progress_bar = partial(
+    Option,
+    '--progress-bar',
+    dest='progress_bar',
+    type='choice',
+    choices=list(BAR_TYPES.keys()),
+    default='on',
+    help=(
+        'Specify type of progress to be displayed [' +
+        '|'.join(BAR_TYPES.keys()) + '] (default: %default)'
+    ),
+)  # type: Callable[..., Option]
+
+log = partial(
+    Option,
+    "--log", "--log-file", "--local-log",
+    dest="log",
+    metavar="path",
+    help="Path to a verbose appending log."
+)  # type: Callable[..., Option]
+
+no_input = partial(
+    Option,
+    # Don't ask for input
+    '--no-input',
+    dest='no_input',
+    action='store_true',
+    default=False,
+    help=SUPPRESS_HELP
+)  # type: Callable[..., Option]
+
+proxy = partial(
+    Option,
+    '--proxy',
+    dest='proxy',
+    type='str',
+    default='',
+    help="Specify a proxy in the form [user:passwd@]proxy.server:port."
+)  # type: Callable[..., Option]
+
+retries = partial(
+    Option,
+    '--retries',
+    dest='retries',
+    type='int',
+    default=5,
+    help="Maximum number of retries each connection should attempt "
+         "(default %default times).",
+)  # type: Callable[..., Option]
+
+timeout = partial(
+    Option,
+    '--timeout', '--default-timeout',
+    metavar='sec',
+    dest='timeout',
+    type='float',
+    default=15,
+    help='Set the socket timeout (default %default seconds).',
+)  # type: Callable[..., Option]
+
+skip_requirements_regex = partial(
+    Option,
+    # A regex to be used to skip requirements
+    '--skip-requirements-regex',
+    dest='skip_requirements_regex',
+    type='str',
+    default='',
+    help=SUPPRESS_HELP,
+)  # type: Callable[..., Option]
+
+
+def exists_action():
+    # type: () -> Option
+    return Option(
+        # Option for when a path already exists
+        '--exists-action',
+        dest='exists_action',
+        type='choice',
+        choices=['s', 'i', 'w', 'b', 'a'],
+        default=[],
+        action='append',
+        metavar='action',
+        help="Default action when a path already exists: "
+             "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.",
+    )
+
+
+cert = partial(
+    Option,
+    '--cert',
+    dest='cert',
+    type='str',
+    metavar='path',
+    help="Path to alternate CA bundle.",
+)  # type: Callable[..., Option]
+
+client_cert = partial(
+    Option,
+    '--client-cert',
+    dest='client_cert',
+    type='str',
+    default=None,
+    metavar='path',
+    help="Path to SSL client certificate, a single file containing the "
+         "private key and the certificate in PEM format.",
+)  # type: Callable[..., Option]
+
+index_url = partial(
+    Option,
+    '-i', '--index-url', '--pypi-url',
+    dest='index_url',
+    metavar='URL',
+    default=PyPI.simple_url,
+    help="Base URL of Python Package Index (default %default). "
+         "This should point to a repository compliant with PEP 503 "
+         "(the simple repository API) or a local directory laid out "
+         "in the same format.",
+)  # type: Callable[..., Option]
+
+
+def extra_index_url():
+    return Option(
+        '--extra-index-url',
+        dest='extra_index_urls',
+        metavar='URL',
+        action='append',
+        default=[],
+        help="Extra URLs of package indexes to use in addition to "
+             "--index-url. Should follow the same rules as "
+             "--index-url.",
+    )
+
+
+no_index = partial(
+    Option,
+    '--no-index',
+    dest='no_index',
+    action='store_true',
+    default=False,
+    help='Ignore package index (only looking at --find-links URLs instead).',
+)  # type: Callable[..., Option]
+
+
+def find_links():
+    # type: () -> Option
+    return Option(
+        '-f', '--find-links',
+        dest='find_links',
+        action='append',
+        default=[],
+        metavar='url',
+        help="If a url or path to an html file, then parse for links to "
+             "archives. If a local path or file:// url that's a directory, "
+             "then look for archives in the directory listing.",
+    )
+
+
+def trusted_host():
+    # type: () -> Option
+    return Option(
+        "--trusted-host",
+        dest="trusted_hosts",
+        action="append",
+        metavar="HOSTNAME",
+        default=[],
+        help="Mark this host as trusted, even though it does not have valid "
+             "or any HTTPS.",
+    )
+
+
+def constraints():
+    # type: () -> Option
+    return Option(
+        '-c', '--constraint',
+        dest='constraints',
+        action='append',
+        default=[],
+        metavar='file',
+        help='Constrain versions using the given constraints file. '
+        'This option can be used multiple times.'
+    )
+
+
+def requirements():
+    # type: () -> Option
+    return Option(
+        '-r', '--requirement',
+        dest='requirements',
+        action='append',
+        default=[],
+        metavar='file',
+        help='Install from the given requirements file. '
+        'This option can be used multiple times.'
+    )
+
+
+def editable():
+    # type: () -> Option
+    return Option(
+        '-e', '--editable',
+        dest='editables',
+        action='append',
+        default=[],
+        metavar='path/url',
+        help=('Install a project in editable mode (i.e. setuptools '
+              '"develop mode") from a local project path or a VCS url.'),
+    )
+
+
+src = partial(
+    Option,
+    '--src', '--source', '--source-dir', '--source-directory',
+    dest='src_dir',
+    metavar='dir',
+    default=src_prefix,
+    help='Directory to check out editable projects into. '
+    'The default in a virtualenv is "<venv path>/src". '
+    'The default for global installs is "<current dir>/src".'
+)  # type: Callable[..., Option]
+
+
+def _get_format_control(values, option):
+    # type: (Values, Option) -> Any
+    """Get a format_control object."""
+    return getattr(values, option.dest)
+
+
+def _handle_no_binary(option, opt_str, value, parser):
+    # type: (Option, str, str, OptionParser) -> None
+    existing = _get_format_control(parser.values, option)
+    FormatControl.handle_mutual_excludes(
+        value, existing.no_binary, existing.only_binary,
+    )
+
+
+def _handle_only_binary(option, opt_str, value, parser):
+    # type: (Option, str, str, OptionParser) -> None
+    existing = _get_format_control(parser.values, option)
+    FormatControl.handle_mutual_excludes(
+        value, existing.only_binary, existing.no_binary,
+    )
+
+
+def no_binary():
+    # type: () -> Option
+    format_control = FormatControl(set(), set())
+    return Option(
+        "--no-binary", dest="format_control", action="callback",
+        callback=_handle_no_binary, type="str",
+        default=format_control,
+        help="Do not use binary packages. Can be supplied multiple times, and "
+             "each time adds to the existing value. Accepts either :all: to "
+             "disable all binary packages, :none: to empty the set, or one or "
+             "more package names with commas between them. Note that some "
+             "packages are tricky to compile and may fail to install when "
+             "this option is used on them.",
+    )
+
+
+def only_binary():
+    # type: () -> Option
+    format_control = FormatControl(set(), set())
+    return Option(
+        "--only-binary", dest="format_control", action="callback",
+        callback=_handle_only_binary, type="str",
+        default=format_control,
+        help="Do not use source packages. Can be supplied multiple times, and "
+             "each time adds to the existing value. Accepts either :all: to "
+             "disable all source packages, :none: to empty the set, or one or "
+             "more package names with commas between them. Packages without "
+             "binary distributions will fail to install when this option is "
+             "used on them.",
+    )
+
+
+platform = partial(
+    Option,
+    '--platform',
+    dest='platform',
+    metavar='platform',
+    default=None,
+    help=("Only use wheels compatible with <platform>. "
+          "Defaults to the platform of the running system."),
+)  # type: Callable[..., Option]
+
+
+python_version = partial(
+    Option,
+    '--python-version',
+    dest='python_version',
+    metavar='python_version',
+    default=None,
+    help=("Only use wheels compatible with Python "
+          "interpreter version <version>. If not specified, then the "
+          "current system interpreter minor version is used. A major "
+          "version (e.g. '2') can be specified to match all "
+          "minor revs of that major version.  A minor version "
+          "(e.g. '34') can also be specified."),
+)  # type: Callable[..., Option]
+
+
+implementation = partial(
+    Option,
+    '--implementation',
+    dest='implementation',
+    metavar='implementation',
+    default=None,
+    help=("Only use wheels compatible with Python "
+          "implementation <implementation>, e.g. 'pp', 'jy', 'cp', "
+          " or 'ip'. If not specified, then the current "
+          "interpreter implementation is used.  Use 'py' to force "
+          "implementation-agnostic wheels."),
+)  # type: Callable[..., Option]
+
+
+abi = partial(
+    Option,
+    '--abi',
+    dest='abi',
+    metavar='abi',
+    default=None,
+    help=("Only use wheels compatible with Python "
+          "abi <abi>, e.g. 'pypy_41'.  If not specified, then the "
+          "current interpreter abi tag is used.  Generally "
+          "you will need to specify --implementation, "
+          "--platform, and --python-version when using "
+          "this option."),
+)  # type: Callable[..., Option]
+
+
+def prefer_binary():
+    # type: () -> Option
+    return Option(
+        "--prefer-binary",
+        dest="prefer_binary",
+        action="store_true",
+        default=False,
+        help="Prefer older binary packages over newer source packages."
+    )
+
+
+cache_dir = partial(
+    Option,
+    "--cache-dir",
+    dest="cache_dir",
+    default=USER_CACHE_DIR,
+    metavar="dir",
+    help="Store the cache data in <dir>."
+)  # type: Callable[..., Option]
+
+
+def no_cache_dir_callback(option, opt, value, parser):
+    """
+    Process a value provided for the --no-cache-dir option.
+
+    This is an optparse.Option callback for the --no-cache-dir option.
+    """
+    # The value argument will be None if --no-cache-dir is passed via the
+    # command-line, since the option doesn't accept arguments.  However,
+    # the value can be non-None if the option is triggered e.g. by an
+    # environment variable, like PIP_NO_CACHE_DIR=true.
+    if value is not None:
+        # Then parse the string value to get argument error-checking.
+        try:
+            strtobool(value)
+        except ValueError as exc:
+            raise_option_error(parser, option=option, msg=str(exc))
+
+    # Originally, setting PIP_NO_CACHE_DIR to a value that strtobool()
+    # converted to 0 (like "false" or "no") caused cache_dir to be disabled
+    # rather than enabled (even though logic suggests the latter).  Thus, we disable
+    # the cache directory not just on values that parse to True, but (for
+    # backwards compatibility reasons) also on values that parse to False.
+    # In other words, always set it to False if the option is provided in
+    # some (valid) form.
+    parser.values.cache_dir = False
+
+
+no_cache = partial(
+    Option,
+    "--no-cache-dir",
+    dest="cache_dir",
+    action="callback",
+    callback=no_cache_dir_callback,
+    help="Disable the cache.",
+)  # type: Callable[..., Option]
+
+no_deps = partial(
+    Option,
+    '--no-deps', '--no-dependencies',
+    dest='ignore_dependencies',
+    action='store_true',
+    default=False,
+    help="Don't install package dependencies.",
+)  # type: Callable[..., Option]
+
+build_dir = partial(
+    Option,
+    '-b', '--build', '--build-dir', '--build-directory',
+    dest='build_dir',
+    metavar='dir',
+    help='Directory to unpack packages into and build in. Note that '
+         'an initial build still takes place in a temporary directory. '
+         'The location of temporary directories can be controlled by setting '
+         'the TMPDIR environment variable (TEMP on Windows) appropriately. '
+         'When passed, build directories are not cleaned in case of failures.'
+)  # type: Callable[..., Option]
+
+ignore_requires_python = partial(
+    Option,
+    '--ignore-requires-python',
+    dest='ignore_requires_python',
+    action='store_true',
+    help='Ignore the Requires-Python information.'
+)  # type: Callable[..., Option]
+
+no_build_isolation = partial(
+    Option,
+    '--no-build-isolation',
+    dest='build_isolation',
+    action='store_false',
+    default=True,
+    help='Disable isolation when building a modern source distribution. '
+         'Build dependencies specified by PEP 518 must be already installed '
+         'if this option is used.'
+)  # type: Callable[..., Option]
+
+
+def no_use_pep517_callback(option, opt, value, parser):
+    """
+    Process a value provided for the --no-use-pep517 option.
+
+    This is an optparse.Option callback for the no_use_pep517 option.
+    """
+    # Since --no-use-pep517 doesn't accept arguments, the value argument
+    # will be None if --no-use-pep517 is passed via the command-line.
+    # However, the value can be non-None if the option is triggered e.g.
+    # by an environment variable, for example "PIP_NO_USE_PEP517=true".
+    if value is not None:
+        msg = """A value was passed for --no-use-pep517,
+        probably using either the PIP_NO_USE_PEP517 environment variable
+        or the "no-use-pep517" config file option. Use an appropriate value
+        of the PIP_USE_PEP517 environment variable or the "use-pep517"
+        config file option instead.
+        """
+        raise_option_error(parser, option=option, msg=msg)
+
+    # Otherwise, --no-use-pep517 was passed via the command-line.
+    parser.values.use_pep517 = False
+
+
+use_pep517 = partial(
+    Option,
+    '--use-pep517',
+    dest='use_pep517',
+    action='store_true',
+    default=None,
+    help='Use PEP 517 for building source distributions '
+         '(use --no-use-pep517 to force legacy behaviour).'
+)  # type: Any
+
+no_use_pep517 = partial(
+    Option,
+    '--no-use-pep517',
+    dest='use_pep517',
+    action='callback',
+    callback=no_use_pep517_callback,
+    default=None,
+    help=SUPPRESS_HELP
+)  # type: Any
+
+install_options = partial(
+    Option,
+    '--install-option',
+    dest='install_options',
+    action='append',
+    metavar='options',
+    help="Extra arguments to be supplied to the setup.py install "
+         "command (use like --install-option=\"--install-scripts=/usr/local/"
+         "bin\"). Use multiple --install-option options to pass multiple "
+         "options to setup.py install. If you are using an option with a "
+         "directory path, be sure to use absolute path.",
+)  # type: Callable[..., Option]
+
+global_options = partial(
+    Option,
+    '--global-option',
+    dest='global_options',
+    action='append',
+    metavar='options',
+    help="Extra global options to be supplied to the setup.py "
+         "call before the install command.",
+)  # type: Callable[..., Option]
+
+no_clean = partial(
+    Option,
+    '--no-clean',
+    action='store_true',
+    default=False,
+    help="Don't clean up build directories."
+)  # type: Callable[..., Option]
+
+pre = partial(
+    Option,
+    '--pre',
+    action='store_true',
+    default=False,
+    help="Include pre-release and development versions. By default, "
+         "pip only finds stable versions.",
+)  # type: Callable[..., Option]
+
+disable_pip_version_check = partial(
+    Option,
+    "--disable-pip-version-check",
+    dest="disable_pip_version_check",
+    action="store_true",
+    default=False,
+    help="Don't periodically check PyPI to determine whether a new version "
+         "of pip is available for download. Implied with --no-index.",
+)  # type: Callable[..., Option]
+
+
+# Deprecated; remove later.
+always_unzip = partial(
+    Option,
+    '-Z', '--always-unzip',
+    dest='always_unzip',
+    action='store_true',
+    help=SUPPRESS_HELP,
+)  # type: Callable[..., Option]
+
+
+def _merge_hash(option, opt_str, value, parser):
+    # type: (Option, str, str, OptionParser) -> None
+    """Given a value spelled "algo:digest", append the digest to a list
+    pointed to in a dict by the algo name."""
+    if not parser.values.hashes:
+        parser.values.hashes = {}
+    try:
+        algo, digest = value.split(':', 1)
+    except ValueError:
+        parser.error('Arguments to %s must be a hash name '
+                     'followed by a value, like --hash=sha256:abcde...' %
+                     opt_str)
+    if algo not in STRONG_HASHES:
+        parser.error('Allowed hash algorithms for %s are %s.' %
+                     (opt_str, ', '.join(STRONG_HASHES)))
+    parser.values.hashes.setdefault(algo, []).append(digest)
+
+
+hash = partial(
+    Option,
+    '--hash',
+    # Hash values eventually end up in InstallRequirement.hashes due to
+    # __dict__ copying in process_line().
+    dest='hashes',
+    action='callback',
+    callback=_merge_hash,
+    type='string',
+    help="Verify that the package's archive matches this "
+         'hash before installing. Example: --hash=sha256:abcdef...',
+)  # type: Callable[..., Option]
+
+
+require_hashes = partial(
+    Option,
+    '--require-hashes',
+    dest='require_hashes',
+    action='store_true',
+    default=False,
+    help='Require a hash to check each requirement against, for '
+         'repeatable installs. This option is implied when any package in a '
+         'requirements file has a --hash option.',
+)  # type: Callable[..., Option]
+
+
+##########
+# groups #
+##########
+
+general_group = {
+    'name': 'General Options',
+    'options': [
+        help_,
+        isolated_mode,
+        require_virtualenv,
+        verbose,
+        version,
+        quiet,
+        log,
+        no_input,
+        proxy,
+        retries,
+        timeout,
+        skip_requirements_regex,
+        exists_action,
+        trusted_host,
+        cert,
+        client_cert,
+        cache_dir,
+        no_cache,
+        disable_pip_version_check,
+        no_color,
+    ]
+}  # type: Dict[str, Any]
+
+index_group = {
+    'name': 'Package Index Options',
+    'options': [
+        index_url,
+        extra_index_url,
+        no_index,
+        find_links,
+    ]
+}  # type: Dict[str, Any]
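
The option helpers above are functools.partial wrappers that stamp out a fresh optparse Option per command, while --no-binary/--only-binary share one mutable FormatControl default that their callbacks edit in place. A minimal standalone sketch of that callback pattern follows; the FormatControl class here is a simple stand-in for pip's, and the :all:/:none: special cases are ignored.

import optparse
from functools import partial

class FormatControl(object):
    # Stand-in for pip's FormatControl, holding the two exclusion sets.
    def __init__(self):
        self.no_binary = set()
        self.only_binary = set()

def _handle(target, other, option, opt_str, value, parser):
    # Add the named packages to one set and drop them from the other,
    # mirroring the mutual exclusion done by handle_mutual_excludes().
    fc = getattr(parser.values, option.dest)
    names = set(value.split(','))
    getattr(fc, target).update(names)
    getattr(fc, other).difference_update(names)

no_binary = partial(
    optparse.Option, '--no-binary', dest='format_control',
    action='callback', type='str', default=FormatControl(),
    callback=partial(_handle, 'no_binary', 'only_binary'),
)

parser = optparse.OptionParser()
parser.add_option(no_binary())
opts, _ = parser.parse_args(['--no-binary', 'lxml,PyYAML'])
print(sorted(opts.format_control.no_binary))  # ['PyYAML', 'lxml']

Sharing one default object is what lets the paired options exclude each other: each callback adds names to its own set and removes them from the opposite one.
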
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/main_parser.py b/lib/python3.7/site-packages/pip/_internal/cli/main_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..767f35d50ee46f2cd460b10ce4bf7c9c2c6eb4fa
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/cli/main_parser.py
@@ -0,0 +1,104 @@
+"""A single place for constructing and exposing the main parser
+"""
+
+import os
+import sys
+
+from pip import __version__
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.parser import (
+    ConfigOptionParser, UpdatingDefaultsHelpFormatter,
+)
+from pip._internal.commands import (
+    commands_dict, get_similar_commands, get_summaries,
+)
+from pip._internal.exceptions import CommandError
+from pip._internal.utils.misc import get_prog
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Tuple, List
+
+
+__all__ = ["create_main_parser", "parse_command"]
+
+
+def create_main_parser():
+    # type: () -> ConfigOptionParser
+    """Creates and returns the main parser for pip's CLI
+    """
+
+    parser_kw = {
+        'usage': '\n%prog <command> [options]',
+        'add_help_option': False,
+        'formatter': UpdatingDefaultsHelpFormatter(),
+        'name': 'global',
+        'prog': get_prog(),
+    }
+
+    parser = ConfigOptionParser(**parser_kw)
+    parser.disable_interspersed_args()
+
+    pip_pkg_dir = os.path.abspath(os.path.join(
+        os.path.dirname(__file__), "..", "..",
+    ))
+    parser.version = 'pip %s from %s (python %s)' % (
+        __version__, pip_pkg_dir, sys.version[:3],
+    )
+
+    # add the general options
+    gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
+    parser.add_option_group(gen_opts)
+
+    # so the help formatter knows
+    parser.main = True  # type: ignore
+
+    # create command listing for description
+    command_summaries = get_summaries()
+    description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
+    parser.description = '\n'.join(description)
+
+    return parser
+
+
+def parse_command(args):
+    # type: (List[str]) -> Tuple[str, List[str]]
+    parser = create_main_parser()
+
+    # Note: parser calls disable_interspersed_args(), so the result of this
+    # call is to split the initial args into the general options before the
+    # subcommand and everything else.
+    # For example:
+    #  args: ['--timeout=5', 'install', '--user', 'INITools']
+    #  general_options: ['--timeout=5']
+    #  args_else: ['install', '--user', 'INITools']
+    general_options, args_else = parser.parse_args(args)
+
+    # --version
+    if general_options.version:
+        sys.stdout.write(parser.version)  # type: ignore
+        sys.stdout.write(os.linesep)
+        sys.exit()
+
+    # pip || pip help -> print_help()
+    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
+        parser.print_help()
+        sys.exit()
+
+    # the subcommand name
+    cmd_name = args_else[0]
+
+    if cmd_name not in commands_dict:
+        guess = get_similar_commands(cmd_name)
+
+        msg = ['unknown command "%s"' % cmd_name]
+        if guess:
+            msg.append('maybe you meant "%s"' % guess)
+
+        raise CommandError(' - '.join(msg))
+
+    # all the args without the subcommand
+    cmd_args = args[:]
+    cmd_args.remove(cmd_name)
+
+    return cmd_name, cmd_args
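
Because create_main_parser() calls disable_interspersed_args(), parse_command() can rely on optparse stopping at the first positional argument: general options before the subcommand are parsed, and everything from the subcommand onward is passed through verbatim. A minimal demonstration with a bare optparse parser, where --timeout stands in for pip's general options:

import optparse

parser = optparse.OptionParser()
parser.disable_interspersed_args()
parser.add_option('--timeout', type='int')

# Parsing stops at 'install'; '--user' is never interpreted here.
opts, rest = parser.parse_args(
    ['--timeout=5', 'install', '--user', 'INITools'])
print(opts.timeout)  # 5
print(rest)          # ['install', '--user', 'INITools']
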
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/parser.py b/lib/python3.7/site-packages/pip/_internal/cli/parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1eaac42042861c59441d4f13c856c84092720f5
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/cli/parser.py
@@ -0,0 +1,261 @@
+"""Base option parser setup"""
+from __future__ import absolute_import
+
+import logging
+import optparse
+import sys
+import textwrap
+from distutils.util import strtobool
+
+from pip._vendor.six import string_types
+
+from pip._internal.cli.status_codes import UNKNOWN_ERROR
+from pip._internal.configuration import Configuration, ConfigurationError
+from pip._internal.utils.compat import get_terminal_size
+
+logger = logging.getLogger(__name__)
+
+
+class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
+    """A prettier/less verbose help formatter for optparse."""
+
+    def __init__(self, *args, **kwargs):
+        # help position must be aligned with __init__.parseopts.description
+        kwargs['max_help_position'] = 30
+        kwargs['indent_increment'] = 1
+        kwargs['width'] = get_terminal_size()[0] - 2
+        optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
+
+    def format_option_strings(self, option):
+        return self._format_option_strings(option, ' <%s>', ', ')
+
+    def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
+        """
+        Return a comma-separated list of option strings and metavars.
+
+        :param option:  tuple of (short opt, long opt), e.g: ('-f', '--format')
+        :param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
+        :param optsep:  separator
+        """
+        opts = []
+
+        if option._short_opts:
+            opts.append(option._short_opts[0])
+        if option._long_opts:
+            opts.append(option._long_opts[0])
+        if len(opts) > 1:
+            opts.insert(1, optsep)
+
+        if option.takes_value():
+            metavar = option.metavar or option.dest.lower()
+            opts.append(mvarfmt % metavar.lower())
+
+        return ''.join(opts)
+
+    def format_heading(self, heading):
+        if heading == 'Options':
+            return ''
+        return heading + ':\n'
+
+    def format_usage(self, usage):
+        """
+        Ensure there is only one newline between usage and the first heading
+        if there is no description.
+        """
+        msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), "  ")
+        return msg
+
+    def format_description(self, description):
+        # leave full control over description to us
+        if description:
+            if hasattr(self.parser, 'main'):
+                label = 'Commands'
+            else:
+                label = 'Description'
+            # some doc strings have initial newlines, some don't
+            description = description.lstrip('\n')
+            # some doc strings have final newlines and spaces, some don't
+            description = description.rstrip()
+            # dedent, then reindent
+            description = self.indent_lines(textwrap.dedent(description), "  ")
+            description = '%s:\n%s\n' % (label, description)
+            return description
+        else:
+            return ''
+
+    def format_epilog(self, epilog):
+        # leave full control over epilog to us
+        if epilog:
+            return epilog
+        else:
+            return ''
+
+    def indent_lines(self, text, indent):
+        new_lines = [indent + line for line in text.split('\n')]
+        return "\n".join(new_lines)
+
+
+class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
+    """Custom help formatter for use in ConfigOptionParser.
+
+    This updates the defaults before expanding them, allowing
+    them to show up correctly in the help listing.
+    """
+
+    def expand_default(self, option):
+        if self.parser is not None:
+            self.parser._update_defaults(self.parser.defaults)
+        return optparse.IndentedHelpFormatter.expand_default(self, option)
+
+
+class CustomOptionParser(optparse.OptionParser):
+
+    def insert_option_group(self, idx, *args, **kwargs):
+        """Insert an OptionGroup at a given position."""
+        group = self.add_option_group(*args, **kwargs)
+
+        self.option_groups.pop()
+        self.option_groups.insert(idx, group)
+
+        return group
+
+    @property
+    def option_list_all(self):
+        """Get a list of all options, including those in option groups."""
+        res = self.option_list[:]
+        for i in self.option_groups:
+            res.extend(i.option_list)
+
+        return res
+
+
+class ConfigOptionParser(CustomOptionParser):
+    """Custom option parser which updates its defaults by checking the
+    configuration files and environment variables."""
+
+    def __init__(self, *args, **kwargs):
+        self.name = kwargs.pop('name')
+
+        isolated = kwargs.pop("isolated", False)
+        self.config = Configuration(isolated)
+
+        assert self.name
+        optparse.OptionParser.__init__(self, *args, **kwargs)
+
+    def check_default(self, option, key, val):
+        try:
+            return option.check_value(key, val)
+        except optparse.OptionValueError as exc:
+            print("An error occurred during configuration: %s" % exc)
+            sys.exit(3)
+
+    def _get_ordered_configuration_items(self):
+        # Configuration gives keys in an unordered manner. Order them.
+        override_order = ["global", self.name, ":env:"]
+
+        # Pool the options into different groups
+        section_items = {name: [] for name in override_order}
+        for section_key, val in self.config.items():
+            # ignore empty values
+            if not val:
+                logger.debug(
+                    "Ignoring configuration key '%s' as it's value is empty.",
+                    section_key
+                )
+                continue
+
+            section, key = section_key.split(".", 1)
+            if section in override_order:
+                section_items[section].append((key, val))
+
+        # Yield each group in their override order
+        for section in override_order:
+            for key, val in section_items[section]:
+                yield key, val
+
+    def _update_defaults(self, defaults):
+        """Updates the given defaults with values from the config files and
+        the environ. Does a little special handling for certain types of
+        options (lists)."""
+
+        # Accumulate complex default state.
+        self.values = optparse.Values(self.defaults)
+        late_eval = set()
+        # Then set the options with those values
+        for key, val in self._get_ordered_configuration_items():
+            # '--' because configuration supports only long names
+            option = self.get_option('--' + key)
+
+            # Ignore options not present in this parser. E.g. non-globals put
+            # in [global] by users that want them to apply to all applicable
+            # commands.
+            if option is None:
+                continue
+
+            if option.action in ('store_true', 'store_false', 'count'):
+                try:
+                    val = strtobool(val)
+                except ValueError:
+                    error_msg = invalid_config_error_message(
+                        option.action, key, val
+                    )
+                    self.error(error_msg)
+
+            elif option.action == 'append':
+                val = val.split()
+                val = [self.check_default(option, key, v) for v in val]
+            elif option.action == 'callback':
+                late_eval.add(option.dest)
+                opt_str = option.get_opt_string()
+                val = option.convert_value(opt_str, val)
+                # From take_action
+                args = option.callback_args or ()
+                kwargs = option.callback_kwargs or {}
+                option.callback(option, opt_str, val, self, *args, **kwargs)
+            else:
+                val = self.check_default(option, key, val)
+
+            defaults[option.dest] = val
+
+        for key in late_eval:
+            defaults[key] = getattr(self.values, key)
+        self.values = None
+        return defaults
+
+    def get_default_values(self):
+        """Overriding to make updating the defaults after instantiation of
+        the option parser possible, _update_defaults() does the dirty work."""
+        if not self.process_default_values:
+            # Old, pre-Optik 1.5 behaviour.
+            return optparse.Values(self.defaults)
+
+        # Load the configuration, or error out in case of an error
+        try:
+            self.config.load()
+        except ConfigurationError as err:
+            self.exit(UNKNOWN_ERROR, str(err))
+
+        defaults = self._update_defaults(self.defaults.copy())  # ours
+        for option in self._get_all_options():
+            default = defaults.get(option.dest)
+            if isinstance(default, string_types):
+                opt_str = option.get_opt_string()
+                defaults[option.dest] = option.check_value(opt_str, default)
+        return optparse.Values(defaults)
+
+    def error(self, msg):
+        self.print_usage(sys.stderr)
+        self.exit(UNKNOWN_ERROR, "%s\n" % msg)
+
+
+def invalid_config_error_message(action, key, val):
+    """Returns a better error message when invalid configuration option
+    is provided."""
+    if action in ('store_true', 'store_false'):
+        return ("{0} is not a valid value for {1} option, "
+                "please specify a boolean value like yes/no, "
+                "true/false or 1/0 instead.").format(val, key)
+
+    return ("{0} is not a valid value for {1} option, "
+            "please specify a numerical value like 1/0 "
+            "instead.").format(val, key)
diff --git a/lib/python3.7/site-packages/pip/_internal/cli/status_codes.py b/lib/python3.7/site-packages/pip/_internal/cli/status_codes.py
new file mode 100644
index 0000000000000000000000000000000000000000..275360a3175abaeab86148d61b735904f96d72f6
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/cli/status_codes.py
@@ -0,0 +1,8 @@
+from __future__ import absolute_import
+
+SUCCESS = 0
+ERROR = 1
+UNKNOWN_ERROR = 2
+VIRTUALENV_NOT_FOUND = 3
+PREVIOUS_BUILD_DIR_ERROR = 4
+NO_MATCHES_FOUND = 23
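
These constants are the process exit codes used across the CLI: a command's run() returns one of them (CheckCommand above returns 1 when requirements are broken, for example) and the entry point ultimately hands the value to sys.exit(). A hypothetical sketch of that contract:

import sys

SUCCESS = 0
ERROR = 1

def run(args):
    # Imagine real work here; return ERROR on failure, as HashCommand
    # does when it is invoked without any files.
    if not args:
        return ERROR
    return SUCCESS

if __name__ == '__main__':
    sys.exit(run(sys.argv[1:]))
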
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__init__.py b/lib/python3.7/site-packages/pip/_internal/commands/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e90db34f69ffcd04ccc8f9db8c3048337917fbf
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/__init__.py
@@ -0,0 +1,79 @@
+"""
+Package containing all pip commands
+"""
+from __future__ import absolute_import
+
+from pip._internal.commands.completion import CompletionCommand
+from pip._internal.commands.configuration import ConfigurationCommand
+from pip._internal.commands.download import DownloadCommand
+from pip._internal.commands.freeze import FreezeCommand
+from pip._internal.commands.hash import HashCommand
+from pip._internal.commands.help import HelpCommand
+from pip._internal.commands.list import ListCommand
+from pip._internal.commands.check import CheckCommand
+from pip._internal.commands.search import SearchCommand
+from pip._internal.commands.show import ShowCommand
+from pip._internal.commands.install import InstallCommand
+from pip._internal.commands.uninstall import UninstallCommand
+from pip._internal.commands.wheel import WheelCommand
+
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import List, Type
+    from pip._internal.cli.base_command import Command
+
+commands_order = [
+    InstallCommand,
+    DownloadCommand,
+    UninstallCommand,
+    FreezeCommand,
+    ListCommand,
+    ShowCommand,
+    CheckCommand,
+    ConfigurationCommand,
+    SearchCommand,
+    WheelCommand,
+    HashCommand,
+    CompletionCommand,
+    HelpCommand,
+]  # type: List[Type[Command]]
+
+commands_dict = {c.name: c for c in commands_order}
+
+
+def get_summaries(ordered=True):
+    """Yields sorted (command name, command summary) tuples."""
+
+    if ordered:
+        cmditems = _sort_commands(commands_dict, commands_order)
+    else:
+        cmditems = commands_dict.items()
+
+    for name, command_class in cmditems:
+        yield (name, command_class.summary)
+
+
+def get_similar_commands(name):
+    """Command name auto-correct."""
+    from difflib import get_close_matches
+
+    name = name.lower()
+
+    close_commands = get_close_matches(name, commands_dict.keys())
+
+    if close_commands:
+        return close_commands[0]
+    else:
+        return False
+
+
+def _sort_commands(cmddict, order):
+    def keyfn(key):
+        try:
+            return order.index(key[1])
+        except ValueError:
+            # unordered items should come last
+            return 0xff
+
+    return sorted(cmddict.items(), key=keyfn)
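
get_similar_commands() is a thin wrapper over difflib.get_close_matches(), and its first hit feeds the "maybe you meant" hint raised by parse_command(). The behaviour is easy to reproduce standalone:

from difflib import get_close_matches

COMMANDS = ['install', 'download', 'uninstall', 'freeze', 'list',
            'show', 'check', 'config', 'search', 'wheel', 'hash',
            'completion', 'help']

# n=1 keeps only the single best match, as get_similar_commands() does.
print(get_close_matches('instal', COMMANDS, n=1))  # ['install']
print(get_close_matches('frieze', COMMANDS, n=1))  # ['freeze']
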
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94b2a6e9f4a44ec7930d76b5587dfad806782819
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/check.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/check.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08cacbeb65847d1b8a0262990f92b80d4b71ce86
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/check.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/completion.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/completion.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ddda80e94a1981419c3275e7293bd87343087f5
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/completion.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68d0bfca398d0fc2acee06fc122ef7ef077ab439
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/configuration.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/download.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/download.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9dcc198164324e0b188f676cbb74b6da498de1d
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/download.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b07cd2c178dfe21809ce357802c10a48939a9edf
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/freeze.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/hash.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/hash.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb8ef6d2f2aaa969abeed321a39e05e77eb07ddb
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/hash.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/help.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/help.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f86bd5ec1bbcc03b80f1f97e609fee370003f7e5
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/help.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/install.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/install.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb89842c27e52bfc9a70233c5d2159f201d3c304
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/install.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/list.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/list.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5d650fb6052c55744dbde774f41fb9a1433db29
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/list.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/search.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/search.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de9a7a61a546dfc3a77a93c196b69af2eab6956c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/search.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/show.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/show.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb657afc4063f11a1b289a084dbdf9d520427e9d
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/show.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..212b33c3484b4ae8db78c9f8fa7969c3195f6662
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/uninstall.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c60abe8052c04a0956db5785195b4591e9019db5
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/commands/__pycache__/wheel.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/check.py b/lib/python3.7/site-packages/pip/_internal/commands/check.py
new file mode 100644
index 0000000000000000000000000000000000000000..801cecc0b8438083f84a36f904880860b1d28bd8
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/check.py
@@ -0,0 +1,41 @@
+import logging
+
+from pip._internal.cli.base_command import Command
+from pip._internal.operations.check import (
+    check_package_set, create_package_set_from_installed,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class CheckCommand(Command):
+    """Verify installed packages have compatible dependencies."""
+    name = 'check'
+    usage = """
+      %prog [options]"""
+    summary = 'Verify installed packages have compatible dependencies.'
+
+    def run(self, options, args):
+        package_set, parsing_probs = create_package_set_from_installed()
+        missing, conflicting = check_package_set(package_set)
+
+        for project_name in missing:
+            version = package_set[project_name].version
+            for dependency in missing[project_name]:
+                logger.info(
+                    "%s %s requires %s, which is not installed.",
+                    project_name, version, dependency[0],
+                )
+
+        for project_name in conflicting:
+            version = package_set[project_name].version
+            for dep_name, dep_version, req in conflicting[project_name]:
+                logger.info(
+                    "%s %s has requirement %s, but you have %s %s.",
+                    project_name, version, req, dep_name, dep_version,
+                )
+
+        if missing or conflicting or parsing_probs:
+            return 1
+        else:
+            logger.info("No broken requirements found.")
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/completion.py b/lib/python3.7/site-packages/pip/_internal/commands/completion.py
new file mode 100644
index 0000000000000000000000000000000000000000..2fcdd393ed6876caa2438a2577b32041a4ff2acf
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/completion.py
@@ -0,0 +1,94 @@
+from __future__ import absolute_import
+
+import sys
+import textwrap
+
+from pip._internal.cli.base_command import Command
+from pip._internal.utils.misc import get_prog
+
+BASE_COMPLETION = """
+# pip %(shell)s completion start%(script)s# pip %(shell)s completion end
+"""
+
+COMPLETION_SCRIPTS = {
+    'bash': """
+        _pip_completion()
+        {
+            COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
+                           COMP_CWORD=$COMP_CWORD \\
+                           PIP_AUTO_COMPLETE=1 $1 ) )
+        }
+        complete -o default -F _pip_completion %(prog)s
+    """,
+    'zsh': """
+        function _pip_completion {
+          local words cword
+          read -Ac words
+          read -cn cword
+          reply=( $( COMP_WORDS="$words[*]" \\
+                     COMP_CWORD=$(( cword-1 )) \\
+                     PIP_AUTO_COMPLETE=1 $words[1] ) )
+        }
+        compctl -K _pip_completion %(prog)s
+    """,
+    'fish': """
+        function __fish_complete_pip
+            set -lx COMP_WORDS (commandline -o) ""
+            set -lx COMP_CWORD ( \\
+                math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\
+            )
+            set -lx PIP_AUTO_COMPLETE 1
+            string split \\  -- (eval $COMP_WORDS[1])
+        end
+        complete -fa "(__fish_complete_pip)" -c %(prog)s
+    """,
+}
+
+
+class CompletionCommand(Command):
+    """A helper command to be used for command completion."""
+    name = 'completion'
+    summary = 'A helper command used for command completion.'
+    ignore_require_venv = True
+
+    def __init__(self, *args, **kw):
+        super(CompletionCommand, self).__init__(*args, **kw)
+
+        cmd_opts = self.cmd_opts
+
+        cmd_opts.add_option(
+            '--bash', '-b',
+            action='store_const',
+            const='bash',
+            dest='shell',
+            help='Emit completion code for bash')
+        cmd_opts.add_option(
+            '--zsh', '-z',
+            action='store_const',
+            const='zsh',
+            dest='shell',
+            help='Emit completion code for zsh')
+        cmd_opts.add_option(
+            '--fish', '-f',
+            action='store_const',
+            const='fish',
+            dest='shell',
+            help='Emit completion code for fish')
+
+        self.parser.insert_option_group(0, cmd_opts)
+
+    def run(self, options, args):
+        """Prints the completion code of the given shell"""
+        shells = COMPLETION_SCRIPTS.keys()
+        shell_options = ['--' + shell for shell in sorted(shells)]
+        if options.shell in shells:
+            script = textwrap.dedent(
+                COMPLETION_SCRIPTS.get(options.shell, '') % {
+                    'prog': get_prog(),
+                }
+            )
+            print(BASE_COMPLETION % {'script': script, 'shell': options.shell})
+        else:
+            sys.stderr.write(
+                'ERROR: You must pass %s\n' % ' or '.join(shell_options)
+            )
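
Each entry in COMPLETION_SCRIPTS is a plain %-interpolation template: run() dedents it, substitutes the program name, and wraps the result in the start/end markers from BASE_COMPLETION. A condensed rendering sketch for bash only, with a hard-coded 'pip' in place of get_prog():

import textwrap

# Same shape as BASE_COMPLETION and the bash entry above.
BASE = "\n# pip %(shell)s completion start%(script)s# pip %(shell)s completion end\n"
BASH = """
    _pip_completion()
    {
        COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\
                       COMP_CWORD=$COMP_CWORD \\
                       PIP_AUTO_COMPLETE=1 $1 ) )
    }
    complete -o default -F _pip_completion %(prog)s
"""

script = textwrap.dedent(BASH % {'prog': 'pip'})
print(BASE % {'script': script, 'shell': 'bash'})

The emitted block is intended to be evaluated by the user's shell startup file, at which point tab completion re-invokes pip with PIP_AUTO_COMPLETE=1.
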
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/configuration.py b/lib/python3.7/site-packages/pip/_internal/commands/configuration.py
new file mode 100644
index 0000000000000000000000000000000000000000..950e20573684a030a62a496a9a625cf4040a3850
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/configuration.py
@@ -0,0 +1,253 @@
+import logging
+import os
+import subprocess
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.configuration import Configuration, kinds
+from pip._internal.exceptions import PipError
+from pip._internal.locations import running_under_virtualenv, site_config_file
+from pip._internal.utils.deprecation import deprecated
+from pip._internal.utils.misc import get_prog
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigurationCommand(Command):
+    """Manage local and global configuration.
+
+        Subcommands:
+
+        list: List the active configuration (or from the file specified)
+        edit: Edit the configuration file in an editor
+        get: Get the value associated with name
+        set: Set the name=value
+        unset: Unset the value associated with name
+
+        If none of --user, --global or --site is passed, a virtual
+        environment configuration file is used if one is active and the file
+        exists. Otherwise, all modifications happen to the user file by
+        default.
+    """
+
+    name = 'config'
+    usage = """
+        %prog [<file-option>] list
+        %prog [<file-option>] [--editor <editor-path>] edit
+
+        %prog [<file-option>] get name
+        %prog [<file-option>] set name value
+        %prog [<file-option>] unset name
+    """
+
+    summary = "Manage local and global configuration."
+
+    def __init__(self, *args, **kwargs):
+        super(ConfigurationCommand, self).__init__(*args, **kwargs)
+
+        self.configuration = None
+
+        self.cmd_opts.add_option(
+            '--editor',
+            dest='editor',
+            action='store',
+            default=None,
+            help=(
+                'Editor to use to edit the file. Uses VISUAL or EDITOR '
+                'environment variables if not provided.'
+            )
+        )
+
+        self.cmd_opts.add_option(
+            '--global',
+            dest='global_file',
+            action='store_true',
+            default=False,
+            help='Use the system-wide configuration file only'
+        )
+
+        self.cmd_opts.add_option(
+            '--user',
+            dest='user_file',
+            action='store_true',
+            default=False,
+            help='Use the user configuration file only'
+        )
+
+        self.cmd_opts.add_option(
+            '--site',
+            dest='site_file',
+            action='store_true',
+            default=False,
+            help='Use the current environment configuration file only'
+        )
+
+        self.cmd_opts.add_option(
+            '--venv',
+            dest='venv_file',
+            action='store_true',
+            default=False,
+            help=(
+                '[Deprecated] Use the current environment configuration '
+                'file in a virtual environment only'
+            )
+        )
+
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options, args):
+        handlers = {
+            "list": self.list_values,
+            "edit": self.open_in_editor,
+            "get": self.get_name,
+            "set": self.set_name_value,
+            "unset": self.unset_name
+        }
+
+        # Determine action
+        if not args or args[0] not in handlers:
+            logger.error("Need an action ({}) to perform.".format(
+                ", ".join(sorted(handlers)))
+            )
+            return ERROR
+
+        action = args[0]
+
+        # Determine which configuration files are to be loaded
+        #    Depends on whether the command is modifying.
+        try:
+            load_only = self._determine_file(
+                options, need_value=(action in ["get", "set", "unset", "edit"])
+            )
+        except PipError as e:
+            logger.error(e.args[0])
+            return ERROR
+
+        # Load a new configuration
+        self.configuration = Configuration(
+            isolated=options.isolated_mode, load_only=load_only
+        )
+        self.configuration.load()
+
+        # Error handling happens here, not in the action-handlers.
+        try:
+            handlers[action](options, args[1:])
+        except PipError as e:
+            logger.error(e.args[0])
+            return ERROR
+
+        return SUCCESS
+
+    def _determine_file(self, options, need_value):
+        # Convert legacy venv_file option to site_file or error
+        if options.venv_file and not options.site_file:
+            if running_under_virtualenv():
+                options.site_file = True
+                deprecated(
+                    "The --venv option has been deprecated.",
+                    replacement="--site",
+                    gone_in="19.3",
+                )
+            else:
+                raise PipError(
+                    "Legacy --venv option requires a virtual environment. "
+                    "Use --site instead."
+                )
+
+        file_options = [key for key, value in (
+            (kinds.USER, options.user_file),
+            (kinds.GLOBAL, options.global_file),
+            (kinds.SITE, options.site_file),
+        ) if value]
+
+        if not file_options:
+            if not need_value:
+                return None
+            # Default to user, unless there's a site file.
+            elif os.path.exists(site_config_file):
+                return kinds.SITE
+            else:
+                return kinds.USER
+        elif len(file_options) == 1:
+            return file_options[0]
+
+        raise PipError(
+            "Need exactly one file to operate upon "
+            "(--user, --site, --global) to perform."
+        )
+
+    def list_values(self, options, args):
+        self._get_n_args(args, "list", n=0)
+
+        for key, value in sorted(self.configuration.items()):
+            logger.info("%s=%r", key, value)
+
+    def get_name(self, options, args):
+        key = self._get_n_args(args, "get [name]", n=1)
+        value = self.configuration.get_value(key)
+
+        logger.info("%s", value)
+
+    def set_name_value(self, options, args):
+        key, value = self._get_n_args(args, "set [name] [value]", n=2)
+        self.configuration.set_value(key, value)
+
+        self._save_configuration()
+
+    def unset_name(self, options, args):
+        key = self._get_n_args(args, "unset [name]", n=1)
+        self.configuration.unset_value(key)
+
+        self._save_configuration()
+
+    def open_in_editor(self, options, args):
+        editor = self._determine_editor(options)
+
+        fname = self.configuration.get_file_to_edit()
+        if fname is None:
+            raise PipError("Could not determine appropriate file.")
+
+        try:
+            subprocess.check_call([editor, fname])
+        except subprocess.CalledProcessError as e:
+            raise PipError(
+                "Editor Subprocess exited with exit code {}"
+                .format(e.returncode)
+            )
+
+    def _get_n_args(self, args, example, n):
+        """Helper to make sure the command got the right number of arguments
+        """
+        if len(args) != n:
+            msg = (
+                'Got unexpected number of arguments, expected {}. '
+                '(example: "{} config {}")'
+            ).format(n, get_prog(), example)
+            raise PipError(msg)
+
+        if n == 1:
+            return args[0]
+        else:
+            return args
+
+    def _save_configuration(self):
+        # We successfully ran a modifying command. Need to save the
+        # configuration.
+        try:
+            self.configuration.save()
+        except Exception:
+            logger.error(
+                "Unable to save configuration. Please report this as a bug.",
+                exc_info=1
+            )
+            raise PipError("Internal Error.")
+
+    def _determine_editor(self, options):
+        if options.editor is not None:
+            return options.editor
+        elif "VISUAL" in os.environ:
+            return os.environ["VISUAL"]
+        elif "EDITOR" in os.environ:
+            return os.environ["EDITOR"]
+        else:
+            raise PipError("Could not determine editor to use.")
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/download.py b/lib/python3.7/site-packages/pip/_internal/commands/download.py
new file mode 100644
index 0000000000000000000000000000000000000000..a57e4bc4cb8a1dc470fe928b585fae3e2bde5279
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/download.py
@@ -0,0 +1,176 @@
+from __future__ import absolute_import
+
+import logging
+import os
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.base_command import RequirementCommand
+from pip._internal.operations.prepare import RequirementPreparer
+from pip._internal.req import RequirementSet
+from pip._internal.req.req_tracker import RequirementTracker
+from pip._internal.resolve import Resolver
+from pip._internal.utils.filesystem import check_path_owner
+from pip._internal.utils.misc import ensure_dir, normalize_path
+from pip._internal.utils.temp_dir import TempDirectory
+
+logger = logging.getLogger(__name__)
+
+
+class DownloadCommand(RequirementCommand):
+    """
+    Download packages from:
+
+    - PyPI (and other indexes) using requirement specifiers.
+    - VCS project urls.
+    - Local project directories.
+    - Local or remote source archives.
+
+    pip also supports downloading from "requirements files", which provide
+    an easy way to specify a whole environment to be downloaded.
+    """
+    name = 'download'
+
+    usage = """
+      %prog [options] <requirement specifier> [package-index-options] ...
+      %prog [options] -r <requirements file> [package-index-options] ...
+      %prog [options] <vcs project url> ...
+      %prog [options] <local project path> ...
+      %prog [options] <archive url/path> ..."""
+
+    summary = 'Download packages.'
+
+    def __init__(self, *args, **kw):
+        super(DownloadCommand, self).__init__(*args, **kw)
+
+        cmd_opts = self.cmd_opts
+
+        cmd_opts.add_option(cmdoptions.constraints())
+        cmd_opts.add_option(cmdoptions.requirements())
+        cmd_opts.add_option(cmdoptions.build_dir())
+        cmd_opts.add_option(cmdoptions.no_deps())
+        cmd_opts.add_option(cmdoptions.global_options())
+        cmd_opts.add_option(cmdoptions.no_binary())
+        cmd_opts.add_option(cmdoptions.only_binary())
+        cmd_opts.add_option(cmdoptions.prefer_binary())
+        cmd_opts.add_option(cmdoptions.src())
+        cmd_opts.add_option(cmdoptions.pre())
+        cmd_opts.add_option(cmdoptions.no_clean())
+        cmd_opts.add_option(cmdoptions.require_hashes())
+        cmd_opts.add_option(cmdoptions.progress_bar())
+        cmd_opts.add_option(cmdoptions.no_build_isolation())
+        cmd_opts.add_option(cmdoptions.use_pep517())
+        cmd_opts.add_option(cmdoptions.no_use_pep517())
+
+        cmd_opts.add_option(
+            '-d', '--dest', '--destination-dir', '--destination-directory',
+            dest='download_dir',
+            metavar='dir',
+            default=os.curdir,
+            help=("Download packages into <dir>."),
+        )
+
+        cmd_opts.add_option(cmdoptions.platform())
+        cmd_opts.add_option(cmdoptions.python_version())
+        cmd_opts.add_option(cmdoptions.implementation())
+        cmd_opts.add_option(cmdoptions.abi())
+
+        index_opts = cmdoptions.make_option_group(
+            cmdoptions.index_group,
+            self.parser,
+        )
+
+        self.parser.insert_option_group(0, index_opts)
+        self.parser.insert_option_group(0, cmd_opts)
+
+    def run(self, options, args):
+        options.ignore_installed = True
+        # editable doesn't really make sense for `pip download`, but the bowels
+        # of the RequirementSet code require that property.
+        options.editables = []
+
+        if options.python_version:
+            python_versions = [options.python_version]
+        else:
+            python_versions = None
+
+        cmdoptions.check_dist_restriction(options)
+
+        options.src_dir = os.path.abspath(options.src_dir)
+        options.download_dir = normalize_path(options.download_dir)
+
+        ensure_dir(options.download_dir)
+
+        with self._build_session(options) as session:
+            finder = self._build_package_finder(
+                options=options,
+                session=session,
+                platform=options.platform,
+                python_versions=python_versions,
+                abi=options.abi,
+                implementation=options.implementation,
+            )
+            build_delete = (not (options.no_clean or options.build_dir))
+            if options.cache_dir and not check_path_owner(options.cache_dir):
+                logger.warning(
+                    "The directory '%s' or its parent directory is not owned "
+                    "by the current user and caching wheels has been "
+                    "disabled. check the permissions and owner of that "
+                    "directory. If executing pip with sudo, you may want "
+                    "sudo's -H flag.",
+                    options.cache_dir,
+                )
+                options.cache_dir = None
+
+            with RequirementTracker() as req_tracker, TempDirectory(
+                options.build_dir, delete=build_delete, kind="download"
+            ) as directory:
+
+                requirement_set = RequirementSet(
+                    require_hashes=options.require_hashes,
+                )
+                self.populate_requirement_set(
+                    requirement_set,
+                    args,
+                    options,
+                    finder,
+                    session,
+                    self.name,
+                    None
+                )
+
+                preparer = RequirementPreparer(
+                    build_dir=directory.path,
+                    src_dir=options.src_dir,
+                    download_dir=options.download_dir,
+                    wheel_download_dir=None,
+                    progress_bar=options.progress_bar,
+                    build_isolation=options.build_isolation,
+                    req_tracker=req_tracker,
+                )
+
+                resolver = Resolver(
+                    preparer=preparer,
+                    finder=finder,
+                    session=session,
+                    wheel_cache=None,
+                    use_user_site=False,
+                    upgrade_strategy="to-satisfy-only",
+                    force_reinstall=False,
+                    ignore_dependencies=options.ignore_dependencies,
+                    ignore_requires_python=False,
+                    ignore_installed=True,
+                    isolated=options.isolated_mode,
+                )
+                resolver.resolve(requirement_set)
+
+                downloaded = ' '.join([
+                    req.name for req in requirement_set.successfully_downloaded
+                ])
+                if downloaded:
+                    logger.info('Successfully downloaded %s', downloaded)
+
+                # Clean up
+                if not options.no_clean:
+                    requirement_set.cleanup_files()
+
+        return requirement_set
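
Combined with the --platform/--python-version/--implementation/--abi restrictions from cmdoptions, this command can fetch wheels for a target other than the running interpreter. A hypothetical invocation from Python: the package name and destination directory are placeholders, and restricting the platform also requires binary-only resolution (enforced by check_dist_restriction above), hence --only-binary=:all:.

import subprocess
import sys

# Download manylinux wheels for CPython 3.7 into ./wheelhouse.
subprocess.check_call([
    sys.executable, '-m', 'pip', 'download',
    '--dest', './wheelhouse',
    '--platform', 'manylinux1_x86_64',
    '--python-version', '37',
    '--implementation', 'cp',
    '--only-binary', ':all:',   # required when restricting the platform
    'requests',
])
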
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/freeze.py b/lib/python3.7/site-packages/pip/_internal/commands/freeze.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc9c53a6b5db235e2ff6dd6384dfd29e666d27b0
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/freeze.py
@@ -0,0 +1,96 @@
+from __future__ import absolute_import
+
+import sys
+
+from pip._internal.cache import WheelCache
+from pip._internal.cli.base_command import Command
+from pip._internal.models.format_control import FormatControl
+from pip._internal.operations.freeze import freeze
+from pip._internal.utils.compat import stdlib_pkgs
+
+DEV_PKGS = {'pip', 'setuptools', 'distribute', 'wheel'}
+
+
+class FreezeCommand(Command):
+    """
+    Output installed packages in requirements format.
+
+    Packages are listed in case-insensitive sorted order.
+    """
+    name = 'freeze'
+    usage = """
+      %prog [options]"""
+    summary = 'Output installed packages in requirements format.'
+    log_streams = ("ext://sys.stderr", "ext://sys.stderr")
+
+    def __init__(self, *args, **kw):
+        super(FreezeCommand, self).__init__(*args, **kw)
+
+        self.cmd_opts.add_option(
+            '-r', '--requirement',
+            dest='requirements',
+            action='append',
+            default=[],
+            metavar='file',
+            help="Use the order in the given requirements file and its "
+                 "comments when generating output. This option can be "
+                 "used multiple times.")
+        self.cmd_opts.add_option(
+            '-f', '--find-links',
+            dest='find_links',
+            action='append',
+            default=[],
+            metavar='URL',
+            help='URL for finding packages, which will be added to the '
+                 'output.')
+        self.cmd_opts.add_option(
+            '-l', '--local',
+            dest='local',
+            action='store_true',
+            default=False,
+            help='If in a virtualenv that has global access, do not output '
+                 'globally-installed packages.')
+        self.cmd_opts.add_option(
+            '--user',
+            dest='user',
+            action='store_true',
+            default=False,
+            help='Only output packages installed in user-site.')
+        self.cmd_opts.add_option(
+            '--all',
+            dest='freeze_all',
+            action='store_true',
+            help='Do not skip these packages in the output:'
+                 ' %s' % ', '.join(DEV_PKGS))
+        self.cmd_opts.add_option(
+            '--exclude-editable',
+            dest='exclude_editable',
+            action='store_true',
+            help='Exclude editable packages from output.')
+
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options, args):
+        format_control = FormatControl(set(), set())
+        wheel_cache = WheelCache(options.cache_dir, format_control)
+        skip = set(stdlib_pkgs)
+        if not options.freeze_all:
+            skip.update(DEV_PKGS)
+
+        freeze_kwargs = dict(
+            requirement=options.requirements,
+            find_links=options.find_links,
+            local_only=options.local,
+            user_only=options.user,
+            skip_regex=options.skip_requirements_regex,
+            isolated=options.isolated_mode,
+            wheel_cache=wheel_cache,
+            skip=skip,
+            exclude_editable=options.exclude_editable,
+        )
+
+        try:
+            for line in freeze(**freeze_kwargs):
+                sys.stdout.write(line + '\n')
+        finally:
+            wheel_cache.cleanup()
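
The actual line generation happens in pip's operations.freeze module, which this diff does not include; at its core the output is one name==version line per installed distribution, case-insensitively sorted, with the bootstrap packages skipped unless --all is given. A bare-bones approximation using pkg_resources, with no requirements-file ordering and no editable or VCS handling:

import pkg_resources

DEV_PKGS = {'pip', 'setuptools', 'distribute', 'wheel'}

def simple_freeze(skip=DEV_PKGS):
    # One 'name==version' line per installed distribution,
    # case-insensitively sorted, bootstrap packages skipped.
    dists = [d for d in pkg_resources.working_set
             if d.project_name.lower() not in skip]
    for dist in sorted(dists, key=lambda d: d.project_name.lower()):
        yield '%s==%s' % (dist.project_name, dist.version)

for line in simple_freeze():
    print(line)
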
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/hash.py b/lib/python3.7/site-packages/pip/_internal/commands/hash.py
new file mode 100644
index 0000000000000000000000000000000000000000..423440e9c2985fe7a79e33d6872fcc895ea6d711
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/hash.py
@@ -0,0 +1,57 @@
+from __future__ import absolute_import
+
+import hashlib
+import logging
+import sys
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR
+from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES
+from pip._internal.utils.misc import read_chunks
+
+logger = logging.getLogger(__name__)
+
+
+class HashCommand(Command):
+    """
+    Compute a hash of a local package archive.
+
+    These can be used with --hash in a requirements file to do repeatable
+    installs.
+
+    """
+    name = 'hash'
+    usage = '%prog [options] <file> ...'
+    summary = 'Compute hashes of package archives.'
+    ignore_require_venv = True
+
+    def __init__(self, *args, **kw):
+        super(HashCommand, self).__init__(*args, **kw)
+        self.cmd_opts.add_option(
+            '-a', '--algorithm',
+            dest='algorithm',
+            choices=STRONG_HASHES,
+            action='store',
+            default=FAVORITE_HASH,
+            help='The hash algorithm to use: one of %s' %
+                 ', '.join(STRONG_HASHES))
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options, args):
+        if not args:
+            self.parser.print_usage(sys.stderr)
+            return ERROR
+
+        algorithm = options.algorithm
+        for path in args:
+            logger.info('%s:\n--hash=%s:%s',
+                        path, algorithm, _hash_of_file(path, algorithm))
+
+
+def _hash_of_file(path, algorithm):
+    """Return the hash digest of a file."""
+    with open(path, 'rb') as archive:
+        file_hash = hashlib.new(algorithm)
+        for chunk in read_chunks(archive):
+            file_hash.update(chunk)
+    return file_hash.hexdigest()
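+
+
+# Illustrative sketch (not pip code): the file is read in chunks, so archives
+# never need to fit in memory. Assuming a local archive path, running
+#
+#     pip hash -a sha256 dist/pkg-1.0.tar.gz
+#
+# logs output of the form
+#
+#     dist/pkg-1.0.tar.gz:
+#     --hash=sha256:<64 hex digits>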
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/help.py b/lib/python3.7/site-packages/pip/_internal/commands/help.py
new file mode 100644
index 0000000000000000000000000000000000000000..49a81cbb074299f520e52643dc63d2fd33e8ad2a
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/help.py
@@ -0,0 +1,37 @@
+from __future__ import absolute_import
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.exceptions import CommandError
+
+
+class HelpCommand(Command):
+    """Show help for commands"""
+    name = 'help'
+    usage = """
+      %prog <command>"""
+    summary = 'Show help for commands.'
+    ignore_require_venv = True
+
+    def run(self, options, args):
+        from pip._internal.commands import commands_dict, get_similar_commands
+
+        try:
+            # 'pip help' with no args is handled by pip.__init__.parseopt()
+            cmd_name = args[0]  # the command we need help for
+        except IndexError:
+            return SUCCESS
+
+        if cmd_name not in commands_dict:
+            guess = get_similar_commands(cmd_name)
+
+            msg = ['unknown command "%s"' % cmd_name]
+            if guess:
+                msg.append('maybe you meant "%s"' % guess)
+
+            raise CommandError(' - '.join(msg))
+
+        command = commands_dict[cmd_name]()
+        command.parser.print_help()
+
+        return SUCCESS
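+
+
+# Illustrative sketch (not pip code): the message parts are joined with
+# ' - ', so a near-miss such as `pip help instal` raises CommandError with
+# a message along the lines of
+#
+#     unknown command "instal" - maybe you meant "install"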
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/install.py b/lib/python3.7/site-packages/pip/_internal/commands/install.py
new file mode 100644
index 0000000000000000000000000000000000000000..c13da44c6c370fb653f1dee1f2c608a3b4f0051a
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/install.py
@@ -0,0 +1,587 @@
+from __future__ import absolute_import
+
+import errno
+import logging
+import operator
+import os
+import shutil
+from optparse import SUPPRESS_HELP
+
+from pip._vendor import pkg_resources
+
+from pip._internal.cache import WheelCache
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.base_command import RequirementCommand
+from pip._internal.cli.status_codes import ERROR
+from pip._internal.exceptions import (
+    CommandError, InstallationError, PreviousBuildDirError,
+)
+from pip._internal.locations import distutils_scheme, virtualenv_no_global
+from pip._internal.operations.check import check_install_conflicts
+from pip._internal.operations.prepare import RequirementPreparer
+from pip._internal.req import RequirementSet, install_given_reqs
+from pip._internal.req.req_tracker import RequirementTracker
+from pip._internal.resolve import Resolver
+from pip._internal.utils.filesystem import check_path_owner
+from pip._internal.utils.misc import (
+    ensure_dir, get_installed_version,
+    protect_pip_from_modification_on_windows,
+)
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.wheel import WheelBuilder
+
+logger = logging.getLogger(__name__)
+
+
+def is_wheel_installed():
+    """
+    Return whether the wheel package is installed.
+    """
+    try:
+        import wheel  # noqa: F401
+    except ImportError:
+        return False
+
+    return True
+
+
+def build_wheels(builder, pep517_requirements, legacy_requirements, session):
+    """
+    Build wheels for requirements, depending on whether wheel is installed.
+    """
+    # We don't build wheels for legacy requirements if wheel is not installed.
+    should_build_legacy = is_wheel_installed()
+
+    # Always build PEP 517 requirements
+    build_failures = builder.build(
+        pep517_requirements,
+        session=session, autobuilding=True
+    )
+
+    if should_build_legacy:
+        # We don't care about failures building legacy
+        # requirements, as we'll fall through to a direct
+        # install for those.
+        builder.build(
+            legacy_requirements,
+            session=session, autobuilding=True
+        )
+
+    return build_failures
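+
+
+# Illustrative sketch (not pip code): only PEP 517 failures are reported,
+# because legacy requirements can still fall back to a direct install. The
+# call in run() below has this shape (pep517_reqs/legacy_reqs are
+# hypothetical names):
+#
+#     failures = build_wheels(
+#         builder=wheel_builder,
+#         pep517_requirements=pep517_reqs,   # failures here are fatal
+#         legacy_requirements=legacy_reqs,   # failures here are ignored
+#         session=session,
+#     )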
+
+
+class InstallCommand(RequirementCommand):
+    """
+    Install packages from:
+
+    - PyPI (and other indexes) using requirement specifiers.
+    - VCS project URLs.
+    - Local project directories.
+    - Local or remote source archives.
+
+    pip also supports installing from "requirements files", which provide
+    an easy way to specify a whole environment to be installed.
+    """
+    name = 'install'
+
+    usage = """
+      %prog [options] <requirement specifier> [package-index-options] ...
+      %prog [options] -r <requirements file> [package-index-options] ...
+      %prog [options] [-e] <vcs project url> ...
+      %prog [options] [-e] <local project path> ...
+      %prog [options] <archive url/path> ..."""
+
+    summary = 'Install packages.'
+
+    def __init__(self, *args, **kw):
+        super(InstallCommand, self).__init__(*args, **kw)
+
+        cmd_opts = self.cmd_opts
+
+        cmd_opts.add_option(cmdoptions.requirements())
+        cmd_opts.add_option(cmdoptions.constraints())
+        cmd_opts.add_option(cmdoptions.no_deps())
+        cmd_opts.add_option(cmdoptions.pre())
+
+        cmd_opts.add_option(cmdoptions.editable())
+        cmd_opts.add_option(
+            '-t', '--target',
+            dest='target_dir',
+            metavar='dir',
+            default=None,
+            help='Install packages into <dir>. '
+                 'By default this will not replace existing files/folders in '
+                 '<dir>. Use --upgrade to replace existing packages in <dir> '
+                 'with new versions.'
+        )
+        cmd_opts.add_option(cmdoptions.platform())
+        cmd_opts.add_option(cmdoptions.python_version())
+        cmd_opts.add_option(cmdoptions.implementation())
+        cmd_opts.add_option(cmdoptions.abi())
+
+        cmd_opts.add_option(
+            '--user',
+            dest='use_user_site',
+            action='store_true',
+            help="Install to the Python user install directory for your "
+                 "platform. Typically ~/.local/, or %APPDATA%\\Python on "
+                 "Windows. (See the Python documentation for site.USER_BASE "
+                 "for full details.)")
+        cmd_opts.add_option(
+            '--no-user',
+            dest='use_user_site',
+            action='store_false',
+            help=SUPPRESS_HELP)
+        cmd_opts.add_option(
+            '--root',
+            dest='root_path',
+            metavar='dir',
+            default=None,
+            help="Install everything relative to this alternate root "
+                 "directory.")
+        cmd_opts.add_option(
+            '--prefix',
+            dest='prefix_path',
+            metavar='dir',
+            default=None,
+            help="Installation prefix where lib, bin and other top-level "
+                 "folders are placed")
+
+        cmd_opts.add_option(cmdoptions.build_dir())
+
+        cmd_opts.add_option(cmdoptions.src())
+
+        cmd_opts.add_option(
+            '-U', '--upgrade',
+            dest='upgrade',
+            action='store_true',
+            help='Upgrade all specified packages to the newest available '
+                 'version. The handling of dependencies depends on the '
+                 'upgrade-strategy used.'
+        )
+
+        cmd_opts.add_option(
+            '--upgrade-strategy',
+            dest='upgrade_strategy',
+            default='only-if-needed',
+            choices=['only-if-needed', 'eager'],
+            help='Determines how dependency upgrading should be handled '
+                 '[default: %default]. '
+                 '"eager" - dependencies are upgraded regardless of '
+                 'whether the currently installed version satisfies the '
+                 'requirements of the upgraded package(s). '
+                 '"only-if-needed" -  are upgraded only when they do not '
+                 'satisfy the requirements of the upgraded package(s).'
+        )
+
+        cmd_opts.add_option(
+            '--force-reinstall',
+            dest='force_reinstall',
+            action='store_true',
+            help='Reinstall all packages even if they are already '
+                 'up-to-date.')
+
+        cmd_opts.add_option(
+            '-I', '--ignore-installed',
+            dest='ignore_installed',
+            action='store_true',
+            help='Ignore the installed packages (reinstalling instead).')
+
+        cmd_opts.add_option(cmdoptions.ignore_requires_python())
+        cmd_opts.add_option(cmdoptions.no_build_isolation())
+        cmd_opts.add_option(cmdoptions.use_pep517())
+        cmd_opts.add_option(cmdoptions.no_use_pep517())
+
+        cmd_opts.add_option(cmdoptions.install_options())
+        cmd_opts.add_option(cmdoptions.global_options())
+
+        cmd_opts.add_option(
+            "--compile",
+            action="store_true",
+            dest="compile",
+            default=True,
+            help="Compile Python source files to bytecode",
+        )
+
+        cmd_opts.add_option(
+            "--no-compile",
+            action="store_false",
+            dest="compile",
+            help="Do not compile Python source files to bytecode",
+        )
+
+        cmd_opts.add_option(
+            "--no-warn-script-location",
+            action="store_false",
+            dest="warn_script_location",
+            default=True,
+            help="Do not warn when installing scripts outside PATH",
+        )
+        cmd_opts.add_option(
+            "--no-warn-conflicts",
+            action="store_false",
+            dest="warn_about_conflicts",
+            default=True,
+            help="Do not warn about broken dependencies",
+        )
+
+        cmd_opts.add_option(cmdoptions.no_binary())
+        cmd_opts.add_option(cmdoptions.only_binary())
+        cmd_opts.add_option(cmdoptions.prefer_binary())
+        cmd_opts.add_option(cmdoptions.no_clean())
+        cmd_opts.add_option(cmdoptions.require_hashes())
+        cmd_opts.add_option(cmdoptions.progress_bar())
+
+        index_opts = cmdoptions.make_option_group(
+            cmdoptions.index_group,
+            self.parser,
+        )
+
+        self.parser.insert_option_group(0, index_opts)
+        self.parser.insert_option_group(0, cmd_opts)
+
+    def run(self, options, args):
+        cmdoptions.check_install_build_global(options)
+        upgrade_strategy = "to-satisfy-only"
+        if options.upgrade:
+            upgrade_strategy = options.upgrade_strategy
+
+        if options.build_dir:
+            options.build_dir = os.path.abspath(options.build_dir)
+
+        cmdoptions.check_dist_restriction(options, check_target=True)
+
+        if options.python_version:
+            python_versions = [options.python_version]
+        else:
+            python_versions = None
+
+        options.src_dir = os.path.abspath(options.src_dir)
+        install_options = options.install_options or []
+        if options.use_user_site:
+            if options.prefix_path:
+                raise CommandError(
+                    "Can not combine '--user' and '--prefix' as they imply "
+                    "different installation locations"
+                )
+            if virtualenv_no_global():
+                raise InstallationError(
+                    "Can not perform a '--user' install. User site-packages "
+                    "are not visible in this virtualenv."
+                )
+            install_options.append('--user')
+            install_options.append('--prefix=')
+
+        target_temp_dir = TempDirectory(kind="target")
+        if options.target_dir:
+            options.ignore_installed = True
+            options.target_dir = os.path.abspath(options.target_dir)
+            if (os.path.exists(options.target_dir) and not
+                    os.path.isdir(options.target_dir)):
+                raise CommandError(
+                    "Target path exists but is not a directory, will not "
+                    "continue."
+                )
+
+            # Create a temporary directory to stage the --target install
+            target_temp_dir.create()
+            install_options.append('--home=' + target_temp_dir.path)
+
+        global_options = options.global_options or []
+
+        with self._build_session(options) as session:
+            finder = self._build_package_finder(
+                options=options,
+                session=session,
+                platform=options.platform,
+                python_versions=python_versions,
+                abi=options.abi,
+                implementation=options.implementation,
+            )
+            build_delete = (not (options.no_clean or options.build_dir))
+            wheel_cache = WheelCache(options.cache_dir, options.format_control)
+
+            if options.cache_dir and not check_path_owner(options.cache_dir):
+                logger.warning(
+                    "The directory '%s' or its parent directory is not owned "
+                    "by the current user and caching wheels has been "
+                    "disabled. check the permissions and owner of that "
+                    "directory. If executing pip with sudo, you may want "
+                    "sudo's -H flag.",
+                    options.cache_dir,
+                )
+                options.cache_dir = None
+
+            with RequirementTracker() as req_tracker, TempDirectory(
+                options.build_dir, delete=build_delete, kind="install"
+            ) as directory:
+                requirement_set = RequirementSet(
+                    require_hashes=options.require_hashes,
+                    check_supported_wheels=not options.target_dir,
+                )
+
+                try:
+                    self.populate_requirement_set(
+                        requirement_set, args, options, finder, session,
+                        self.name, wheel_cache
+                    )
+                    preparer = RequirementPreparer(
+                        build_dir=directory.path,
+                        src_dir=options.src_dir,
+                        download_dir=None,
+                        wheel_download_dir=None,
+                        progress_bar=options.progress_bar,
+                        build_isolation=options.build_isolation,
+                        req_tracker=req_tracker,
+                    )
+
+                    resolver = Resolver(
+                        preparer=preparer,
+                        finder=finder,
+                        session=session,
+                        wheel_cache=wheel_cache,
+                        use_user_site=options.use_user_site,
+                        upgrade_strategy=upgrade_strategy,
+                        force_reinstall=options.force_reinstall,
+                        ignore_dependencies=options.ignore_dependencies,
+                        ignore_requires_python=options.ignore_requires_python,
+                        ignore_installed=options.ignore_installed,
+                        isolated=options.isolated_mode,
+                        use_pep517=options.use_pep517
+                    )
+                    resolver.resolve(requirement_set)
+
+                    protect_pip_from_modification_on_windows(
+                        modifying_pip=requirement_set.has_requirement("pip")
+                    )
+
+                    # Consider legacy and PEP 517-based requirements separately
+                    legacy_requirements = []
+                    pep517_requirements = []
+                    for req in requirement_set.requirements.values():
+                        if req.use_pep517:
+                            pep517_requirements.append(req)
+                        else:
+                            legacy_requirements.append(req)
+
+                    wheel_builder = WheelBuilder(
+                        finder, preparer, wheel_cache,
+                        build_options=[], global_options=[],
+                    )
+
+                    build_failures = build_wheels(
+                        builder=wheel_builder,
+                        pep517_requirements=pep517_requirements,
+                        legacy_requirements=legacy_requirements,
+                        session=session,
+                    )
+
+                    # If we're using PEP 517, we cannot do a direct install
+                    # so we fail here.
+                    if build_failures:
+                        raise InstallationError(
+                            "Could not build wheels for {} which use"
+                            " PEP 517 and cannot be installed directly".format(
+                                ", ".join(r.name for r in build_failures)))
+
+                    to_install = resolver.get_installation_order(
+                        requirement_set
+                    )
+
+                    # Consistency checking of the package set we're installing.
+                    should_warn_about_conflicts = (
+                        not options.ignore_dependencies and
+                        options.warn_about_conflicts
+                    )
+                    if should_warn_about_conflicts:
+                        self._warn_about_conflicts(to_install)
+
+                    # Don't warn about script install locations if
+                    # --target has been specified
+                    warn_script_location = options.warn_script_location
+                    if options.target_dir:
+                        warn_script_location = False
+
+                    installed = install_given_reqs(
+                        to_install,
+                        install_options,
+                        global_options,
+                        root=options.root_path,
+                        home=target_temp_dir.path,
+                        prefix=options.prefix_path,
+                        pycompile=options.compile,
+                        warn_script_location=warn_script_location,
+                        use_user_site=options.use_user_site,
+                    )
+
+                    lib_locations = get_lib_location_guesses(
+                        user=options.use_user_site,
+                        home=target_temp_dir.path,
+                        root=options.root_path,
+                        prefix=options.prefix_path,
+                        isolated=options.isolated_mode,
+                    )
+                    working_set = pkg_resources.WorkingSet(lib_locations)
+
+                    reqs = sorted(installed, key=operator.attrgetter('name'))
+                    items = []
+                    for req in reqs:
+                        item = req.name
+                        try:
+                            installed_version = get_installed_version(
+                                req.name, working_set=working_set
+                            )
+                            if installed_version:
+                                item += '-' + installed_version
+                        except Exception:
+                            pass
+                        items.append(item)
+                    installed = ' '.join(items)
+                    if installed:
+                        logger.info('Successfully installed %s', installed)
+                except EnvironmentError as error:
+                    show_traceback = (self.verbosity >= 1)
+
+                    message = create_env_error_message(
+                        error, show_traceback, options.use_user_site,
+                    )
+                    logger.error(message, exc_info=show_traceback)
+
+                    return ERROR
+                except PreviousBuildDirError:
+                    options.no_clean = True
+                    raise
+                finally:
+                    # Clean up
+                    if not options.no_clean:
+                        requirement_set.cleanup_files()
+                        wheel_cache.cleanup()
+
+        if options.target_dir:
+            self._handle_target_dir(
+                options.target_dir, target_temp_dir, options.upgrade
+            )
+        return requirement_set
+
+    def _handle_target_dir(self, target_dir, target_temp_dir, upgrade):
+        ensure_dir(target_dir)
+
+        # Checking both purelib and platlib directories for installed
+        # packages to be moved to target directory
+        lib_dir_list = []
+
+        with target_temp_dir:
+            scheme = distutils_scheme('', home=target_temp_dir.path)
+            purelib_dir = scheme['purelib']
+            platlib_dir = scheme['platlib']
+            data_dir = scheme['data']
+
+            if os.path.exists(purelib_dir):
+                lib_dir_list.append(purelib_dir)
+            if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
+                lib_dir_list.append(platlib_dir)
+            if os.path.exists(data_dir):
+                lib_dir_list.append(data_dir)
+
+            for lib_dir in lib_dir_list:
+                for item in os.listdir(lib_dir):
+                    if lib_dir == data_dir:
+                        ddir = os.path.join(data_dir, item)
+                        if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
+                            continue
+                    target_item_dir = os.path.join(target_dir, item)
+                    if os.path.exists(target_item_dir):
+                        if not upgrade:
+                            logger.warning(
+                                'Target directory %s already exists. Specify '
+                                '--upgrade to force replacement.',
+                                target_item_dir
+                            )
+                            continue
+                        if os.path.islink(target_item_dir):
+                            logger.warning(
+                                'Target directory %s already exists and is '
+                                'a link. Pip will not automatically replace '
+                                'links, please remove if replacement is '
+                                'desired.',
+                                target_item_dir
+                            )
+                            continue
+                        if os.path.isdir(target_item_dir):
+                            shutil.rmtree(target_item_dir)
+                        else:
+                            os.remove(target_item_dir)
+
+                    shutil.move(
+                        os.path.join(lib_dir, item),
+                        target_item_dir
+                    )
+
+    def _warn_about_conflicts(self, to_install):
+        try:
+            package_set, _dep_info = check_install_conflicts(to_install)
+        except Exception:
+            logger.error("Error checking for conflicts.", exc_info=True)
+            return
+        missing, conflicting = _dep_info
+
+        # NOTE: There is some duplication here from pip check
+        for project_name in missing:
+            version = package_set[project_name][0]
+            for dependency in missing[project_name]:
+                logger.critical(
+                    "%s %s requires %s, which is not installed.",
+                    project_name, version, dependency[1],
+                )
+
+        for project_name in conflicting:
+            version = package_set[project_name][0]
+            for dep_name, dep_version, req in conflicting[project_name]:
+                logger.critical(
+                    "%s %s has requirement %s, but you'll have %s %s which is "
+                    "incompatible.",
+                    project_name, version, req, dep_name, dep_version,
+                )
+
+
+def get_lib_location_guesses(*args, **kwargs):
+    scheme = distutils_scheme('', *args, **kwargs)
+    return [scheme['purelib'], scheme['platlib']]
+
+
+def create_env_error_message(error, show_traceback, using_user_site):
+    """Format an error message for an EnvironmentError
+
+    The error may occur at any time during the execution of the install command.
+    """
+    parts = []
+
+    # Mention the error if we are not going to show a traceback
+    parts.append("Could not install packages due to an EnvironmentError")
+    if not show_traceback:
+        parts.append(": ")
+        parts.append(str(error))
+    else:
+        parts.append(".")
+
+    # Split the error indication from a helper message (if any)
+    parts[-1] += "\n"
+
+    # Suggest useful actions to the user:
+    #  (1) using user site-packages or (2) verifying the permissions
+    if error.errno == errno.EACCES:
+        user_option_part = "Consider using the `--user` option"
+        permissions_part = "Check the permissions"
+
+        if not using_user_site:
+            parts.extend([
+                user_option_part, " or ",
+                permissions_part.lower(),
+            ])
+        else:
+            parts.append(permissions_part)
+        parts.append(".\n")
+
+    return "".join(parts).strip() + "\n"
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/list.py b/lib/python3.7/site-packages/pip/_internal/commands/list.py
new file mode 100644
index 0000000000000000000000000000000000000000..d70782d407dfecf8bb6ba68b75f3e214817991e4
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/list.py
@@ -0,0 +1,302 @@
+from __future__ import absolute_import
+
+import json
+import logging
+
+from pip._vendor import six
+from pip._vendor.six.moves import zip_longest
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.base_command import Command
+from pip._internal.exceptions import CommandError
+from pip._internal.index import PackageFinder
+from pip._internal.utils.misc import (
+    dist_is_editable, get_installed_distributions,
+)
+from pip._internal.utils.packaging import get_installer
+
+logger = logging.getLogger(__name__)
+
+
+class ListCommand(Command):
+    """
+    List installed packages, including editables.
+
+    Packages are listed in a case-insensitive sorted order.
+    """
+    name = 'list'
+    usage = """
+      %prog [options]"""
+    summary = 'List installed packages.'
+
+    def __init__(self, *args, **kw):
+        super(ListCommand, self).__init__(*args, **kw)
+
+        cmd_opts = self.cmd_opts
+
+        cmd_opts.add_option(
+            '-o', '--outdated',
+            action='store_true',
+            default=False,
+            help='List outdated packages.')
+        cmd_opts.add_option(
+            '-u', '--uptodate',
+            action='store_true',
+            default=False,
+            help='List up-to-date packages.')
+        cmd_opts.add_option(
+            '-e', '--editable',
+            action='store_true',
+            default=False,
+            help='List editable projects.')
+        cmd_opts.add_option(
+            '-l', '--local',
+            action='store_true',
+            default=False,
+            help=('If in a virtualenv that has global access, do not list '
+                  'globally-installed packages.'),
+        )
+        self.cmd_opts.add_option(
+            '--user',
+            dest='user',
+            action='store_true',
+            default=False,
+            help='Only output packages installed in user-site.')
+
+        cmd_opts.add_option(
+            '--pre',
+            action='store_true',
+            default=False,
+            help=("Include pre-release and development versions. By default, "
+                  "pip only finds stable versions."),
+        )
+
+        cmd_opts.add_option(
+            '--format',
+            action='store',
+            dest='list_format',
+            default="columns",
+            choices=('columns', 'freeze', 'json'),
+            help="Select the output format among: columns (default), freeze, "
+                 "or json",
+        )
+
+        cmd_opts.add_option(
+            '--not-required',
+            action='store_true',
+            dest='not_required',
+            help="List packages that are not dependencies of "
+                 "installed packages.",
+        )
+
+        cmd_opts.add_option(
+            '--exclude-editable',
+            action='store_false',
+            dest='include_editable',
+            help='Exclude editable packages from output.',
+        )
+        cmd_opts.add_option(
+            '--include-editable',
+            action='store_true',
+            dest='include_editable',
+            help='Include editable packages in the output.',
+            default=True,
+        )
+        index_opts = cmdoptions.make_option_group(
+            cmdoptions.index_group, self.parser
+        )
+
+        self.parser.insert_option_group(0, index_opts)
+        self.parser.insert_option_group(0, cmd_opts)
+
+    def _build_package_finder(self, options, index_urls, session):
+        """
+        Create a package finder appropriate to this list command.
+        """
+        return PackageFinder(
+            find_links=options.find_links,
+            index_urls=index_urls,
+            allow_all_prereleases=options.pre,
+            trusted_hosts=options.trusted_hosts,
+            session=session,
+        )
+
+    def run(self, options, args):
+        if options.outdated and options.uptodate:
+            raise CommandError(
+                "Options --outdated and --uptodate cannot be combined.")
+
+        packages = get_installed_distributions(
+            local_only=options.local,
+            user_only=options.user,
+            editables_only=options.editable,
+            include_editables=options.include_editable,
+        )
+
+        # get_not_required must be called first in order to find and
+        # filter out all dependencies correctly. Otherwise a package
+        # can't be identified as a dependency because some parent
+        # packages could already have been filtered out.
+        if options.not_required:
+            packages = self.get_not_required(packages, options)
+
+        if options.outdated:
+            packages = self.get_outdated(packages, options)
+        elif options.uptodate:
+            packages = self.get_uptodate(packages, options)
+
+        self.output_package_listing(packages, options)
+
+    def get_outdated(self, packages, options):
+        return [
+            dist for dist in self.iter_packages_latest_infos(packages, options)
+            if dist.latest_version > dist.parsed_version
+        ]
+
+    def get_uptodate(self, packages, options):
+        return [
+            dist for dist in self.iter_packages_latest_infos(packages, options)
+            if dist.latest_version == dist.parsed_version
+        ]
+
+    def get_not_required(self, packages, options):
+        dep_keys = set()
+        for dist in packages:
+            dep_keys.update(requirement.key for requirement in dist.requires())
+        return {pkg for pkg in packages if pkg.key not in dep_keys}
+
+    def iter_packages_latest_infos(self, packages, options):
+        index_urls = [options.index_url] + options.extra_index_urls
+        if options.no_index:
+            logger.debug('Ignoring indexes: %s', ','.join(index_urls))
+            index_urls = []
+
+        with self._build_session(options) as session:
+            finder = self._build_package_finder(options, index_urls, session)
+
+            for dist in packages:
+                typ = 'unknown'
+                all_candidates = finder.find_all_candidates(dist.key)
+                if not options.pre:
+                    # Remove prereleases
+                    all_candidates = [candidate for candidate in all_candidates
+                                      if not candidate.version.is_prerelease]
+
+                evaluator = finder.candidate_evaluator
+                best_candidate = evaluator.get_best_candidate(all_candidates)
+                if best_candidate is None:
+                    continue
+
+                remote_version = best_candidate.version
+                if best_candidate.location.is_wheel:
+                    typ = 'wheel'
+                else:
+                    typ = 'sdist'
+                # This is dirty but makes the rest of the code much cleaner
+                dist.latest_version = remote_version
+                dist.latest_filetype = typ
+                yield dist
+
+    def output_package_listing(self, packages, options):
+        packages = sorted(
+            packages,
+            key=lambda dist: dist.project_name.lower(),
+        )
+        if options.list_format == 'columns' and packages:
+            data, header = format_for_columns(packages, options)
+            self.output_package_listing_columns(data, header)
+        elif options.list_format == 'freeze':
+            for dist in packages:
+                if options.verbose >= 1:
+                    logger.info("%s==%s (%s)", dist.project_name,
+                                dist.version, dist.location)
+                else:
+                    logger.info("%s==%s", dist.project_name, dist.version)
+        elif options.list_format == 'json':
+            logger.info(format_for_json(packages, options))
+
+    def output_package_listing_columns(self, data, header):
+        # insert the header first: we need to know the size of column names
+        if len(data) > 0:
+            data.insert(0, header)
+
+        pkg_strings, sizes = tabulate(data)
+
+        # Create and add a separator.
+        if len(data) > 0:
+            pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes)))
+
+        for val in pkg_strings:
+            logger.info(val)
+
+
+def tabulate(vals):
+    # From pfmoore on GitHub:
+    # https://github.com/pypa/pip/issues/3651#issuecomment-216932564
+    assert len(vals) > 0
+
+    sizes = [0] * max(len(x) for x in vals)
+    for row in vals:
+        sizes = [max(s, len(str(c))) for s, c in zip_longest(sizes, row)]
+
+    result = []
+    for row in vals:
+        display = " ".join([str(c).ljust(s) if c is not None else ''
+                            for s, c in zip_longest(sizes, row)])
+        result.append(display)
+
+    return result, sizes
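+
+
+# Illustrative sketch (not pip code): every column is padded to its widest
+# cell, and the column sizes are returned alongside the rows so the caller
+# can build a matching separator row, e.g.
+#
+#     tabulate([['Package', 'Version'], ['pip', '19.0.3']])
+#     # -> (['Package Version', 'pip     19.0.3 '], [7, 7])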
+
+
+def format_for_columns(pkgs, options):
+    """
+    Convert the package data into something usable
+    by output_package_listing_columns.
+    """
+    running_outdated = options.outdated
+    # Adjust the header for the `pip list --outdated` case.
+    if running_outdated:
+        header = ["Package", "Version", "Latest", "Type"]
+    else:
+        header = ["Package", "Version"]
+
+    data = []
+    if options.verbose >= 1 or any(dist_is_editable(x) for x in pkgs):
+        header.append("Location")
+    if options.verbose >= 1:
+        header.append("Installer")
+
+    for proj in pkgs:
+        # if we're working on the 'outdated' list, separate out the
+        # latest_version and type
+        row = [proj.project_name, proj.version]
+
+        if running_outdated:
+            row.append(proj.latest_version)
+            row.append(proj.latest_filetype)
+
+        if options.verbose >= 1 or dist_is_editable(proj):
+            row.append(proj.location)
+        if options.verbose >= 1:
+            row.append(get_installer(proj))
+
+        data.append(row)
+
+    return data, header
+
+
+def format_for_json(packages, options):
+    data = []
+    for dist in packages:
+        info = {
+            'name': dist.project_name,
+            'version': six.text_type(dist.version),
+        }
+        if options.verbose >= 1:
+            info['location'] = dist.location
+            info['installer'] = get_installer(dist)
+        if options.outdated:
+            info['latest_version'] = six.text_type(dist.latest_version)
+            info['latest_filetype'] = dist.latest_filetype
+        data.append(info)
+    return json.dumps(data)
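+
+
+# Illustrative sketch (not pip code): with default verbosity the JSON format
+# is a flat list of name/version objects, e.g. `pip list --format=json`
+# prints something like
+#
+#     [{"name": "pip", "version": "19.0.3"}, {"name": "wheel", "version": "0.33.1"}]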
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/search.py b/lib/python3.7/site-packages/pip/_internal/commands/search.py
new file mode 100644
index 0000000000000000000000000000000000000000..c157a3128a42ed5d9ecdf51782697af8e21a851c
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/search.py
@@ -0,0 +1,135 @@
+from __future__ import absolute_import
+
+import logging
+import sys
+import textwrap
+from collections import OrderedDict
+
+from pip._vendor import pkg_resources
+from pip._vendor.packaging.version import parse as parse_version
+# NOTE: XMLRPC Client is not annotated in typeshed as of 2017-07-17, which is
+#       why we ignore the type on this import
+from pip._vendor.six.moves import xmlrpc_client  # type: ignore
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS
+from pip._internal.download import PipXmlrpcTransport
+from pip._internal.exceptions import CommandError
+from pip._internal.models.index import PyPI
+from pip._internal.utils.compat import get_terminal_size
+from pip._internal.utils.logging import indent_log
+
+logger = logging.getLogger(__name__)
+
+
+class SearchCommand(Command):
+    """Search for PyPI packages whose name or summary contains <query>."""
+    name = 'search'
+    usage = """
+      %prog [options] <query>"""
+    summary = 'Search PyPI for packages.'
+    ignore_require_venv = True
+
+    def __init__(self, *args, **kw):
+        super(SearchCommand, self).__init__(*args, **kw)
+        self.cmd_opts.add_option(
+            '-i', '--index',
+            dest='index',
+            metavar='URL',
+            default=PyPI.pypi_url,
+            help='Base URL of Python Package Index (default %default)')
+
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options, args):
+        if not args:
+            raise CommandError('Missing required argument (search query).')
+        query = args
+        pypi_hits = self.search(query, options)
+        hits = transform_hits(pypi_hits)
+
+        terminal_width = None
+        if sys.stdout.isatty():
+            terminal_width = get_terminal_size()[0]
+
+        print_results(hits, terminal_width=terminal_width)
+        if pypi_hits:
+            return SUCCESS
+        return NO_MATCHES_FOUND
+
+    def search(self, query, options):
+        index_url = options.index
+        with self._build_session(options) as session:
+            transport = PipXmlrpcTransport(index_url, session)
+            pypi = xmlrpc_client.ServerProxy(index_url, transport)
+            hits = pypi.search({'name': query, 'summary': query}, 'or')
+            return hits
+
+
+def transform_hits(hits):
+    """
+    The list from PyPI is really a list of versions. We want a list of
+    packages with the list of versions stored inline. This converts the
+    list from PyPI into one we can use.
+    """
+    packages = OrderedDict()
+    for hit in hits:
+        name = hit['name']
+        summary = hit['summary']
+        version = hit['version']
+
+        if name not in packages:
+            packages[name] = {
+                'name': name,
+                'summary': summary,
+                'versions': [version],
+            }
+        else:
+            packages[name]['versions'].append(version)
+
+            # if this is the highest version, replace the stored summary
+            if version == highest_version(packages[name]['versions']):
+                packages[name]['summary'] = summary
+
+    return list(packages.values())
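+
+
+# Illustrative sketch (not pip code): several per-version hits for one name
+# collapse into a single package entry whose summary tracks the highest
+# version seen, e.g.
+#
+#     transform_hits([
+#         {'name': 'foo', 'summary': 'old', 'version': '1.0'},
+#         {'name': 'foo', 'summary': 'new', 'version': '2.0'},
+#     ])
+#     # -> [{'name': 'foo', 'summary': 'new', 'versions': ['1.0', '2.0']}]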
+
+
+def print_results(hits, name_column_width=None, terminal_width=None):
+    if not hits:
+        return
+    if name_column_width is None:
+        name_column_width = max([
+            len(hit['name']) + len(highest_version(hit.get('versions', ['-'])))
+            for hit in hits
+        ]) + 4
+
+    installed_packages = [p.project_name for p in pkg_resources.working_set]
+    for hit in hits:
+        name = hit['name']
+        summary = hit['summary'] or ''
+        latest = highest_version(hit.get('versions', ['-']))
+        if terminal_width is not None:
+            target_width = terminal_width - name_column_width - 5
+            if target_width > 10:
+                # wrap and indent summary to fit terminal
+                summary = textwrap.wrap(summary, target_width)
+                summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
+
+        line = '%-*s - %s' % (name_column_width,
+                              '%s (%s)' % (name, latest), summary)
+        try:
+            logger.info(line)
+            if name in installed_packages:
+                dist = pkg_resources.get_distribution(name)
+                with indent_log():
+                    if dist.version == latest:
+                        logger.info('INSTALLED: %s (latest)', dist.version)
+                    else:
+                        logger.info('INSTALLED: %s', dist.version)
+                        logger.info('LATEST:    %s', latest)
+        except UnicodeEncodeError:
+            pass
+
+
+def highest_version(versions):
+    return max(versions, key=parse_version)
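+
+
+# Illustrative sketch (not pip code): parse_version applies PEP 440
+# ordering, so a pre-release of a higher version still wins, e.g.
+#
+#     highest_version(['1.0', '1.10', '2.0b1'])  # -> '2.0b1'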
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/show.py b/lib/python3.7/site-packages/pip/_internal/commands/show.py
new file mode 100644
index 0000000000000000000000000000000000000000..a18a9020c412d7f34462852ac23d0f2193f17f9f
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/show.py
@@ -0,0 +1,168 @@
+from __future__ import absolute_import
+
+import logging
+import os
+from email.parser import FeedParser
+
+from pip._vendor import pkg_resources
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+
+logger = logging.getLogger(__name__)
+
+
+class ShowCommand(Command):
+    """
+    Show information about one or more installed packages.
+
+    The output is in RFC-compliant mail header format.
+    """
+    name = 'show'
+    usage = """
+      %prog [options] <package> ..."""
+    summary = 'Show information about installed packages.'
+    ignore_require_venv = True
+
+    def __init__(self, *args, **kw):
+        super(ShowCommand, self).__init__(*args, **kw)
+        self.cmd_opts.add_option(
+            '-f', '--files',
+            dest='files',
+            action='store_true',
+            default=False,
+            help='Show the full list of installed files for each package.')
+
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options, args):
+        if not args:
+            logger.warning('ERROR: Please provide a package name or names.')
+            return ERROR
+        query = args
+
+        results = search_packages_info(query)
+        if not print_results(
+                results, list_files=options.files, verbose=options.verbose):
+            return ERROR
+        return SUCCESS
+
+
+def search_packages_info(query):
+    """
+    Gather details from installed distributions: distribution name,
+    version, location, and installed files. Listing installed files
+    requires a pip-generated 'installed-files.txt' in the distribution's
+    '.egg-info' directory.
+    """
+    installed = {}
+    for p in pkg_resources.working_set:
+        installed[canonicalize_name(p.project_name)] = p
+
+    query_names = [canonicalize_name(name) for name in query]
+
+    for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
+        package = {
+            'name': dist.project_name,
+            'version': dist.version,
+            'location': dist.location,
+            'requires': [dep.project_name for dep in dist.requires()],
+        }
+        file_list = None
+        metadata = None
+        if isinstance(dist, pkg_resources.DistInfoDistribution):
+            # RECORD should be part of the .dist-info metadata
+            if dist.has_metadata('RECORD'):
+                lines = dist.get_metadata_lines('RECORD')
+                paths = [line.split(',')[0] for line in lines]
+                paths = [os.path.join(dist.location, p) for p in paths]
+                file_list = [os.path.relpath(p, dist.location) for p in paths]
+
+            if dist.has_metadata('METADATA'):
+                metadata = dist.get_metadata('METADATA')
+        else:
+            # Otherwise use pip's log for .egg-info distributions
+            if dist.has_metadata('installed-files.txt'):
+                paths = dist.get_metadata_lines('installed-files.txt')
+                paths = [os.path.join(dist.egg_info, p) for p in paths]
+                file_list = [os.path.relpath(p, dist.location) for p in paths]
+
+            if dist.has_metadata('PKG-INFO'):
+                metadata = dist.get_metadata('PKG-INFO')
+
+        if dist.has_metadata('entry_points.txt'):
+            entry_points = dist.get_metadata_lines('entry_points.txt')
+            package['entry_points'] = entry_points
+
+        if dist.has_metadata('INSTALLER'):
+            for line in dist.get_metadata_lines('INSTALLER'):
+                if line.strip():
+                    package['installer'] = line.strip()
+                    break
+
+        # @todo: Should pkg_resources.Distribution have a
+        # `get_pkg_info` method?
+        feed_parser = FeedParser()
+        feed_parser.feed(metadata)
+        pkg_info_dict = feed_parser.close()
+        for key in ('metadata-version', 'summary',
+                    'home-page', 'author', 'author-email', 'license'):
+            package[key] = pkg_info_dict.get(key)
+
+        # It looks like FeedParser cannot deal with repeated headers
+        classifiers = []
+        for line in metadata.splitlines():
+            if line.startswith('Classifier: '):
+                classifiers.append(line[len('Classifier: '):])
+        package['classifiers'] = classifiers
+
+        if file_list:
+            package['files'] = sorted(file_list)
+        yield package
+
+
+def print_results(distributions, list_files=False, verbose=False):
+    """
+    Print the information from the installed distributions found.
+    """
+    results_printed = False
+    for i, dist in enumerate(distributions):
+        results_printed = True
+        if i > 0:
+            logger.info("---")
+
+        name = dist.get('name', '')
+        required_by = [
+            pkg.project_name for pkg in pkg_resources.working_set
+            if name in [required.name for required in pkg.requires()]
+        ]
+
+        logger.info("Name: %s", name)
+        logger.info("Version: %s", dist.get('version', ''))
+        logger.info("Summary: %s", dist.get('summary', ''))
+        logger.info("Home-page: %s", dist.get('home-page', ''))
+        logger.info("Author: %s", dist.get('author', ''))
+        logger.info("Author-email: %s", dist.get('author-email', ''))
+        logger.info("License: %s", dist.get('license', ''))
+        logger.info("Location: %s", dist.get('location', ''))
+        logger.info("Requires: %s", ', '.join(dist.get('requires', [])))
+        logger.info("Required-by: %s", ', '.join(required_by))
+
+        if verbose:
+            logger.info("Metadata-Version: %s",
+                        dist.get('metadata-version', ''))
+            logger.info("Installer: %s", dist.get('installer', ''))
+            logger.info("Classifiers:")
+            for classifier in dist.get('classifiers', []):
+                logger.info("  %s", classifier)
+            logger.info("Entry-points:")
+            for entry in dist.get('entry_points', []):
+                logger.info("  %s", entry.strip())
+        if list_files:
+            logger.info("Files:")
+            for line in dist.get('files', []):
+                logger.info("  %s", line.strip())
+            if "files" not in dist:
+                logger.info("Cannot locate installed-files.txt")
+    return results_printed
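+
+
+# Illustrative sketch (not pip code): for a single package the default
+# (non-verbose) output looks roughly like
+#
+#     Name: requests
+#     Version: 2.21.0
+#     Summary: Python HTTP for Humans.
+#     ...
+#     Requires: chardet, idna, urllib3, certifi
+#     Required-by: ...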
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/uninstall.py b/lib/python3.7/site-packages/pip/_internal/commands/uninstall.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cd6f54bd8683a64effa0ef00b437f8f9658ccff
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/uninstall.py
@@ -0,0 +1,78 @@
+from __future__ import absolute_import
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.cli.base_command import Command
+from pip._internal.exceptions import InstallationError
+from pip._internal.req import parse_requirements
+from pip._internal.req.constructors import install_req_from_line
+from pip._internal.utils.misc import protect_pip_from_modification_on_windows
+
+
+class UninstallCommand(Command):
+    """
+    Uninstall packages.
+
+    pip is able to uninstall most installed packages. Known exceptions are:
+
+    - Pure distutils packages installed with ``python setup.py install``, which
+      leave behind no metadata to determine what files were installed.
+    - Script wrappers installed by ``python setup.py develop``.
+    """
+    name = 'uninstall'
+    usage = """
+      %prog [options] <package> ...
+      %prog [options] -r <requirements file> ..."""
+    summary = 'Uninstall packages.'
+
+    def __init__(self, *args, **kw):
+        super(UninstallCommand, self).__init__(*args, **kw)
+        self.cmd_opts.add_option(
+            '-r', '--requirement',
+            dest='requirements',
+            action='append',
+            default=[],
+            metavar='file',
+            help='Uninstall all the packages listed in the given requirements '
+                 'file. This option can be used multiple times.',
+        )
+        self.cmd_opts.add_option(
+            '-y', '--yes',
+            dest='yes',
+            action='store_true',
+            help="Don't ask for confirmation of uninstall deletions.")
+
+        self.parser.insert_option_group(0, self.cmd_opts)
+
+    def run(self, options, args):
+        with self._build_session(options) as session:
+            reqs_to_uninstall = {}
+            for name in args:
+                req = install_req_from_line(
+                    name, isolated=options.isolated_mode,
+                )
+                if req.name:
+                    reqs_to_uninstall[canonicalize_name(req.name)] = req
+            for filename in options.requirements:
+                for req in parse_requirements(
+                        filename,
+                        options=options,
+                        session=session):
+                    if req.name:
+                        reqs_to_uninstall[canonicalize_name(req.name)] = req
+            if not reqs_to_uninstall:
+                raise InstallationError(
+                    'You must give at least one requirement to %(name)s (see '
+                    '"pip help %(name)s")' % dict(name=self.name)
+                )
+
+            protect_pip_from_modification_on_windows(
+                modifying_pip="pip" in reqs_to_uninstall
+            )
+
+            for req in reqs_to_uninstall.values():
+                uninstall_pathset = req.uninstall(
+                    auto_confirm=options.yes, verbose=self.verbosity > 0,
+                )
+                if uninstall_pathset:
+                    uninstall_pathset.commit()
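+
+
+# Illustrative sketch (not pip code): requirement names are canonicalized
+# before being collected, so spelling variants map to a single entry, e.g.
+#
+#     from pip._vendor.packaging.utils import canonicalize_name
+#     canonicalize_name('Django')          # -> 'django'
+#     canonicalize_name('zope.interface')  # -> 'zope-interface'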
diff --git a/lib/python3.7/site-packages/pip/_internal/commands/wheel.py b/lib/python3.7/site-packages/pip/_internal/commands/wheel.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd72a3df1a76b271538183f5bb2c4f0acfa3e747
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/commands/wheel.py
@@ -0,0 +1,186 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import logging
+import os
+
+from pip._internal.cache import WheelCache
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.base_command import RequirementCommand
+from pip._internal.exceptions import CommandError, PreviousBuildDirError
+from pip._internal.operations.prepare import RequirementPreparer
+from pip._internal.req import RequirementSet
+from pip._internal.req.req_tracker import RequirementTracker
+from pip._internal.resolve import Resolver
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.wheel import WheelBuilder
+
+logger = logging.getLogger(__name__)
+
+
+class WheelCommand(RequirementCommand):
+    """
+    Build Wheel archives for your requirements and dependencies.
+
+    Wheel is a built-package format, and offers the advantage of not
+    recompiling your software during every install. For more details, see the
+    wheel docs: https://wheel.readthedocs.io/en/latest/
+
+    Requirements: setuptools>=0.8, and wheel.
+
+    'pip wheel' uses the bdist_wheel setuptools extension from the wheel
+    package to build individual wheels.
+
+    """
+
+    name = 'wheel'
+    usage = """
+      %prog [options] <requirement specifier> ...
+      %prog [options] -r <requirements file> ...
+      %prog [options] [-e] <vcs project url> ...
+      %prog [options] [-e] <local project path> ...
+      %prog [options] <archive url/path> ..."""
+
+    summary = 'Build wheels from your requirements.'
+
+    def __init__(self, *args, **kw):
+        super(WheelCommand, self).__init__(*args, **kw)
+
+        cmd_opts = self.cmd_opts
+
+        cmd_opts.add_option(
+            '-w', '--wheel-dir',
+            dest='wheel_dir',
+            metavar='dir',
+            default=os.curdir,
+            help=("Build wheels into <dir>, where the default is the "
+                  "current working directory."),
+        )
+        cmd_opts.add_option(cmdoptions.no_binary())
+        cmd_opts.add_option(cmdoptions.only_binary())
+        cmd_opts.add_option(cmdoptions.prefer_binary())
+        cmd_opts.add_option(
+            '--build-option',
+            dest='build_options',
+            metavar='options',
+            action='append',
+            help="Extra arguments to be supplied to 'setup.py bdist_wheel'.",
+        )
+        cmd_opts.add_option(cmdoptions.no_build_isolation())
+        cmd_opts.add_option(cmdoptions.use_pep517())
+        cmd_opts.add_option(cmdoptions.no_use_pep517())
+        cmd_opts.add_option(cmdoptions.constraints())
+        cmd_opts.add_option(cmdoptions.editable())
+        cmd_opts.add_option(cmdoptions.requirements())
+        cmd_opts.add_option(cmdoptions.src())
+        cmd_opts.add_option(cmdoptions.ignore_requires_python())
+        cmd_opts.add_option(cmdoptions.no_deps())
+        cmd_opts.add_option(cmdoptions.build_dir())
+        cmd_opts.add_option(cmdoptions.progress_bar())
+
+        cmd_opts.add_option(
+            '--global-option',
+            dest='global_options',
+            action='append',
+            metavar='options',
+            help="Extra global options to be supplied to the setup.py "
+            "call before the 'bdist_wheel' command.")
+
+        cmd_opts.add_option(
+            '--pre',
+            action='store_true',
+            default=False,
+            help=("Include pre-release and development versions. By default, "
+                  "pip only finds stable versions."),
+        )
+
+        cmd_opts.add_option(cmdoptions.no_clean())
+        cmd_opts.add_option(cmdoptions.require_hashes())
+
+        index_opts = cmdoptions.make_option_group(
+            cmdoptions.index_group,
+            self.parser,
+        )
+
+        self.parser.insert_option_group(0, index_opts)
+        self.parser.insert_option_group(0, cmd_opts)
+
+    def run(self, options, args):
+        cmdoptions.check_install_build_global(options)
+
+        index_urls = [options.index_url] + options.extra_index_urls
+        if options.no_index:
+            logger.debug('Ignoring indexes: %s', ','.join(index_urls))
+            index_urls = []
+
+        if options.build_dir:
+            options.build_dir = os.path.abspath(options.build_dir)
+
+        options.src_dir = os.path.abspath(options.src_dir)
+
+        with self._build_session(options) as session:
+            finder = self._build_package_finder(options, session)
+            build_delete = (not (options.no_clean or options.build_dir))
+            wheel_cache = WheelCache(options.cache_dir, options.format_control)
+
+            with RequirementTracker() as req_tracker, TempDirectory(
+                options.build_dir, delete=build_delete, kind="wheel"
+            ) as directory:
+
+                requirement_set = RequirementSet(
+                    require_hashes=options.require_hashes,
+                )
+
+                try:
+                    self.populate_requirement_set(
+                        requirement_set, args, options, finder, session,
+                        self.name, wheel_cache
+                    )
+
+                    preparer = RequirementPreparer(
+                        build_dir=directory.path,
+                        src_dir=options.src_dir,
+                        download_dir=None,
+                        wheel_download_dir=options.wheel_dir,
+                        progress_bar=options.progress_bar,
+                        build_isolation=options.build_isolation,
+                        req_tracker=req_tracker,
+                    )
+
+                    resolver = Resolver(
+                        preparer=preparer,
+                        finder=finder,
+                        session=session,
+                        wheel_cache=wheel_cache,
+                        use_user_site=False,
+                        upgrade_strategy="to-satisfy-only",
+                        force_reinstall=False,
+                        ignore_dependencies=options.ignore_dependencies,
+                        ignore_requires_python=options.ignore_requires_python,
+                        ignore_installed=True,
+                        isolated=options.isolated_mode,
+                        use_pep517=options.use_pep517
+                    )
+                    resolver.resolve(requirement_set)
+
+                    # build wheels
+                    wb = WheelBuilder(
+                        finder, preparer, wheel_cache,
+                        build_options=options.build_options or [],
+                        global_options=options.global_options or [],
+                        no_clean=options.no_clean,
+                    )
+                    build_failures = wb.build(
+                        requirement_set.requirements.values(), session=session,
+                    )
+                    if len(build_failures) != 0:
+                        raise CommandError(
+                            "Failed to build one or more wheels"
+                        )
+                except PreviousBuildDirError:
+                    options.no_clean = True
+                    raise
+                finally:
+                    if not options.no_clean:
+                        requirement_set.cleanup_files()
+                        wheel_cache.cleanup()
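+
+
+# Illustrative invocations that exercise the options wired up above (hedged;
+# the wheelhouse path and requirements file are examples only):
+#   pip wheel -w ./wheelhouse -r requirements.txt
+#   pip wheel --no-deps ./local-project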
diff --git a/lib/python3.7/site-packages/pip/_internal/configuration.py b/lib/python3.7/site-packages/pip/_internal/configuration.py
new file mode 100644
index 0000000000000000000000000000000000000000..b199fa7007dffc29a67425bfb2b1ccadc366f096
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/configuration.py
@@ -0,0 +1,384 @@
+"""Configuration management setup
+
+Some terminology:
+- name
+  As written in config files.
+- value
+  Value associated with a name
+- key
+  Name combined with its section (section.name)
+- variant
+  A single word describing where the configuration key-value pair came from
+"""
+
+import locale
+import logging
+import os
+
+from pip._vendor.six.moves import configparser
+
+from pip._internal.exceptions import (
+    ConfigurationError, ConfigurationFileCouldNotBeLoaded,
+)
+from pip._internal.locations import (
+    global_config_files, legacy_config_file, new_config_file, site_config_file,
+)
+from pip._internal.utils.misc import ensure_dir, enum
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import (
+        Any, Dict, Iterable, List, NewType, Optional, Tuple
+    )
+
+    RawConfigParser = configparser.RawConfigParser  # Shorthand
+    Kind = NewType("Kind", str)
+
+logger = logging.getLogger(__name__)
+
+
+# NOTE: Maybe use the optionxform attribute to normalize key names.
+def _normalize_name(name):
+    # type: (str) -> str
+    """Make a name consistent regardless of source (environment or file)
+    """
+    name = name.lower().replace('_', '-')
+    if name.startswith('--'):
+        name = name[2:]  # only prefer long opts
+    return name
+
+
+def _disassemble_key(name):
+    # type: (str) -> List[str]
+    return name.split(".", 1)
+
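+# For illustration (hedged; these helpers are internal): option spellings are
+# mapped onto one canonical key form, e.g.
+#   _normalize_name('--GLOBAL_INDEX_URL')  -> 'global-index-url'
+#   _disassemble_key('global.index-url')   -> ['global', 'index-url']
+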
+
+# The kinds of configurations there are.
+kinds = enum(
+    USER="user",        # User Specific
+    GLOBAL="global",    # System Wide
+    SITE="site",        # [Virtual] Environment Specific
+    ENV="env",          # from PIP_CONFIG_FILE
+    ENV_VAR="env-var",  # from Environment Variables
+)
+
+
+class Configuration(object):
+    """Handles management of configuration.
+
+    Provides an interface to accessing and managing configuration files.
+
+    This class provides an API that takes "section.key-name" style keys and
+    stores the value associated with them as "key-name" under the section
+    "section".
+
+    This allows for a clean interface wherein both the section and the
+    key-name are preserved in an easy-to-manage form in the configuration
+    files, and the stored data stays readable.
+    """
+
+    def __init__(self, isolated, load_only=None):
+        # type: (bool, Kind) -> None
+        super(Configuration, self).__init__()
+
+        _valid_load_only = [kinds.USER, kinds.GLOBAL, kinds.SITE, None]
+        if load_only not in _valid_load_only:
+            raise ConfigurationError(
+                "Got invalid value for load_only - should be one of {}".format(
+                    ", ".join(map(repr, _valid_load_only[:-1]))
+                )
+            )
+        self.isolated = isolated  # type: bool
+        self.load_only = load_only  # type: Optional[Kind]
+
+        # The order here determines the override order.
+        self._override_order = [
+            kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR
+        ]
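+        # Practically, when the same key is set in several variants, the
+        # variant listed later here wins (see _dictionary below): environment
+        # variables override everything, global config is weakest.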
+
+        self._ignore_env_names = ["version", "help"]
+
+        # Because we keep track of where we got the data from
+        self._parsers = {
+            variant: [] for variant in self._override_order
+        }  # type: Dict[Kind, List[Tuple[str, RawConfigParser]]]
+        self._config = {
+            variant: {} for variant in self._override_order
+        }  # type: Dict[Kind, Dict[str, Any]]
+        self._modified_parsers = []  # type: List[Tuple[str, RawConfigParser]]
+
+    def load(self):
+        # type: () -> None
+        """Loads configuration from configuration files and environment
+        """
+        self._load_config_files()
+        if not self.isolated:
+            self._load_environment_vars()
+
+    def get_file_to_edit(self):
+        # type: () -> Optional[str]
+        """Returns the file with highest priority in configuration
+        """
+        assert self.load_only is not None, \
+            "A file to edit needs to be specified"
+
+        try:
+            return self._get_parser_to_modify()[0]
+        except IndexError:
+            return None
+
+    def items(self):
+        # type: () -> Iterable[Tuple[str, Any]]
+        """Returns key-value pairs like dict.items() representing the loaded
+        configuration
+        """
+        return self._dictionary.items()
+
+    def get_value(self, key):
+        # type: (str) -> Any
+        """Get a value from the configuration.
+        """
+        try:
+            return self._dictionary[key]
+        except KeyError:
+            raise ConfigurationError("No such key - {}".format(key))
+
+    def set_value(self, key, value):
+        # type: (str, Any) -> None
+        """Modify a value in the configuration.
+        """
+        self._ensure_have_load_only()
+
+        fname, parser = self._get_parser_to_modify()
+
+        if parser is not None:
+            section, name = _disassemble_key(key)
+
+            # Modify the parser and the configuration
+            if not parser.has_section(section):
+                parser.add_section(section)
+            parser.set(section, name, value)
+
+        self._config[self.load_only][key] = value
+        self._mark_as_modified(fname, parser)
+
+    def unset_value(self, key):
+        # type: (str) -> None
+        """Unset a value in the configuration.
+        """
+        self._ensure_have_load_only()
+
+        if key not in self._config[self.load_only]:
+            raise ConfigurationError("No such key - {}".format(key))
+
+        fname, parser = self._get_parser_to_modify()
+
+        if parser is not None:
+            section, name = _disassemble_key(key)
+
+            # Remove the key in the parser
+            modified_something = False
+            if parser.has_section(section):
+                # Returns whether the option was removed or not
+                modified_something = parser.remove_option(section, name)
+
+            if modified_something:
+                # name removed from parser, section may now be empty
+                section_iter = iter(parser.items(section))
+                try:
+                    val = next(section_iter)
+                except StopIteration:
+                    val = None
+
+                if val is None:
+                    parser.remove_section(section)
+
+                self._mark_as_modified(fname, parser)
+            else:
+                raise ConfigurationError(
+                    "Fatal Internal error [id=1]. Please report as a bug."
+                )
+
+        del self._config[self.load_only][key]
+
+    def save(self):
+        # type: () -> None
+        """Save the current in-memory state.
+        """
+        self._ensure_have_load_only()
+
+        for fname, parser in self._modified_parsers:
+            logger.info("Writing to %s", fname)
+
+            # Ensure directory exists.
+            ensure_dir(os.path.dirname(fname))
+
+            with open(fname, "w") as f:
+                parser.write(f)
+
+    #
+    # Private routines
+    #
+
+    def _ensure_have_load_only(self):
+        # type: () -> None
+        if self.load_only is None:
+            raise ConfigurationError("Needed a specific file to be modifying.")
+        logger.debug("Will be working with %s variant only", self.load_only)
+
+    @property
+    def _dictionary(self):
+        # type: () -> Dict[str, Any]
+        """A dictionary representing the loaded configuration.
+        """
+        # NOTE: Dictionaries are not populated if not loaded. So, conditionals
+        #       are not needed here.
+        retval = {}
+
+        for variant in self._override_order:
+            retval.update(self._config[variant])
+
+        return retval
+
+    def _load_config_files(self):
+        # type: () -> None
+        """Loads configuration from configuration files
+        """
+        config_files = dict(self._iter_config_files())
+        if config_files[kinds.ENV][0:1] == [os.devnull]:
+            logger.debug(
+                "Skipping loading configuration files due to "
+                "environment's PIP_CONFIG_FILE being os.devnull"
+            )
+            return
+
+        for variant, files in config_files.items():
+            for fname in files:
+                # If there's a specific variant set in `load_only`, load only
+                # that variant, not the others.
+                if self.load_only is not None and variant != self.load_only:
+                    logger.debug(
+                        "Skipping file '%s' (variant: %s)", fname, variant
+                    )
+                    continue
+
+                parser = self._load_file(variant, fname)
+
+                # Keeping track of the parsers used
+                self._parsers[variant].append((fname, parser))
+
+    def _load_file(self, variant, fname):
+        # type: (Kind, str) -> RawConfigParser
+        logger.debug("For variant '%s', will try loading '%s'", variant, fname)
+        parser = self._construct_parser(fname)
+
+        for section in parser.sections():
+            items = parser.items(section)
+            self._config[variant].update(self._normalized_keys(section, items))
+
+        return parser
+
+    def _construct_parser(self, fname):
+        # type: (str) -> RawConfigParser
+        parser = configparser.RawConfigParser()
+        # If there is no such file, don't bother reading it but create the
+        # parser anyway, to hold the data.
+        # Doing this is useful when modifying and saving files, where the
+        # target file may not exist yet.
+        if os.path.exists(fname):
+            try:
+                parser.read(fname)
+            except UnicodeDecodeError:
+                # See https://github.com/pypa/pip/issues/4963
+                raise ConfigurationFileCouldNotBeLoaded(
+                    reason="contains invalid {} characters".format(
+                        locale.getpreferredencoding(False)
+                    ),
+                    fname=fname,
+                )
+            except configparser.Error as error:
+                # See https://github.com/pypa/pip/issues/4893
+                raise ConfigurationFileCouldNotBeLoaded(error=error)
+        return parser
+
+    def _load_environment_vars(self):
+        # type: () -> None
+        """Loads configuration from environment variables
+        """
+        self._config[kinds.ENV_VAR].update(
+            self._normalized_keys(":env:", self._get_environ_vars())
+        )
+
+    def _normalized_keys(self, section, items):
+        # type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any]
+        """Normalizes items to construct a dictionary with normalized keys.
+
+        This routine is where the names become keys and are made the same
+        regardless of source - configuration files or environment.
+        """
+        normalized = {}
+        for name, val in items:
+            key = section + "." + _normalize_name(name)
+            normalized[key] = val
+        return normalized
+
+    def _get_environ_vars(self):
+        # type: () -> Iterable[Tuple[str, str]]
+        """Returns a generator with all environmental vars with prefix PIP_"""
+        for key, val in os.environ.items():
+            should_be_yielded = (
+                key.startswith("PIP_") and
+                key[4:].lower() not in self._ignore_env_names
+            )
+            if should_be_yielded:
+                yield key[4:].lower(), val
+
+    # XXX: This is patched in the tests.
+    def _iter_config_files(self):
+        # type: () -> Iterable[Tuple[Kind, List[str]]]
+        """Yields variant and configuration files associated with it.
+
+        This should be treated like items of a dictionary.
+        """
+        # SMELL: Move the conditions out of this function
+
+        # environment variables have the lowest priority
+        config_file = os.environ.get('PIP_CONFIG_FILE', None)
+        if config_file is not None:
+            yield kinds.ENV, [config_file]
+        else:
+            yield kinds.ENV, []
+
+        # at the base we have any global configuration
+        yield kinds.GLOBAL, list(global_config_files)
+
+        # per-user configuration next
+        should_load_user_config = not self.isolated and not (
+            config_file and os.path.exists(config_file)
+        )
+        if should_load_user_config:
+            # The legacy config file is overridden by the new config file
+            yield kinds.USER, [legacy_config_file, new_config_file]
+
+        # finally, virtualenv configuration, trumping the others
+        yield kinds.SITE, [site_config_file]
+
+    def _get_parser_to_modify(self):
+        # type: () -> Tuple[str, RawConfigParser]
+        # Determine which parser to modify
+        parsers = self._parsers[self.load_only]
+        if not parsers:
+            # This should not happen if everything works correctly.
+            raise ConfigurationError(
+                "Fatal Internal error [id=2]. Please report as a bug."
+            )
+
+        # Use the highest priority parser.
+        return parsers[-1]
+
+    # XXX: This is patched in the tests.
+    def _mark_as_modified(self, fname, parser):
+        # type: (str, RawConfigParser) -> None
+        file_parser_tuple = (fname, parser)
+        if file_parser_tuple not in self._modified_parsers:
+            self._modified_parsers.append(file_parser_tuple)
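+
+
+# Minimal usage sketch (hedged; Configuration is a pip-internal API and may
+# change between releases):
+#   from pip._internal.configuration import Configuration, kinds
+#   config = Configuration(isolated=False, load_only=kinds.USER)
+#   config.load()
+#   config.set_value('global.timeout', '60')
+#   config.save()  # writes the user-level config file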
diff --git a/lib/python3.7/site-packages/pip/_internal/download.py b/lib/python3.7/site-packages/pip/_internal/download.py
new file mode 100644
index 0000000000000000000000000000000000000000..2683cf082934788125cf2373f5406d36b3931f82
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/download.py
@@ -0,0 +1,979 @@
+from __future__ import absolute_import
+
+import cgi
+import email.utils
+import getpass
+import json
+import logging
+import mimetypes
+import os
+import platform
+import re
+import shutil
+import sys
+
+from pip._vendor import requests, six, urllib3
+from pip._vendor.cachecontrol import CacheControlAdapter
+from pip._vendor.cachecontrol.caches import FileCache
+from pip._vendor.lockfile import LockError
+from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
+from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
+from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
+from pip._vendor.requests.structures import CaseInsensitiveDict
+from pip._vendor.requests.utils import get_netrc_auth
+# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is
+#       why we ignore the type on this import
+from pip._vendor.six.moves import xmlrpc_client  # type: ignore
+from pip._vendor.six.moves.urllib import parse as urllib_parse
+from pip._vendor.six.moves.urllib import request as urllib_request
+from pip._vendor.urllib3.util import IS_PYOPENSSL
+
+import pip
+from pip._internal.exceptions import HashMismatch, InstallationError
+from pip._internal.locations import write_delete_marker_file
+from pip._internal.models.index import PyPI
+from pip._internal.utils.encoding import auto_decode
+from pip._internal.utils.filesystem import check_path_owner
+from pip._internal.utils.glibc import libc_ver
+from pip._internal.utils.misc import (
+    ARCHIVE_EXTENSIONS, ask_path_exists, backup_dir, consume, display_path,
+    format_size, get_installed_version, rmtree, split_auth_from_netloc,
+    splitext, unpack_file,
+)
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.utils.ui import DownloadProgressProvider
+from pip._internal.vcs import vcs
+
+if MYPY_CHECK_RUNNING:
+    from typing import (
+        Optional, Tuple, Dict, IO, Text, Union
+    )
+    from pip._internal.models.link import Link
+    from pip._internal.utils.hashes import Hashes
+    from pip._internal.vcs import AuthInfo
+
+try:
+    import ssl  # noqa
+except ImportError:
+    ssl = None
+
+HAS_TLS = (ssl is not None) or IS_PYOPENSSL
+
+__all__ = ['get_file_content',
+           'is_url', 'url_to_path', 'path_to_url',
+           'is_archive_file', 'unpack_vcs_link',
+           'unpack_file_url', 'is_vcs_url', 'is_file_url',
+           'unpack_http_url', 'unpack_url']
+
+
+logger = logging.getLogger(__name__)
+
+
+# These are environment variables present when running under various
+# CI systems.  For each variable, some CI systems that use the variable
+# are indicated.  The collection was chosen so that for each of a number
+# of popular systems, at least one of the environment variables is used.
+# This list is used to provide some indication of and lower bound for
+# CI traffic to PyPI.  Thus, it is okay if the list is not comprehensive.
+# For more background, see: https://github.com/pypa/pip/issues/5499
+CI_ENVIRONMENT_VARIABLES = (
+    # Azure Pipelines
+    'BUILD_BUILDID',
+    # Jenkins
+    'BUILD_ID',
+    # AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI
+    'CI',
+)
+
+
+def looks_like_ci():
+    # type: () -> bool
+    """
+    Return whether it looks like pip is running under CI.
+    """
+    # We don't use the method of checking for a tty (e.g. using isatty())
+    # because some CI systems mimic a tty (e.g. Travis CI).  Thus that
+    # method doesn't provide definitive information in either direction.
+    return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES)
+
+
+def user_agent():
+    """
+    Return a string representing the user agent.
+    """
+    data = {
+        "installer": {"name": "pip", "version": pip.__version__},
+        "python": platform.python_version(),
+        "implementation": {
+            "name": platform.python_implementation(),
+        },
+    }
+
+    if data["implementation"]["name"] == 'CPython':
+        data["implementation"]["version"] = platform.python_version()
+    elif data["implementation"]["name"] == 'PyPy':
+        if sys.pypy_version_info.releaselevel == 'final':
+            pypy_version_info = sys.pypy_version_info[:3]
+        else:
+            pypy_version_info = sys.pypy_version_info
+        data["implementation"]["version"] = ".".join(
+            [str(x) for x in pypy_version_info]
+        )
+    elif data["implementation"]["name"] == 'Jython':
+        # Complete Guess
+        data["implementation"]["version"] = platform.python_version()
+    elif data["implementation"]["name"] == 'IronPython':
+        # Complete Guess
+        data["implementation"]["version"] = platform.python_version()
+
+    if sys.platform.startswith("linux"):
+        from pip._vendor import distro
+        distro_infos = dict(filter(
+            lambda x: x[1],
+            zip(["name", "version", "id"], distro.linux_distribution()),
+        ))
+        libc = dict(filter(
+            lambda x: x[1],
+            zip(["lib", "version"], libc_ver()),
+        ))
+        if libc:
+            distro_infos["libc"] = libc
+        if distro_infos:
+            data["distro"] = distro_infos
+
+    if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
+        data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}
+
+    if platform.system():
+        data.setdefault("system", {})["name"] = platform.system()
+
+    if platform.release():
+        data.setdefault("system", {})["release"] = platform.release()
+
+    if platform.machine():
+        data["cpu"] = platform.machine()
+
+    if HAS_TLS:
+        data["openssl_version"] = ssl.OPENSSL_VERSION
+
+    setuptools_version = get_installed_version("setuptools")
+    if setuptools_version is not None:
+        data["setuptools_version"] = setuptools_version
+
+    # Use None rather than False so as not to give the impression that
+    # pip knows it is not being run under CI.  Rather, it is a null or
+    # inconclusive result.  Also, we include some value rather than no
+    # value to make it easier to know that the check has been run.
+    data["ci"] = True if looks_like_ci() else None
+
+    user_data = os.environ.get("PIP_USER_AGENT_USER_DATA")
+    if user_data is not None:
+        data["user_data"] = user_data
+
+    return "{data[installer][name]}/{data[installer][version]} {json}".format(
+        data=data,
+        json=json.dumps(data, separators=(",", ":"), sort_keys=True),
+    )
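+
+
+# The resulting User-Agent header looks like (illustrative; fields vary by
+# platform and environment):
+#   pip/X.Y.Z {"ci":null,"cpu":"x86_64","implementation":{...},...}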
+
+
+class MultiDomainBasicAuth(AuthBase):
+
+    def __init__(self, prompting=True):
+        # type: (bool) -> None
+        self.prompting = prompting
+        self.passwords = {}  # type: Dict[str, AuthInfo]
+
+    def __call__(self, req):
+        parsed = urllib_parse.urlparse(req.url)
+
+        # Split the credentials from the netloc.
+        netloc, url_user_password = split_auth_from_netloc(parsed.netloc)
+
+        # Set the url of the request to the url without any credentials
+        req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
+
+        # Use any stored credentials that we have for this netloc
+        username, password = self.passwords.get(netloc, (None, None))
+
+        # Use the credentials embedded in the url if we have none stored
+        if username is None:
+            username, password = url_user_password
+
+        # Get creds from netrc if we still don't have them
+        if username is None and password is None:
+            netrc_auth = get_netrc_auth(req.url)
+            username, password = netrc_auth if netrc_auth else (None, None)
+
+        if username or password:
+            # Store the username and password
+            self.passwords[netloc] = (username, password)
+
+            # Send the basic auth with this request
+            req = HTTPBasicAuth(username or "", password or "")(req)
+
+        # Attach a hook to handle 401 responses
+        req.register_hook("response", self.handle_401)
+
+        return req
+
+    def handle_401(self, resp, **kwargs):
+        # We only care about 401 responses; anything else we just pass
+        # through as the actual response.
+        if resp.status_code != 401:
+            return resp
+
+        # We are not able to prompt the user so simply return the response
+        if not self.prompting:
+            return resp
+
+        parsed = urllib_parse.urlparse(resp.url)
+
+        # Prompt the user for a new username and password
+        username = six.moves.input("User for %s: " % parsed.netloc)
+        password = getpass.getpass("Password: ")
+
+        # Store the new username and password to use for future requests
+        if username or password:
+            self.passwords[parsed.netloc] = (username, password)
+
+        # Consume content and release the original connection to allow our new
+        #   request to reuse the same one.
+        resp.content
+        resp.raw.release_conn()
+
+        # Add our new username and password to the request
+        req = HTTPBasicAuth(username or "", password or "")(resp.request)
+        req.register_hook("response", self.warn_on_401)
+
+        # Send our new request
+        new_resp = resp.connection.send(req, **kwargs)
+        new_resp.history.append(resp)
+
+        return new_resp
+
+    def warn_on_401(self, resp, **kwargs):
+        # warn user that they provided incorrect credentials
+        if resp.status_code == 401:
+            logger.warning('401 Error, Credentials not correct for %s',
+                           resp.request.url)
+
+
+class LocalFSAdapter(BaseAdapter):
+
+    def send(self, request, stream=None, timeout=None, verify=None, cert=None,
+             proxies=None):
+        pathname = url_to_path(request.url)
+
+        resp = Response()
+        resp.status_code = 200
+        resp.url = request.url
+
+        try:
+            stats = os.stat(pathname)
+        except OSError as exc:
+            resp.status_code = 404
+            resp.raw = exc
+        else:
+            modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
+            content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
+            resp.headers = CaseInsensitiveDict({
+                "Content-Type": content_type,
+                "Content-Length": stats.st_size,
+                "Last-Modified": modified,
+            })
+
+            resp.raw = open(pathname, "rb")
+            resp.close = resp.raw.close
+
+        return resp
+
+    def close(self):
+        pass
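+
+
+# With this adapter mounted on a session (see PipSession below), file:// URLs
+# go through the normal requests interface, e.g.
+#   session.get('file:///tmp/requirements.txt')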
+
+
+class SafeFileCache(FileCache):
+    """
+    A file-based cache which is safe to use even when the target directory
+    may not be accessible or writable.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(SafeFileCache, self).__init__(*args, **kwargs)
+
+        # Check to ensure that the directory containing our cache directory
+        # is owned by the user currently executing pip. If it does not exist
+        # we will check the parent directory until we find one that does
+        # exist. If it is not owned by the user executing pip then we will
+        # disable the cache and log a warning.
+        if not check_path_owner(self.directory):
+            logger.warning(
+                "The directory '%s' or its parent directory is not owned by "
+                "the current user and the cache has been disabled. Please "
+                "check the permissions and owner of that directory. If "
+                "executing pip with sudo, you may want sudo's -H flag.",
+                self.directory,
+            )
+
+            # Set our directory to None to disable the Cache
+            self.directory = None
+
+    def get(self, *args, **kwargs):
+        # If we don't have a directory, then the cache should be a no-op.
+        if self.directory is None:
+            return
+
+        try:
+            return super(SafeFileCache, self).get(*args, **kwargs)
+        except (LockError, OSError, IOError):
+            # We intentionally silence this error; if we can't access the
+            # cache then we can just skip caching and process the request as
+            # if caching wasn't enabled.
+            pass
+
+    def set(self, *args, **kwargs):
+        # If we don't have a directory, then the cache should be a no-op.
+        if self.directory is None:
+            return
+
+        try:
+            return super(SafeFileCache, self).set(*args, **kwargs)
+        except (LockError, OSError, IOError):
+            # We intentionally silence this error; if we can't access the
+            # cache then we can just skip caching and process the request as
+            # if caching wasn't enabled.
+            pass
+
+    def delete(self, *args, **kwargs):
+        # If we don't have a directory, then the cache should be a no-op.
+        if self.directory is None:
+            return
+
+        try:
+            return super(SafeFileCache, self).delete(*args, **kwargs)
+        except (LockError, OSError, IOError):
+            # We intentionally silence this error; if we can't access the
+            # cache then we can just skip caching and process the request as
+            # if caching wasn't enabled.
+            pass
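+
+
+# Hedged sketch: the cache degrades to a no-op instead of failing when the
+# directory is unusable, e.g.
+#   cache = SafeFileCache('/unwritable/dir', use_dir_lock=True)
+#   cache.get('key')  # returns None rather than raising on permission errors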
+
+
+class InsecureHTTPAdapter(HTTPAdapter):
+
+    def cert_verify(self, conn, url, verify, cert):
+        conn.cert_reqs = 'CERT_NONE'
+        conn.ca_certs = None
+
+
+class PipSession(requests.Session):
+
+    timeout = None  # type: Optional[int]
+
+    def __init__(self, *args, **kwargs):
+        retries = kwargs.pop("retries", 0)
+        cache = kwargs.pop("cache", None)
+        insecure_hosts = kwargs.pop("insecure_hosts", [])
+
+        super(PipSession, self).__init__(*args, **kwargs)
+
+        # Attach our User Agent to the request
+        self.headers["User-Agent"] = user_agent()
+
+        # Attach our Authentication handler to the session
+        self.auth = MultiDomainBasicAuth()
+
+        # Create our urllib3.Retry instance which will allow us to customize
+        # how we handle retries.
+        retries = urllib3.Retry(
+            # Set the total number of retries that a particular request can
+            # have.
+            total=retries,
+
+            # A 503 error from PyPI typically means that the Fastly -> Origin
+            # connection got interrupted in some way. A 503 error in general
+            # is typically considered a transient error so we'll go ahead and
+            # retry it.
+            # A 500 may indicate transient error in Amazon S3
+            # A 520 or 527 - may indicate transient error in CloudFlare
+            status_forcelist=[500, 503, 520, 527],
+
+            # Add a small amount of back off between failed requests in
+            # order to prevent hammering the service.
+            backoff_factor=0.25,
+        )
+
+        # We want to _only_ cache responses on securely fetched origins. We do
+        # this because we can't validate the response of an insecurely fetched
+        # origin, and we don't want someone to be able to poison the cache and
+        # require manual eviction from the cache to fix it.
+        if cache:
+            secure_adapter = CacheControlAdapter(
+                cache=SafeFileCache(cache, use_dir_lock=True),
+                max_retries=retries,
+            )
+        else:
+            secure_adapter = HTTPAdapter(max_retries=retries)
+
+        # Our Insecure HTTPAdapter disables HTTPS validation. It does not
+        # support caching (see above) so we'll use it for all http:// URLs as
+        # well as any https:// host that we've marked as ignoring TLS errors
+        # for.
+        insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
+
+        self.mount("https://", secure_adapter)
+        self.mount("http://", insecure_adapter)
+
+        # Enable file:// urls
+        self.mount("file://", LocalFSAdapter())
+
+        # We want to use a non-validating adapter for any requests which are
+        # deemed insecure.
+        for host in insecure_hosts:
+            self.mount("https://{}/".format(host), insecure_adapter)
+
+    def request(self, method, url, *args, **kwargs):
+        # Allow setting a default timeout on a session
+        kwargs.setdefault("timeout", self.timeout)
+
+        # Dispatch the actual request
+        return super(PipSession, self).request(method, url, *args, **kwargs)
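+
+
+# Hedged usage sketch (constructor keywords are the ones consumed above):
+#   session = PipSession(retries=3, cache='/tmp/pip-http-cache',
+#                        insecure_hosts=['internal.example.org'])
+#   session.timeout = 15
+#   resp = session.get('https://pypi.org/simple/')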
+
+
+def get_file_content(url, comes_from=None, session=None):
+    # type: (str, Optional[str], Optional[PipSession]) -> Tuple[str, Text]
+    """Gets the content of a file; it may be a filename, file: URL, or
+    http: URL.  Returns (location, content).  Content is unicode.
+
+    :param url:         File path or url.
+    :param comes_from:  Origin description of requirements.
+    :param session:     Instance of pip.download.PipSession.
+    """
+    if session is None:
+        raise TypeError(
+            "get_file_content() missing 1 required keyword argument: 'session'"
+        )
+
+    match = _scheme_re.search(url)
+    if match:
+        scheme = match.group(1).lower()
+        if (scheme == 'file' and comes_from and
+                comes_from.startswith('http')):
+            raise InstallationError(
+                'Requirements file %s references URL %s, which is local'
+                % (comes_from, url))
+        if scheme == 'file':
+            path = url.split(':', 1)[1]
+            path = path.replace('\\', '/')
+            match = _url_slash_drive_re.match(path)
+            if match:
+                path = match.group(1) + ':' + path.split('|', 1)[1]
+            path = urllib_parse.unquote(path)
+            if path.startswith('/'):
+                path = '/' + path.lstrip('/')
+            url = path
+        else:
+            # FIXME: catch some errors
+            resp = session.get(url)
+            resp.raise_for_status()
+            return resp.url, resp.text
+    try:
+        with open(url, 'rb') as f:
+            content = auto_decode(f.read())
+    except IOError as exc:
+        raise InstallationError(
+            'Could not open requirements file: %s' % str(exc)
+        )
+    return url, content
+
+
+_scheme_re = re.compile(r'^(http|https|file):', re.I)
+_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
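+
+
+# For illustration, get_file_content() accepts any of:
+#   get_file_content('requirements.txt', session=session)
+#   get_file_content('file:///tmp/requirements.txt', session=session)
+#   get_file_content('https://example.com/requirements.txt', session=session)
+# and always returns a (location, unicode content) pair.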
+
+
+def is_url(name):
+    # type: (Union[str, Text]) -> bool
+    """Returns true if the name looks like a URL"""
+    if ':' not in name:
+        return False
+    scheme = name.split(':', 1)[0].lower()
+    return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
+
+
+def url_to_path(url):
+    # type: (str) -> str
+    """
+    Convert a file: URL to a path.
+    """
+    assert url.startswith('file:'), (
+        "You can only turn file: urls into filenames (not %r)" % url)
+
+    _, netloc, path, _, _ = urllib_parse.urlsplit(url)
+
+    if not netloc or netloc == 'localhost':
+        # According to RFC 8089, same as empty authority.
+        netloc = ''
+    elif sys.platform == 'win32':
+        # If we have a UNC path, prepend UNC share notation.
+        netloc = '\\\\' + netloc
+    else:
+        raise ValueError(
+            'non-local file URIs are not supported on this platform: %r'
+            % url
+        )
+
+    path = urllib_request.url2pathname(netloc + path)
+    return path
+
+
+def path_to_url(path):
+    # type: (Union[str, Text]) -> str
+    """
+    Convert a path to a file: URL.  The path will be made absolute and have
+    quoted path parts.
+    """
+    path = os.path.normpath(os.path.abspath(path))
+    url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
+    return url
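+
+
+# Round-trip illustration on POSIX:
+#   path_to_url('/tmp/pkg')        -> 'file:///tmp/pkg'
+#   url_to_path('file:///tmp/pkg') -> '/tmp/pkg'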
+
+
+def is_archive_file(name):
+    # type: (str) -> bool
+    """Return True if `name` is a considered as an archive file."""
+    ext = splitext(name)[1].lower()
+    if ext in ARCHIVE_EXTENSIONS:
+        return True
+    return False
+
+
+def unpack_vcs_link(link, location):
+    vcs_backend = _get_used_vcs_backend(link)
+    vcs_backend.unpack(location)
+
+
+def _get_used_vcs_backend(link):
+    for backend in vcs.backends:
+        if link.scheme in backend.schemes:
+            vcs_backend = backend(link.url)
+            return vcs_backend
+
+
+def is_vcs_url(link):
+    # type: (Link) -> bool
+    return bool(_get_used_vcs_backend(link))
+
+
+def is_file_url(link):
+    # type: (Link) -> bool
+    return link.url.lower().startswith('file:')
+
+
+def is_dir_url(link):
+    # type: (Link) -> bool
+    """Return whether a file:// Link points to a directory.
+
+    ``link`` must not have any other scheme but file://. Call is_file_url()
+    first.
+
+    """
+    link_path = url_to_path(link.url_without_fragment)
+    return os.path.isdir(link_path)
+
+
+def _progress_indicator(iterable, *args, **kwargs):
+    return iterable
+
+
+def _download_url(
+    resp,  # type: Response
+    link,  # type: Link
+    content_file,  # type: IO
+    hashes,  # type: Hashes
+    progress_bar  # type: str
+):
+    # type: (...) -> None
+    try:
+        total_length = int(resp.headers['content-length'])
+    except (ValueError, KeyError, TypeError):
+        total_length = 0
+
+    cached_resp = getattr(resp, "from_cache", False)
+    if logger.getEffectiveLevel() > logging.INFO:
+        show_progress = False
+    elif cached_resp:
+        show_progress = False
+    elif total_length > (40 * 1000):
+        show_progress = True
+    elif not total_length:
+        show_progress = True
+    else:
+        show_progress = False
+
+    show_url = link.show_url
+
+    def resp_read(chunk_size):
+        try:
+            # Special case for urllib3.
+            for chunk in resp.raw.stream(
+                    chunk_size,
+                    # We use decode_content=False here because we don't
+                    # want urllib3 to mess with the raw bytes we get
+                    # from the server. If we decompress inside of
+                    # urllib3 then we cannot verify the checksum
+                    # because the checksum will be of the compressed
+                    # file. This breakage will only occur if the
+                    # server adds a Content-Encoding header, which
+                    # depends on how the server was configured:
+                    # - Some servers will notice that the file isn't a
+                    #   compressible file and will leave the file alone
+                    #   and with an empty Content-Encoding
+                    # - Some servers will notice that the file is
+                    #   already compressed and will leave the file
+                    #   alone and will add a Content-Encoding: gzip
+                    #   header
+                    # - Some servers won't notice anything at all and
+                    #   will take a file that's already been compressed
+                    #   and compress it again and set the
+                    #   Content-Encoding: gzip header
+                    #
+                    # By setting this not to decode automatically we
+                    # hope to eliminate problems with the second case.
+                    decode_content=False):
+                yield chunk
+        except AttributeError:
+            # Standard file-like object.
+            while True:
+                chunk = resp.raw.read(chunk_size)
+                if not chunk:
+                    break
+                yield chunk
+
+    def written_chunks(chunks):
+        for chunk in chunks:
+            content_file.write(chunk)
+            yield chunk
+
+    progress_indicator = _progress_indicator
+
+    if link.netloc == PyPI.netloc:
+        url = show_url
+    else:
+        url = link.url_without_fragment
+
+    if show_progress:  # We don't show progress on cached responses
+        progress_indicator = DownloadProgressProvider(progress_bar,
+                                                      max=total_length)
+        if total_length:
+            logger.info("Downloading %s (%s)", url, format_size(total_length))
+        else:
+            logger.info("Downloading %s", url)
+    elif cached_resp:
+        logger.info("Using cached %s", url)
+    else:
+        logger.info("Downloading %s", url)
+
+    logger.debug('Downloading from URL %s', link)
+
+    downloaded_chunks = written_chunks(
+        progress_indicator(
+            resp_read(CONTENT_CHUNK_SIZE),
+            CONTENT_CHUNK_SIZE
+        )
+    )
+    if hashes:
+        hashes.check_against_chunks(downloaded_chunks)
+    else:
+        consume(downloaded_chunks)
+
+
+def _copy_file(filename, location, link):
+    copy = True
+    download_location = os.path.join(location, link.filename)
+    if os.path.exists(download_location):
+        response = ask_path_exists(
+            'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort' %
+            display_path(download_location), ('i', 'w', 'b', 'a'))
+        if response == 'i':
+            copy = False
+        elif response == 'w':
+            logger.warning('Deleting %s', display_path(download_location))
+            os.remove(download_location)
+        elif response == 'b':
+            dest_file = backup_dir(download_location)
+            logger.warning(
+                'Backing up %s to %s',
+                display_path(download_location),
+                display_path(dest_file),
+            )
+            shutil.move(download_location, dest_file)
+        elif response == 'a':
+            sys.exit(-1)
+    if copy:
+        shutil.copy(filename, download_location)
+        logger.info('Saved %s', display_path(download_location))
+
+
+def unpack_http_url(
+    link,  # type: Link
+    location,  # type: str
+    download_dir=None,  # type: Optional[str]
+    session=None,  # type: Optional[PipSession]
+    hashes=None,  # type: Optional[Hashes]
+    progress_bar="on"  # type: str
+):
+    # type: (...) -> None
+    if session is None:
+        raise TypeError(
+            "unpack_http_url() missing 1 required keyword argument: 'session'"
+        )
+
+    with TempDirectory(kind="unpack") as temp_dir:
+        # If a download dir is specified, is the file already downloaded there?
+        already_downloaded_path = None
+        if download_dir:
+            already_downloaded_path = _check_download_dir(link,
+                                                          download_dir,
+                                                          hashes)
+
+        if already_downloaded_path:
+            from_path = already_downloaded_path
+            content_type = mimetypes.guess_type(from_path)[0]
+        else:
+            # let's download to a tmp dir
+            from_path, content_type = _download_http_url(link,
+                                                         session,
+                                                         temp_dir.path,
+                                                         hashes,
+                                                         progress_bar)
+
+        # unpack the archive to the build dir location. even when only
+        # downloading archives, they have to be unpacked to parse dependencies
+        unpack_file(from_path, location, content_type, link)
+
+        # a download dir is specified; let's copy the archive there
+        if download_dir and not already_downloaded_path:
+            _copy_file(from_path, download_dir, link)
+
+        if not already_downloaded_path:
+            os.unlink(from_path)
+
+
+def unpack_file_url(
+    link,  # type: Link
+    location,  # type: str
+    download_dir=None,  # type: Optional[str]
+    hashes=None  # type: Optional[Hashes]
+):
+    # type: (...) -> None
+    """Unpack link into location.
+
+    If download_dir is provided and link points to a file, make a copy
+    of the link file inside download_dir.
+    """
+    link_path = url_to_path(link.url_without_fragment)
+
+    # If it's a url to a local directory
+    if is_dir_url(link):
+        if os.path.isdir(location):
+            rmtree(location)
+        shutil.copytree(link_path, location, symlinks=True)
+        if download_dir:
+            logger.info('Link is a directory, ignoring download_dir')
+        return
+
+    # If --require-hashes is off, `hashes` is either empty, the
+    # link's embedded hash, or MissingHashes; it is required to
+    # match. If --require-hashes is on, we are satisfied by any
+    # hash in `hashes` matching: a URL-based or an option-based
+    # one; no internet-sourced hash will be in `hashes`.
+    if hashes:
+        hashes.check_against_path(link_path)
+
+    # If a download dir is specified, is the file already there and valid?
+    already_downloaded_path = None
+    if download_dir:
+        already_downloaded_path = _check_download_dir(link,
+                                                      download_dir,
+                                                      hashes)
+
+    if already_downloaded_path:
+        from_path = already_downloaded_path
+    else:
+        from_path = link_path
+
+    content_type = mimetypes.guess_type(from_path)[0]
+
+    # unpack the archive to the build dir location. even when only downloading
+    # archives, they have to be unpacked to parse dependencies
+    unpack_file(from_path, location, content_type, link)
+
+    # a download dir is specified and not already downloaded
+    if download_dir and not already_downloaded_path:
+        _copy_file(from_path, download_dir, link)
+
+
+class PipXmlrpcTransport(xmlrpc_client.Transport):
+    """Provide a `xmlrpclib.Transport` implementation via a `PipSession`
+    object.
+    """
+
+    def __init__(self, index_url, session, use_datetime=False):
+        xmlrpc_client.Transport.__init__(self, use_datetime)
+        index_parts = urllib_parse.urlparse(index_url)
+        self._scheme = index_parts.scheme
+        self._session = session
+
+    def request(self, host, handler, request_body, verbose=False):
+        parts = (self._scheme, host, handler, None, None, None)
+        url = urllib_parse.urlunparse(parts)
+        try:
+            headers = {'Content-Type': 'text/xml'}
+            response = self._session.post(url, data=request_body,
+                                          headers=headers, stream=True)
+            response.raise_for_status()
+            self.verbose = verbose
+            return self.parse_response(response.raw)
+        except requests.HTTPError as exc:
+            logger.critical(
+                "HTTP error %s while getting %s",
+                exc.response.status_code, url,
+            )
+            raise
+
+
+def unpack_url(
+    link,  # type: Optional[Link]
+    location,  # type: Optional[str]
+    download_dir=None,  # type: Optional[str]
+    only_download=False,  # type: bool
+    session=None,  # type: Optional[PipSession]
+    hashes=None,  # type: Optional[Hashes]
+    progress_bar="on"  # type: str
+):
+    # type: (...) -> None
+    """Unpack link.
+       If link is a VCS link:
+         if only_download, export into download_dir and ignore location
+          else unpack into location
+       for other types of link:
+         - unpack into location
+         - if download_dir, copy the file into download_dir
+         - if only_download, mark location for deletion
+
+    :param hashes: A Hashes object, one of whose embedded hashes must match,
+        or HashMismatch will be raised. If the Hashes is empty, no matches are
+        required, and unhashable types of requirements (like VCS ones, which
+        would ordinarily raise HashUnsupported) are allowed.
+    """
+    # non-editable vcs urls
+    if is_vcs_url(link):
+        unpack_vcs_link(link, location)
+
+    # file urls
+    elif is_file_url(link):
+        unpack_file_url(link, location, download_dir, hashes=hashes)
+
+    # http urls
+    else:
+        if session is None:
+            session = PipSession()
+
+        unpack_http_url(
+            link,
+            location,
+            download_dir,
+            session,
+            hashes=hashes,
+            progress_bar=progress_bar
+        )
+    if only_download:
+        write_delete_marker_file(location)
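+
+
+# Hedged example: unpacking an sdist link into a build directory (the URL is
+# purely illustrative):
+#   link = Link('https://example.com/pkg-1.0.tar.gz')
+#   unpack_url(link, location='/tmp/build/pkg', session=PipSession())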
+
+
+def _download_http_url(
+    link,  # type: Link
+    session,  # type: PipSession
+    temp_dir,  # type: str
+    hashes,  # type: Hashes
+    progress_bar  # type: str
+):
+    # type: (...) -> Tuple[str, str]
+    """Download link url into temp_dir using provided session"""
+    target_url = link.url.split('#', 1)[0]
+    try:
+        resp = session.get(
+            target_url,
+            # We use Accept-Encoding: identity here because requests
+            # defaults to accepting compressed responses. This breaks in
+            # a variety of ways depending on how the server is configured.
+            # - Some servers will notice that the file isn't a compressible
+            #   file and will leave the file alone and with an empty
+            #   Content-Encoding
+            # - Some servers will notice that the file is already
+            #   compressed and will leave the file alone and will add a
+            #   Content-Encoding: gzip header
+            # - Some servers won't notice anything at all and will take
+            #   a file that's already been compressed and compress it again
+            #   and set the Content-Encoding: gzip header
+            # By setting this to request only the identity encoding we're
+            # hoping to eliminate the third case. Hopefully there does not
+            # exist a server which when given a file will notice it is
+            # already compressed and that you're not asking for a
+            # compressed file and will then decompress it before sending
+            # because if that's the case I don't think it'll ever be
+            # possible to make this work.
+            headers={"Accept-Encoding": "identity"},
+            stream=True,
+        )
+        resp.raise_for_status()
+    except requests.HTTPError as exc:
+        logger.critical(
+            "HTTP error %s while getting %s", exc.response.status_code, link,
+        )
+        raise
+
+    content_type = resp.headers.get('content-type', '')
+    filename = link.filename  # fallback
+    # Have a look at the Content-Disposition header for a better guess
+    content_disposition = resp.headers.get('content-disposition')
+    if content_disposition:
+        type, params = cgi.parse_header(content_disposition)
+        # We use ``or`` here because we don't want to use an "empty" value
+        # from the filename param.
+        filename = params.get('filename') or filename
+    ext = splitext(filename)[1]
+    if not ext:
+        ext = mimetypes.guess_extension(content_type)
+        if ext:
+            filename += ext
+    if not ext and link.url != resp.url:
+        ext = os.path.splitext(resp.url)[1]
+        if ext:
+            filename += ext
+    file_path = os.path.join(temp_dir, filename)
+    with open(file_path, 'wb') as content_file:
+        _download_url(resp, link, content_file, hashes, progress_bar)
+    return file_path, content_type
+
+
+def _check_download_dir(link, download_dir, hashes):
+    # type: (Link, str, Hashes) -> Optional[str]
+    """ Check download_dir for previously downloaded file with correct hash
+        If a correct file is found return its path else None
+    """
+    download_path = os.path.join(download_dir, link.filename)
+    if os.path.exists(download_path):
+        # If already downloaded, does its hash match?
+        logger.info('File was already downloaded %s', download_path)
+        if hashes:
+            try:
+                hashes.check_against_path(download_path)
+            except HashMismatch:
+                logger.warning(
+                    'Previously-downloaded file %s has bad hash. '
+                    'Re-downloading.',
+                    download_path
+                )
+                os.unlink(download_path)
+                return None
+        return download_path
+    return None
diff --git a/lib/python3.7/site-packages/pip/_internal/exceptions.py b/lib/python3.7/site-packages/pip/_internal/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b291a1e4e4b8387277217093c1a42f306b1b972
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/exceptions.py
@@ -0,0 +1,274 @@
+"""Exceptions used throughout package"""
+from __future__ import absolute_import
+
+from itertools import chain, groupby, repeat
+
+from pip._vendor.six import iteritems
+
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Optional
+    from pip._internal.req.req_install import InstallRequirement
+
+
+class PipError(Exception):
+    """Base pip exception"""
+
+
+class ConfigurationError(PipError):
+    """General exception in configuration"""
+
+
+class InstallationError(PipError):
+    """General exception during installation"""
+
+
+class UninstallationError(PipError):
+    """General exception during uninstallation"""
+
+
+class DistributionNotFound(InstallationError):
+    """Raised when a distribution cannot be found to satisfy a requirement"""
+
+
+class RequirementsFileParseError(InstallationError):
+    """Raised when a general error occurs parsing a requirements file line."""
+
+
+class BestVersionAlreadyInstalled(PipError):
+    """Raised when the most up-to-date version of a package is already
+    installed."""
+
+
+class BadCommand(PipError):
+    """Raised when virtualenv or a command is not found"""
+
+
+class CommandError(PipError):
+    """Raised when there is an error in command-line arguments"""
+
+
+class PreviousBuildDirError(PipError):
+    """Raised when there's a previous conflicting build directory"""
+
+
+class InvalidWheelFilename(InstallationError):
+    """Invalid wheel filename."""
+
+
+class UnsupportedWheel(InstallationError):
+    """Unsupported wheel."""
+
+
+class HashErrors(InstallationError):
+    """Multiple HashError instances rolled into one for reporting"""
+
+    def __init__(self):
+        self.errors = []
+
+    def append(self, error):
+        self.errors.append(error)
+
+    def __str__(self):
+        lines = []
+        self.errors.sort(key=lambda e: e.order)
+        for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__):
+            lines.append(cls.head)
+            lines.extend(e.body() for e in errors_of_cls)
+        if lines:
+            return '\n'.join(lines)
+        return ''
+
+    def __nonzero__(self):
+        return bool(self.errors)
+
+    def __bool__(self):
+        return self.__nonzero__()
+
+
+class HashError(InstallationError):
+    """
+    A failure to verify a package against known-good hashes
+
+    :cvar order: An int sorting hash exception classes by difficulty of
+        recovery (lower being harder), so the user doesn't bother fretting
+        about unpinned packages when there are deeper issues, like VCS
+        dependencies, to deal with. Also keeps error reports in a
+        deterministic order.
+    :cvar head: A section heading for display above potentially many
+        exceptions of this kind
+    :ivar req: The InstallRequirement that triggered this error. This is
+        pasted on after the exception is instantiated, because it's not
+        typically available earlier.
+
+    """
+    req = None  # type: Optional[InstallRequirement]
+    head = ''
+
+    def body(self):
+        """Return a summary of me for display under the heading.
+
+        This default implementation simply prints a description of the
+        triggering requirement (``self.req``, with populate_link() having
+        already been called).
+
+        """
+        return '    %s' % self._requirement_name()
+
+    def __str__(self):
+        return '%s\n%s' % (self.head, self.body())
+
+    def _requirement_name(self):
+        """Return a description of the requirement that triggered me.
+
+        This default implementation returns the long description of the req,
+        with line numbers.
+
+        """
+        return str(self.req) if self.req else 'unknown package'
+
+
+class VcsHashUnsupported(HashError):
+    """A hash was provided for a version-control-system-based requirement, but
+    we don't have a method for hashing those."""
+
+    order = 0
+    head = ("Can't verify hashes for these requirements because we don't "
+            "have a way to hash version control repositories:")
+
+
+class DirectoryUrlHashUnsupported(HashError):
+    """A hash was provided for a version-control-system-based requirement, but
+    we don't have a method for hashing those."""
+
+    order = 1
+    head = ("Can't verify hashes for these file:// requirements because they "
+            "point to directories:")
+
+
+class HashMissing(HashError):
+    """A hash was needed for a requirement but is absent."""
+
+    order = 2
+    head = ('Hashes are required in --require-hashes mode, but they are '
+            'missing from some requirements. Here is a list of those '
+            'requirements along with the hashes their downloaded archives '
+            'actually had. Add lines like these to your requirements files to '
+            'prevent tampering. (If you did not enable --require-hashes '
+            'manually, note that it turns on automatically when any package '
+            'has a hash.)')
+
+    def __init__(self, gotten_hash):
+        """
+        :param gotten_hash: The hash of the (possibly malicious) archive we
+            just downloaded
+        """
+        self.gotten_hash = gotten_hash
+
+    def body(self):
+        # Dodge circular import.
+        from pip._internal.utils.hashes import FAVORITE_HASH
+
+        package = None
+        if self.req:
+            # In the case of URL-based requirements, display the original URL
+            # seen in the requirements file rather than the package name,
+            # so the output can be directly copied into the requirements file.
+            package = (self.req.original_link if self.req.original_link
+                       # In case someone feeds something downright stupid
+                       # to InstallRequirement's constructor.
+                       else getattr(self.req, 'req', None))
+        return '    %s --hash=%s:%s' % (package or 'unknown package',
+                                        FAVORITE_HASH,
+                                        self.gotten_hash)
+
+
+class HashUnpinned(HashError):
+    """A requirement had a hash specified but was not pinned to a specific
+    version."""
+
+    order = 3
+    head = ('In --require-hashes mode, all requirements must have their '
+            'versions pinned with ==. These do not:')
+
+
+class HashMismatch(HashError):
+    """
+    Distribution file hash values don't match.
+
+    :ivar package_name: The name of the package that triggered the hash
+        mismatch. Feel free to write to this after the exception is raised to
+        improve its error message.
+
+    """
+    order = 4
+    head = ('THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS '
+            'FILE. If you have updated the package versions, please update '
+            'the hashes. Otherwise, examine the package contents carefully; '
+            'someone may have tampered with them.')
+
+    def __init__(self, allowed, gots):
+        """
+        :param allowed: A dict of algorithm names pointing to lists of allowed
+            hex digests
+        :param gots: A dict of algorithm names pointing to hashes we
+            actually got from the files under suspicion
+        """
+        self.allowed = allowed
+        self.gots = gots
+
+    def body(self):
+        return '    %s:\n%s' % (self._requirement_name(),
+                                self._hash_comparison())
+
+    def _hash_comparison(self):
+        """
+        Return a comparison of actual and expected hash values.
+
+        Example::
+
+               Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
+                            or 123451234512345123451234512345123451234512345
+                    Got        bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
+
+        """
+        def hash_then_or(hash_name):
+            # For now, all the decent hashes have 6-char names, so we can get
+            # away with hard-coding space literals.
+            return chain([hash_name], repeat('    or'))
+
+        lines = []
+        for hash_name, expecteds in iteritems(self.allowed):
+            prefix = hash_then_or(hash_name)
+            lines.extend(('        Expected %s %s' % (next(prefix), e))
+                         for e in expecteds)
+            lines.append('             Got        %s\n' %
+                         self.gots[hash_name].hexdigest())
+        return '\n'.join(lines)
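+
+
+# Illustrative example (not from pip itself): how these classes compose.
+# Individual HashError subclasses are collected into HashErrors and reported
+# as one message. All names below exist in this module; the payload is
+# made up.
+def _demo_hash_error_report():
+    import hashlib
+    mismatch = HashMismatch(
+        allowed={'sha256': ['0' * 64]},
+        gots={'sha256': hashlib.sha256(b'payload')},
+    )
+    errors = HashErrors()
+    errors.append(mismatch)
+    # str(errors) yields HashMismatch.head followed by the hash comparison.
+    return str(errors)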
+
+
+class UnsupportedPythonVersion(InstallationError):
+    """Unsupported python version according to Requires-Python package
+    metadata."""
+
+
+class ConfigurationFileCouldNotBeLoaded(ConfigurationError):
+    """When there are errors while loading a configuration file
+    """
+
+    def __init__(self, reason="could not be loaded", fname=None, error=None):
+        super(ConfigurationFileCouldNotBeLoaded, self).__init__(error)
+        self.reason = reason
+        self.fname = fname
+        self.error = error
+
+    def __str__(self):
+        if self.fname is not None:
+            message_part = " in {}.".format(self.fname)
+        else:
+            assert self.error is not None
+            message_part = ".\n{}\n".format(self.error.message)
+        return "Configuration file {}{}".format(self.reason, message_part)
diff --git a/lib/python3.7/site-packages/pip/_internal/index.py b/lib/python3.7/site-packages/pip/_internal/index.py
new file mode 100644
index 0000000000000000000000000000000000000000..81278e8f6580f516464f7d53ccba9e67261e24ab
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/index.py
@@ -0,0 +1,1105 @@
+"""Routines related to PyPI, indexes"""
+from __future__ import absolute_import
+
+import cgi
+import itertools
+import logging
+import mimetypes
+import os
+import posixpath
+import re
+import sys
+from collections import namedtuple
+
+from pip._vendor import html5lib, requests, six
+from pip._vendor.distlib.compat import unescape
+from pip._vendor.packaging import specifiers
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.packaging.version import parse as parse_version
+from pip._vendor.requests.exceptions import HTTPError, RetryError, SSLError
+from pip._vendor.six.moves.urllib import parse as urllib_parse
+from pip._vendor.six.moves.urllib import request as urllib_request
+
+from pip._internal.download import HAS_TLS, is_url, path_to_url, url_to_path
+from pip._internal.exceptions import (
+    BestVersionAlreadyInstalled, DistributionNotFound, InvalidWheelFilename,
+    UnsupportedWheel,
+)
+from pip._internal.models.candidate import InstallationCandidate
+from pip._internal.models.format_control import FormatControl
+from pip._internal.models.index import PyPI
+from pip._internal.models.link import Link
+from pip._internal.pep425tags import get_supported
+from pip._internal.utils.compat import ipaddress
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import (
+    ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, WHEEL_EXTENSION, normalize_path,
+    redact_password_from_url,
+)
+from pip._internal.utils.packaging import check_requires_python
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.wheel import Wheel
+
+if MYPY_CHECK_RUNNING:
+    from logging import Logger
+    from typing import (
+        Tuple, Optional, Any, List, Union, Callable, Set, Sequence,
+        Iterable, MutableMapping
+    )
+    from pip._vendor.packaging.version import _BaseVersion
+    from pip._vendor.requests import Response
+    from pip._internal.pep425tags import Pep425Tag
+    from pip._internal.req import InstallRequirement
+    from pip._internal.download import PipSession
+
+    SecureOrigin = Tuple[str, str, Optional[str]]
+    BuildTag = Tuple[Any, ...]  # either empty tuple or Tuple[int, str]
+    CandidateSortingKey = Tuple[int, _BaseVersion, BuildTag, Optional[int]]
+
+
+__all__ = ['FormatControl', 'FoundCandidates', 'PackageFinder']
+
+
+SECURE_ORIGINS = [
+    # protocol, hostname, port
+    # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
+    ("https", "*", "*"),
+    ("*", "localhost", "*"),
+    ("*", "127.0.0.0/8", "*"),
+    ("*", "::1/128", "*"),
+    ("file", "*", None),
+    # ssh is always secure.
+    ("ssh", "*", "*"),
+]  # type: List[SecureOrigin]
+
+
+logger = logging.getLogger(__name__)
+
+
+def _match_vcs_scheme(url):
+    # type: (str) -> Optional[str]
+    """Look for VCS schemes in the URL.
+
+    Returns the matched VCS scheme, or None if there's no match.
+    """
+    from pip._internal.vcs import VcsSupport
+    for scheme in VcsSupport.schemes:
+        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
+            return scheme
+    return None
+
+
+def _is_url_like_archive(url):
+    # type: (str) -> bool
+    """Return whether the URL looks like an archive.
+    """
+    filename = Link(url).filename
+    for bad_ext in ARCHIVE_EXTENSIONS:
+        if filename.endswith(bad_ext):
+            return True
+    return False
+
+
+class _NotHTML(Exception):
+    def __init__(self, content_type, request_desc):
+        # type: (str, str) -> None
+        super(_NotHTML, self).__init__(content_type, request_desc)
+        self.content_type = content_type
+        self.request_desc = request_desc
+
+
+def _ensure_html_header(response):
+    # type: (Response) -> None
+    """Check the Content-Type header to ensure the response contains HTML.
+
+    Raises `_NotHTML` if the content type is not text/html.
+    """
+    content_type = response.headers.get("Content-Type", "")
+    if not content_type.lower().startswith("text/html"):
+        raise _NotHTML(content_type, response.request.method)
+
+
+class _NotHTTP(Exception):
+    pass
+
+
+def _ensure_html_response(url, session):
+    # type: (str, PipSession) -> None
+    """Send a HEAD request to the URL, and ensure the response contains HTML.
+
+    Raises `_NotHTTP` if the URL is not available for a HEAD request, or
+    `_NotHTML` if the content type is not text/html.
+    """
+    scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
+    if scheme not in {'http', 'https'}:
+        raise _NotHTTP()
+
+    resp = session.head(url, allow_redirects=True)
+    resp.raise_for_status()
+
+    _ensure_html_header(resp)
+
+
+def _get_html_response(url, session):
+    # type: (str, PipSession) -> Response
+    """Access an HTML page with GET, and return the response.
+
+    This consists of three parts:
+
+    1. If the URL looks suspiciously like an archive, send a HEAD first to
+       check the Content-Type is HTML, to avoid downloading a large file.
+       Raise `_NotHTTP` if the content type cannot be determined, or
+       `_NotHTML` if it is not HTML.
+    2. Actually perform the request. Raise HTTP exceptions on network failures.
+    3. Check the Content-Type header to make sure we got HTML, and raise
+       `_NotHTML` otherwise.
+    """
+    if _is_url_like_archive(url):
+        _ensure_html_response(url, session=session)
+
+    logger.debug('Getting page %s', redact_password_from_url(url))
+
+    resp = session.get(
+        url,
+        headers={
+            "Accept": "text/html",
+            # We don't want to blindly return cached data for
+            # /simple/, because authors generally expect that
+            # twine upload && pip install will function, but if
+            # they've done a pip install in the last ~10 minutes
+            # it won't. By setting max-age=0 we never blindly use
+            # cached data. The benefit of max-age=0 over no-cache
+            # is that conditional requests are still supported, so
+            # we still minimize traffic when the page hasn't
+            # changed at all; we just always incur the round trip
+            # for the conditional GET instead of only once per
+            # ~10 minutes.
+            # For more information, please see pypa/pip#5670.
+            "Cache-Control": "max-age=0",
+        },
+    )
+    resp.raise_for_status()
+
+    # The check for archives above only works if the url ends with
+    # something that looks like an archive. However, that is not a
+    # requirement of a url. Unless we issue a HEAD request on every
+    # url we cannot know ahead of time for sure if something is HTML
+    # or not. However, we can check after we've downloaded it.
+    _ensure_html_header(resp)
+
+    return resp
+
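+# Illustrative exchange enabled by "max-age=0" above (not pip-specific):
+#
+#   GET /simple/requests/                          -> 200, ETag: "abc"
+#   GET /simple/requests/  (If-None-Match: "abc")  -> 304 Not Modified
+#
+# i.e. an unchanged page costs one conditional round trip instead of a full
+# download, while a fresh "twine upload" (new ETag) is picked up immediately.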
+
+def _handle_get_page_fail(
+    link,  # type: Link
+    reason,  # type: Union[str, Exception]
+    meth=None  # type: Optional[Callable[..., None]]
+):
+    # type: (...) -> None
+    if meth is None:
+        meth = logger.debug
+    meth("Could not fetch URL %s: %s - skipping", link, reason)
+
+
+def _get_html_page(link, session=None):
+    # type: (Link, Optional[PipSession]) -> Optional[HTMLPage]
+    if session is None:
+        raise TypeError(
+            "_get_html_page() missing 1 required keyword argument: 'session'"
+        )
+
+    url = link.url.split('#', 1)[0]
+
+    # Check for VCS schemes that do not support lookup as web pages.
+    vcs_scheme = _match_vcs_scheme(url)
+    if vcs_scheme:
+        logger.debug('Cannot look at %s URL %s', vcs_scheme, link)
+        return None
+
+    # Tack index.html onto file:// URLs that point to directories
+    scheme, _, path, _, _, _ = urllib_parse.urlparse(url)
+    if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))):
+        # add trailing slash if not present so urljoin doesn't trim
+        # final segment
+        if not url.endswith('/'):
+            url += '/'
+        url = urllib_parse.urljoin(url, 'index.html')
+        logger.debug(' file: URL is directory, getting %s', url)
+
+    try:
+        resp = _get_html_response(url, session=session)
+    except _NotHTTP:
+        logger.debug(
+            'Skipping page %s because it looks like an archive, and cannot '
+            'be checked by HEAD.', link,
+        )
+    except _NotHTML as exc:
+        logger.debug(
+            'Skipping page %s because the %s request got Content-Type: %s',
+            link, exc.request_desc, exc.content_type,
+        )
+    except HTTPError as exc:
+        _handle_get_page_fail(link, exc)
+    except RetryError as exc:
+        _handle_get_page_fail(link, exc)
+    except SSLError as exc:
+        reason = "There was a problem confirming the ssl certificate: "
+        reason += str(exc)
+        _handle_get_page_fail(link, reason, meth=logger.info)
+    except requests.ConnectionError as exc:
+        _handle_get_page_fail(link, "connection error: %s" % exc)
+    except requests.Timeout:
+        _handle_get_page_fail(link, "timed out")
+    else:
+        return HTMLPage(resp.content, resp.url, resp.headers)
+    return None
+
+
+class CandidateEvaluator(object):
+
+    def __init__(
+        self,
+        valid_tags,          # type: List[Pep425Tag]
+        prefer_binary=False  # type: bool
+    ):
+        # type: (...) -> None
+        self._prefer_binary = prefer_binary
+        self._valid_tags = valid_tags
+
+    def is_wheel_supported(self, wheel):
+        # type: (Wheel) -> bool
+        return wheel.supported(self._valid_tags)
+
+    def _sort_key(self, candidate):
+        # type: (InstallationCandidate) -> CandidateSortingKey
+        """
+        Generate the sort key for the given installation candidate.
+        The greater the return value, the more preferred the candidate.
+        If not finding wheels, candidates are sorted by version only.
+        If finding wheels, the sort order is by version, then:
+          1. existing installs
+          2. wheels ordered via Wheel.support_index_min(self._valid_tags)
+          3. source archives
+        If prefer_binary was set, then all wheels are sorted above sources.
+        Note: it was considered to embed this logic into the Link
+              comparison operators, but then different sdist links
+              with the same version would have to be considered equal.
+        """
+        support_num = len(self._valid_tags)
+        build_tag = tuple()  # type: BuildTag
+        binary_preference = 0
+        if candidate.location.is_wheel:
+            # can raise InvalidWheelFilename
+            wheel = Wheel(candidate.location.filename)
+            if not wheel.supported(self._valid_tags):
+                raise UnsupportedWheel(
+                    "%s is not a supported wheel for this platform. It "
+                    "can't be sorted." % wheel.filename
+                )
+            if self._prefer_binary:
+                binary_preference = 1
+            pri = -(wheel.support_index_min(self._valid_tags))
+            if wheel.build_tag is not None:
+                match = re.match(r'^(\d+)(.*)$', wheel.build_tag)
+                build_tag_groups = match.groups()
+                build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
+        else:  # sdist
+            pri = -(support_num)
+        return (binary_preference, candidate.version, build_tag, pri)
+
+    def get_best_candidate(self, candidates):
+        # type: (List[InstallationCandidate]) -> Optional[InstallationCandidate]
+        """
+        Return the best candidate per the instance's sort order, or None if
+        no candidates are given.
+        """
+        if not candidates:
+            return None
+
+        return max(candidates, key=self._sort_key)
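+
+    # Illustrative ordering (made-up values): for two candidates with equal
+    # version and no build tag, a supported wheel outranks an sdist because
+    # the final slot is the negated support index, which is always greater
+    # than -len(self._valid_tags):
+    #
+    #   sdist key -> (0, <version>, (), -len(self._valid_tags))
+    #   wheel key -> (0, <version>, (), -wheel.support_index_min(...))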
+
+
+class FoundCandidates(object):
+    """A collection of candidates, returned by `PackageFinder.find_candidates`.
+
+    This class is only intended to be instantiated by PackageFinder through
+    the `from_specifier()` constructor.
+
+    Arguments:
+
+    * `candidates`: A sequence of all available candidates found.
+    * `specifier`: Specifier to filter applicable versions.
+    * `prereleases`: Whether prereleases should be accounted for. Pass None to
+        infer from the specifier.
+    * `evaluator`: A CandidateEvaluator object to sort applicable candidates
+        by order of preference.
+    """
+    def __init__(
+        self,
+        candidates,     # type: List[InstallationCandidate]
+        versions,       # type: Set[str]
+        evaluator,      # type: CandidateEvaluator
+    ):
+        # type: (...) -> None
+        self._candidates = candidates
+        self._evaluator = evaluator
+        self._versions = versions
+
+    @classmethod
+    def from_specifier(
+        cls,
+        candidates,     # type: List[InstallationCandidate]
+        specifier,      # type: specifiers.BaseSpecifier
+        prereleases,    # type: Optional[bool]
+        evaluator,      # type: CandidateEvaluator
+    ):
+        # type: (...) -> FoundCandidates
+        versions = {
+            str(v) for v in specifier.filter(
+                # We turn the version object into a str here because otherwise
+                # when we're debundled but setuptools isn't, Python will see
+                # packaging.version.Version and
+                # pkg_resources._vendor.packaging.version.Version as different
+                # types. This way we'll use a str as a common data interchange
+                # format. If we stop using the pkg_resources provided specifier
+                # and start using our own, we can drop the cast to str().
+                (str(c.version) for c in candidates),
+                prereleases=prereleases,
+            )
+        }
+        return cls(candidates, versions, evaluator)
+
+    def iter_all(self):
+        # type: () -> Iterable[InstallationCandidate]
+        """Iterate through all candidates.
+        """
+        return iter(self._candidates)
+
+    def iter_applicable(self):
+        # type: () -> Iterable[InstallationCandidate]
+        """Iterate through candidates matching the versions associated with
+        this instance.
+        """
+        # Again, converting version to str to deal with debundling.
+        return (c for c in self.iter_all() if str(c.version) in self._versions)
+
+    def get_best(self):
+        # type: () -> Optional[InstallationCandidate]
+        """Return the best candidate available, or None if no applicable
+        candidates are found.
+        """
+        candidates = list(self.iter_applicable())
+        return self._evaluator.get_best_candidate(candidates)
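+
+    # Illustrative flow (``finder`` is a hypothetical PackageFinder; this is
+    # not pip's public API):
+    #
+    #   found = finder.find_candidates('requests',
+    #                                  specifiers.SpecifierSet('>=2.0'))
+    #   applicable = list(found.iter_applicable())
+    #   best = found.get_best()   # None when nothing is applicable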
+
+
+class PackageFinder(object):
+    """This finds packages.
+
+    This is meant to match easy_install's technique for looking for
+    packages, by reading pages and looking for appropriate links.
+    """
+
+    def __init__(
+        self,
+        find_links,  # type: List[str]
+        index_urls,  # type: List[str]
+        allow_all_prereleases=False,  # type: bool
+        trusted_hosts=None,  # type: Optional[Iterable[str]]
+        session=None,  # type: Optional[PipSession]
+        format_control=None,  # type: Optional[FormatControl]
+        platform=None,  # type: Optional[str]
+        versions=None,  # type: Optional[List[str]]
+        abi=None,  # type: Optional[str]
+        implementation=None,  # type: Optional[str]
+        prefer_binary=False  # type: bool
+    ):
+        # type: (...) -> None
+        """Create a PackageFinder.
+
+        :param format_control: A FormatControl object or None. Used to control
+            the selection of source packages / binary packages when consulting
+            the index and links.
+        :param platform: A string or None. If None, searches for packages
+            that are supported by the current system. Otherwise, will find
+            packages that can be built on the platform passed in. These
+            packages will only be downloaded for distribution: they will
+            not be built locally.
+        :param versions: A list of strings or None. This is passed directly
+            to pep425tags.py in the get_supported() method.
+        :param abi: A string or None. This is passed directly
+            to pep425tags.py in the get_supported() method.
+        :param implementation: A string or None. This is passed directly
+            to pep425tags.py in the get_supported() method.
+        :param prefer_binary: Whether to prefer an old, but valid, binary
+            dist over a new source dist.
+        """
+        if session is None:
+            raise TypeError(
+                "PackageFinder() missing 1 required keyword argument: "
+                "'session'"
+            )
+
+        # Build find_links. If an argument starts with ~, it may be
+        # a local file relative to a home directory. So try normalizing
+        # it and if it exists, use the normalized version.
+        # This is deliberately conservative - it might be fine just to
+        # blindly normalize anything starting with a ~...
+        self.find_links = []  # type: List[str]
+        for link in find_links:
+            if link.startswith('~'):
+                new_link = normalize_path(link)
+                if os.path.exists(new_link):
+                    link = new_link
+            self.find_links.append(link)
+
+        self.index_urls = index_urls
+
+        # These are boring links that have already been logged somehow:
+        self.logged_links = set()  # type: Set[Link]
+
+        self.format_control = format_control or FormatControl(set(), set())
+
+        # Domains that we won't emit warnings for when not using HTTPS
+        self.secure_origins = [
+            ("*", host, "*")
+            for host in (trusted_hosts if trusted_hosts else [])
+        ]  # type: List[SecureOrigin]
+
+        # Do we want to allow _all_ pre-releases?
+        self.allow_all_prereleases = allow_all_prereleases
+
+        # The Session we'll use to make requests
+        self.session = session
+
+        # The valid tags to check potential found wheel candidates against
+        valid_tags = get_supported(
+            versions=versions,
+            platform=platform,
+            abi=abi,
+            impl=implementation,
+        )
+        self.candidate_evaluator = CandidateEvaluator(
+            valid_tags=valid_tags, prefer_binary=prefer_binary,
+        )
+
+        # If we don't have TLS enabled, then WARN if anyplace we're looking
+        # relies on TLS.
+        if not HAS_TLS:
+            for link in itertools.chain(self.index_urls, self.find_links):
+                parsed = urllib_parse.urlparse(link)
+                if parsed.scheme == "https":
+                    logger.warning(
+                        "pip is configured with locations that require "
+                        "TLS/SSL, however the ssl module in Python is not "
+                        "available."
+                    )
+                    break
+
+    def get_formatted_locations(self):
+        # type: () -> str
+        lines = []
+        if self.index_urls and self.index_urls != [PyPI.simple_url]:
+            lines.append(
+                "Looking in indexes: {}".format(", ".join(
+                    redact_password_from_url(url) for url in self.index_urls))
+            )
+        if self.find_links:
+            lines.append(
+                "Looking in links: {}".format(", ".join(self.find_links))
+            )
+        return "\n".join(lines)
+
+    @staticmethod
+    def _sort_locations(locations, expand_dir=False):
+        # type: (Sequence[str], bool) -> Tuple[List[str], List[str]]
+        """
+        Sort locations into "files" (archives) and "urls", and return
+        a pair of lists (files, urls)
+        """
+        files = []
+        urls = []
+
+        # puts the url for the given file path into the appropriate list
+        def sort_path(path):
+            url = path_to_url(path)
+            if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
+                urls.append(url)
+            else:
+                files.append(url)
+
+        for url in locations:
+
+            is_local_path = os.path.exists(url)
+            is_file_url = url.startswith('file:')
+
+            if is_local_path or is_file_url:
+                if is_local_path:
+                    path = url
+                else:
+                    path = url_to_path(url)
+                if os.path.isdir(path):
+                    if expand_dir:
+                        path = os.path.realpath(path)
+                        for item in os.listdir(path):
+                            sort_path(os.path.join(path, item))
+                    elif is_file_url:
+                        urls.append(url)
+                    else:
+                        logger.warning(
+                            "Path '{0}' is ignored: "
+                            "it is a directory.".format(path),
+                        )
+                elif os.path.isfile(path):
+                    sort_path(path)
+                else:
+                    logger.warning(
+                        "Url '%s' is ignored: it is neither a file "
+                        "nor a directory.", url,
+                    )
+            elif is_url(url):
+                # Only add url with clear scheme
+                urls.append(url)
+            else:
+                logger.warning(
+                    "Url '%s' is ignored. It is either a non-existing "
+                    "path or lacks a specific scheme.", url,
+                )
+
+        return files, urls
+
+    def _validate_secure_origin(self, logger, location):
+        # type: (Logger, Link) -> bool
+        # Determine if this url used a secure transport mechanism
+        parsed = urllib_parse.urlparse(str(location))
+        origin = (parsed.scheme, parsed.hostname, parsed.port)
+
+        # The protocol to use to see if the protocol matches.
+        # Don't count the repository type as part of the protocol: in
+        # cases such as "git+ssh", only use "ssh". (I.e., Only verify against
+        # the last scheme.)
+        protocol = origin[0].rsplit('+', 1)[-1]
+
+        # Determine if our origin is a secure origin by looking through our
+        # hardcoded list of secure origins, as well as any additional ones
+        # configured on this PackageFinder instance.
+        for secure_origin in (SECURE_ORIGINS + self.secure_origins):
+            if protocol != secure_origin[0] and secure_origin[0] != "*":
+                continue
+
+            try:
+                # We need to do this decode dance to ensure that we have a
+                # unicode object, even on Python 2.x.
+                addr = ipaddress.ip_address(
+                    origin[1]
+                    if (
+                        isinstance(origin[1], six.text_type) or
+                        origin[1] is None
+                    )
+                    else origin[1].decode("utf8")
+                )
+                network = ipaddress.ip_network(
+                    secure_origin[1]
+                    if isinstance(secure_origin[1], six.text_type)
+                    # setting secure_origin[1] to proper Union[bytes, str]
+                    # creates problems in other places
+                    else secure_origin[1].decode("utf8")  # type: ignore
+                )
+            except ValueError:
+                # We don't have both a valid address and a valid network, so
+                # we'll check this origin against hostnames.
+                if (origin[1] and
+                        origin[1].lower() != secure_origin[1].lower() and
+                        secure_origin[1] != "*"):
+                    continue
+            else:
+                # We have a valid address and network, so see if the address
+                # is contained within the network.
+                if addr not in network:
+                    continue
+
+            # Check to see if the port matches
+            if (origin[2] != secure_origin[2] and
+                    secure_origin[2] != "*" and
+                    secure_origin[2] is not None):
+                continue
+
+            # If we've gotten here, then this origin matches the current
+            # secure origin and we should return True
+            return True
+
+        # If we've gotten to this point, then the origin isn't secure and we
+        # will not accept it as a valid location to search. We will however
+        # log a warning that we are ignoring it.
+        logger.warning(
+            "The repository located at %s is not a trusted or secure host and "
+            "is being ignored. If this repository is available via HTTPS we "
+            "recommend you use HTTPS instead, otherwise you may silence "
+            "this warning and allow it anyway with '--trusted-host %s'.",
+            parsed.hostname,
+            parsed.hostname,
+        )
+
+        return False
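+
+    # Illustrative behaviour (``finder`` is a hypothetical PackageFinder
+    # instance): with the default SECURE_ORIGINS, plain HTTP hosts are
+    # rejected unless allowed via --trusted-host, while HTTPS and localhost
+    # pass:
+    #
+    #   finder._validate_secure_origin(logger, Link('http://example.com/'))
+    #   # -> False (and the warning above is logged)
+    #   finder._validate_secure_origin(logger, Link('https://pypi.org/simple/'))
+    #   # -> True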
+
+    def _get_index_urls_locations(self, project_name):
+        # type: (str) -> List[str]
+        """Returns the locations found via self.index_urls
+
+        Checks the url_name on the main (first in the list) index and
+        uses this url_name to produce all locations
+        """
+
+        def mkurl_pypi_url(url):
+            loc = posixpath.join(
+                url,
+                urllib_parse.quote(canonicalize_name(project_name)))
+            # For maximum compatibility with easy_install, ensure the path
+            # ends in a trailing slash.  Although this isn't in the spec
+            # (and PyPI can handle it without the slash) some other index
+            # implementations might break if they relied on easy_install's
+            # behavior.
+            if not loc.endswith('/'):
+                loc = loc + '/'
+            return loc
+
+        return [mkurl_pypi_url(url) for url in self.index_urls]
+
+    def find_all_candidates(self, project_name):
+        # type: (str) -> List[InstallationCandidate]
+        """Find all available InstallationCandidate objects for project_name
+
+        This checks index_urls and find_links.
+        All versions found are returned as an InstallationCandidate list.
+
+        See _link_package_versions for details on which files are accepted
+        """
+        index_locations = self._get_index_urls_locations(project_name)
+        index_file_loc, index_url_loc = self._sort_locations(index_locations)
+        fl_file_loc, fl_url_loc = self._sort_locations(
+            self.find_links, expand_dir=True,
+        )
+
+        file_locations = (Link(url) for url in itertools.chain(
+            index_file_loc, fl_file_loc,
+        ))
+
+        # We trust every url that the user has given us whether it was given
+        #   via --index-url or --find-links.
+        # We want to filter out anything which does not have a secure origin.
+        url_locations = [
+            link for link in itertools.chain(
+                (Link(url) for url in index_url_loc),
+                (Link(url) for url in fl_url_loc),
+            )
+            if self._validate_secure_origin(logger, link)
+        ]
+
+        logger.debug('%d location(s) to search for versions of %s:',
+                     len(url_locations), project_name)
+
+        for location in url_locations:
+            logger.debug('* %s', location)
+
+        canonical_name = canonicalize_name(project_name)
+        formats = self.format_control.get_allowed_formats(canonical_name)
+        search = Search(project_name, canonical_name, formats)
+        find_links_versions = self._package_versions(
+            # We trust every directly linked archive in find_links
+            (Link(url, '-f') for url in self.find_links),
+            search
+        )
+
+        page_versions = []
+        for page in self._get_pages(url_locations, project_name):
+            logger.debug('Analyzing links from page %s', page.url)
+            with indent_log():
+                page_versions.extend(
+                    self._package_versions(page.iter_links(), search)
+                )
+
+        file_versions = self._package_versions(file_locations, search)
+        if file_versions:
+            file_versions.sort(reverse=True)
+            logger.debug(
+                'Local files found: %s',
+                ', '.join([
+                    url_to_path(candidate.location.url)
+                    for candidate in file_versions
+                ])
+            )
+
+        # This is an intentional priority ordering
+        return file_versions + find_links_versions + page_versions
+
+    def find_candidates(
+        self,
+        project_name,       # type: str
+        specifier=None,     # type: Optional[specifiers.BaseSpecifier]
+    ):
+        # type: (...) -> FoundCandidates
+        """Find matches for the given project and specifier.
+
+        If given, `specifier` should implement `filter` to allow version
+        filtering (e.g. ``packaging.specifiers.SpecifierSet``).
+
+        Returns a `FoundCandidates` instance.
+        """
+        if specifier is None:
+            specifier = specifiers.SpecifierSet()
+        return FoundCandidates.from_specifier(
+            self.find_all_candidates(project_name),
+            specifier=specifier,
+            prereleases=(self.allow_all_prereleases or None),
+            evaluator=self.candidate_evaluator,
+        )
+
+    def find_requirement(self, req, upgrade):
+        # type: (InstallRequirement, bool) -> Optional[Link]
+        """Try to find a Link matching req
+
+        Expects req, an InstallRequirement and upgrade, a boolean
+        Returns a Link if found,
+        Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
+        """
+        candidates = self.find_candidates(req.name, req.specifier)
+        best_candidate = candidates.get_best()
+
+        installed_version = None    # type: Optional[_BaseVersion]
+        if req.satisfied_by is not None:
+            installed_version = parse_version(req.satisfied_by.version)
+
+        def _format_versions(cand_iter):
+            # This repeated parse_version and str() conversion is needed to
+            # handle different vendoring sources from pip and pkg_resources.
+            # If we stop using the pkg_resources provided specifier and start
+            # using our own, we can drop the cast to str().
+            return ", ".join(sorted(
+                {str(c.version) for c in cand_iter},
+                key=parse_version,
+            )) or "none"
+
+        if installed_version is None and best_candidate is None:
+            logger.critical(
+                'Could not find a version that satisfies the requirement %s '
+                '(from versions: %s)',
+                req,
+                _format_versions(candidates.iter_all()),
+            )
+
+            raise DistributionNotFound(
+                'No matching distribution found for %s' % req
+            )
+
+        best_installed = False
+        if installed_version and (
+                best_candidate is None or
+                best_candidate.version <= installed_version):
+            best_installed = True
+
+        if not upgrade and installed_version is not None:
+            if best_installed:
+                logger.debug(
+                    'Existing installed version (%s) is most up-to-date and '
+                    'satisfies requirement',
+                    installed_version,
+                )
+            else:
+                logger.debug(
+                    'Existing installed version (%s) satisfies requirement '
+                    '(most up-to-date version is %s)',
+                    installed_version,
+                    best_candidate.version,
+                )
+            return None
+
+        if best_installed:
+            # We have an existing version, and it's the best version
+            logger.debug(
+                'Installed version (%s) is most up-to-date (past versions: '
+                '%s)',
+                installed_version,
+                _format_versions(candidates.iter_applicable()),
+            )
+            raise BestVersionAlreadyInstalled
+
+        logger.debug(
+            'Using version %s (newest of versions: %s)',
+            best_candidate.version,
+            _format_versions(candidates.iter_applicable()),
+        )
+        return best_candidate.location
+
+    def _get_pages(self, locations, project_name):
+        # type: (Iterable[Link], str) -> Iterable[HTMLPage]
+        """
+        Yields HTMLPage objects from the given locations, skipping
+        locations that have errors.
+        """
+        seen = set()  # type: Set[Link]
+        for location in locations:
+            if location in seen:
+                continue
+            seen.add(location)
+
+            page = _get_html_page(location, session=self.session)
+            if page is None:
+                continue
+
+            yield page
+
+    _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
+
+    def _sort_links(self, links):
+        # type: (Iterable[Link]) -> List[Link]
+        """
+        Returns elements of links in order, non-egg links first, egg links
+        second, while eliminating duplicates
+        """
+        eggs, no_eggs = [], []
+        seen = set()  # type: Set[Link]
+        for link in links:
+            if link not in seen:
+                seen.add(link)
+                if link.egg_fragment:
+                    eggs.append(link)
+                else:
+                    no_eggs.append(link)
+        return no_eggs + eggs
+
+    def _package_versions(
+        self,
+        links,  # type: Iterable[Link]
+        search  # type: Search
+    ):
+        # type: (...) -> List[InstallationCandidate]
+        result = []
+        for link in self._sort_links(links):
+            v = self._link_package_versions(link, search)
+            if v is not None:
+                result.append(v)
+        return result
+
+    def _log_skipped_link(self, link, reason):
+        # type: (Link, str) -> None
+        if link not in self.logged_links:
+            logger.debug('Skipping link %s; %s', link, reason)
+            self.logged_links.add(link)
+
+    def _link_package_versions(self, link, search):
+        # type: (Link, Search) -> Optional[InstallationCandidate]
+        """Return an InstallationCandidate or None"""
+        version = None
+        if link.egg_fragment:
+            egg_info = link.egg_fragment
+            ext = link.ext
+        else:
+            egg_info, ext = link.splitext()
+            if not ext:
+                self._log_skipped_link(link, 'not a file')
+                return None
+            if ext not in SUPPORTED_EXTENSIONS:
+                self._log_skipped_link(
+                    link, 'unsupported archive format: %s' % ext,
+                )
+                return None
+            if "binary" not in search.formats and ext == WHEEL_EXTENSION:
+                self._log_skipped_link(
+                    link, 'No binaries permitted for %s' % search.supplied,
+                )
+                return None
+            if "macosx10" in link.path and ext == '.zip':
+                self._log_skipped_link(link, 'macosx10 one')
+                return None
+            if ext == WHEEL_EXTENSION:
+                try:
+                    wheel = Wheel(link.filename)
+                except InvalidWheelFilename:
+                    self._log_skipped_link(link, 'invalid wheel filename')
+                    return None
+                if canonicalize_name(wheel.name) != search.canonical:
+                    self._log_skipped_link(
+                        link, 'wrong project name (not %s)' % search.supplied)
+                    return None
+
+                if not self.candidate_evaluator.is_wheel_supported(wheel):
+                    self._log_skipped_link(
+                        link, 'it is not compatible with this Python')
+                    return None
+
+                version = wheel.version
+
+        # This should be up by the search.ok_binary check, but see issue 2700.
+        if "source" not in search.formats and ext != WHEEL_EXTENSION:
+            self._log_skipped_link(
+                link, 'No sources permitted for %s' % search.supplied,
+            )
+            return None
+
+        if not version:
+            version = _egg_info_matches(egg_info, search.canonical)
+        if not version:
+            self._log_skipped_link(
+                link, 'Missing project version for %s' % search.supplied)
+            return None
+
+        match = self._py_version_re.search(version)
+        if match:
+            version = version[:match.start()]
+            py_version = match.group(1)
+            if py_version != sys.version[:3]:
+                self._log_skipped_link(
+                    link, 'Python version is incorrect')
+                return None
+        try:
+            support_this_python = check_requires_python(link.requires_python)
+        except specifiers.InvalidSpecifier:
+            logger.debug("Package %s has an invalid Requires-Python entry: %s",
+                         link.filename, link.requires_python)
+            support_this_python = True
+
+        if not support_this_python:
+            logger.debug("The package %s is incompatible with the python "
+                         "version in use. Acceptable python versions are: %s",
+                         link, link.requires_python)
+            return None
+        logger.debug('Found link %s, version: %s', link, version)
+
+        return InstallationCandidate(search.supplied, version, link)
+
+
+def _find_name_version_sep(egg_info, canonical_name):
+    # type: (str, str) -> int
+    """Find the separator's index based on the package's canonical name.
+
+    `egg_info` must be an egg info string for the given package, and
+    `canonical_name` must be the package's canonical name.
+
+    This function is needed since the canonicalized name does not necessarily
+    have the same length as the egg info's name part. An example::
+
+    >>> egg_info = 'foo__bar-1.0'
+    >>> canonical_name = 'foo-bar'
+    >>> _find_name_version_sep(egg_info, canonical_name)
+    8
+    """
+    # Project name and version must be separated by a single dash. Find all
+    # occurrences of dashes; if the string in front of a dash matches the
+    # canonical name, this is the one separating the name and version parts.
+    for i, c in enumerate(egg_info):
+        if c != "-":
+            continue
+        if canonicalize_name(egg_info[:i]) == canonical_name:
+            return i
+    raise ValueError("{} does not match {}".format(egg_info, canonical_name))
+
+
+def _egg_info_matches(egg_info, canonical_name):
+    # type: (str, str) -> Optional[str]
+    """Pull the version part out of a string.
+
+    :param egg_info: The string to parse. E.g. foo-2.1
+    :param canonical_name: The canonicalized name of the package this
+        belongs to.
+    """
+    try:
+        version_start = _find_name_version_sep(egg_info, canonical_name) + 1
+    except ValueError:
+        return None
+    version = egg_info[version_start:]
+    if not version:
+        return None
+    return version
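+
+# For example (illustrative, mirroring the doctest above):
+#
+#   _egg_info_matches('foo__bar-1.0', 'foo-bar')    # -> '1.0'
+#   _egg_info_matches('unrelated-1.0', 'foo-bar')   # -> None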
+
+
+def _determine_base_url(document, page_url):
+    """Determine the HTML document's base URL.
+
+    This looks for a ``<base>`` tag in the HTML document. If present, its href
+    attribute denotes the base URL of anchor tags in the document. If there is
+    no such tag (or if it does not have a valid href attribute), the HTML
+    file's URL is used as the base URL.
+
+    :param document: An HTML document representation. The current
+        implementation expects the result of ``html5lib.parse()``.
+    :param page_url: The URL of the HTML document.
+    """
+    for base in document.findall(".//base"):
+        href = base.get("href")
+        if href is not None:
+            return href
+    return page_url
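+
+# Illustrative call (html5lib is imported at the top of this module):
+#
+#   doc = html5lib.parse(
+#       b'<html><head><base href="https://mirror.example/"></head></html>',
+#       namespaceHTMLElements=False,
+#   )
+#   _determine_base_url(doc, 'https://pypi.org/simple/pip/')
+#   # -> 'https://mirror.example/'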
+
+
+def _get_encoding_from_headers(headers):
+    """Determine if we have any encoding information in our headers.
+    """
+    if headers and "Content-Type" in headers:
+        content_type, params = cgi.parse_header(headers["Content-Type"])
+        if "charset" in params:
+            return params['charset']
+    return None
+
+
+def _clean_link(url):
+    # type: (str) -> str
+    """Makes sure a link is fully encoded.  That is, if a ' ' shows up in
+    the link, it will be rewritten to %20 (while not over-quoting
+    % or other characters)."""
+    # Split the URL into parts according to the general structure
+    # `scheme://netloc/path;parameters?query#fragment`. Note that the
+    # `netloc` can be empty and the URI will then refer to a local
+    # filesystem path.
+    result = urllib_parse.urlparse(url)
+    # In both cases below we unquote prior to quoting to make sure
+    # nothing is double quoted.
+    if result.netloc == "":
+        # On Windows the path part might contain a drive letter which
+        # should not be quoted. On Linux where drive letters do not
+        # exist, the colon should be quoted. We rely on urllib.request
+        # to do the right thing here.
+        path = urllib_request.pathname2url(
+            urllib_request.url2pathname(result.path))
+    else:
+        path = urllib_parse.quote(urllib_parse.unquote(result.path))
+    return urllib_parse.urlunparse(result._replace(path=path))
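+
+# For example (illustrative):
+#
+#   _clean_link('https://example.com/my package.tar.gz')
+#   # -> 'https://example.com/my%20package.tar.gz'
+#   _clean_link('https://example.com/my%20package.tar.gz')
+#   # -> unchanged, because the path is unquoted before re-quoting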
+
+
+class HTMLPage(object):
+    """Represents one page, along with its URL"""
+
+    def __init__(self, content, url, headers=None):
+        # type: (bytes, str, MutableMapping[str, str]) -> None
+        self.content = content
+        self.url = url
+        self.headers = headers
+
+    def __str__(self):
+        return redact_password_from_url(self.url)
+
+    def iter_links(self):
+        # type: () -> Iterable[Link]
+        """Yields all links in the page"""
+        document = html5lib.parse(
+            self.content,
+            transport_encoding=_get_encoding_from_headers(self.headers),
+            namespaceHTMLElements=False,
+        )
+        base_url = _determine_base_url(document, self.url)
+        for anchor in document.findall(".//a"):
+            if anchor.get("href"):
+                href = anchor.get("href")
+                url = _clean_link(urllib_parse.urljoin(base_url, href))
+                pyrequire = anchor.get('data-requires-python')
+                pyrequire = unescape(pyrequire) if pyrequire else None
+                yield Link(url, self.url, requires_python=pyrequire)
+
+
+Search = namedtuple('Search', 'supplied canonical formats')
+"""Capture key aspects of a search.
+
+:attribute supplied: The user supplied package.
+:attribute canonical: The canonical package name.
+:attribute formats: The formats allowed for this package. Should be a set
+    with 'binary' or 'source' or both in it.
+"""
diff --git a/lib/python3.7/site-packages/pip/_internal/locations.py b/lib/python3.7/site-packages/pip/_internal/locations.py
new file mode 100644
index 0000000000000000000000000000000000000000..10860f7b52034106bbda16490340a62ae4927841
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/locations.py
@@ -0,0 +1,211 @@
+"""Locations where we look for configs, install stuff, etc"""
+from __future__ import absolute_import
+
+import os
+import os.path
+import platform
+import site
+import sys
+import sysconfig
+from distutils import sysconfig as distutils_sysconfig
+from distutils.command.install import SCHEME_KEYS  # type: ignore
+
+from pip._internal.utils import appdirs
+from pip._internal.utils.compat import WINDOWS, expanduser
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Any, Union, Dict, List, Optional
+
+
+# Application Directories
+USER_CACHE_DIR = appdirs.user_cache_dir("pip")
+
+
+DELETE_MARKER_MESSAGE = '''\
+This file is placed here by pip to indicate the source was put
+here by pip.
+
+Once this package is successfully installed this source code will be
+deleted (unless you remove this file).
+'''
+PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
+
+
+def write_delete_marker_file(directory):
+    # type: (str) -> None
+    """
+    Write the pip delete marker file into this directory.
+    """
+    filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
+    with open(filepath, 'w') as marker_fp:
+        marker_fp.write(DELETE_MARKER_MESSAGE)
+
+
+def running_under_virtualenv():
+    # type: () -> bool
+    """
+    Return True if we're running inside a virtualenv, False otherwise.
+
+    """
+    if hasattr(sys, 'real_prefix'):
+        return True
+    elif sys.prefix != getattr(sys, "base_prefix", sys.prefix):
+        return True
+
+    return False
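+
+# For example (illustrative): inside a ``python -m venv`` environment,
+# sys.prefix points into the env while sys.base_prefix points at the base
+# interpreter, so the second branch returns True; classic virtualenv sets
+# sys.real_prefix instead, which the first branch catches.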
+
+
+def virtualenv_no_global():
+    # type: () -> bool
+    """
+    Return True if in a venv and no system site packages.
+    """
+    # this mirrors the logic in virtualenv.py for locating the
+    # no-global-site-packages.txt file
+    site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))
+    no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')
+    if running_under_virtualenv() and os.path.isfile(no_global_file):
+        return True
+    else:
+        return False
+
+
+if running_under_virtualenv():
+    src_prefix = os.path.join(sys.prefix, 'src')
+else:
+    # FIXME: keep src in cwd for now (it is not a temporary folder)
+    try:
+        src_prefix = os.path.join(os.getcwd(), 'src')
+    except OSError:
+        # In case the current working directory has been renamed or deleted
+        sys.exit(
+            "The folder you are executing pip from can no longer be found."
+        )
+
+# under macOS + virtualenv sys.prefix is not properly resolved
+# it is something like /path/to/python/bin/..
+# Note: using realpath due to tmp dirs on OSX being symlinks
+src_prefix = os.path.abspath(src_prefix)
+
+# FIXME doesn't account for venv linked to global site-packages
+
+site_packages = sysconfig.get_path("purelib")  # type: Optional[str]
+
+# This is because of a bug in PyPy's sysconfig module, see
+# https://bitbucket.org/pypy/pypy/issues/2506/sysconfig-returns-incorrect-paths
+# for more information.
+if platform.python_implementation().lower() == "pypy":
+    site_packages = distutils_sysconfig.get_python_lib()
+try:
+    # Use getusersitepackages if this is present, as it ensures that the
+    # value is initialised properly.
+    user_site = site.getusersitepackages()
+except AttributeError:
+    user_site = site.USER_SITE
+user_dir = expanduser('~')
+if WINDOWS:
+    bin_py = os.path.join(sys.prefix, 'Scripts')
+    bin_user = os.path.join(user_site, 'Scripts')
+    # buildout uses 'bin' on Windows too?
+    if not os.path.exists(bin_py):
+        bin_py = os.path.join(sys.prefix, 'bin')
+        bin_user = os.path.join(user_site, 'bin')
+
+    config_basename = 'pip.ini'
+
+    legacy_storage_dir = os.path.join(user_dir, 'pip')
+    legacy_config_file = os.path.join(
+        legacy_storage_dir,
+        config_basename,
+    )
+else:
+    bin_py = os.path.join(sys.prefix, 'bin')
+    bin_user = os.path.join(user_site, 'bin')
+
+    config_basename = 'pip.conf'
+
+    legacy_storage_dir = os.path.join(user_dir, '.pip')
+    legacy_config_file = os.path.join(
+        legacy_storage_dir,
+        config_basename,
+    )
+    # Forcing to use /usr/local/bin for standard macOS framework installs
+    # Also log to ~/Library/Logs/ for use with the Console.app log viewer
+    if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
+        bin_py = '/usr/local/bin'
+
+global_config_files = [
+    os.path.join(path, config_basename)
+    for path in appdirs.site_config_dirs('pip')
+]
+
+site_config_file = os.path.join(sys.prefix, config_basename)
+new_config_file = os.path.join(appdirs.user_config_dir("pip"), config_basename)
+
+
+def distutils_scheme(dist_name, user=False, home=None, root=None,
+                     isolated=False, prefix=None):
+    # type:(str, bool, str, str, bool, str) -> dict
+    """
+    Return a distutils install scheme
+    """
+    from distutils.dist import Distribution
+
+    scheme = {}
+
+    if isolated:
+        extra_dist_args = {"script_args": ["--no-user-cfg"]}
+    else:
+        extra_dist_args = {}
+    dist_args = {'name': dist_name}  # type: Dict[str, Union[str, List[str]]]
+    dist_args.update(extra_dist_args)
+
+    d = Distribution(dist_args)
+    # Ignoring, typeshed issue reported python/typeshed/issues/2567
+    d.parse_config_files()
+    # NOTE: Ignoring type since mypy can't find attributes on 'Command'
+    i = d.get_command_obj('install', create=True)  # type: Any
+    assert i is not None
+    # NOTE: setting user or home has the side-effect of creating the home dir
+    # or user base for installations during finalize_options()
+    # ideally, we'd prefer a scheme class that has no side-effects.
+    assert not (user and prefix), "user={} prefix={}".format(user, prefix)
+    i.user = user or i.user
+    if user:
+        i.prefix = ""
+    i.prefix = prefix or i.prefix
+    i.home = home or i.home
+    i.root = root or i.root
+    i.finalize_options()
+    for key in SCHEME_KEYS:
+        scheme[key] = getattr(i, 'install_' + key)
+
+    # install_lib specified in setup.cfg should install *everything*
+    # into there (i.e. it takes precedence over both purelib and
+    # platlib).  Note: i.install_lib is *always* set after
+    # finalize_options(); we only want to override here if the user
+    # has explicitly requested it, hence we go back to the config.
+
+    # Ignoring, typeshed issue reported python/typeshed/issues/2567
+    if 'install_lib' in d.get_option_dict('install'):  # type: ignore
+        scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
+
+    if running_under_virtualenv():
+        scheme['headers'] = os.path.join(
+            sys.prefix,
+            'include',
+            'site',
+            'python' + sys.version[:3],
+            dist_name,
+        )
+
+        if root is not None:
+            path_no_drive = os.path.splitdrive(
+                os.path.abspath(scheme["headers"]))[1]
+            scheme["headers"] = os.path.join(
+                root,
+                path_no_drive[1:],
+            )
+
+    return scheme
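For context, `distutils_scheme` above is what maps a distribution name to its install locations. A minimal usage sketch, assuming this vendored copy is importable as `pip._internal.locations` ("example-dist" is a made-up name):

```python
from pip._internal.locations import distutils_scheme

# Ask for the per-user scheme; per the NOTE in distutils_scheme, user=True
# has the side effect of creating the user base directory.
scheme = distutils_scheme("example-dist", user=True)
for key in sorted(scheme):
    # Keys come from SCHEME_KEYS: platlib, purelib, headers, scripts, data.
    print("{:8s} -> {}".format(key, scheme[key]))
```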
diff --git a/lib/python3.7/site-packages/pip/_internal/models/__init__.py b/lib/python3.7/site-packages/pip/_internal/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7855226e4b500142deef8fb247cd33a9a991d122
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/models/__init__.py
@@ -0,0 +1,2 @@
+"""A package that contains models that represent entities.
+"""
diff --git a/lib/python3.7/site-packages/pip/_internal/models/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/models/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f913b2b3dae1a25db1982b7cafd64057424a194
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/models/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/models/__pycache__/candidate.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/models/__pycache__/candidate.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3f6acb3a9e388ad1b9e1ceab6564e19f520452e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/models/__pycache__/candidate.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/models/__pycache__/format_control.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/models/__pycache__/format_control.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4fc9d8c4ea9aeeceda81209837a0924c03ec5cb9
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/models/__pycache__/format_control.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/models/__pycache__/index.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/models/__pycache__/index.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28be50db8c7b0d1c36c1defdb9a8dccf386e7d6b
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/models/__pycache__/index.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/models/__pycache__/link.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/models/__pycache__/link.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c353e8055af5014aece4f28953a53fdd0e779ae
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/models/__pycache__/link.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/models/candidate.py b/lib/python3.7/site-packages/pip/_internal/models/candidate.py
new file mode 100644
index 0000000000000000000000000000000000000000..b66c3657f037b0c3df7ae0739222bf671a82b267
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/models/candidate.py
@@ -0,0 +1,31 @@
+from pip._vendor.packaging.version import parse as parse_version
+
+from pip._internal.utils.models import KeyBasedCompareMixin
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from pip._vendor.packaging.version import _BaseVersion
+    from pip._internal.models.link import Link
+    from typing import Any
+
+
+class InstallationCandidate(KeyBasedCompareMixin):
+    """Represents a potential "candidate" for installation.
+    """
+
+    def __init__(self, project, version, location):
+        # type: (Any, str, Link) -> None
+        self.project = project
+        self.version = parse_version(version)  # type: _BaseVersion
+        self.location = location
+
+        super(InstallationCandidate, self).__init__(
+            key=(self.project, self.version, self.location),
+            defining_class=InstallationCandidate
+        )
+
+    def __repr__(self):
+        # type: () -> str
+        return "<InstallationCandidate({!r}, {!r}, {!r})>".format(
+            self.project, self.version, self.location,
+        )
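Because `InstallationCandidate` inherits `KeyBasedCompareMixin` with the key `(project, version, location)`, candidates for one project order by their parsed version. An illustrative sketch (URLs and versions are made up):

```python
from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.link import Link

candidates = [
    InstallationCandidate(
        "demo", "1.10.0", Link("https://example.com/demo-1.10.0.tar.gz")),
    InstallationCandidate(
        "demo", "1.2.0", Link("https://example.com/demo-1.2.0.tar.gz")),
]
# parse_version gives real version ordering, so 1.10.0 > 1.2.0
# (a plain string comparison would get this wrong).
print(max(candidates).version)  # 1.10.0
```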
diff --git a/lib/python3.7/site-packages/pip/_internal/models/format_control.py b/lib/python3.7/site-packages/pip/_internal/models/format_control.py
new file mode 100644
index 0000000000000000000000000000000000000000..53138e48ecad4ea5489f4591882374016fd83c38
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/models/format_control.py
@@ -0,0 +1,73 @@
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Optional, Set, FrozenSet
+
+
+class FormatControl(object):
+    """Helper for managing formats from which a package can be installed.
+    """
+
+    def __init__(self, no_binary=None, only_binary=None):
+        # type: (Optional[Set], Optional[Set]) -> None
+        if no_binary is None:
+            no_binary = set()
+        if only_binary is None:
+            only_binary = set()
+
+        self.no_binary = no_binary
+        self.only_binary = only_binary
+
+    def __eq__(self, other):
+        return self.__dict__ == other.__dict__
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __repr__(self):
+        return "{}({}, {})".format(
+            self.__class__.__name__,
+            self.no_binary,
+            self.only_binary
+        )
+
+    @staticmethod
+    def handle_mutual_excludes(value, target, other):
+        # type: (str, Optional[Set], Optional[Set]) -> None
+        new = value.split(',')
+        while ':all:' in new:
+            other.clear()
+            target.clear()
+            target.add(':all:')
+            del new[:new.index(':all:') + 1]
+            # Without a ':none:' in the remainder, discard everything
+            # else, as ':all:' already covers it
+            if ':none:' not in new:
+                return
+        for name in new:
+            if name == ':none:':
+                target.clear()
+                continue
+            name = canonicalize_name(name)
+            other.discard(name)
+            target.add(name)
+
+    def get_allowed_formats(self, canonical_name):
+        # type: (str) -> FrozenSet
+        result = {"binary", "source"}
+        if canonical_name in self.only_binary:
+            result.discard('source')
+        elif canonical_name in self.no_binary:
+            result.discard('binary')
+        elif ':all:' in self.only_binary:
+            result.discard('source')
+        elif ':all:' in self.no_binary:
+            result.discard('binary')
+        return frozenset(result)
+
+    def disallow_binaries(self):
+        # type: () -> None
+        self.handle_mutual_excludes(
+            ':all:', self.no_binary, self.only_binary,
+        )
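The `:all:`/`:none:` handling in `handle_mutual_excludes` is easiest to see with a concrete value, e.g. what `--no-binary=:all:,:none:,demo` would produce ("demo" is a made-up package name):

```python
from pip._internal.models.format_control import FormatControl

fc = FormatControl(set(), set())
# ':all:' first clears both sets, ':none:' then clears no_binary again,
# and 'demo' alone ends up source-only.
FormatControl.handle_mutual_excludes(
    ":all:,:none:,demo", fc.no_binary, fc.only_binary,
)
print(fc.no_binary)                     # {'demo'}
print(fc.get_allowed_formats("demo"))   # frozenset({'source'})
print(fc.get_allowed_formats("other"))  # frozenset({'binary', 'source'})
```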
diff --git a/lib/python3.7/site-packages/pip/_internal/models/index.py b/lib/python3.7/site-packages/pip/_internal/models/index.py
new file mode 100644
index 0000000000000000000000000000000000000000..ead1efbda761ebed373700ce9e69797838c2b9d9
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/models/index.py
@@ -0,0 +1,31 @@
+from pip._vendor.six.moves.urllib import parse as urllib_parse
+
+
+class PackageIndex(object):
+    """Represents a Package Index and provides easier access to endpoints
+    """
+
+    def __init__(self, url, file_storage_domain):
+        # type: (str, str) -> None
+        super(PackageIndex, self).__init__()
+        self.url = url
+        self.netloc = urllib_parse.urlsplit(url).netloc
+        self.simple_url = self._url_for_path('simple')
+        self.pypi_url = self._url_for_path('pypi')
+
+        # This is part of a temporary hack used to block installs of PyPI
+        # packages which depend on external urls; it is only necessary
+        # until PyPI can block such packages itself
+        self.file_storage_domain = file_storage_domain
+
+    def _url_for_path(self, path):
+        # type: (str) -> str
+        return urllib_parse.urljoin(self.url, path)
+
+
+PyPI = PackageIndex(
+    'https://pypi.org/', file_storage_domain='files.pythonhosted.org'
+)
+TestPyPI = PackageIndex(
+    'https://test.pypi.org/', file_storage_domain='test-files.pythonhosted.org'
+)
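The two module-level instances make the endpoint helpers easy to check interactively; a quick sketch:

```python
from pip._internal.models.index import PyPI, TestPyPI

print(PyPI.netloc)        # pypi.org
print(PyPI.simple_url)    # https://pypi.org/simple
print(TestPyPI.pypi_url)  # https://test.pypi.org/pypi
```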
diff --git a/lib/python3.7/site-packages/pip/_internal/models/link.py b/lib/python3.7/site-packages/pip/_internal/models/link.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f420760b1cef8d3b15444d3933e9ce5b97d9a27
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/models/link.py
@@ -0,0 +1,163 @@
+import posixpath
+import re
+
+from pip._vendor.six.moves.urllib import parse as urllib_parse
+
+from pip._internal.download import path_to_url
+from pip._internal.utils.misc import (
+    WHEEL_EXTENSION, redact_password_from_url, splitext,
+)
+from pip._internal.utils.models import KeyBasedCompareMixin
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Optional, Tuple, Union
+    from pip._internal.index import HTMLPage
+
+
+class Link(KeyBasedCompareMixin):
+    """Represents a parsed link from a Package Index's simple URL
+    """
+
+    def __init__(self, url, comes_from=None, requires_python=None):
+        # type: (str, Optional[Union[str, HTMLPage]], Optional[str]) -> None
+        """
+        url:
+            url of the resource pointed to (href of the link)
+        comes_from:
+            instance of HTMLPage where the link was found, or string.
+        requires_python:
+            String containing the `Requires-Python` metadata field, specified
+            in PEP 345. This may be specified by a data-requires-python
+            attribute in the HTML link tag, as described in PEP 503.
+        """
+
+        # url can be a UNC windows share
+        if url.startswith('\\\\'):
+            url = path_to_url(url)
+
+        self.url = url
+        self.comes_from = comes_from
+        self.requires_python = requires_python if requires_python else None
+
+        super(Link, self).__init__(
+            key=(self.url),
+            defining_class=Link
+        )
+
+    def __str__(self):
+        if self.requires_python:
+            rp = ' (requires-python:%s)' % self.requires_python
+        else:
+            rp = ''
+        if self.comes_from:
+            return '%s (from %s)%s' % (redact_password_from_url(self.url),
+                                       self.comes_from, rp)
+        else:
+            return redact_password_from_url(str(self.url))
+
+    def __repr__(self):
+        return '<Link %s>' % self
+
+    @property
+    def filename(self):
+        # type: () -> str
+        _, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
+        name = posixpath.basename(path.rstrip('/')) or netloc
+        name = urllib_parse.unquote(name)
+        assert name, ('URL %r produced no filename' % self.url)
+        return name
+
+    @property
+    def scheme(self):
+        # type: () -> str
+        return urllib_parse.urlsplit(self.url)[0]
+
+    @property
+    def netloc(self):
+        # type: () -> str
+        return urllib_parse.urlsplit(self.url)[1]
+
+    @property
+    def path(self):
+        # type: () -> str
+        return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
+
+    def splitext(self):
+        # type: () -> Tuple[str, str]
+        return splitext(posixpath.basename(self.path.rstrip('/')))
+
+    @property
+    def ext(self):
+        # type: () -> str
+        return self.splitext()[1]
+
+    @property
+    def url_without_fragment(self):
+        # type: () -> str
+        scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
+        return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
+
+    _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')
+
+    @property
+    def egg_fragment(self):
+        # type: () -> Optional[str]
+        match = self._egg_fragment_re.search(self.url)
+        if not match:
+            return None
+        return match.group(1)
+
+    _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')
+
+    @property
+    def subdirectory_fragment(self):
+        # type: () -> Optional[str]
+        match = self._subdirectory_fragment_re.search(self.url)
+        if not match:
+            return None
+        return match.group(1)
+
+    _hash_re = re.compile(
+        r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
+    )
+
+    @property
+    def hash(self):
+        # type: () -> Optional[str]
+        match = self._hash_re.search(self.url)
+        if match:
+            return match.group(2)
+        return None
+
+    @property
+    def hash_name(self):
+        # type: () -> Optional[str]
+        match = self._hash_re.search(self.url)
+        if match:
+            return match.group(1)
+        return None
+
+    @property
+    def show_url(self):
+        # type: () -> Optional[str]
+        return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
+
+    @property
+    def is_wheel(self):
+        # type: () -> bool
+        return self.ext == WHEEL_EXTENSION
+
+    @property
+    def is_artifact(self):
+        # type: () -> bool
+        """
+        Determines if this points to an actual artifact (e.g. a tarball) or if
+        it points to an "abstract" thing like a path or a VCS location.
+        """
+        from pip._internal.vcs import vcs
+
+        if self.scheme in vcs.all_schemes:
+            return False
+
+        return True
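Most of `Link` is fragment and path parsing, which is simplest to see on a typical wheel URL. A sketch with illustrative values (the sha256 digest is made up):

```python
from pip._internal.models.link import Link

link = Link(
    "https://files.pythonhosted.org/packages/demo/"
    "demo-1.0-py2.py3-none-any.whl#sha256=" + "ab" * 32
)
print(link.filename)   # demo-1.0-py2.py3-none-any.whl
print(link.ext)        # .whl
print(link.is_wheel)   # True
print(link.hash_name)  # sha256
print(link.show_url)   # demo-1.0-py2.py3-none-any.whl
```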
diff --git a/lib/python3.7/site-packages/pip/_internal/operations/__init__.py b/lib/python3.7/site-packages/pip/_internal/operations/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/python3.7/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b4926881840231bb0d2d5ab58713158e789be1f
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/operations/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/operations/__pycache__/check.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/operations/__pycache__/check.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a6690f865f05a823b11a214878f55040ee94615
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/operations/__pycache__/check.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1085c385821a17451b2c52ac4b58d48649850f45
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/operations/__pycache__/freeze.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e77fc4c24d8198bb606bd26d249906778aadc28
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/operations/__pycache__/prepare.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/operations/check.py b/lib/python3.7/site-packages/pip/_internal/operations/check.py
new file mode 100644
index 0000000000000000000000000000000000000000..920df5d489dba04b0bf1d165e861d2dd2579de95
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/operations/check.py
@@ -0,0 +1,155 @@
+"""Validation of dependencies of packages
+"""
+
+import logging
+from collections import namedtuple
+
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.pkg_resources import RequirementParseError
+
+from pip._internal.operations.prepare import make_abstract_dist
+from pip._internal.utils.misc import get_installed_distributions
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+logger = logging.getLogger(__name__)
+
+if MYPY_CHECK_RUNNING:
+    from pip._internal.req.req_install import InstallRequirement
+    from typing import (
+        Any, Callable, Dict, Optional, Set, Tuple, List
+    )
+
+    # Shorthands
+    PackageSet = Dict[str, 'PackageDetails']
+    Missing = Tuple[str, Any]
+    Conflicting = Tuple[str, str, Any]
+
+    MissingDict = Dict[str, List[Missing]]
+    ConflictingDict = Dict[str, List[Conflicting]]
+    CheckResult = Tuple[MissingDict, ConflictingDict]
+
+PackageDetails = namedtuple('PackageDetails', ['version', 'requires'])
+
+
+def create_package_set_from_installed(**kwargs):
+    # type: (**Any) -> Tuple[PackageSet, bool]
+    """Converts a list of distributions into a PackageSet.
+    """
+    # Default to using all packages installed on the system
+    if kwargs == {}:
+        kwargs = {"local_only": False, "skip": ()}
+
+    package_set = {}
+    problems = False
+    for dist in get_installed_distributions(**kwargs):
+        name = canonicalize_name(dist.project_name)
+        try:
+            package_set[name] = PackageDetails(dist.version, dist.requires())
+        except RequirementParseError as e:
+            # Don't crash on broken metadata
+            logger.warning("Error parsing requirements for %s: %s", name, e)
+            problems = True
+    return package_set, problems
+
+
+def check_package_set(package_set, should_ignore=None):
+    # type: (PackageSet, Optional[Callable[[str], bool]]) -> CheckResult
+    """Check if a package set is consistent
+
+    If should_ignore is passed, it should be a callable that takes a
+    package name and returns a boolean.
+    """
+    if should_ignore is None:
+        def should_ignore(name):
+            return False
+
+    missing = dict()
+    conflicting = dict()
+
+    for package_name in package_set:
+        # Info about dependencies of package_name
+        missing_deps = set()  # type: Set[Missing]
+        conflicting_deps = set()  # type: Set[Conflicting]
+
+        if should_ignore(package_name):
+            continue
+
+        for req in package_set[package_name].requires:
+            name = canonicalize_name(req.project_name)  # type: str
+
+            # Check if it's missing
+            if name not in package_set:
+                missed = True
+                if req.marker is not None:
+                    missed = req.marker.evaluate()
+                if missed:
+                    missing_deps.add((name, req))
+                continue
+
+            # Check if there's a conflict
+            version = package_set[name].version  # type: str
+            if not req.specifier.contains(version, prereleases=True):
+                conflicting_deps.add((name, version, req))
+
+        if missing_deps:
+            missing[package_name] = sorted(missing_deps, key=str)
+        if conflicting_deps:
+            conflicting[package_name] = sorted(conflicting_deps, key=str)
+
+    return missing, conflicting
+
+
+def check_install_conflicts(to_install):
+    # type: (List[InstallRequirement]) -> Tuple[PackageSet, CheckResult]
+    """For checking if the dependency graph would be consistent after \
+    installing given requirements
+    """
+    # Start from the current state
+    package_set, _ = create_package_set_from_installed()
+    # Install packages
+    would_be_installed = _simulate_installation_of(to_install, package_set)
+
+    # Only warn about directly-dependent packages; create a whitelist of them
+    whitelist = _create_whitelist(would_be_installed, package_set)
+
+    return (
+        package_set,
+        check_package_set(
+            package_set, should_ignore=lambda name: name not in whitelist
+        )
+    )
+
+
+def _simulate_installation_of(to_install, package_set):
+    # type: (List[InstallRequirement], PackageSet) -> Set[str]
+    """Computes the version of packages after installing to_install.
+    """
+
+    # Keep track of packages that were installed
+    installed = set()
+
+    # Modify it as installing requirement_set would (assuming no errors)
+    for inst_req in to_install:
+        dist = make_abstract_dist(inst_req).dist()
+        name = canonicalize_name(dist.key)
+        package_set[name] = PackageDetails(dist.version, dist.requires())
+
+        installed.add(name)
+
+    return installed
+
+
+def _create_whitelist(would_be_installed, package_set):
+    # type: (Set[str], PackageSet) -> Set[str]
+    packages_affected = set(would_be_installed)
+
+    for package_name in package_set:
+        if package_name in packages_affected:
+            continue
+
+        for req in package_set[package_name].requires:
+            if canonicalize_name(req.name) in packages_affected:
+                packages_affected.add(package_name)
+                break
+
+    return packages_affected
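`check_package_set` only needs the `PackageDetails` mapping, so its two result buckets can be demonstrated with a hand-built set (versions and requirements are hypothetical):

```python
from pip._vendor.pkg_resources import Requirement

from pip._internal.operations.check import PackageDetails, check_package_set

package_set = {
    "flask": PackageDetails(
        version="1.0.2",
        requires=[Requirement.parse("jinja2>=2.10"),
                  Requirement.parse("click>=5.1")],
    ),
    "jinja2": PackageDetails(version="2.9", requires=[]),
}
missing, conflicting = check_package_set(package_set)
print(sorted(missing))      # ['flask'] -- click is not installed at all
print(sorted(conflicting))  # ['flask'] -- jinja2 2.9 violates jinja2>=2.10
```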
diff --git a/lib/python3.7/site-packages/pip/_internal/operations/freeze.py b/lib/python3.7/site-packages/pip/_internal/operations/freeze.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c4c76107c99310defbd5128064cd0072cdd0473
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/operations/freeze.py
@@ -0,0 +1,247 @@
+from __future__ import absolute_import
+
+import collections
+import logging
+import os
+import re
+
+from pip._vendor import six
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.pkg_resources import RequirementParseError
+
+from pip._internal.exceptions import BadCommand, InstallationError
+from pip._internal.req.constructors import (
+    install_req_from_editable, install_req_from_line,
+)
+from pip._internal.req.req_file import COMMENT_RE
+from pip._internal.utils.misc import (
+    dist_is_editable, get_installed_distributions,
+)
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import (
+        Iterator, Optional, List, Container, Set, Dict, Tuple, Iterable, Union
+    )
+    from pip._internal.cache import WheelCache
+    from pip._vendor.pkg_resources import (
+        Distribution, Requirement
+    )
+
+    RequirementInfo = Tuple[Optional[Union[str, Requirement]], bool, List[str]]
+
+
+logger = logging.getLogger(__name__)
+
+
+def freeze(
+    requirement=None,  # type: Optional[List[str]]
+    find_links=None,  # type: Optional[List[str]]
+    local_only=None,  # type: Optional[bool]
+    user_only=None,  # type: Optional[bool]
+    skip_regex=None,  # type: Optional[str]
+    isolated=False,  # type: bool
+    wheel_cache=None,  # type: Optional[WheelCache]
+    exclude_editable=False,  # type: bool
+    skip=()  # type: Container[str]
+):
+    # type: (...) -> Iterator[str]
+    find_links = find_links or []
+    skip_match = None
+
+    if skip_regex:
+        skip_match = re.compile(skip_regex).search
+
+    for link in find_links:
+        yield '-f %s' % link
+    installations = {}  # type: Dict[str, FrozenRequirement]
+    for dist in get_installed_distributions(local_only=local_only,
+                                            skip=(),
+                                            user_only=user_only):
+        try:
+            req = FrozenRequirement.from_dist(dist)
+        except RequirementParseError:
+            logger.warning(
+                "Could not parse requirement: %s",
+                dist.project_name
+            )
+            continue
+        if exclude_editable and req.editable:
+            continue
+        installations[req.name] = req
+
+    if requirement:
+        # the options that don't get turned into an InstallRequirement
+        # should only be emitted once, even if the same option appears in
+        # multiple requirements files, so we keep track of what has
+        # already been emitted
+        emitted_options = set()  # type: Set[str]
+        # keep track of which files a requirement is in so that we can
+        # give an accurate warning if a requirement appears multiple times.
+        req_files = collections.defaultdict(list)  # type: Dict[str, List[str]]
+        for req_file_path in requirement:
+            with open(req_file_path) as req_file:
+                for line in req_file:
+                    if (not line.strip() or
+                            line.strip().startswith('#') or
+                            (skip_match and skip_match(line)) or
+                            line.startswith((
+                                '-r', '--requirement',
+                                '-Z', '--always-unzip',
+                                '-f', '--find-links',
+                                '-i', '--index-url',
+                                '--pre',
+                                '--trusted-host',
+                                '--process-dependency-links',
+                                '--extra-index-url'))):
+                        line = line.rstrip()
+                        if line not in emitted_options:
+                            emitted_options.add(line)
+                            yield line
+                        continue
+
+                    if line.startswith('-e') or line.startswith('--editable'):
+                        if line.startswith('-e'):
+                            line = line[2:].strip()
+                        else:
+                            line = line[len('--editable'):].strip().lstrip('=')
+                        line_req = install_req_from_editable(
+                            line,
+                            isolated=isolated,
+                            wheel_cache=wheel_cache,
+                        )
+                    else:
+                        line_req = install_req_from_line(
+                            COMMENT_RE.sub('', line).strip(),
+                            isolated=isolated,
+                            wheel_cache=wheel_cache,
+                        )
+
+                    if not line_req.name:
+                        logger.info(
+                            "Skipping line in requirement file [%s] because "
+                            "it's not clear what it would install: %s",
+                            req_file_path, line.strip(),
+                        )
+                        logger.info(
+                            "  (add #egg=PackageName to the URL to avoid"
+                            " this warning)"
+                        )
+                    elif line_req.name not in installations:
+                        # either it's not installed, or it is installed
+                        # but has been processed already
+                        if not req_files[line_req.name]:
+                            logger.warning(
+                                "Requirement file [%s] contains %s, but "
+                                "package %r is not installed",
+                                req_file_path,
+                                COMMENT_RE.sub('', line).strip(), line_req.name
+                            )
+                        else:
+                            req_files[line_req.name].append(req_file_path)
+                    else:
+                        yield str(installations[line_req.name]).rstrip()
+                        del installations[line_req.name]
+                        req_files[line_req.name].append(req_file_path)
+
+        # Warn about requirements that were included multiple times (in a
+        # single requirements file or in different requirements files).
+        for name, files in six.iteritems(req_files):
+            if len(files) > 1:
+                logger.warning("Requirement %s included multiple times [%s]",
+                               name, ', '.join(sorted(set(files))))
+
+        yield (
+            '## The following requirements were added by '
+            'pip freeze:'
+        )
+    for installation in sorted(
+            installations.values(), key=lambda x: x.name.lower()):
+        if canonicalize_name(installation.name) not in skip:
+            yield str(installation).rstrip()
+
+
+def get_requirement_info(dist):
+    # type: (Distribution) -> RequirementInfo
+    """
+    Compute and return values (req, editable, comments) for use in
+    FrozenRequirement.from_dist().
+    """
+    if not dist_is_editable(dist):
+        return (None, False, [])
+
+    location = os.path.normcase(os.path.abspath(dist.location))
+
+    from pip._internal.vcs import vcs, RemoteNotFoundError
+    vc_type = vcs.get_backend_type(location)
+
+    if not vc_type:
+        req = dist.as_requirement()
+        logger.debug(
+            'No VCS found for editable requirement %r in: %r', req,
+            location,
+        )
+        comments = [
+            '# Editable install with no version control ({})'.format(req)
+        ]
+        return (location, True, comments)
+
+    try:
+        req = vc_type.get_src_requirement(location, dist.project_name)
+    except RemoteNotFoundError:
+        req = dist.as_requirement()
+        comments = [
+            '# Editable {} install with no remote ({})'.format(
+                vc_type.__name__, req,
+            )
+        ]
+        return (location, True, comments)
+
+    except BadCommand:
+        logger.warning(
+            'cannot determine version of editable source in %s '
+            '(%s command not found in path)',
+            location,
+            vc_type.name,
+        )
+        return (None, True, [])
+
+    except InstallationError as exc:
+        logger.warning(
+            "Error when trying to get requirement for VCS system %s, "
+            "falling back to uneditable format", exc
+        )
+    else:
+        if req is not None:
+            return (req, True, [])
+
+    logger.warning(
+        'Could not determine repository location of %s', location
+    )
+    comments = ['## !! Could not determine repository location']
+
+    return (None, False, comments)
+
+
+class FrozenRequirement(object):
+    def __init__(self, name, req, editable, comments=()):
+        # type: (str, Union[str, Requirement], bool, Iterable[str]) -> None
+        self.name = name
+        self.req = req
+        self.editable = editable
+        self.comments = comments
+
+    @classmethod
+    def from_dist(cls, dist):
+        # type: (Distribution) -> FrozenRequirement
+        req, editable, comments = get_requirement_info(dist)
+        if req is None:
+            req = dist.as_requirement()
+
+        return cls(dist.project_name, req, editable, comments=comments)
+
+    def __str__(self):
+        req = self.req
+        if self.editable:
+            req = '-e %s' % req
+        return '\n'.join(list(self.comments) + [str(req)]) + '\n'
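Called with no requirement files, `freeze` yields one line per installed distribution; a minimal sketch of the common path (the skip set roughly mirrors what `pip freeze` excludes by default):

```python
from pip._internal.operations.freeze import freeze

# local_only limits output to the current environment; skip takes
# canonicalized names to omit from the listing.
for line in freeze(local_only=True, skip={"pip", "setuptools", "wheel"}):
    print(line)
```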
diff --git a/lib/python3.7/site-packages/pip/_internal/operations/prepare.py b/lib/python3.7/site-packages/pip/_internal/operations/prepare.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc7f86eed5612205b8481a469fbc2f258abf24ae
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/operations/prepare.py
@@ -0,0 +1,432 @@
+"""Prepares a distribution for installation
+"""
+
+import logging
+import os
+
+from pip._vendor import pkg_resources, requests
+
+from pip._internal.build_env import BuildEnvironment
+from pip._internal.download import (
+    is_dir_url, is_file_url, is_vcs_url, unpack_url, url_to_path,
+)
+from pip._internal.exceptions import (
+    DirectoryUrlHashUnsupported, HashUnpinned, InstallationError,
+    PreviousBuildDirError, VcsHashUnsupported,
+)
+from pip._internal.utils.compat import expanduser
+from pip._internal.utils.hashes import MissingHashes
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import display_path, normalize_path
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Any, Optional
+    from pip._internal.req.req_install import InstallRequirement
+    from pip._internal.index import PackageFinder
+    from pip._internal.download import PipSession
+    from pip._internal.req.req_tracker import RequirementTracker
+
+logger = logging.getLogger(__name__)
+
+
+def make_abstract_dist(req):
+    # type: (InstallRequirement) -> DistAbstraction
+    """Factory to make an abstract dist object.
+
+    Preconditions: either an editable req with a source_dir, a req that is
+    satisfied_by an installed distribution, a wheel link, or a non-editable
+    req with a source_dir.
+
+    :return: A concrete DistAbstraction.
+    """
+    if req.editable:
+        return IsSDist(req)
+    elif req.link and req.link.is_wheel:
+        return IsWheel(req)
+    else:
+        return IsSDist(req)
+
+
+class DistAbstraction(object):
+    """Abstracts out the wheel vs non-wheel Resolver.resolve() logic.
+
+    The requirements for anything installable are as follows:
+     - we must be able to determine the requirement name
+       (or we can't correctly handle the non-upgrade case).
+     - we must be able to generate a list of run-time dependencies
+       without installing any additional packages (or we would
+       have to either burn time by doing temporary isolated installs
+       or alternatively violate pip's 'don't start installing unless
+       all requirements are available' rule - neither of which are
+       desirable).
+     - for packages with setup requirements, we must also be able
+       to determine their requirements without installing additional
+       packages (for the same reason as run-time dependencies)
+     - we must be able to create a Distribution object exposing the
+       above metadata.
+    """
+
+    def __init__(self, req):
+        # type: (InstallRequirement) -> None
+        self.req = req  # type: InstallRequirement
+
+    def dist(self):
+        # type: () -> Any
+        """Return a setuptools Dist object."""
+        raise NotImplementedError
+
+    def prep_for_dist(self, finder, build_isolation):
+        # type: (PackageFinder, bool) -> Any
+        """Ensure that we can get a Dist for this requirement."""
+        raise NotImplementedError
+
+
+class IsWheel(DistAbstraction):
+
+    def dist(self):
+        # type: () -> pkg_resources.Distribution
+        return list(pkg_resources.find_distributions(
+            self.req.source_dir))[0]
+
+    def prep_for_dist(self, finder, build_isolation):
+        # type: (PackageFinder, bool) -> Any
+        # FIXME:https://github.com/pypa/pip/issues/1112
+        pass
+
+
+class IsSDist(DistAbstraction):
+
+    def dist(self):
+        return self.req.get_dist()
+
+    def _raise_conflicts(self, conflicting_with, conflicting_reqs):
+        conflict_messages = [
+            '%s is incompatible with %s' % (installed, wanted)
+            for installed, wanted in sorted(conflicting_reqs)
+        ]
+        raise InstallationError(
+            "Some build dependencies for %s conflict with %s: %s." % (
+                self.req, conflicting_with, ', '.join(conflict_messages))
+        )
+
+    def install_backend_dependencies(self, finder):
+        # type: (PackageFinder) -> None
+        """
+        Install any extra build dependencies that the backend requests.
+
+        :param finder: a PackageFinder object.
+        """
+        req = self.req
+        with req.build_env:
+            # We need to have the env active when calling the hook.
+            req.spin_message = "Getting requirements to build wheel"
+            reqs = req.pep517_backend.get_requires_for_build_wheel()
+        conflicting, missing = req.build_env.check_requirements(reqs)
+        if conflicting:
+            self._raise_conflicts("the backend dependencies", conflicting)
+        req.build_env.install_requirements(
+            finder, missing, 'normal',
+            "Installing backend dependencies"
+        )
+
+    def prep_for_dist(self, finder, build_isolation):
+        # type: (PackageFinder, bool) -> None
+        # Prepare for building. We need to:
+        #   1. Load pyproject.toml (if it exists)
+        #   2. Set up the build environment
+
+        self.req.load_pyproject_toml()
+
+        should_isolate = (
+            (self.req.use_pep517 or self.req.pyproject_requires) and
+            build_isolation
+        )
+
+        if should_isolate:
+            # Isolate in a BuildEnvironment and install the build-time
+            # requirements.
+            self.req.build_env = BuildEnvironment()
+            self.req.build_env.install_requirements(
+                finder, self.req.pyproject_requires, 'overlay',
+                "Installing build dependencies"
+            )
+            conflicting, missing = self.req.build_env.check_requirements(
+                self.req.requirements_to_check
+            )
+            if conflicting:
+                self._raise_conflicts("PEP 517/518 supported requirements",
+                                      conflicting)
+            if missing:
+                logger.warning(
+                    "Missing build requirements in pyproject.toml for %s.",
+                    self.req,
+                )
+                logger.warning(
+                    "The project does not specify a build backend, and "
+                    "pip cannot fall back to setuptools without %s.",
+                    " and ".join(map(repr, sorted(missing)))
+                )
+
+            if self.req.use_pep517:
+                # If we're using PEP 517, then install any extra build
+                # dependencies that the backend requested.  This must be
+                # done in a second pass, as the pyproject.toml dependencies
+                # must be installed before we can call the backend.
+                self.install_backend_dependencies(finder=finder)
+
+        self.req.prepare_metadata()
+        self.req.assert_source_matches_version()
+
+
+class Installed(DistAbstraction):
+
+    def dist(self):
+        # type: () -> pkg_resources.Distribution
+        return self.req.satisfied_by
+
+    def prep_for_dist(self, finder, build_isolation):
+        # type: (PackageFinder, bool) -> Any
+        pass
+
+
+class RequirementPreparer(object):
+    """Prepares a Requirement
+    """
+
+    def __init__(
+        self,
+        build_dir,  # type: str
+        download_dir,  # type: Optional[str]
+        src_dir,  # type: str
+        wheel_download_dir,  # type: Optional[str]
+        progress_bar,  # type: str
+        build_isolation,  # type: bool
+        req_tracker  # type: RequirementTracker
+    ):
+        # type: (...) -> None
+        super(RequirementPreparer, self).__init__()
+
+        self.src_dir = src_dir
+        self.build_dir = build_dir
+        self.req_tracker = req_tracker
+
+        # Where still-packed archives should be written to. If None, they
+        # are not saved, and are deleted immediately after unpacking.
+        self.download_dir = download_dir
+
+        # Where still-packed .whl files should be written to. If None, they
+        # are written to download_dir. Kept separate from download_dir so
+        # that `pip wheel` can keep only wheel archives.
+        if wheel_download_dir:
+            wheel_download_dir = normalize_path(wheel_download_dir)
+        self.wheel_download_dir = wheel_download_dir
+
+        # NOTE
+        # download_dir and wheel_download_dir overlap semantically and may
+        # be combined if we're willing to have non-wheel archives present in
+        # the wheelhouse output by 'pip wheel'.
+
+        self.progress_bar = progress_bar
+
+        # Is build isolation allowed?
+        self.build_isolation = build_isolation
+
+    @property
+    def _download_should_save(self):
+        # type: () -> bool
+        # TODO: Modify to reduce indentation needed
+        if self.download_dir:
+            self.download_dir = expanduser(self.download_dir)
+            if os.path.exists(self.download_dir):
+                return True
+            else:
+                logger.critical('Could not find download directory')
+                raise InstallationError(
+                    "Could not find or access download directory '%s'"
+                    % display_path(self.download_dir))
+        return False
+
+    def prepare_linked_requirement(
+        self,
+        req,  # type: InstallRequirement
+        session,  # type: PipSession
+        finder,  # type: PackageFinder
+        upgrade_allowed,  # type: bool
+        require_hashes  # type: bool
+    ):
+        # type: (...) -> DistAbstraction
+        """Prepare a requirement that would be obtained from req.link
+        """
+        # TODO: Breakup into smaller functions
+        if req.link and req.link.scheme == 'file':
+            path = url_to_path(req.link.url)
+            logger.info('Processing %s', display_path(path))
+        else:
+            logger.info('Collecting %s', req)
+
+        with indent_log():
+            # NOTE: if filesystem packages are not marked editable in a
+            # req, a non-deterministic error occurs when the script
+            # attempts to unpack the build directory
+            req.ensure_has_source_dir(self.build_dir)
+            # If a checkout exists, it's unwise to keep going.  version
+            # inconsistencies are logged later, but do not fail the
+            # installation.
+            # FIXME: this won't upgrade when there's an existing
+            # package unpacked in `req.source_dir`
+            if os.path.exists(os.path.join(req.source_dir, 'setup.py')):
+                raise PreviousBuildDirError(
+                    "pip can't proceed with requirements '%s' due to a"
+                    " pre-existing build directory (%s). This is "
+                    "likely due to a previous installation that failed"
+                    ". pip is being responsible and not assuming it "
+                    "can delete this. Please delete it and try again."
+                    % (req, req.source_dir)
+                )
+            req.populate_link(finder, upgrade_allowed, require_hashes)
+
+            # We can't hit this spot and have populate_link return None.
+            # req.satisfied_by is None here (because we're
+            # guarded) and upgrade has no impact except when satisfied_by
+            # is not None.
+            # Then inside find_requirement existing_applicable -> False
+            # If no new versions are found, DistributionNotFound is raised,
+            # otherwise a result is guaranteed.
+            assert req.link
+            link = req.link
+
+            # Now that we have the real link, we can tell what kind of
+            # requirements we have and raise some more informative errors
+            # than otherwise. (For example, we can raise VcsHashUnsupported
+            # for a VCS URL rather than HashMissing.)
+            if require_hashes:
+                # We could check these first 2 conditions inside
+                # unpack_url and save repetition of conditions, but then
+                # we would report less-useful error messages for
+                # unhashable requirements, complaining that there's no
+                # hash provided.
+                if is_vcs_url(link):
+                    raise VcsHashUnsupported()
+                elif is_file_url(link) and is_dir_url(link):
+                    raise DirectoryUrlHashUnsupported()
+                if not req.original_link and not req.is_pinned:
+                    # Unpinned packages are asking for trouble when a new
+                    # version is uploaded. This isn't a security check, but
+                    # it saves users a surprising hash mismatch in the
+                    # future.
+                    #
+                    # file:/// URLs aren't pinnable, so don't complain
+                    # about them not being pinned.
+                    raise HashUnpinned()
+
+            hashes = req.hashes(trust_internet=not require_hashes)
+            if require_hashes and not hashes:
+                # Known-good hashes are missing for this requirement, so
+                # shim it with a facade object that will provoke hash
+                # computation and then raise a HashMissing exception
+                # showing the user what the hash should be.
+                hashes = MissingHashes()
+
+            try:
+                download_dir = self.download_dir
+                # We always delete unpacked sdists after pip has run.
+                autodelete_unpacked = True
+                if req.link.is_wheel and self.wheel_download_dir:
+                    # when doing `pip wheel` we download wheels to a
+                    # dedicated dir.
+                    download_dir = self.wheel_download_dir
+                if req.link.is_wheel:
+                    if download_dir:
+                        # When downloading, we only unpack wheels to get
+                        # metadata.
+                        autodelete_unpacked = True
+                    else:
+                        # When installing a wheel, we use the unpacked
+                        # wheel.
+                        autodelete_unpacked = False
+                unpack_url(
+                    req.link, req.source_dir,
+                    download_dir, autodelete_unpacked,
+                    session=session, hashes=hashes,
+                    progress_bar=self.progress_bar
+                )
+            except requests.HTTPError as exc:
+                logger.critical(
+                    'Could not install requirement %s because of error %s',
+                    req,
+                    exc,
+                )
+                raise InstallationError(
+                    'Could not install requirement %s because of HTTP '
+                    'error %s for URL %s' %
+                    (req, exc, req.link)
+                )
+            abstract_dist = make_abstract_dist(req)
+            with self.req_tracker.track(req):
+                abstract_dist.prep_for_dist(finder, self.build_isolation)
+            if self._download_should_save:
+                # Make a .zip of the source_dir we already created.
+                if not req.link.is_artifact:
+                    req.archive(self.download_dir)
+        return abstract_dist
+
+    def prepare_editable_requirement(
+        self,
+        req,  # type: InstallRequirement
+        require_hashes,  # type: bool
+        use_user_site,  # type: bool
+        finder  # type: PackageFinder
+    ):
+        # type: (...) -> DistAbstraction
+        """Prepare an editable requirement
+        """
+        assert req.editable, "cannot prepare a non-editable req as editable"
+
+        logger.info('Obtaining %s', req)
+
+        with indent_log():
+            if require_hashes:
+                raise InstallationError(
+                    'The editable requirement %s cannot be installed when '
+                    'requiring hashes, because there is no single file to '
+                    'hash.' % req
+                )
+            req.ensure_has_source_dir(self.src_dir)
+            req.update_editable(not self._download_should_save)
+
+            abstract_dist = make_abstract_dist(req)
+            with self.req_tracker.track(req):
+                abstract_dist.prep_for_dist(finder, self.build_isolation)
+
+            if self._download_should_save:
+                req.archive(self.download_dir)
+            req.check_if_exists(use_user_site)
+
+        return abstract_dist
+
+    def prepare_installed_requirement(self, req, require_hashes, skip_reason):
+        # type: (InstallRequirement, bool, Optional[str]) -> DistAbstraction
+        """Prepare an already-installed requirement
+        """
+        assert req.satisfied_by, "req should have been satisfied but isn't"
+        assert skip_reason is not None, (
+            "did not get skip reason skipped but req.satisfied_by "
+            "is set to %r" % (req.satisfied_by,)
+        )
+        logger.info(
+            'Requirement %s: %s (%s)',
+            skip_reason, req, req.satisfied_by.version
+        )
+        with indent_log():
+            if require_hashes:
+                logger.debug(
+                    'Since it is already installed, we are trusting this '
+                    'package without checking its hash. To ensure a '
+                    'completely repeatable environment, install into an '
+                    'empty virtualenv.'
+                )
+            abstract_dist = Installed(req)
+
+        return abstract_dist
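`make_abstract_dist` only inspects `req.editable` and `req.link.is_wheel`, so its dispatch can be sketched with stand-in objects (real callers pass `InstallRequirement` instances):

```python
from types import SimpleNamespace

from pip._internal.operations.prepare import (
    IsSDist, IsWheel, make_abstract_dist,
)

wheel_req = SimpleNamespace(
    editable=False, link=SimpleNamespace(is_wheel=True))
sdist_req = SimpleNamespace(
    editable=False, link=SimpleNamespace(is_wheel=False))
editable_req = SimpleNamespace(editable=True, link=None)

assert isinstance(make_abstract_dist(wheel_req), IsWheel)
# Editable reqs and everything without a wheel link are treated as sdists.
assert isinstance(make_abstract_dist(sdist_req), IsSDist)
assert isinstance(make_abstract_dist(editable_req), IsSDist)
```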
diff --git a/lib/python3.7/site-packages/pip/_internal/pep425tags.py b/lib/python3.7/site-packages/pip/_internal/pep425tags.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b68f28d2030e77fd28c84938c45d68a264b9711
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/pep425tags.py
@@ -0,0 +1,381 @@
+"""Generate and work with PEP 425 Compatibility Tags."""
+from __future__ import absolute_import
+
+import distutils.util
+import logging
+import platform
+import re
+import sys
+import sysconfig
+import warnings
+from collections import OrderedDict
+
+import pip._internal.utils.glibc
+from pip._internal.utils.compat import get_extension_suffixes
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import (
+        Tuple, Callable, List, Optional, Union, Dict
+    )
+
+    Pep425Tag = Tuple[str, str, str]
+
+logger = logging.getLogger(__name__)
+
+_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')
+
+
+def get_config_var(var):
+    # type: (str) -> Optional[str]
+    try:
+        return sysconfig.get_config_var(var)
+    except IOError as e:  # Issue #1074
+        warnings.warn("{}".format(e), RuntimeWarning)
+        return None
+
+
+def get_abbr_impl():
+    # type: () -> str
+    """Return abbreviated implementation name."""
+    if hasattr(sys, 'pypy_version_info'):
+        pyimpl = 'pp'
+    elif sys.platform.startswith('java'):
+        pyimpl = 'jy'
+    elif sys.platform == 'cli':
+        pyimpl = 'ip'
+    else:
+        pyimpl = 'cp'
+    return pyimpl
+
+
+def get_impl_ver():
+    # type: () -> str
+    """Return implementation version."""
+    impl_ver = get_config_var("py_version_nodot")
+    if not impl_ver or get_abbr_impl() == 'pp':
+        impl_ver = ''.join(map(str, get_impl_version_info()))
+    return impl_ver
+
+
+def get_impl_version_info():
+    # type: () -> Tuple[int, ...]
+    """Return sys.version_info-like tuple for use in decrementing the minor
+    version."""
+    if get_abbr_impl() == 'pp':
+        # as per https://github.com/pypa/pip/issues/2882
+        # attrs exist only on pypy
+        return (sys.version_info[0],
+                sys.pypy_version_info.major,  # type: ignore
+                sys.pypy_version_info.minor)  # type: ignore
+    else:
+        return sys.version_info[0], sys.version_info[1]
+
+
+def get_impl_tag():
+    # type: () -> str
+    """
+    Returns the Tag for this specific implementation.
+    """
+    return "{}{}".format(get_abbr_impl(), get_impl_ver())
+
+
+def get_flag(var, fallback, expected=True, warn=True):
+    # type: (str, Callable[..., bool], Union[bool, int], bool) -> bool
+    """Use a fallback method for determining SOABI flags if the needed config
+    var is unset or unavailable."""
+    val = get_config_var(var)
+    if val is None:
+        if warn:
+            logger.debug("Config variable '%s' is unset, Python ABI tag may "
+                         "be incorrect", var)
+        return fallback()
+    return val == expected
+
+
+def get_abi_tag():
+    # type: () -> Optional[str]
+    """Return the ABI tag based on SOABI (if available) or emulate SOABI
+    (CPython 2, PyPy)."""
+    soabi = get_config_var('SOABI')
+    impl = get_abbr_impl()
+    if not soabi and impl in {'cp', 'pp'} and hasattr(sys, 'maxunicode'):
+        d = ''
+        m = ''
+        u = ''
+        if get_flag('Py_DEBUG',
+                    lambda: hasattr(sys, 'gettotalrefcount'),
+                    warn=(impl == 'cp')):
+            d = 'd'
+        if get_flag('WITH_PYMALLOC',
+                    lambda: impl == 'cp',
+                    warn=(impl == 'cp')):
+            m = 'm'
+        if get_flag('Py_UNICODE_SIZE',
+                    lambda: sys.maxunicode == 0x10ffff,
+                    expected=4,
+                    warn=(impl == 'cp' and
+                          sys.version_info < (3, 3))) \
+                and sys.version_info < (3, 3):
+            u = 'u'
+        abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
+    elif soabi and soabi.startswith('cpython-'):
+        abi = 'cp' + soabi.split('-')[1]
+    elif soabi:
+        abi = soabi.replace('.', '_').replace('-', '_')
+    else:
+        abi = None
+    return abi
+
+
+def _is_running_32bit():
+    # type: () -> bool
+    return sys.maxsize == 2147483647
+
+
+def get_platform():
+    # type: () -> str
+    """Return our platform name 'win32', 'linux_x86_64'"""
+    if sys.platform == 'darwin':
+        # distutils.util.get_platform() returns the release based on the value
+        # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may
+        # be significantly older than the user's current machine.
+        release, _, machine = platform.mac_ver()
+        split_ver = release.split('.')
+
+        if machine == "x86_64" and _is_running_32bit():
+            machine = "i386"
+        elif machine == "ppc64" and _is_running_32bit():
+            machine = "ppc"
+
+        return 'macosx_{}_{}_{}'.format(split_ver[0], split_ver[1], machine)
+
+    # XXX remove distutils dependency
+    result = distutils.util.get_platform().replace('.', '_').replace('-', '_')
+    if result == "linux_x86_64" and _is_running_32bit():
+        # 32 bit Python program (running on a 64 bit Linux): pip should only
+        # install and run 32 bit compiled extensions in that case.
+        result = "linux_i686"
+
+    return result
+
+
+def is_manylinux1_compatible():
+    # type: () -> bool
+    # Only Linux, and only x86-64 / i686
+    if get_platform() not in {"linux_x86_64", "linux_i686"}:
+        return False
+
+    # Check for presence of _manylinux module
+    try:
+        import _manylinux
+        return bool(_manylinux.manylinux1_compatible)
+    except (ImportError, AttributeError):
+        # Fall through to heuristic check below
+        pass
+
+    # Check glibc version. CentOS 5 uses glibc 2.5.
+    return pip._internal.utils.glibc.have_compatible_glibc(2, 5)
+
+
+def is_manylinux2010_compatible():
+    # type: () -> bool
+    # Only Linux, and only x86-64 / i686
+    if get_platform() not in {"linux_x86_64", "linux_i686"}:
+        return False
+
+    # Check for presence of _manylinux module
+    try:
+        import _manylinux
+        return bool(_manylinux.manylinux2010_compatible)
+    except (ImportError, AttributeError):
+        # Fall through to heuristic check below
+        pass
+
+    # Check glibc version. CentOS 6 uses glibc 2.12.
+    return pip._internal.utils.glibc.have_compatible_glibc(2, 12)
+
+
+def get_darwin_arches(major, minor, machine):
+    # type: (int, int, str) -> List[str]
+    """Return a list of supported arches (including group arches) for
+    the given major, minor and machine architecture of a macOS machine.
+    """
+    arches = []
+
+    def _supports_arch(major, minor, arch):
+        # type: (int, int, str) -> bool
+        # Looking at the application support for macOS versions in the chart
+        # provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears
+        # our timeline looks roughly like:
+        #
+        # 10.0 - Introduces ppc support.
+        # 10.4 - Introduces ppc64, i386, and x86_64 support; however, the ppc64
+        #        and x86_64 support is CLI only, and cannot be used for GUI
+        #        applications.
+        # 10.5 - Extends ppc64 and x86_64 support to cover GUI applications.
+        # 10.6 - Drops support for ppc64
+        # 10.7 - Drops support for ppc
+        #
+        # Given that we do not know if we're installing a CLI or a GUI
+        # application, we must be conservative and assume it might be a GUI
+        # application and behave as if ppc64 and x86_64 support did not occur
+        # until 10.5.
+        #
+        # Note: The above information is taken from the "Application support"
+        #       column in the chart, not the "Processor support" column, since
+        #       I believe that we care about what instruction sets an
+        #       application can use, not which processors the OS supports.
+        if arch == 'ppc':
+            return (major, minor) <= (10, 5)
+        if arch == 'ppc64':
+            return (major, minor) == (10, 5)
+        if arch == 'i386':
+            return (major, minor) >= (10, 4)
+        if arch == 'x86_64':
+            return (major, minor) >= (10, 5)
+        if arch in groups:
+            for garch in groups[arch]:
+                if _supports_arch(major, minor, garch):
+                    return True
+        return False
+
+    groups = OrderedDict([
+        ("fat", ("i386", "ppc")),
+        ("intel", ("x86_64", "i386")),
+        ("fat64", ("x86_64", "ppc64")),
+        ("fat32", ("x86_64", "i386", "ppc")),
+    ])  # type: Dict[str, Tuple[str, ...]]
+
+    if _supports_arch(major, minor, machine):
+        arches.append(machine)
+
+    for garch in groups:
+        if machine in groups[garch] and _supports_arch(major, minor, garch):
+            arches.append(garch)
+
+    arches.append('universal')
+
+    return arches
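+
+
+# Illustration (not part of the original source): on a 64-bit interpreter
+# running macOS 10.9, get_darwin_arches(10, 9, 'x86_64') returns
+# ['x86_64', 'intel', 'fat64', 'fat32', 'universal'].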
+
+
+def get_all_minor_versions_as_strings(version_info):
+    # type: (Tuple[int, ...]) -> List[str]
+    versions = []
+    major = version_info[:-1]
+    # Support all previous minor Python versions.
+    for minor in range(version_info[-1], -1, -1):
+        versions.append(''.join(map(str, major + (minor,))))
+    return versions
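+
+
+# Illustration: get_all_minor_versions_as_strings((3, 7)) returns
+# ['37', '36', '35', '34', '33', '32', '31', '30'].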
+
+
+def get_supported(
+    versions=None,  # type: Optional[List[str]]
+    noarch=False,  # type: bool
+    platform=None,  # type: Optional[str]
+    impl=None,  # type: Optional[str]
+    abi=None  # type: Optional[str]
+):
+    # type: (...) -> List[Pep425Tag]
+    """Return a list of supported tags for each version specified in
+    `versions`.
+
+    :param versions: a list of string versions, of the form ["33", "32"],
+        or None. The first version will be assumed to support our ABI.
+    :param platform: specify the exact platform you want valid
+        tags for, or None. If None, use the local system platform.
+    :param impl: specify the exact implementation you want valid
+        tags for, or None. If None, use the local interpreter impl.
+    :param abi: specify the exact abi you want valid
+        tags for, or None. If None, use the local interpreter abi.
+    """
+    supported = []
+
+    # Versions must be given in order of preference, most preferred first
+    if versions is None:
+        version_info = get_impl_version_info()
+        versions = get_all_minor_versions_as_strings(version_info)
+
+    impl = impl or get_abbr_impl()
+
+    abis = []  # type: List[str]
+
+    abi = abi or get_abi_tag()
+    if abi:
+        abis[0:0] = [abi]
+
+    abi3s = set()
+    for suffix in get_extension_suffixes():
+        if suffix.startswith('.abi'):
+            abi3s.add(suffix.split('.', 2)[1])
+
+    abis.extend(sorted(list(abi3s)))
+
+    abis.append('none')
+
+    if not noarch:
+        arch = platform or get_platform()
+        arch_prefix, arch_sep, arch_suffix = arch.partition('_')
+        if arch.startswith('macosx'):
+            # support macosx-10.6-intel on macosx-10.9-x86_64
+            match = _osx_arch_pat.match(arch)
+            if match:
+                name, major, minor, actual_arch = match.groups()
+                tpl = '{}_{}_%i_%s'.format(name, major)
+                arches = []
+                for m in reversed(range(int(minor) + 1)):
+                    for a in get_darwin_arches(int(major), m, actual_arch):
+                        arches.append(tpl % (m, a))
+            else:
+                # arch pattern didn't match (?!)
+                arches = [arch]
+        elif arch_prefix == 'manylinux2010':
+            # manylinux1 wheels run on most manylinux2010 systems with the
+            # exception of wheels depending on ncurses. PEP 571 states
+            # manylinux1 wheels should be considered manylinux2010 wheels:
+            # https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels
+            arches = [arch, 'manylinux1' + arch_sep + arch_suffix]
+        elif platform is None:
+            arches = []
+            if is_manylinux2010_compatible():
+                arches.append('manylinux2010' + arch_sep + arch_suffix)
+            if is_manylinux1_compatible():
+                arches.append('manylinux1' + arch_sep + arch_suffix)
+            arches.append(arch)
+        else:
+            arches = [arch]
+
+        # Current version, current API (built specifically for our Python):
+        for abi in abis:
+            for arch in arches:
+                supported.append(('%s%s' % (impl, versions[0]), abi, arch))
+
+        # abi3 modules compatible with older version of Python
+        for version in versions[1:]:
+            # abi3 was introduced in Python 3.2
+            if version in {'31', '30'}:
+                break
+            for abi in abi3s:   # empty set if not Python 3
+                for arch in arches:
+                    supported.append(("%s%s" % (impl, version), abi, arch))
+
+        # Has binaries, does not use the Python API:
+        for arch in arches:
+            supported.append(('py%s' % (versions[0][0]), 'none', arch))
+
+    # No abi / arch, but requires our implementation:
+    supported.append(('%s%s' % (impl, versions[0]), 'none', 'any'))
+    # Tagged specifically as being cross-version compatible
+    # (with just the major version specified)
+    supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
+
+    # No abi / arch, generic Python
+    for i, version in enumerate(versions):
+        supported.append(('py%s' % (version,), 'none', 'any'))
+        if i == 0:
+            supported.append(('py%s' % (version[0]), 'none', 'any'))
+
+    return supported
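+
+
+# Illustration (not part of the original source): on a CPython 3.7 Linux
+# build whose glibc passes the manylinux2010 check, the most-preferred tag
+# produced above is ('cp37', 'cp37m', 'manylinux2010_x86_64').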
+
+
+implementation_tag = get_impl_tag()
diff --git a/lib/python3.7/site-packages/pip/_internal/pyproject.py b/lib/python3.7/site-packages/pip/_internal/pyproject.py
new file mode 100644
index 0000000000000000000000000000000000000000..13a8f35a95da33265f5d8577dee0fe73b2f7811a
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/pyproject.py
@@ -0,0 +1,282 @@
+from __future__ import absolute_import
+
+import io
+import os
+import sys
+
+from pip._vendor import pytoml, six
+
+from pip._internal.exceptions import InstallationError
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Any, Dict, List, Optional, Tuple
+
+    Pep517Data = Tuple[str, List[str]]
+
+
+def _is_list_of_str(obj):
+    # type: (Any) -> bool
+    return (
+        isinstance(obj, list) and
+        all(isinstance(item, six.string_types) for item in obj)
+    )
+
+
+def make_pyproject_path(setup_py_dir):
+    # type: (str) -> str
+    path = os.path.join(setup_py_dir, 'pyproject.toml')
+
+    # Python2 __file__ should not be unicode
+    if six.PY2 and isinstance(path, six.text_type):
+        path = path.encode(sys.getfilesystemencoding())
+
+    return path
+
+
+def read_pyproject_toml(path):
+    # type: (str) -> Optional[Dict[str, Any]]
+    """
+    Read a project's pyproject.toml file.
+
+    :param path: The path to the pyproject.toml file.
+
+    :return: The "build_system" value specified in the project's
+        pyproject.toml file.
+    """
+    with io.open(path, encoding="utf-8") as f:
+        pp_toml = pytoml.load(f)
+    build_system = pp_toml.get("build-system")
+
+    return build_system
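+
+
+# Illustration: for a pyproject.toml containing, e.g.,
+#
+#     [build-system]
+#     requires = ["setuptools>=40.8.0", "wheel"]
+#     build-backend = "setuptools.build_meta"
+#
+# read_pyproject_toml() returns
+# {'requires': ['setuptools>=40.8.0', 'wheel'],
+#  'build-backend': 'setuptools.build_meta'}.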
+
+
+def make_editable_error(req_name, reason):
+    """
+    :param req_name: the name of the requirement.
+    :param reason: the reason the requirement is being processed as
+        pyproject.toml-style.
+    """
+    message = (
+        'Error installing {!r}: editable mode is not supported for '
+        'pyproject.toml-style projects. This project is being processed '
+        'as pyproject.toml-style because {}. '
+        'See PEP 517 for the relevant specification.'
+    ).format(req_name, reason)
+    return InstallationError(message)
+
+
+def get_build_system_requires(build_system, req_name):
+    if build_system is None:
+        return None
+
+    # Ensure that the build-system section in pyproject.toml conforms
+    # to PEP 518.
+    error_template = (
+        "{package} has a pyproject.toml file that does not comply "
+        "with PEP 518: {reason}"
+    )
+
+    # Specifying the build-system table but not the requires key is invalid
+    if "requires" not in build_system:
+        raise InstallationError(
+            error_template.format(package=req_name, reason=(
+                "it has a 'build-system' table but not "
+                "'build-system.requires' which is mandatory in the table"
+            ))
+        )
+
+    # Error out if requires is not a list of strings
+    requires = build_system["requires"]
+    if not _is_list_of_str(requires):
+        raise InstallationError(error_template.format(
+            package=req_name,
+            reason="'build-system.requires' is not a list of strings.",
+        ))
+
+    return requires
+
+
+def resolve_pyproject_toml(
+    build_system,  # type: Optional[Dict[str, Any]]
+    has_pyproject,  # type: bool
+    has_setup,  # type: bool
+    use_pep517,  # type: Optional[bool]
+    editable,  # type: bool
+    req_name,  # type: str
+):
+    # type: (...) -> Tuple[Optional[List[str]], Optional[Pep517Data]]
+    """
+    Return how a pyproject.toml file's contents should be interpreted.
+
+    :param build_system: the "build_system" value specified in a project's
+        pyproject.toml file, or None if the project either doesn't have the
+        file or does but the file doesn't have a "build_system" value.
+    :param has_pyproject: whether the project has a pyproject.toml file.
+    :param has_setup: whether the project has a setup.py file.
+    :param use_pep517: whether the user requested PEP 517 processing.  None
+        means the user didn't explicitly specify.
+    :param editable: whether editable mode was requested for the requirement.
+    :param req_name: the name of the requirement we're processing (for
+        error reporting).
+
+    :return: a tuple (requires, pep517_data), where `requires` is the list
+      of build requirements from pyproject.toml (or else None).  The value
+      `pep517_data` is None if `use_pep517` is False.  Otherwise, it is the
+      tuple (backend, check), where `backend` is the name of the PEP 517
+      backend and `check` is the list of requirements we should check are
+      installed after setting up the build environment.
+    """
+    # The following cases must use PEP 517
+    # We check for use_pep517 being non-None and falsey because that means
+    # the user explicitly requested --no-use-pep517.  The value 0 as
+    # opposed to False can occur when the value is provided via an
+    # environment variable or config file option (due to the quirk of
+    # strtobool() returning an integer in pip's configuration code).
+    if has_pyproject and not has_setup:
+        if use_pep517 is not None and not use_pep517:
+            raise InstallationError(
+                "Disabling PEP 517 processing is invalid: "
+                "project does not have a setup.py"
+            )
+        if editable:
+            raise make_editable_error(
+                req_name, 'it has a pyproject.toml file and no setup.py'
+            )
+        use_pep517 = True
+    elif build_system and "build-backend" in build_system:
+        if use_pep517 is not None and not use_pep517:
+            raise InstallationError(
+                "Disabling PEP 517 processing is invalid: "
+                "project specifies a build backend of {} "
+                "in pyproject.toml".format(
+                    build_system["build-backend"]
+                )
+            )
+        if editable:
+            reason = (
+                'it has a pyproject.toml file with a "build-backend" key '
+                'in the "build_system" value'
+            )
+            raise make_editable_error(req_name, reason)
+        use_pep517 = True
+    elif use_pep517:
+        if editable:
+            raise make_editable_error(
+                req_name, 'PEP 517 processing was explicitly requested'
+            )
+
+    # If we haven't worked out whether to use PEP 517 yet, and the user
+    # hasn't explicitly stated a preference, we do so if the project has
+    # a pyproject.toml file (provided editable mode wasn't requested).
+    elif use_pep517 is None:
+        if has_pyproject and editable:
+            message = (
+                'Error installing {!r}: editable mode is not supported for '
+                'pyproject.toml-style projects. pip is processing this '
+                'project as pyproject.toml-style because it has a '
+                'pyproject.toml file. Since the project has a setup.py and '
+                'the pyproject.toml has no "build-backend" key for the '
+                '"build_system" value, you may pass --no-use-pep517 to opt '
+                'out of pyproject.toml-style processing. '
+                'See PEP 517 for details on pyproject.toml-style projects.'
+            ).format(req_name)
+            raise InstallationError(message)
+
+        use_pep517 = has_pyproject
+
+    # At this point, we know whether we're going to use PEP 517.
+    assert use_pep517 is not None
+
+    requires = get_build_system_requires(build_system, req_name=req_name)
+
+    # If we're using the legacy code path, there is nothing further
+    # for us to do here.
+    if not use_pep517:
+        return (requires, None)
+
+    if build_system is None:
+        # Either the user has a pyproject.toml with no build-system
+        # section, or the user has no pyproject.toml, but has opted in
+        # explicitly via --use-pep517.
+        # In the absence of any explicit backend specification, we
+        # assume the setuptools backend that most closely emulates the
+        # traditional direct setup.py execution, and require wheel and
+        # a version of setuptools that supports that backend.
+
+        requires = ["setuptools>=40.8.0", "wheel"]
+        build_system = {
+            "build-backend": "setuptools.build_meta:__legacy__",
+        }
+
+    # If we're using PEP 517, we have build system information (either
+    # from pyproject.toml, or defaulted by the code above).
+    # Note that at this point, we do not know if the user has actually
+    # specified a backend, though.
+    assert build_system is not None
+
+    backend = build_system.get("build-backend")
+    check = []  # type: List[str]
+    if backend is None:
+        # If the user didn't specify a backend, we assume they want to use
+        # the setuptools backend. But we can't be sure they have included
+        # a version of setuptools which supplies the backend, or wheel
+        # (which is needed by the backend) in their requirements. So we
+        # make a note to check that those requirements are present once
+        # we have set up the environment.
+        # This is quite a lot of work to check for a very specific case. But
+        # the problem is, that case is potentially quite common - projects that
+        # adopted PEP 518 early for the ability to specify requirements to
+        # execute setup.py, but never considered needing to mention the build
+        # tools themselves. The original PEP 518 code had a similar check (but
+        # implemented in a different way).
+        backend = "setuptools.build_meta:__legacy__"
+        check = ["setuptools>=40.8.0", "wheel"]
+
+    return (requires, (backend, check))
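+
+
+# Illustration: a plain setup.py project with no pyproject.toml and no
+# explicit --use-pep517 choice takes the legacy path:
+#     resolve_pyproject_toml(None, has_pyproject=False, has_setup=True,
+#                            use_pep517=None, editable=False, req_name='pkg')
+#     -> (None, None)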
+
+
+def load_pyproject_toml(
+    use_pep517,  # type: Optional[bool]
+    editable,  # type: bool
+    pyproject_toml,  # type: str
+    setup_py,  # type: str
+    req_name  # type: str
+):
+    # type: (...) -> Tuple[Optional[List[str]], Optional[Pep517Data]]
+    """Load the pyproject.toml file.
+
+    Parameters:
+        use_pep517 - Has the user requested PEP 517 processing? None
+                     means the user hasn't explicitly specified.
+        editable - Whether editable mode was requested for the requirement.
+        pyproject_toml - Location of the project's pyproject.toml file
+        setup_py - Location of the project's setup.py file
+        req_name - The name of the requirement we're processing (for
+                   error reporting)
+
+    Returns: (requires, pep_517_data)
+      requires: requirements from pyproject.toml (can be None).
+      pep_517_data: None if we should use the legacy code path, otherwise:
+        (
+            name of PEP 517 backend,
+            requirements we should check are installed after setting up
+            the build environment
+        )
+    """
+    has_pyproject = os.path.isfile(pyproject_toml)
+    has_setup = os.path.isfile(setup_py)
+
+    if has_pyproject:
+        build_system = read_pyproject_toml(pyproject_toml)
+    else:
+        build_system = None
+
+    return resolve_pyproject_toml(
+        build_system=build_system,
+        has_pyproject=has_pyproject,
+        has_setup=has_setup,
+        use_pep517=use_pep517,
+        editable=editable,
+        req_name=req_name,
+    )
diff --git a/lib/python3.7/site-packages/pip/_internal/req/__init__.py b/lib/python3.7/site-packages/pip/_internal/req/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c39f63fa831484babe250ec5b5ef30382323d842
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/req/__init__.py
@@ -0,0 +1,78 @@
+from __future__ import absolute_import
+
+import logging
+
+from .req_install import InstallRequirement
+from .req_set import RequirementSet
+from .req_file import parse_requirements
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Any, List, Sequence
+
+__all__ = [
+    "RequirementSet", "InstallRequirement",
+    "parse_requirements", "install_given_reqs",
+]
+
+logger = logging.getLogger(__name__)
+
+
+def install_given_reqs(
+    to_install,  # type: List[InstallRequirement]
+    install_options,  # type: List[str]
+    global_options=(),  # type: Sequence[str]
+    *args,  # type: Any
+    **kwargs  # type: Any
+):
+    # type: (...) -> List[InstallRequirement]
+    """
+    Install everything in the given list.
+
+    (to be called after having downloaded and unpacked the packages)
+    """
+
+    if to_install:
+        logger.info(
+            'Installing collected packages: %s',
+            ', '.join([req.name for req in to_install]),
+        )
+
+    with indent_log():
+        for requirement in to_install:
+            if requirement.conflicts_with:
+                logger.info(
+                    'Found existing installation: %s',
+                    requirement.conflicts_with,
+                )
+                with indent_log():
+                    uninstalled_pathset = requirement.uninstall(
+                        auto_confirm=True
+                    )
+            try:
+                requirement.install(
+                    install_options,
+                    global_options,
+                    *args,
+                    **kwargs
+                )
+            except Exception:
+                should_rollback = (
+                    requirement.conflicts_with and
+                    not requirement.install_succeeded
+                )
+                # if install did not succeed, rollback previous uninstall
+                if should_rollback:
+                    uninstalled_pathset.rollback()
+                raise
+            else:
+                should_commit = (
+                    requirement.conflicts_with and
+                    requirement.install_succeeded
+                )
+                if should_commit:
+                    uninstalled_pathset.commit()
+            requirement.remove_temporary_source()
+
+    return to_install
diff --git a/lib/python3.7/site-packages/pip/_internal/req/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25680665c80c6556a39eaa39c0ada1f336894638
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/req/__pycache__/constructors.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/constructors.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b00e9c14cc3f18d53d542d275883d34a0e699cb8
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/constructors.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_file.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_file.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2bdaf96a93a1040e431e954fb01d5fbdfe059a6a
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_file.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_install.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_install.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..627e835b595f45788581855c2eaa27bcc34772dc
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_install.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_set.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_set.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..793beeedfd8ed9e3b27f1383bc4c1292d528c389
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_set.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_tracker.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_tracker.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fa24b0653fad600122a4805dc4423ace96727585
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_tracker.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d8ff14add6b116a13ba292f85c4787db6bb99ba6
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/req/__pycache__/req_uninstall.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/req/constructors.py b/lib/python3.7/site-packages/pip/_internal/req/constructors.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f18b6a727d2aec6190ab6684833d1ebc504113e
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/req/constructors.py
@@ -0,0 +1,340 @@
+"""Backing implementation for InstallRequirement's various constructors
+
+The idea here is that these formed a major chunk of InstallRequirement's size,
+so moving them and the support code dedicated to them outside of that class
+makes the rest of the code easier to understand.
+
+These are meant to be used elsewhere within pip to create instances of
+InstallRequirement.
+"""
+
+import logging
+import os
+import re
+
+from pip._vendor.packaging.markers import Marker
+from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
+from pip._vendor.packaging.specifiers import Specifier
+from pip._vendor.pkg_resources import RequirementParseError, parse_requirements
+
+from pip._internal.download import (
+    is_archive_file, is_url, path_to_url, url_to_path,
+)
+from pip._internal.exceptions import InstallationError
+from pip._internal.models.index import PyPI, TestPyPI
+from pip._internal.models.link import Link
+from pip._internal.pyproject import make_pyproject_path
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils.misc import is_installable_dir
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.vcs import vcs
+from pip._internal.wheel import Wheel
+
+if MYPY_CHECK_RUNNING:
+    from typing import (
+        Any, Dict, Optional, Set, Tuple, Union,
+    )
+    from pip._internal.cache import WheelCache
+
+
+__all__ = [
+    "install_req_from_editable", "install_req_from_line",
+    "parse_editable"
+]
+
+logger = logging.getLogger(__name__)
+operators = Specifier._operators.keys()
+
+
+def _strip_extras(path):
+    # type: (str) -> Tuple[str, Optional[str]]
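+    # e.g. 'path/to/proj[extra1,extra2]' -> ('path/to/proj', '[extra1,extra2]')
+    # and 'path/to/proj' -> ('path/to/proj', None).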
+    m = re.match(r'^(.+)(\[[^\]]+\])$', path)
+    extras = None
+    if m:
+        path_no_extras = m.group(1)
+        extras = m.group(2)
+    else:
+        path_no_extras = path
+
+    return path_no_extras, extras
+
+
+def parse_editable(editable_req):
+    # type: (str) -> Tuple[Optional[str], str, Optional[Set[str]]]
+    """Parses an editable requirement into:
+        - a requirement name
+        - an URL
+        - extras
+        - editable options
+    Accepted requirements:
+        svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
+        .[some_extra]
+    """
+
+    url = editable_req
+
+    # If a file path is specified with extras, strip off the extras.
+    url_no_extras, extras = _strip_extras(url)
+
+    if os.path.isdir(url_no_extras):
+        if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
+            msg = (
+                'File "setup.py" not found. Directory cannot be installed '
+                'in editable mode: {}'.format(os.path.abspath(url_no_extras))
+            )
+            pyproject_path = make_pyproject_path(url_no_extras)
+            if os.path.isfile(pyproject_path):
+                msg += (
+                    '\n(A "pyproject.toml" file was found, but editable '
+                    'mode currently requires a setup.py based build.)'
+                )
+            raise InstallationError(msg)
+
+        # Treating it as code that has already been checked out
+        url_no_extras = path_to_url(url_no_extras)
+
+    if url_no_extras.lower().startswith('file:'):
+        package_name = Link(url_no_extras).egg_fragment
+        if extras:
+            return (
+                package_name,
+                url_no_extras,
+                Requirement("placeholder" + extras.lower()).extras,
+            )
+        else:
+            return package_name, url_no_extras, None
+
+    for version_control in vcs:
+        if url.lower().startswith('%s:' % version_control):
+            url = '%s+%s' % (version_control, url)
+            break
+
+    if '+' not in url:
+        raise InstallationError(
+            '%s should either be a path to a local project or a VCS url '
+            'beginning with svn+, git+, hg+, or bzr+' %
+            editable_req
+        )
+
+    vc_type = url.split('+', 1)[0].lower()
+
+    if not vcs.get_backend(vc_type):
+        error_message = 'For --editable=%s only ' % editable_req + \
+            ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
+            ' is currently supported'
+        raise InstallationError(error_message)
+
+    package_name = Link(url).egg_fragment
+    if not package_name:
+        raise InstallationError(
+            "Could not detect requirement name for '%s', please specify one "
+            "with #egg=your_package_name" % editable_req
+        )
+    return package_name, url, None
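+
+
+# Illustration (URL is hypothetical):
+# parse_editable('git+https://example.com/repo.git#egg=Foobar') returns
+# ('Foobar', 'git+https://example.com/repo.git#egg=Foobar', None).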
+
+
+def deduce_helpful_msg(req):
+    # type: (str) -> str
+    """Returns helpful msg in case requirements file does not exist,
+    or cannot be parsed.
+
+    :params req: Requirements file path
+    """
+    msg = ""
+    if os.path.exists(req):
+        msg = " It does exist."
+        # Try to parse and check if it is a requirements file.
+        try:
+            with open(req, 'r') as fp:
+                # parse first line only
+                next(parse_requirements(fp.read()))
+                msg += " The argument you provided " + \
+                    "(%s) appears to be a" % (req) + \
+                    " requirements file. If that is the" + \
+                    " case, use the '-r' flag to install" + \
+                    " the packages specified within it."
+        except RequirementParseError:
+            logger.debug("Cannot parse '%s' as requirements \
+            file" % (req), exc_info=True)
+    else:
+        msg += " File '%s' does not exist." % (req)
+    return msg
+
+
+# ---- The actual constructors follow ----
+
+
+def install_req_from_editable(
+    editable_req,  # type: str
+    comes_from=None,  # type: Optional[str]
+    use_pep517=None,  # type: Optional[bool]
+    isolated=False,  # type: bool
+    options=None,  # type: Optional[Dict[str, Any]]
+    wheel_cache=None,  # type: Optional[WheelCache]
+    constraint=False  # type: bool
+):
+    # type: (...) -> InstallRequirement
+    name, url, extras_override = parse_editable(editable_req)
+    if url.startswith('file:'):
+        source_dir = url_to_path(url)
+    else:
+        source_dir = None
+
+    if name is not None:
+        try:
+            req = Requirement(name)
+        except InvalidRequirement:
+            raise InstallationError("Invalid requirement: '%s'" % name)
+    else:
+        req = None
+    return InstallRequirement(
+        req, comes_from, source_dir=source_dir,
+        editable=True,
+        link=Link(url),
+        constraint=constraint,
+        use_pep517=use_pep517,
+        isolated=isolated,
+        options=options if options else {},
+        wheel_cache=wheel_cache,
+        extras=extras_override or (),
+    )
+
+
+def install_req_from_line(
+    name,  # type: str
+    comes_from=None,  # type: Optional[Union[str, InstallRequirement]]
+    use_pep517=None,  # type: Optional[bool]
+    isolated=False,  # type: bool
+    options=None,  # type: Optional[Dict[str, Any]]
+    wheel_cache=None,  # type: Optional[WheelCache]
+    constraint=False  # type: bool
+):
+    # type: (...) -> InstallRequirement
+    """Creates an InstallRequirement from a name, which might be a
+    requirement, directory containing 'setup.py', filename, or URL.
+    """
+    if is_url(name):
+        marker_sep = '; '
+    else:
+        marker_sep = ';'
+    if marker_sep in name:
+        name, markers_as_string = name.split(marker_sep, 1)
+        markers_as_string = markers_as_string.strip()
+        if not markers_as_string:
+            markers = None
+        else:
+            markers = Marker(markers_as_string)
+    else:
+        markers = None
+    name = name.strip()
+    req_as_string = None
+    path = os.path.normpath(os.path.abspath(name))
+    link = None
+    extras_as_string = None
+
+    if is_url(name):
+        link = Link(name)
+    else:
+        p, extras_as_string = _strip_extras(path)
+        looks_like_dir = os.path.isdir(p) and (
+            os.path.sep in name or
+            (os.path.altsep is not None and os.path.altsep in name) or
+            name.startswith('.')
+        )
+        if looks_like_dir:
+            if not is_installable_dir(p):
+                raise InstallationError(
+                    "Directory %r is not installable. Neither 'setup.py' "
+                    "nor 'pyproject.toml' found." % name
+                )
+            link = Link(path_to_url(p))
+        elif is_archive_file(p):
+            if not os.path.isfile(p):
+                logger.warning(
+                    'Requirement %r looks like a filename, but the '
+                    'file does not exist',
+                    name
+                )
+            link = Link(path_to_url(p))
+
+    # it's a local file, dir, or url
+    if link:
+        # Handle relative file URLs
+        if link.scheme == 'file' and re.search(r'\.\./', link.url):
+            link = Link(
+                path_to_url(os.path.normpath(os.path.abspath(link.path))))
+        # wheel file
+        if link.is_wheel:
+            wheel = Wheel(link.filename)  # can raise InvalidWheelFilename
+            req_as_string = "%s==%s" % (wheel.name, wheel.version)
+        else:
+            # set the req to the egg fragment.  when it's not there, this
+            # will become an 'unnamed' requirement
+            req_as_string = link.egg_fragment
+
+    # a requirement specifier
+    else:
+        req_as_string = name
+
+    if extras_as_string:
+        extras = Requirement("placeholder" + extras_as_string.lower()).extras
+    else:
+        extras = ()
+    if req_as_string is not None:
+        try:
+            req = Requirement(req_as_string)
+        except InvalidRequirement:
+            if os.path.sep in req_as_string:
+                add_msg = "It looks like a path."
+                add_msg += deduce_helpful_msg(req_as_string)
+            elif ('=' in req_as_string and
+                  not any(op in req_as_string for op in operators)):
+                add_msg = "= is not a valid operator. Did you mean == ?"
+            else:
+                add_msg = ""
+            raise InstallationError(
+                "Invalid requirement: '%s'\n%s" % (req_as_string, add_msg)
+            )
+    else:
+        req = None
+
+    return InstallRequirement(
+        req, comes_from, link=link, markers=markers,
+        use_pep517=use_pep517, isolated=isolated,
+        options=options if options else {},
+        wheel_cache=wheel_cache,
+        constraint=constraint,
+        extras=extras,
+    )
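+
+
+# Illustration (names are illustrative):
+# install_req_from_line('requests>=2.0; python_version >= "3"') produces an
+# InstallRequirement whose req is 'requests>=2.0' and whose marker is the
+# environment condition python_version >= "3".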
+
+
+def install_req_from_req_string(
+    req_string,  # type: str
+    comes_from=None,  # type: Optional[InstallRequirement]
+    isolated=False,  # type: bool
+    wheel_cache=None,  # type: Optional[WheelCache]
+    use_pep517=None  # type: Optional[bool]
+):
+    # type: (...) -> InstallRequirement
+    try:
+        req = Requirement(req_string)
+    except InvalidRequirement:
+        raise InstallationError("Invalid requirement: '%s'" % req_string)
+
+    domains_not_allowed = [
+        PyPI.file_storage_domain,
+        TestPyPI.file_storage_domain,
+    ]
+    if (req.url and comes_from and comes_from.link and
+            comes_from.link.netloc in domains_not_allowed):
+        # Explicitly disallow pypi packages that depend on external urls
+        raise InstallationError(
+            "Packages installed from PyPI cannot depend on packages "
+            "which are not also hosted on PyPI.\n"
+            "%s depends on %s " % (comes_from.name, req)
+        )
+
+    return InstallRequirement(
+        req, comes_from, isolated=isolated, wheel_cache=wheel_cache,
+        use_pep517=use_pep517
+    )
diff --git a/lib/python3.7/site-packages/pip/_internal/req/req_file.py b/lib/python3.7/site-packages/pip/_internal/req/req_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..270b75cd1ae917665134f0e1958b66ba5f572d21
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/req/req_file.py
@@ -0,0 +1,383 @@
+"""
+Requirements file parsing
+"""
+
+from __future__ import absolute_import
+
+import optparse
+import os
+import re
+import shlex
+import sys
+
+from pip._vendor.six.moves import filterfalse
+from pip._vendor.six.moves.urllib import parse as urllib_parse
+
+from pip._internal.cli import cmdoptions
+from pip._internal.download import get_file_content
+from pip._internal.exceptions import RequirementsFileParseError
+from pip._internal.req.constructors import (
+    install_req_from_editable, install_req_from_line,
+)
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import (
+        Any, Callable, Iterator, List, NoReturn, Optional, Text, Tuple,
+    )
+    from pip._internal.req import InstallRequirement
+    from pip._internal.cache import WheelCache
+    from pip._internal.index import PackageFinder
+    from pip._internal.download import PipSession
+
+    ReqFileLines = Iterator[Tuple[int, Text]]
+
+__all__ = ['parse_requirements']
+
+SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
+COMMENT_RE = re.compile(r'(^|\s)+#.*$')
+
+# Matches environment variable-style values in '${MY_VARIABLE_1}' with the
+# variable name consisting of only uppercase letters, digits or the '_'
+# (underscore). This follows the POSIX standard defined in IEEE Std 1003.1,
+# 2013 Edition.
+ENV_VAR_RE = re.compile(r'(?P<var>\$\{(?P<name>[A-Z0-9_]+)\})')
+
+SUPPORTED_OPTIONS = [
+    cmdoptions.constraints,
+    cmdoptions.editable,
+    cmdoptions.requirements,
+    cmdoptions.no_index,
+    cmdoptions.index_url,
+    cmdoptions.find_links,
+    cmdoptions.extra_index_url,
+    cmdoptions.always_unzip,
+    cmdoptions.no_binary,
+    cmdoptions.only_binary,
+    cmdoptions.pre,
+    cmdoptions.trusted_host,
+    cmdoptions.require_hashes,
+]  # type: List[Callable[..., optparse.Option]]
+
+# options to be passed to requirements
+SUPPORTED_OPTIONS_REQ = [
+    cmdoptions.install_options,
+    cmdoptions.global_options,
+    cmdoptions.hash,
+]  # type: List[Callable[..., optparse.Option]]
+
+# the 'dest' string values
+SUPPORTED_OPTIONS_REQ_DEST = [str(o().dest) for o in SUPPORTED_OPTIONS_REQ]
+
+
+def parse_requirements(
+    filename,  # type: str
+    finder=None,  # type: Optional[PackageFinder]
+    comes_from=None,  # type: Optional[str]
+    options=None,  # type: Optional[optparse.Values]
+    session=None,  # type: Optional[PipSession]
+    constraint=False,  # type: bool
+    wheel_cache=None,  # type: Optional[WheelCache]
+    use_pep517=None  # type: Optional[bool]
+):
+    # type: (...) -> Iterator[InstallRequirement]
+    """Parse a requirements file and yield InstallRequirement instances.
+
+    :param filename:    Path or url of requirements file.
+    :param finder:      Instance of pip.index.PackageFinder.
+    :param comes_from:  Origin description of requirements.
+    :param options:     cli options.
+    :param session:     Instance of pip.download.PipSession.
+    :param constraint:  If true, parsing a constraint file rather than
+        requirements file.
+    :param wheel_cache: Instance of pip.wheel.WheelCache
+    :param use_pep517:  Value of the --use-pep517 option.
+    """
+    if session is None:
+        raise TypeError(
+            "parse_requirements() missing 1 required keyword argument: "
+            "'session'"
+        )
+
+    _, content = get_file_content(
+        filename, comes_from=comes_from, session=session
+    )
+
+    lines_enum = preprocess(content, options)
+
+    for line_number, line in lines_enum:
+        req_iter = process_line(line, filename, line_number, finder,
+                                comes_from, options, session, wheel_cache,
+                                use_pep517=use_pep517, constraint=constraint)
+        for req in req_iter:
+            yield req
+
+
+def preprocess(content, options):
+    # type: (Text, Optional[optparse.Values]) -> ReqFileLines
+    """Split, filter, and join lines, and return a line iterator
+
+    :param content: the content of the requirements file
+    :param options: cli options
+    """
+    lines_enum = enumerate(content.splitlines(), start=1)  # type: ReqFileLines
+    lines_enum = join_lines(lines_enum)
+    lines_enum = ignore_comments(lines_enum)
+    lines_enum = skip_regex(lines_enum, options)
+    lines_enum = expand_env_variables(lines_enum)
+    return lines_enum
+
+
+def process_line(
+    line,  # type: Text
+    filename,  # type: str
+    line_number,  # type: int
+    finder=None,  # type: Optional[PackageFinder]
+    comes_from=None,  # type: Optional[str]
+    options=None,  # type: Optional[optparse.Values]
+    session=None,  # type: Optional[PipSession]
+    wheel_cache=None,  # type: Optional[WheelCache]
+    use_pep517=None,  # type: Optional[bool]
+    constraint=False  # type: bool
+):
+    # type: (...) -> Iterator[InstallRequirement]
+    """Process a single requirements line; This can result in creating/yielding
+    requirements, or updating the finder.
+
+    For lines that contain requirements, the only options that have an effect
+    are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
+    requirement. Other options from SUPPORTED_OPTIONS may be present, but are
+    ignored.
+
+    For lines that do not contain requirements, the only options that have an
+    effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
+    be present, but are ignored. These lines may contain multiple options
+    (although our docs imply only one is supported), and all of them are
+    parsed and affect the finder.
+
+    :param constraint: If True, parsing a constraints file.
+    :param options: OptionParser options that we may update
+    """
+    parser = build_parser(line)
+    defaults = parser.get_default_values()
+    defaults.index_url = None
+    if finder:
+        defaults.format_control = finder.format_control
+    args_str, options_str = break_args_options(line)
+    # Prior to Python 2.7.3, shlex cannot deal with unicode entries
+    if sys.version_info < (2, 7, 3):
+        # https://github.com/python/mypy/issues/1174
+        options_str = options_str.encode('utf8')  # type: ignore
+    # https://github.com/python/mypy/issues/1174
+    opts, _ = parser.parse_args(
+        shlex.split(options_str), defaults)  # type: ignore
+
+    # preserve for the nested code path
+    line_comes_from = '%s %s (line %s)' % (
+        '-c' if constraint else '-r', filename, line_number,
+    )
+
+    # yield a line requirement
+    if args_str:
+        isolated = options.isolated_mode if options else False
+        if options:
+            cmdoptions.check_install_build_global(options, opts)
+        # get the options that apply to requirements
+        req_options = {}
+        for dest in SUPPORTED_OPTIONS_REQ_DEST:
+            if dest in opts.__dict__ and opts.__dict__[dest]:
+                req_options[dest] = opts.__dict__[dest]
+        yield install_req_from_line(
+            args_str, line_comes_from, constraint=constraint,
+            use_pep517=use_pep517,
+            isolated=isolated, options=req_options, wheel_cache=wheel_cache
+        )
+
+    # yield an editable requirement
+    elif opts.editables:
+        isolated = options.isolated_mode if options else False
+        yield install_req_from_editable(
+            opts.editables[0], comes_from=line_comes_from,
+            use_pep517=use_pep517,
+            constraint=constraint, isolated=isolated, wheel_cache=wheel_cache
+        )
+
+    # parse a nested requirements file
+    elif opts.requirements or opts.constraints:
+        if opts.requirements:
+            req_path = opts.requirements[0]
+            nested_constraint = False
+        else:
+            req_path = opts.constraints[0]
+            nested_constraint = True
+        # original file is over http
+        if SCHEME_RE.search(filename):
+            # do a url join so relative paths work
+            req_path = urllib_parse.urljoin(filename, req_path)
+        # original file and nested file are paths
+        elif not SCHEME_RE.search(req_path):
+            # do a join so relative paths work
+            req_path = os.path.join(os.path.dirname(filename), req_path)
+        # TODO: Why not use `comes_from='-r {} (line {})'` here as well?
+        parsed_reqs = parse_requirements(
+            req_path, finder, comes_from, options, session,
+            constraint=nested_constraint, wheel_cache=wheel_cache
+        )
+        for req in parsed_reqs:
+            yield req
+
+    # percolate hash-checking option upward
+    elif opts.require_hashes:
+        options.require_hashes = opts.require_hashes
+
+    # set finder options
+    elif finder:
+        if opts.index_url:
+            finder.index_urls = [opts.index_url]
+        if opts.no_index is True:
+            finder.index_urls = []
+        if opts.extra_index_urls:
+            finder.index_urls.extend(opts.extra_index_urls)
+        if opts.find_links:
+            # FIXME: it would be nice to keep track of the source
+            # of the find_links: support a find-links local path
+            # relative to a requirements file.
+            value = opts.find_links[0]
+            req_dir = os.path.dirname(os.path.abspath(filename))
+            relative_to_reqs_file = os.path.join(req_dir, value)
+            if os.path.exists(relative_to_reqs_file):
+                value = relative_to_reqs_file
+            finder.find_links.append(value)
+        if opts.pre:
+            finder.allow_all_prereleases = True
+        if opts.trusted_hosts:
+            finder.secure_origins.extend(
+                ("*", host, "*") for host in opts.trusted_hosts)
+
+
+def break_args_options(line):
+    # type: (Text) -> Tuple[str, Text]
+    """Break up the line into an args and options string.  We only want to shlex
+    (and then optparse) the options, not the args.  args can contain markers
+    which are corrupted by shlex.
+    """
+    tokens = line.split(' ')
+    args = []
+    options = tokens[:]
+    for token in tokens:
+        if token.startswith('-') or token.startswith('--'):
+            break
+        else:
+            args.append(token)
+            options.pop(0)
+    return ' '.join(args), ' '.join(options)  # type: ignore
+
+
+def build_parser(line):
+    # type: (Text) -> optparse.OptionParser
+    """
+    Return a parser for parsing requirement lines
+    """
+    parser = optparse.OptionParser(add_help_option=False)
+
+    option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
+    for option_factory in option_factories:
+        option = option_factory()
+        parser.add_option(option)
+
+    # By default optparse sys.exits on parsing errors. We want to wrap
+    # that in our own exception.
+    def parser_exit(self, msg):
+        # type: (Any, str) -> NoReturn
+        # add offending line
+        msg = 'Invalid requirement: %s\n%s' % (line, msg)
+        raise RequirementsFileParseError(msg)
+    # NOTE: mypy disallows assigning to a method
+    #       https://github.com/python/mypy/issues/2427
+    parser.exit = parser_exit  # type: ignore
+
+    return parser
+
+
+def join_lines(lines_enum):
+    # type: (ReqFileLines) -> ReqFileLines
+    """Joins a line ending in '\' with the previous line (except when following
+    comments).  The joined line takes on the index of the first line.
+    """
+    primary_line_number = None
+    new_line = []  # type: List[Text]
+    for line_number, line in lines_enum:
+        if not line.endswith('\\') or COMMENT_RE.match(line):
+            if COMMENT_RE.match(line):
+                # this ensures comments are always matched later
+                line = ' ' + line
+            if new_line:
+                new_line.append(line)
+                yield primary_line_number, ''.join(new_line)
+                new_line = []
+            else:
+                yield line_number, line
+        else:
+            if not new_line:
+                primary_line_number = line_number
+            new_line.append(line.strip('\\'))
+
+    # last line contains \
+    if new_line:
+        yield primary_line_number, ''.join(new_line)
+
+    # TODO: handle space after '\'.
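+
+
+# Illustration: the input pairs (1, 'foo \\') and (2, 'bar') are joined by
+# join_lines() into the single entry (1, 'foo bar').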
+
+
+def ignore_comments(lines_enum):
+    # type: (ReqFileLines) -> ReqFileLines
+    """
+    Strips comments and filter empty lines.
+    """
+    for line_number, line in lines_enum:
+        line = COMMENT_RE.sub('', line)
+        line = line.strip()
+        if line:
+            yield line_number, line
+
+
+def skip_regex(lines_enum, options):
+    # type: (ReqFileLines, Optional[optparse.Values]) -> ReqFileLines
+    """
+    Skip lines that match the '--skip-requirements-regex' pattern
+
+    Note: the regex pattern is only built once
+    """
+    skip_regex = options.skip_requirements_regex if options else None
+    if skip_regex:
+        pattern = re.compile(skip_regex)
+        lines_enum = filterfalse(lambda e: pattern.search(e[1]), lines_enum)
+    return lines_enum
+
+
+def expand_env_variables(lines_enum):
+    # type: (ReqFileLines) -> ReqFileLines
+    """Replace all environment variables that can be retrieved via `os.getenv`.
+
+    The only allowed format for environment variables defined in the
+    requirement file is `${MY_VARIABLE_1}` to ensure two things:
+
+    1. Strings that contain a `$` aren't accidentally (partially) expanded.
+    2. Requirement files stay consistent across platforms.
+
+    These points are the result of a discussion on the `github pull
+    request #3514 <https://github.com/pypa/pip/pull/3514>`_.
+
+    Valid characters in variable names follow the `POSIX standard
+    <http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
+    to uppercase letters, digits and the `_` (underscore).
+    """
+    for line_number, line in lines_enum:
+        for env_var, var_name in ENV_VAR_RE.findall(line):
+            value = os.getenv(var_name)
+            if not value:
+                continue
+
+            line = line.replace(env_var, value)
+
+        yield line_number, line
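+
+
+# Illustration (variable name and URL are hypothetical): with MY_TOKEN=abc123
+# set in the environment, the line 'https://${MY_TOKEN}@example.com/simple'
+# expands to 'https://abc123@example.com/simple'.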
diff --git a/lib/python3.7/site-packages/pip/_internal/req/req_install.py b/lib/python3.7/site-packages/pip/_internal/req/req_install.py
new file mode 100644
index 0000000000000000000000000000000000000000..111ad0c69e27cfd59321d1a0be0f4d38c9de35d6
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/req/req_install.py
@@ -0,0 +1,1030 @@
+from __future__ import absolute_import
+
+import logging
+import os
+import shutil
+import sys
+import sysconfig
+import zipfile
+from distutils.util import change_root
+
+from pip._vendor import pkg_resources, six
+from pip._vendor.packaging.requirements import Requirement
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.packaging.version import Version
+from pip._vendor.packaging.version import parse as parse_version
+from pip._vendor.pep517.wrappers import Pep517HookCaller
+
+from pip._internal import wheel
+from pip._internal.build_env import NoOpBuildEnvironment
+from pip._internal.exceptions import InstallationError
+from pip._internal.locations import (
+    PIP_DELETE_MARKER_FILENAME, running_under_virtualenv,
+)
+from pip._internal.models.link import Link
+from pip._internal.pyproject import load_pyproject_toml, make_pyproject_path
+from pip._internal.req.req_uninstall import UninstallPathSet
+from pip._internal.utils.compat import native_str
+from pip._internal.utils.hashes import Hashes
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import (
+    _make_build_dir, ask_path_exists, backup_dir, call_subprocess,
+    display_path, dist_in_site_packages, dist_in_usersite, ensure_dir,
+    get_installed_version, redact_password_from_url, rmtree,
+)
+from pip._internal.utils.packaging import get_metadata
+from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.utils.ui import open_spinner
+from pip._internal.vcs import vcs
+from pip._internal.wheel import move_wheel_files
+
+if MYPY_CHECK_RUNNING:
+    from typing import (
+        Any, Dict, Iterable, List, Mapping, Optional, Sequence, Union,
+    )
+    from pip._internal.build_env import BuildEnvironment
+    from pip._internal.cache import WheelCache
+    from pip._internal.index import PackageFinder
+    from pip._vendor.pkg_resources import Distribution
+    from pip._vendor.packaging.specifiers import SpecifierSet
+    from pip._vendor.packaging.markers import Marker
+
+
+logger = logging.getLogger(__name__)
+
+
+class InstallRequirement(object):
+    """
+    Represents something that may be installed later on; it may carry
+    information about where to fetch the relevant requirement and also
+    contains the logic for installing it.
+    """
+
+    def __init__(
+        self,
+        req,  # type: Optional[Requirement]
+        comes_from,  # type: Optional[Union[str, InstallRequirement]]
+        source_dir=None,  # type: Optional[str]
+        editable=False,  # type: bool
+        link=None,  # type: Optional[Link]
+        update=True,  # type: bool
+        markers=None,  # type: Optional[Marker]
+        use_pep517=None,  # type: Optional[bool]
+        isolated=False,  # type: bool
+        options=None,  # type: Optional[Dict[str, Any]]
+        wheel_cache=None,  # type: Optional[WheelCache]
+        constraint=False,  # type: bool
+        extras=()  # type: Iterable[str]
+    ):
+        # type: (...) -> None
+        assert req is None or isinstance(req, Requirement), req
+        self.req = req
+        self.comes_from = comes_from
+        self.constraint = constraint
+        if source_dir is not None:
+            self.source_dir = os.path.normpath(os.path.abspath(source_dir))
+        else:
+            self.source_dir = None
+        self.editable = editable
+
+        self._wheel_cache = wheel_cache
+        if link is None and req and req.url:
+            # PEP 508 URL requirement
+            link = Link(req.url)
+        self.link = self.original_link = link
+
+        if extras:
+            self.extras = extras
+        elif req:
+            self.extras = {
+                pkg_resources.safe_extra(extra) for extra in req.extras
+            }
+        else:
+            self.extras = set()
+        if markers is None and req:
+            markers = req.marker
+        self.markers = markers
+
+        self._egg_info_path = None  # type: Optional[str]
+        # This holds the pkg_resources.Distribution object if this requirement
+        # is already available:
+        self.satisfied_by = None
+        # This holds the pkg_resources.Distribution object if this requirement
+        # conflicts with another installed distribution:
+        self.conflicts_with = None
+        # Temporary build location
+        self._temp_build_dir = TempDirectory(kind="req-build")
+        # Used to store the global directory where the _temp_build_dir should
+        # have been created. Cf. the _correct_build_location method.
+        self._ideal_build_dir = None  # type: Optional[str]
+        # True if the editable should be updated:
+        self.update = update
+        # Set to True after successful installation
+        self.install_succeeded = None  # type: Optional[bool]
+        # UninstallPathSet of uninstalled distribution (for possible rollback)
+        self.uninstalled_pathset = None
+        self.options = options if options else {}
+        # Set to True after successful preparation of this requirement
+        self.prepared = False
+        self.is_direct = False
+
+        self.isolated = isolated
+        self.build_env = NoOpBuildEnvironment()  # type: BuildEnvironment
+
+        # For PEP 517, the directory where we request the project metadata
+        # gets stored. We need this to pass to build_wheel, so the backend
+        # can ensure that the wheel matches the metadata (see the PEP for
+        # details).
+        self.metadata_directory = None  # type: Optional[str]
+
+        # The static build requirements (from pyproject.toml)
+        self.pyproject_requires = None  # type: Optional[List[str]]
+
+        # Build requirements that we will check are available
+        self.requirements_to_check = []  # type: List[str]
+
+        # The PEP 517 backend we should use to build the project
+        self.pep517_backend = None  # type: Optional[Pep517HookCaller]
+
+        # Are we using PEP 517 for this requirement?
+        # After pyproject.toml has been loaded, the only valid values are True
+        # and False. Before loading, None is valid (meaning "use the default").
+        # Setting an explicit value before loading pyproject.toml is supported,
+        # but after loading this flag should be treated as read only.
+        self.use_pep517 = use_pep517
+
+    def __str__(self):
+        # type: () -> str
+        if self.req:
+            s = str(self.req)
+            if self.link:
+                s += ' from %s' % redact_password_from_url(self.link.url)
+        elif self.link:
+            s = redact_password_from_url(self.link.url)
+        else:
+            s = '<InstallRequirement>'
+        if self.satisfied_by is not None:
+            s += ' in %s' % display_path(self.satisfied_by.location)
+        if self.comes_from:
+            if isinstance(self.comes_from, six.string_types):
+                comes_from = self.comes_from
+            else:
+                comes_from = self.comes_from.from_path()
+            if comes_from:
+                s += ' (from %s)' % comes_from
+        return s
+
+    def __repr__(self):
+        # type: () -> str
+        return '<%s object: %s editable=%r>' % (
+            self.__class__.__name__, str(self), self.editable)
+
+    def populate_link(self, finder, upgrade, require_hashes):
+        # type: (PackageFinder, bool, bool) -> None
+        """Ensure that if a link can be found for this, that it is found.
+
+        Note that self.link may still be None - if upgrade is False and the
+        requirement is already installed.
+
+        If require_hashes is True, don't use the wheel cache, because cached
+        wheels, always built locally, have different hashes than the files
+        downloaded from the index server and thus throw false hash mismatches.
+        Furthermore, cached wheels at present have nondeterministic contents due
+        to file modification times.
+        """
+        if self.link is None:
+            self.link = finder.find_requirement(self, upgrade)
+        if self._wheel_cache is not None and not require_hashes:
+            old_link = self.link
+            self.link = self._wheel_cache.get(self.link, self.name)
+            if old_link != self.link:
+                logger.debug('Using cached wheel link: %s', self.link)
+
+    # Things that are valid for all kinds of requirements?
+    @property
+    def name(self):
+        # type: () -> Optional[str]
+        if self.req is None:
+            return None
+        return native_str(pkg_resources.safe_name(self.req.name))
+
+    @property
+    def specifier(self):
+        # type: () -> SpecifierSet
+        return self.req.specifier
+
+    @property
+    def is_pinned(self):
+        # type: () -> bool
+        """Return whether I am pinned to an exact version.
+
+        For example, some-package==1.2 is pinned; some-package>1.2 is not.
+        """
+        specifiers = self.specifier
+        return (len(specifiers) == 1 and
+                next(iter(specifiers)).operator in {'==', '==='})
+
+    @property
+    def installed_version(self):
+        # type: () -> Optional[str]
+        return get_installed_version(self.name)
+
+    def match_markers(self, extras_requested=None):
+        # type: (Optional[Iterable[str]]) -> bool
+        if not extras_requested:
+            # Provide an extra to safely evaluate the markers
+            # without matching any extra
+            extras_requested = ('',)
+        if self.markers is not None:
+            return any(
+                self.markers.evaluate({'extra': extra})
+                for extra in extras_requested)
+        else:
+            return True
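+
+    # Illustrative sketch (not part of pip's API): self.markers is a PEP 508
+    # marker from the vendored packaging library; evaluation looks like:
+    #
+    #     from pip._vendor.packaging.markers import Marker
+    #     Marker('python_version >= "3.4"').evaluate({'extra': ''})  # -> bool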
+
+    @property
+    def has_hash_options(self):
+        # type: () -> bool
+        """Return whether any known-good hashes are specified as options.
+
+        These activate --require-hashes mode; hashes specified as part of a
+        URL do not.
+
+        """
+        return bool(self.options.get('hashes', {}))
+
+    def hashes(self, trust_internet=True):
+        # type: (bool) -> Hashes
+        """Return a hash-comparer that considers my option- and URL-based
+        hashes to be known-good.
+
+        Hashes in URLs--ones embedded in the requirements file, not ones
+        downloaded from an index server--are almost peers with ones from
+        flags. They satisfy --require-hashes (whether it was implicitly or
+        explicitly activated) but do not activate it. md5 and sha224 are not
+        allowed in flags, which should nudge people toward good algos. We
+        always OR all hashes together, even ones from URLs.
+
+        :param trust_internet: Whether to trust URL-based (#md5=...) hashes
+            downloaded from the internet, as by populate_link()
+
+        """
+        good_hashes = self.options.get('hashes', {}).copy()
+        link = self.link if trust_internet else self.original_link
+        if link and link.hash:
+            good_hashes.setdefault(link.hash_name, []).append(link.hash)
+        return Hashes(good_hashes)
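+
+    # Illustrative sketch (not part of pip's API): Hashes is a comparer built
+    # from a {hash_name: [hexdigest, ...]} mapping, e.g.:
+    #
+    #     from pip._internal.utils.hashes import Hashes
+    #     h = Hashes({'sha256': ['<hexdigest-a>', '<hexdigest-b>']})
+    #
+    # h.check_against_path(path) then raises HashMismatch unless the file
+    # matches one of the known-good digests (they are OR-ed together).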
+
+    def from_path(self):
+        # type: () -> Optional[str]
+        """Format a nice indicator to show where this "comes from"
+        """
+        if self.req is None:
+            return None
+        s = str(self.req)
+        if self.comes_from:
+            if isinstance(self.comes_from, six.string_types):
+                comes_from = self.comes_from
+            else:
+                comes_from = self.comes_from.from_path()
+            if comes_from:
+                s += '->' + comes_from
+        return s
+
+    def build_location(self, build_dir):
+        # type: (str) -> Optional[str]
+        assert build_dir is not None
+        if self._temp_build_dir.path is not None:
+            return self._temp_build_dir.path
+        if self.req is None:
+            # For a requirement given as a path to a directory, the name of
+            # the package is not available yet, so we create a temp directory.
+            # Once run_egg_info has run, we'll be able to fix it via
+            # _correct_build_location.
+            # Some systems have /tmp as a symlink which confuses custom
+            # builds (such as numpy). Thus, we ensure that the real path
+            # is returned.
+            self._temp_build_dir.create()
+            self._ideal_build_dir = build_dir
+
+            return self._temp_build_dir.path
+        if self.editable:
+            name = self.name.lower()
+        else:
+            name = self.name
+        # FIXME: Is there a better place to create the build_dir? (hg and bzr
+        # need this)
+        if not os.path.exists(build_dir):
+            logger.debug('Creating directory %s', build_dir)
+            _make_build_dir(build_dir)
+        return os.path.join(build_dir, name)
+
+    def _correct_build_location(self):
+        # type: () -> None
+        """Move self._temp_build_dir to self._ideal_build_dir/self.req.name
+
+        For some requirements (e.g. a path to a directory), the name of the
+        package is not available until we run egg_info, so the build_location
+        will return a temporary directory and store the _ideal_build_dir.
+
+        This is only called by self.run_egg_info to fix the temporary build
+        directory.
+        """
+        if self.source_dir is not None:
+            return
+        assert self.req is not None
+        assert self._temp_build_dir.path
+        # _ideal_build_dir is a plain str (the requested build dir), so a
+        # simple None check suffices; str has no .path attribute.
+        assert self._ideal_build_dir is not None
+        old_location = self._temp_build_dir.path
+        self._temp_build_dir.path = None
+
+        new_location = self.build_location(self._ideal_build_dir)
+        if os.path.exists(new_location):
+            raise InstallationError(
+                'A package already exists in %s; please remove it to continue'
+                % display_path(new_location))
+        logger.debug(
+            'Moving package %s from %s to new location %s',
+            self, display_path(old_location), display_path(new_location),
+        )
+        shutil.move(old_location, new_location)
+        self._temp_build_dir.path = new_location
+        self._ideal_build_dir = None
+        self.source_dir = os.path.normpath(os.path.abspath(new_location))
+        self._egg_info_path = None
+
+        # Correct the metadata directory, if it exists
+        if self.metadata_directory:
+            old_meta = self.metadata_directory
+            rel = os.path.relpath(old_meta, start=old_location)
+            new_meta = os.path.join(new_location, rel)
+            new_meta = os.path.normpath(os.path.abspath(new_meta))
+            self.metadata_directory = new_meta
+
+    def remove_temporary_source(self):
+        # type: () -> None
+        """Remove the source files from this requirement, if they are marked
+        for deletion"""
+        if self.source_dir and os.path.exists(
+                os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)):
+            logger.debug('Removing source in %s', self.source_dir)
+            rmtree(self.source_dir)
+        self.source_dir = None
+        self._temp_build_dir.cleanup()
+        self.build_env.cleanup()
+
+    def check_if_exists(self, use_user_site):
+        # type: (bool) -> bool
+        """Find an installed distribution that satisfies or conflicts
+        with this requirement, and set self.satisfied_by or
+        self.conflicts_with appropriately.
+        """
+        if self.req is None:
+            return False
+        try:
+            # get_distribution() will resolve the entire list of requirements
+            # anyway, and we've already determined that we need the requirement
+            # in question, so strip the marker so that we don't try to
+            # evaluate it.
+            no_marker = Requirement(str(self.req))
+            no_marker.marker = None
+            self.satisfied_by = pkg_resources.get_distribution(str(no_marker))
+            if self.editable and self.satisfied_by:
+                self.conflicts_with = self.satisfied_by
+                # when installing editables, nothing pre-existing should ever
+                # satisfy
+                self.satisfied_by = None
+                return True
+        except pkg_resources.DistributionNotFound:
+            return False
+        except pkg_resources.VersionConflict:
+            existing_dist = pkg_resources.get_distribution(
+                self.req.name
+            )
+            if use_user_site:
+                if dist_in_usersite(existing_dist):
+                    self.conflicts_with = existing_dist
+                elif (running_under_virtualenv() and
+                        dist_in_site_packages(existing_dist)):
+                    raise InstallationError(
+                        "Will not install to the user site because it will "
+                        "lack sys.path precedence to %s in %s" %
+                        (existing_dist.project_name, existing_dist.location)
+                    )
+            else:
+                self.conflicts_with = existing_dist
+        return True
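+
+    # Illustrative sketch (not part of pip's API): stripping the marker as
+    # done above only changes the string form of the requirement, e.g.:
+    #
+    #     from pip._vendor.packaging.requirements import Requirement
+    #     r = Requirement('demo>=1.0; python_version < "3"')  # "demo" is hypothetical
+    #     r.marker = None
+    #     str(r)  # 'demo>=1.0' -- the marker is gone from the string form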
+
+    # Things valid for wheels
+    @property
+    def is_wheel(self):
+        # type: () -> bool
+        if not self.link:
+            return False
+        return self.link.is_wheel
+
+    def move_wheel_files(
+        self,
+        wheeldir,  # type: str
+        root=None,  # type: Optional[str]
+        home=None,  # type: Optional[str]
+        prefix=None,  # type: Optional[str]
+        warn_script_location=True,  # type: bool
+        use_user_site=False,  # type: bool
+        pycompile=True  # type: bool
+    ):
+        # type: (...) -> None
+        move_wheel_files(
+            self.name, self.req, wheeldir,
+            user=use_user_site,
+            home=home,
+            root=root,
+            prefix=prefix,
+            pycompile=pycompile,
+            isolated=self.isolated,
+            warn_script_location=warn_script_location,
+        )
+
+    # Things valid for sdists
+    @property
+    def setup_py_dir(self):
+        # type: () -> str
+        return os.path.join(
+            self.source_dir,
+            self.link and self.link.subdirectory_fragment or '')
+
+    @property
+    def setup_py(self):
+        # type: () -> str
+        assert self.source_dir, "No source dir for %s" % self
+
+        setup_py = os.path.join(self.setup_py_dir, 'setup.py')
+
+        # Python2 __file__ should not be unicode
+        if six.PY2 and isinstance(setup_py, six.text_type):
+            setup_py = setup_py.encode(sys.getfilesystemencoding())
+
+        return setup_py
+
+    @property
+    def pyproject_toml(self):
+        # type: () -> str
+        assert self.source_dir, "No source dir for %s" % self
+
+        return make_pyproject_path(self.setup_py_dir)
+
+    def load_pyproject_toml(self):
+        # type: () -> None
+        """Load the pyproject.toml file.
+
+        After calling this routine, all of the attributes related to PEP 517
+        processing for this requirement have been set. In particular, the
+        use_pep517 attribute can be used to determine whether we should
+        follow the PEP 517 or legacy (setup.py) code path.
+        """
+        requires, pep517_data = load_pyproject_toml(
+            self.use_pep517,
+            self.editable,
+            self.pyproject_toml,
+            self.setup_py,
+            str(self)
+        )
+
+        use_pep517 = bool(pep517_data)
+
+        self.use_pep517 = use_pep517
+        self.pyproject_requires = requires
+
+        if use_pep517:
+            backend, check = pep517_data
+            self.requirements_to_check = check
+            self.pep517_backend = Pep517HookCaller(self.setup_py_dir, backend)
+
+            # Use a custom function to call subprocesses
+            self.spin_message = ""
+
+            def runner(
+                cmd,  # type: List[str]
+                cwd=None,  # type: Optional[str]
+                extra_environ=None  # type: Optional[Mapping[str, Any]]
+            ):
+                # type: (...) -> None
+                with open_spinner(self.spin_message) as spinner:
+                    call_subprocess(
+                        cmd,
+                        cwd=cwd,
+                        extra_environ=extra_environ,
+                        spinner=spinner
+                    )
+                self.spin_message = ""
+
+            self.pep517_backend._subprocess_runner = runner
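+
+            # Illustrative sketch (wheel_directory is a hypothetical path):
+            # with a backend such as 'setuptools.build_meta', the PEP 517
+            # hooks can then be driven like:
+            #
+            #     reqs = self.pep517_backend.get_requires_for_build_wheel()
+            #     name = self.pep517_backend.build_wheel(wheel_directory)
+            #
+            # each call runs in a subprocess through the runner set above.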
+
+    def prepare_metadata(self):
+        # type: () -> None
+        """Ensure that project metadata is available.
+
+        Under PEP 517, call the backend hook to prepare the metadata.
+        Under legacy processing, call setup.py egg-info.
+        """
+        assert self.source_dir
+
+        with indent_log():
+            if self.use_pep517:
+                self.prepare_pep517_metadata()
+            else:
+                self.run_egg_info()
+
+        if not self.req:
+            if isinstance(parse_version(self.metadata["Version"]), Version):
+                op = "=="
+            else:
+                op = "==="
+            self.req = Requirement(
+                "".join([
+                    self.metadata["Name"],
+                    op,
+                    self.metadata["Version"],
+                ])
+            )
+            self._correct_build_location()
+        else:
+            metadata_name = canonicalize_name(self.metadata["Name"])
+            if canonicalize_name(self.req.name) != metadata_name:
+                logger.warning(
+                    'Generating metadata for package %s '
+                    'produced metadata for project name %s. Fix your '
+                    '#egg=%s fragments.',
+                    self.name, metadata_name, self.name
+                )
+                self.req = Requirement(metadata_name)
+
+    def prepare_pep517_metadata(self):
+        # type: () -> None
+        assert self.pep517_backend is not None
+
+        metadata_dir = os.path.join(
+            self.setup_py_dir,
+            'pip-wheel-metadata'
+        )
+        ensure_dir(metadata_dir)
+
+        with self.build_env:
+            # Note that Pep517HookCaller implements a fallback for
+            # prepare_metadata_for_build_wheel, so we don't have to
+            # consider the possibility that this hook doesn't exist.
+            backend = self.pep517_backend
+            self.spin_message = "Preparing wheel metadata"
+            distinfo_dir = backend.prepare_metadata_for_build_wheel(
+                metadata_dir
+            )
+
+        self.metadata_directory = os.path.join(metadata_dir, distinfo_dir)
+
+    def run_egg_info(self):
+        # type: () -> None
+        if self.name:
+            logger.debug(
+                'Running setup.py (path:%s) egg_info for package %s',
+                self.setup_py, self.name,
+            )
+        else:
+            logger.debug(
+                'Running setup.py (path:%s) egg_info for package from %s',
+                self.setup_py, self.link,
+            )
+        script = SETUPTOOLS_SHIM % self.setup_py
+        base_cmd = [sys.executable, '-c', script]
+        if self.isolated:
+            base_cmd += ["--no-user-cfg"]
+        egg_info_cmd = base_cmd + ['egg_info']
+        # We can't put the .egg-info files at the root, because then the
+        # source code will be mistaken for an installed egg, causing
+        # problems
+        if self.editable:
+            egg_base_option = []  # type: List[str]
+        else:
+            egg_info_dir = os.path.join(self.setup_py_dir, 'pip-egg-info')
+            ensure_dir(egg_info_dir)
+            egg_base_option = ['--egg-base', 'pip-egg-info']
+        with self.build_env:
+            call_subprocess(
+                egg_info_cmd + egg_base_option,
+                cwd=self.setup_py_dir,
+                command_desc='python setup.py egg_info')
+
+    @property
+    def egg_info_path(self):
+        # type: () -> str
+        if self._egg_info_path is None:
+            if self.editable:
+                base = self.source_dir
+            else:
+                base = os.path.join(self.setup_py_dir, 'pip-egg-info')
+            filenames = os.listdir(base)
+            if self.editable:
+                filenames = []
+                for root, dirs, files in os.walk(base):
+                    for dir in vcs.dirnames:
+                        if dir in dirs:
+                            dirs.remove(dir)
+                    # Iterate over a copy of ``dirs``, since mutating
+                    # a list while iterating over it can cause trouble.
+                    # (See https://github.com/pypa/pip/pull/462.)
+                    for dir in list(dirs):
+                        # Don't search in anything that looks like a virtualenv
+                        # environment
+                        if (
+                                os.path.lexists(
+                                    os.path.join(root, dir, 'bin', 'python')
+                                ) or
+                                os.path.exists(
+                                    os.path.join(
+                                        root, dir, 'Scripts', 'Python.exe'
+                                    )
+                                )):
+                            dirs.remove(dir)
+                        # Also don't search through tests
+                        elif dir == 'test' or dir == 'tests':
+                            dirs.remove(dir)
+                    filenames.extend([os.path.join(root, dir)
+                                      for dir in dirs])
+                filenames = [f for f in filenames if f.endswith('.egg-info')]
+
+            if not filenames:
+                raise InstallationError(
+                    "Files/directories not found in %s" % base
+                )
+            # if we have more than one match, we pick the toplevel one.  This
+            # can easily be the case if there is a dist folder which contains
+            # an extracted tarball for testing purposes.
+            if len(filenames) > 1:
+                filenames.sort(
+                    key=lambda x: x.count(os.path.sep) +
+                    (os.path.altsep and x.count(os.path.altsep) or 0)
+                )
+            self._egg_info_path = os.path.join(base, filenames[0])
+        return self._egg_info_path
+
+    @property
+    def metadata(self):
+        # type: () -> Any
+        if not hasattr(self, '_metadata'):
+            self._metadata = get_metadata(self.get_dist())
+
+        return self._metadata
+
+    def get_dist(self):
+        # type: () -> Distribution
+        """Return a pkg_resources.Distribution for this requirement"""
+        if self.metadata_directory:
+            base_dir, distinfo = os.path.split(self.metadata_directory)
+            metadata = pkg_resources.PathMetadata(
+                base_dir, self.metadata_directory
+            )
+            dist_name = os.path.splitext(distinfo)[0]
+            typ = pkg_resources.DistInfoDistribution
+        else:
+            egg_info = self.egg_info_path.rstrip(os.path.sep)
+            base_dir = os.path.dirname(egg_info)
+            metadata = pkg_resources.PathMetadata(base_dir, egg_info)
+            dist_name = os.path.splitext(os.path.basename(egg_info))[0]
+            # https://github.com/python/mypy/issues/1174
+            typ = pkg_resources.Distribution  # type: ignore
+
+        return typ(
+            base_dir,
+            project_name=dist_name,
+            metadata=metadata,
+        )
+
+    def assert_source_matches_version(self):
+        # type: () -> None
+        assert self.source_dir
+        version = self.metadata['version']
+        if self.req.specifier and version not in self.req.specifier:
+            logger.warning(
+                'Requested %s, but installing version %s',
+                self,
+                version,
+            )
+        else:
+            logger.debug(
+                'Source in %s has version %s, which satisfies requirement %s',
+                display_path(self.source_dir),
+                version,
+                self,
+            )
+
+    # For both source distributions and editables
+    def ensure_has_source_dir(self, parent_dir):
+        # type: (str) -> str
+        """Ensure that a source_dir is set.
+
+        This will create a temporary build dir if the name of the requirement
+        isn't known yet.
+
+        :param parent_dir: The ideal pip parent_dir for the source_dir.
+            Generally src_dir for editables and build_dir for sdists.
+        :return: self.source_dir
+        """
+        if self.source_dir is None:
+            self.source_dir = self.build_location(parent_dir)
+        return self.source_dir
+
+    # For editable installations
+    def install_editable(
+        self,
+        install_options,  # type: List[str]
+        global_options=(),  # type: Sequence[str]
+        prefix=None  # type: Optional[str]
+    ):
+        # type: (...) -> None
+        logger.info('Running setup.py develop for %s', self.name)
+
+        if self.isolated:
+            global_options = list(global_options) + ["--no-user-cfg"]
+
+        if prefix:
+            prefix_param = ['--prefix={}'.format(prefix)]
+            install_options = list(install_options) + prefix_param
+
+        with indent_log():
+            # FIXME: should we do --install-headers here too?
+            with self.build_env:
+                call_subprocess(
+                    [
+                        sys.executable,
+                        '-c',
+                        SETUPTOOLS_SHIM % self.setup_py
+                    ] +
+                    list(global_options) +
+                    ['develop', '--no-deps'] +
+                    list(install_options),
+
+                    cwd=self.setup_py_dir,
+                )
+
+        self.install_succeeded = True
+
+    def update_editable(self, obtain=True):
+        # type: (bool) -> None
+        if not self.link:
+            logger.debug(
+                "Cannot update repository at %s; repository location is "
+                "unknown",
+                self.source_dir,
+            )
+            return
+        assert self.editable
+        assert self.source_dir
+        if self.link.scheme == 'file':
+            # Static paths don't get updated
+            return
+        assert '+' in self.link.url, "bad url: %r" % self.link.url
+        if not self.update:
+            return
+        vc_type, url = self.link.url.split('+', 1)
+        backend = vcs.get_backend(vc_type)
+        if backend:
+            vcs_backend = backend(self.link.url)
+            if obtain:
+                vcs_backend.obtain(self.source_dir)
+            else:
+                vcs_backend.export(self.source_dir)
+        else:
+            assert 0, (
+                'Unexpected version control type (in %s): %s'
+                % (self.link, vc_type))
+
+    # Top-level Actions
+    def uninstall(self, auto_confirm=False, verbose=False,
+                  use_user_site=False):
+        # type: (bool, bool, bool) -> Optional[UninstallPathSet]
+        """
+        Uninstall the distribution currently satisfying this requirement.
+
+        Prompts before removing or modifying files unless
+        ``auto_confirm`` is True.
+
+        Refuses to delete or modify files outside of ``sys.prefix`` -
+        thus uninstallation within a virtual environment can only
+        modify that virtual environment, even if the virtualenv is
+        linked to global site-packages.
+
+        """
+        if not self.check_if_exists(use_user_site):
+            logger.warning("Skipping %s as it is not installed.", self.name)
+            return None
+        dist = self.satisfied_by or self.conflicts_with
+
+        uninstalled_pathset = UninstallPathSet.from_dist(dist)
+        uninstalled_pathset.remove(auto_confirm, verbose)
+        return uninstalled_pathset
+
+    def _clean_zip_name(self, name, prefix):  # only used by archive.
+        # type: (str, str) -> str
+        assert name.startswith(prefix + os.path.sep), (
+            "name %r doesn't start with prefix %r" % (name, prefix)
+        )
+        name = name[len(prefix) + 1:]
+        name = name.replace(os.path.sep, '/')
+        return name
+
+    def _get_archive_name(self, path, parentdir, rootdir):
+        # type: (str, str, str) -> str
+        path = os.path.join(parentdir, path)
+        name = self._clean_zip_name(path, rootdir)
+        return self.name + '/' + name
+
+    # TODO: Investigate if this should be kept in InstallRequirement
+    #       Seems to be used only when VCS + downloads
+    def archive(self, build_dir):
+        # type: (str) -> None
+        assert self.source_dir
+        create_archive = True
+        archive_name = '%s-%s.zip' % (self.name, self.metadata["version"])
+        archive_path = os.path.join(build_dir, archive_name)
+        if os.path.exists(archive_path):
+            response = ask_path_exists(
+                'The file %s exists. (i)gnore, (w)ipe, (b)ackup, (a)bort ' %
+                display_path(archive_path), ('i', 'w', 'b', 'a'))
+            if response == 'i':
+                create_archive = False
+            elif response == 'w':
+                logger.warning('Deleting %s', display_path(archive_path))
+                os.remove(archive_path)
+            elif response == 'b':
+                dest_file = backup_dir(archive_path)
+                logger.warning(
+                    'Backing up %s to %s',
+                    display_path(archive_path),
+                    display_path(dest_file),
+                )
+                shutil.move(archive_path, dest_file)
+            elif response == 'a':
+                sys.exit(-1)
+        if create_archive:
+            zip = zipfile.ZipFile(
+                archive_path, 'w', zipfile.ZIP_DEFLATED,
+                allowZip64=True
+            )
+            dir = os.path.normcase(os.path.abspath(self.setup_py_dir))
+            for dirpath, dirnames, filenames in os.walk(dir):
+                if 'pip-egg-info' in dirnames:
+                    dirnames.remove('pip-egg-info')
+                for dirname in dirnames:
+                    dir_arcname = self._get_archive_name(dirname,
+                                                         parentdir=dirpath,
+                                                         rootdir=dir)
+                    zipdir = zipfile.ZipInfo(dir_arcname + '/')
+                    zipdir.external_attr = 0x1ED << 16  # 0o755
+                    zip.writestr(zipdir, '')
+                for filename in filenames:
+                    if filename == PIP_DELETE_MARKER_FILENAME:
+                        continue
+                    file_arcname = self._get_archive_name(filename,
+                                                          parentdir=dirpath,
+                                                          rootdir=dir)
+                    filename = os.path.join(dirpath, filename)
+                    zip.write(filename, file_arcname)
+            zip.close()
+            logger.info('Saved %s', display_path(archive_path))
+
+    def install(
+        self,
+        install_options,  # type: List[str]
+        global_options=None,  # type: Optional[Sequence[str]]
+        root=None,  # type: Optional[str]
+        home=None,  # type: Optional[str]
+        prefix=None,  # type: Optional[str]
+        warn_script_location=True,  # type: bool
+        use_user_site=False,  # type: bool
+        pycompile=True  # type: bool
+    ):
+        # type: (...) -> None
+        global_options = global_options if global_options is not None else []
+        if self.editable:
+            self.install_editable(
+                install_options, global_options, prefix=prefix,
+            )
+            return
+        if self.is_wheel:
+            version = wheel.wheel_version(self.source_dir)
+            wheel.check_compatibility(version, self.name)
+
+            self.move_wheel_files(
+                self.source_dir, root=root, prefix=prefix, home=home,
+                warn_script_location=warn_script_location,
+                use_user_site=use_user_site, pycompile=pycompile,
+            )
+            self.install_succeeded = True
+            return
+
+        # Extend the list of global and install options passed on to
+        # the setup.py call with the ones from the requirements file.
+        # Options specified in requirements file override those
+        # specified on the command line, since the last option given
+        # to setup.py is the one that is used.
+        global_options = list(global_options) + \
+            self.options.get('global_options', [])
+        install_options = list(install_options) + \
+            self.options.get('install_options', [])
+
+        if self.isolated:
+            # https://github.com/python/mypy/issues/1174
+            global_options = global_options + ["--no-user-cfg"]  # type: ignore
+
+        with TempDirectory(kind="record") as temp_dir:
+            record_filename = os.path.join(temp_dir.path, 'install-record.txt')
+            install_args = self.get_install_args(
+                global_options, record_filename, root, prefix, pycompile,
+            )
+            msg = 'Running setup.py install for %s' % (self.name,)
+            with open_spinner(msg) as spinner:
+                with indent_log():
+                    with self.build_env:
+                        call_subprocess(
+                            install_args + install_options,
+                            cwd=self.setup_py_dir,
+                            spinner=spinner,
+                        )
+
+            if not os.path.exists(record_filename):
+                logger.debug('Record file %s not found', record_filename)
+                return
+            self.install_succeeded = True
+
+            def prepend_root(path):
+                # type: (str) -> str
+                if root is None or not os.path.isabs(path):
+                    return path
+                else:
+                    return change_root(root, path)
+
+            with open(record_filename) as f:
+                for line in f:
+                    directory = os.path.dirname(line)
+                    if directory.endswith('.egg-info'):
+                        egg_info_dir = prepend_root(directory)
+                        break
+                else:
+                    logger.warning(
+                        'Could not find .egg-info directory in install record'
+                        ' for %s',
+                        self,
+                    )
+                    # FIXME: put the record somewhere
+                    # FIXME: should this be an error?
+                    return
+            new_lines = []
+            with open(record_filename) as f:
+                for line in f:
+                    filename = line.strip()
+                    if os.path.isdir(filename):
+                        filename += os.path.sep
+                    new_lines.append(
+                        os.path.relpath(prepend_root(filename), egg_info_dir)
+                    )
+            new_lines.sort()
+            ensure_dir(egg_info_dir)
+            inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
+            with open(inst_files_path, 'w') as f:
+                f.write('\n'.join(new_lines) + '\n')
+
+    def get_install_args(
+        self,
+        global_options,  # type: Sequence[str]
+        record_filename,  # type: str
+        root,  # type: Optional[str]
+        prefix,  # type: Optional[str]
+        pycompile  # type: bool
+    ):
+        # type: (...) -> List[str]
+        install_args = [sys.executable, "-u"]
+        install_args.append('-c')
+        install_args.append(SETUPTOOLS_SHIM % self.setup_py)
+        install_args += list(global_options) + \
+            ['install', '--record', record_filename]
+        install_args += ['--single-version-externally-managed']
+
+        if root is not None:
+            install_args += ['--root', root]
+        if prefix is not None:
+            install_args += ['--prefix', prefix]
+
+        if pycompile:
+            install_args += ["--compile"]
+        else:
+            install_args += ["--no-compile"]
+
+        if running_under_virtualenv():
+            py_ver_str = 'python' + sysconfig.get_python_version()
+            install_args += ['--install-headers',
+                             os.path.join(sys.prefix, 'include', 'site',
+                                          py_ver_str, self.name)]
+
+        return install_args
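+
+
+# Illustrative sketch (hypothetical paths): inside a virtualenv, for a
+# requirement named "demo", get_install_args() produces roughly:
+#
+#     [sys.executable, '-u', '-c', SETUPTOOLS_SHIM % setup_py,
+#      'install', '--record', record_filename,
+#      '--single-version-externally-managed', '--compile',
+#      '--install-headers', sys.prefix + '/include/site/python3.7/demo']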
diff --git a/lib/python3.7/site-packages/pip/_internal/req/req_set.py b/lib/python3.7/site-packages/pip/_internal/req/req_set.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1966a4aa9c7616a33bfd68beff28cbcc04b1bf0
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/req/req_set.py
@@ -0,0 +1,193 @@
+from __future__ import absolute_import
+
+import logging
+from collections import OrderedDict
+
+from pip._internal.exceptions import InstallationError
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.wheel import Wheel
+
+if MYPY_CHECK_RUNNING:
+    from typing import Dict, Iterable, List, Optional, Tuple
+    from pip._internal.req.req_install import InstallRequirement
+
+
+logger = logging.getLogger(__name__)
+
+
+class RequirementSet(object):
+
+    def __init__(self, require_hashes=False, check_supported_wheels=True):
+        # type: (bool, bool) -> None
+        """Create a RequirementSet.
+        """
+
+        self.requirements = OrderedDict()  # type: Dict[str, InstallRequirement]  # noqa: E501
+        self.require_hashes = require_hashes
+        self.check_supported_wheels = check_supported_wheels
+
+        # Mapping of alias: real_name
+        self.requirement_aliases = {}  # type: Dict[str, str]
+        self.unnamed_requirements = []  # type: List[InstallRequirement]
+        self.successfully_downloaded = []  # type: List[InstallRequirement]
+        self.reqs_to_cleanup = []  # type: List[InstallRequirement]
+
+    def __str__(self):
+        # type: () -> str
+        reqs = [req for req in self.requirements.values()
+                if not req.comes_from]
+        reqs.sort(key=lambda req: req.name.lower())
+        return ' '.join([str(req.req) for req in reqs])
+
+    def __repr__(self):
+        # type: () -> str
+        reqs = [req for req in self.requirements.values()]
+        reqs.sort(key=lambda req: req.name.lower())
+        reqs_str = ', '.join([str(req.req) for req in reqs])
+        return ('<%s object; %d requirement(s): %s>'
+                % (self.__class__.__name__, len(reqs), reqs_str))
+
+    def add_requirement(
+        self,
+        install_req,  # type: InstallRequirement
+        parent_req_name=None,  # type: Optional[str]
+        extras_requested=None  # type: Optional[Iterable[str]]
+    ):
+        # type: (...) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]  # noqa: E501
+        """Add install_req as a requirement to install.
+
+        :param parent_req_name: The name of the requirement that needed this
+            added. The name is used because when multiple unnamed requirements
+            resolve to the same name, we could otherwise end up with dependency
+            links that point outside the Requirements set. parent_req must
+            already be added. Note that None implies that this is a user
+            supplied requirement, vs an inferred one.
+        :param extras_requested: an iterable of extras used to evaluate the
+            environment markers.
+        :return: Additional requirements to scan. That is either [] if
+            the requirement is not applicable, or [install_req] if the
+            requirement is applicable and has just been added.
+        """
+        name = install_req.name
+
+        # If the markers do not match, ignore this requirement.
+        if not install_req.match_markers(extras_requested):
+            logger.info(
+                "Ignoring %s: markers '%s' don't match your environment",
+                name, install_req.markers,
+            )
+            return [], None
+
+        # If the wheel is not supported, raise an error.
+        # We check this after filtering on environment markers so that a
+        # single requirements file can specify different wheels for
+        # different environments/OSes.
+        if install_req.link and install_req.link.is_wheel:
+            wheel = Wheel(install_req.link.filename)
+            if self.check_supported_wheels and not wheel.supported():
+                raise InstallationError(
+                    "%s is not a supported wheel on this platform." %
+                    wheel.filename
+                )
+
+        # This next bit is really a sanity check.
+        assert install_req.is_direct == (parent_req_name is None), (
+            "a direct req shouldn't have a parent and also, "
+            "a non direct req should have a parent"
+        )
+
+        # Unnamed requirements are scanned again and the requirement won't be
+        # added as a dependency until after scanning.
+        if not name:
+            # url or path requirement w/o an egg fragment
+            self.unnamed_requirements.append(install_req)
+            return [install_req], None
+
+        try:
+            existing_req = self.get_requirement(name)
+        except KeyError:
+            existing_req = None
+
+        has_conflicting_requirement = (
+            parent_req_name is None and
+            existing_req and
+            not existing_req.constraint and
+            existing_req.extras == install_req.extras and
+            existing_req.req.specifier != install_req.req.specifier
+        )
+        if has_conflicting_requirement:
+            raise InstallationError(
+                "Double requirement given: %s (already in %s, name=%r)"
+                % (install_req, existing_req, name)
+            )
+
+        # When no existing requirement exists, add the requirement as a
+        # dependency and it will be scanned again after.
+        if not existing_req:
+            self.requirements[name] = install_req
+            # FIXME: what about other normalizations?  E.g., _ vs. -?
+            if name.lower() != name:
+                self.requirement_aliases[name.lower()] = name
+            # We'd want to rescan this requirement later
+            return [install_req], install_req
+
+        # Assume there's no need to scan, as we've already encountered this
+        # requirement for scanning.
+        if install_req.constraint or not existing_req.constraint:
+            return [], existing_req
+
+        does_not_satisfy_constraint = (
+            install_req.link and
+            not (
+                existing_req.link and
+                install_req.link.path == existing_req.link.path
+            )
+        )
+        if does_not_satisfy_constraint:
+            self.reqs_to_cleanup.append(install_req)
+            raise InstallationError(
+                "Could not satisfy constraints for '%s': "
+                "installation from path or url cannot be "
+                "constrained to a version" % name,
+            )
+        # If we're now installing a constraint, mark the existing
+        # object for real installation.
+        existing_req.constraint = False
+        existing_req.extras = tuple(sorted(
+            set(existing_req.extras) | set(install_req.extras)
+        ))
+        logger.debug(
+            "Setting %s extras to: %s",
+            existing_req, existing_req.extras,
+        )
+        # Return the existing requirement for addition to the parent and
+        # scanning again.
+        return [existing_req], existing_req
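+
+    # Illustrative usage sketch (names are hypothetical): a resolver feeds
+    # the returned requirements back into scanning, e.g.:
+    #
+    #     to_scan, added = req_set.add_requirement(install_req)
+    #     for sub_req in to_scan:
+    #         ...  # discover and add sub_req's dependencies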
+
+    def has_requirement(self, project_name):
+        # type: (str) -> bool
+        name = project_name.lower()
+        if ((name in self.requirements and
+             not self.requirements[name].constraint) or
+            (name in self.requirement_aliases and
+             not self.requirements[self.requirement_aliases[name]].constraint)):
+            return True
+        return False
+
+    def get_requirement(self, project_name):
+        # type: (str) -> InstallRequirement
+        for name in project_name, project_name.lower():
+            if name in self.requirements:
+                return self.requirements[name]
+            if name in self.requirement_aliases:
+                return self.requirements[self.requirement_aliases[name]]
+        raise KeyError("No project with the name %r" % project_name)
+
+    def cleanup_files(self):
+        # type: () -> None
+        """Clean up files, remove builds."""
+        logger.debug('Cleaning up...')
+        with indent_log():
+            for req in self.reqs_to_cleanup:
+                req.remove_temporary_source()
diff --git a/lib/python3.7/site-packages/pip/_internal/req/req_tracker.py b/lib/python3.7/site-packages/pip/_internal/req/req_tracker.py
new file mode 100644
index 0000000000000000000000000000000000000000..e36a3f6b5c2126c1076de0c4abd9e2f32c11305f
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/req/req_tracker.py
@@ -0,0 +1,96 @@
+from __future__ import absolute_import
+
+import contextlib
+import errno
+import hashlib
+import logging
+import os
+
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from types import TracebackType
+    from typing import Iterator, Optional, Set, Type
+    from pip._internal.req.req_install import InstallRequirement
+    from pip._internal.models.link import Link
+
+logger = logging.getLogger(__name__)
+
+
+class RequirementTracker(object):
+
+    def __init__(self):
+        # type: () -> None
+        self._root = os.environ.get('PIP_REQ_TRACKER')
+        if self._root is None:
+            self._temp_dir = TempDirectory(delete=False, kind='req-tracker')
+            self._temp_dir.create()
+            self._root = os.environ['PIP_REQ_TRACKER'] = self._temp_dir.path
+            logger.debug('Created requirements tracker %r', self._root)
+        else:
+            self._temp_dir = None
+            logger.debug('Re-using requirements tracker %r', self._root)
+        self._entries = set()  # type: Set[InstallRequirement]
+
+    def __enter__(self):
+        # type: () -> RequirementTracker
+        return self
+
+    def __exit__(
+        self,
+        exc_type,  # type: Optional[Type[BaseException]]
+        exc_val,  # type: Optional[BaseException]
+        exc_tb  # type: Optional[TracebackType]
+    ):
+        # type: (...) -> None
+        self.cleanup()
+
+    def _entry_path(self, link):
+        # type: (Link) -> str
+        hashed = hashlib.sha224(link.url_without_fragment.encode()).hexdigest()
+        return os.path.join(self._root, hashed)
+
+    def add(self, req):
+        # type: (InstallRequirement) -> None
+        link = req.link
+        info = str(req)
+        entry_path = self._entry_path(link)
+        try:
+            with open(entry_path) as fp:
+                # Error, there's already a build in progress.
+                raise LookupError('%s is already being built: %s'
+                                  % (link, fp.read()))
+        except IOError as e:
+            if e.errno != errno.ENOENT:
+                raise
+            assert req not in self._entries
+            with open(entry_path, 'w') as fp:
+                fp.write(info)
+            self._entries.add(req)
+            logger.debug('Added %s to build tracker %r', req, self._root)
+
+    def remove(self, req):
+        # type: (InstallRequirement) -> None
+        link = req.link
+        self._entries.remove(req)
+        os.unlink(self._entry_path(link))
+        logger.debug('Removed %s from build tracker %r', req, self._root)
+
+    def cleanup(self):
+        # type: () -> None
+        for req in set(self._entries):
+            self.remove(req)
+        remove = self._temp_dir is not None
+        if remove:
+            self._temp_dir.cleanup()
+        logger.debug('%s build tracker %r',
+                     'Removed' if remove else 'Cleaned',
+                     self._root)
+
+    @contextlib.contextmanager
+    def track(self, req):
+        # type: (InstallRequirement) -> Iterator[None]
+        self.add(req)
+        yield
+        self.remove(req)
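+
+
+# Illustrative usage sketch (assumes `req` is an InstallRequirement with a
+# populated link): the tracker guards against concurrent builds of the same
+# URL by writing a marker file keyed on the hashed link:
+#
+#     with RequirementTracker() as tracker:
+#         with tracker.track(req):
+#             ...  # run the PEP 517 / setup.py build for req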
diff --git a/lib/python3.7/site-packages/pip/_internal/req/req_uninstall.py b/lib/python3.7/site-packages/pip/_internal/req/req_uninstall.py
new file mode 100644
index 0000000000000000000000000000000000000000..733301cee6c1404675433f8a3dc8aebcfd7b0bf5
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/req/req_uninstall.py
@@ -0,0 +1,633 @@
+from __future__ import absolute_import
+
+import csv
+import functools
+import logging
+import os
+import sys
+import sysconfig
+
+from pip._vendor import pkg_resources
+
+from pip._internal.exceptions import UninstallationError
+from pip._internal.locations import bin_py, bin_user
+from pip._internal.utils.compat import WINDOWS, cache_from_source, uses_pycache
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import (
+    FakeFile, ask, dist_in_usersite, dist_is_local, egg_link_path, is_local,
+    normalize_path, renames, rmtree,
+)
+from pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import (
+        Any, Callable, Dict, Iterable, Iterator, List, Optional, Set, Tuple,
+    )
+    from pip._vendor.pkg_resources import Distribution
+
+logger = logging.getLogger(__name__)
+
+
+def _script_names(dist, script_name, is_gui):
+    # type: (Distribution, str, bool) -> List[str]
+    """Create the fully qualified name of the files created by
+    {console,gui}_scripts for the given ``dist``.
+    Returns the list of file names
+    """
+    if dist_in_usersite(dist):
+        bin_dir = bin_user
+    else:
+        bin_dir = bin_py
+    exe_name = os.path.join(bin_dir, script_name)
+    paths_to_remove = [exe_name]
+    if WINDOWS:
+        paths_to_remove.append(exe_name + '.exe')
+        paths_to_remove.append(exe_name + '.exe.manifest')
+        if is_gui:
+            paths_to_remove.append(exe_name + '-script.pyw')
+        else:
+            paths_to_remove.append(exe_name + '-script.py')
+    return paths_to_remove
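+
+
+# For example, on Windows a console script "demo" (hypothetical) expands to
+# <bin_dir>/demo, demo.exe, demo.exe.manifest and demo-script.py (or
+# demo-script.pyw for a GUI script); elsewhere it is just <bin_dir>/demo.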
+
+
+def _unique(fn):
+    # type: (Callable) -> Callable[..., Iterator[Any]]
+    @functools.wraps(fn)
+    def unique(*args, **kw):
+        # type: (Any, Any) -> Iterator[Any]
+        seen = set()  # type: Set[Any]
+        for item in fn(*args, **kw):
+            if item not in seen:
+                seen.add(item)
+                yield item
+    return unique
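+
+# Illustrative sketch: any generator function wrapped with @_unique yields
+# each distinct item only once, so a wrapped generator producing 'a', 'a',
+# 'b' is consumed as ['a', 'b'].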
+
+
+@_unique
+def uninstallation_paths(dist):
+    # type: (Distribution) -> Iterator[str]
+    """
+    Yield all the uninstallation paths for dist based on RECORD-without-.py[co]
+
+    Yield paths to all the files in RECORD. For each .py file in RECORD, add
+    the .pyc and .pyo in the same directory.
+
+    UninstallPathSet.add() takes care of the __pycache__ .py[co].
+    """
+    r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
+    for row in r:
+        path = os.path.join(dist.location, row[0])
+        yield path
+        if path.endswith('.py'):
+            dn, fn = os.path.split(path)
+            base = fn[:-3]
+            path = os.path.join(dn, base + '.pyc')
+            yield path
+            path = os.path.join(dn, base + '.pyo')
+            yield path
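+
+# Illustrative sketch (hypothetical RECORD row): a row naming 'pkg/mod.py'
+# yields '<dist.location>/pkg/mod.py' plus the sibling 'mod.pyc' and
+# 'mod.pyo' paths; the @_unique wrapper drops duplicates.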
+
+
+def compact(paths):
+    # type: (Iterable[str]) -> Set[str]
+    """Compact a path set to contain the minimal number of paths
+    necessary to contain all paths in the set. If /a/path/ and
+    /a/path/to/a/file.txt are both in the set, leave only the
+    shorter path."""
+
+    sep = os.path.sep
+    short_paths = set()  # type: Set[str]
+    for path in sorted(paths, key=len):
+        should_skip = any(
+            path.startswith(shortpath.rstrip("*")) and
+            path[len(shortpath.rstrip("*").rstrip(sep))] == sep
+            for shortpath in short_paths
+        )
+        if not should_skip:
+            short_paths.add(path)
+    return short_paths
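+
+# Illustrative sketch: compact({'/a/b/', '/a/b/c.txt'}) keeps only
+# {'/a/b/'}, since the file is covered by the shorter directory path.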
+
+
+def compress_for_rename(paths):
+    # type: (Iterable[str]) -> Set[str]
+    """Returns a set containing the paths that need to be renamed.
+
+    This set may include directories when the original sequence of paths
+    included every file on disk.
+    """
+    case_map = dict((os.path.normcase(p), p) for p in paths)
+    remaining = set(case_map)
+    unchecked = sorted(set(os.path.split(p)[0]
+                           for p in case_map.values()), key=len)
+    wildcards = set()  # type: Set[str]
+
+    def norm_join(*a):
+        # type: (str) -> str
+        return os.path.normcase(os.path.join(*a))
+
+    for root in unchecked:
+        if any(os.path.normcase(root).startswith(w)
+               for w in wildcards):
+            # This directory has already been handled.
+            continue
+
+        all_files = set()  # type: Set[str]
+        all_subdirs = set()  # type: Set[str]
+        for dirname, subdirs, files in os.walk(root):
+            all_subdirs.update(norm_join(root, dirname, d)
+                               for d in subdirs)
+            all_files.update(norm_join(root, dirname, f)
+                             for f in files)
+        # If all the files we found are in our remaining set of files to
+        # remove, then remove them from the latter set and add a wildcard
+        # for the directory.
+        if not (all_files - remaining):
+            remaining.difference_update(all_files)
+            wildcards.add(root + os.sep)
+
+    return set(map(case_map.__getitem__, remaining)) | wildcards
+
+
+def compress_for_output_listing(paths):
+    # type: (Iterable[str]) -> Tuple[Set[str], Set[str]]
+    """Returns a tuple of 2 sets of which paths to display to user
+
+    The first set contains paths that would be deleted. Files of a package
+    are not added and the top-level directory of the package has a '*' added
+    at the end - to signify that all of its contents are removed.
+
+    The second set contains files that would have been skipped in the above
+    folders.
+    """
+
+    will_remove = set(paths)
+    will_skip = set()
+
+    # Determine folders and files
+    folders = set()
+    files = set()
+    for path in will_remove:
+        if path.endswith(".pyc"):
+            continue
+        if path.endswith("__init__.py") or ".dist-info" in path:
+            folders.add(os.path.dirname(path))
+        files.add(path)
+
+    # probably this one https://github.com/python/mypy/issues/390
+    _normcased_files = set(map(os.path.normcase, files))  # type: ignore
+
+    folders = compact(folders)
+
+    # This walks the tree using os.walk to not miss extra folders
+    # that might get added.
+    for folder in folders:
+        for dirpath, _, dirfiles in os.walk(folder):
+            for fname in dirfiles:
+                if fname.endswith(".pyc"):
+                    continue
+
+                file_ = os.path.join(dirpath, fname)
+                if (os.path.isfile(file_) and
+                        os.path.normcase(file_) not in _normcased_files):
+                    # We are skipping this file. Add it to the set.
+                    will_skip.add(file_)
+
+    will_remove = files | {
+        os.path.join(folder, "*") for folder in folders
+    }
+
+    return will_remove, will_skip
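+
+# Illustrative sketch (hypothetical layout): for a distribution recorded as
+# site-packages/demo/{__init__.py,core.py} plus demo-1.0.dist-info/RECORD,
+# the first set gains wildcards such as '.../demo/*', while the second set
+# lists files discovered under those folders that were not recorded.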
+
+
+class StashedUninstallPathSet(object):
+    """A set of file rename operations to stash files while
+    tentatively uninstalling them."""
+    def __init__(self):
+        # type: () -> None
+        # Mapping from source file root to [Adjacent]TempDirectory
+        # for files under that directory.
+        self._save_dirs = {}  # type: Dict[str, TempDirectory]
+        # (old path, new path) tuples for each move that may need
+        # to be undone.
+        self._moves = []  # type: List[Tuple[str, str]]
+
+    def _get_directory_stash(self, path):
+        # type: (str) -> str
+        """Stashes a directory.
+
+        Directories are stashed adjacent to their original location if
+        possible, or else moved/copied into the user's temp dir."""
+
+        try:
+            save_dir = AdjacentTempDirectory(path)  # type: TempDirectory
+            save_dir.create()
+        except OSError:
+            save_dir = TempDirectory(kind="uninstall")
+            save_dir.create()
+        self._save_dirs[os.path.normcase(path)] = save_dir
+
+        return save_dir.path
+
+    def _get_file_stash(self, path):
+        # type: (str) -> str
+        """Stashes a file.
+
+        If no root has been provided, one will be created for the directory
+        in the user's temp directory."""
+        path = os.path.normcase(path)
+        head, old_head = os.path.dirname(path), None
+        save_dir = None
+
+        while head != old_head:
+            try:
+                save_dir = self._save_dirs[head]
+                break
+            except KeyError:
+                pass
+            head, old_head = os.path.dirname(head), head
+        else:
+            # Did not find any suitable root
+            head = os.path.dirname(path)
+            save_dir = TempDirectory(kind='uninstall')
+            save_dir.create()
+            self._save_dirs[head] = save_dir
+
+        relpath = os.path.relpath(path, head)
+        if relpath and relpath != os.path.curdir:
+            return os.path.join(save_dir.path, relpath)
+        return save_dir.path
+
+    def stash(self, path):
+        # type: (str) -> str
+        """Stashes the directory or file and returns its new location.
+        """
+        if os.path.isdir(path):
+            new_path = self._get_directory_stash(path)
+        else:
+            new_path = self._get_file_stash(path)
+
+        self._moves.append((path, new_path))
+        if os.path.isdir(path) and os.path.isdir(new_path):
+            # If we're moving a directory, we need to
+            # remove the destination first or else it will be
+            # moved to inside the existing directory.
+            # We just created new_path ourselves, so it will
+            # be removable.
+            os.rmdir(new_path)
+        renames(path, new_path)
+        return new_path
+
+    def commit(self):
+        # type: () -> None
+        """Commits the uninstall by removing stashed files."""
+        for _, save_dir in self._save_dirs.items():
+            save_dir.cleanup()
+        self._moves = []
+        self._save_dirs = {}
+
+    def rollback(self):
+        # type: () -> None
+        """Undoes the uninstall by moving stashed files back."""
+        for p in self._moves:
+            logger.info("Moving to %s\n from %s", *p)
+
+        for new_path, path in self._moves:
+            try:
+                logger.debug('Replacing %s from %s', new_path, path)
+                if os.path.isfile(new_path):
+                    os.unlink(new_path)
+                elif os.path.isdir(new_path):
+                    rmtree(new_path)
+                renames(path, new_path)
+            except OSError as ex:
+                logger.error("Failed to restore %s", new_path)
+                logger.debug("Exception: %s", ex)
+
+        self.commit()
+
+    @property
+    def can_rollback(self):
+        # type: () -> bool
+        return bool(self._moves)
+
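+# Illustrative usage sketch (not part of pip; path hypothetical):
+#
+#     stashed = StashedUninstallPathSet()
+#     new_location = stashed.stash('/site-packages/demo/core.py')
+#     # ... carry out the rest of the uninstall ...
+#     stashed.rollback()  # on failure: moves core.py back into place
+#     # or stashed.commit()  # on success: deletes the stashed copy
+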
+
+class UninstallPathSet(object):
+    """A set of file paths to be removed in the uninstallation of a
+    requirement."""
+    def __init__(self, dist):
+        # type: (Distribution) -> None
+        self.paths = set()  # type: Set[str]
+        self._refuse = set()  # type: Set[str]
+        self.pth = {}  # type: Dict[str, UninstallPthEntries]
+        self.dist = dist
+        self._moved_paths = StashedUninstallPathSet()
+
+    def _permitted(self, path):
+        # type: (str) -> bool
+        """
+        Return True if the given path is one we are permitted to
+        remove/modify, False otherwise.
+
+        """
+        return is_local(path)
+
+    def add(self, path):
+        # type: (str) -> None
+        head, tail = os.path.split(path)
+
+        # we normalize the head to resolve parent directory symlinks, but not
+        # the tail, since we only want to uninstall symlinks, not their targets
+        path = os.path.join(normalize_path(head), os.path.normcase(tail))
+
+        if not os.path.exists(path):
+            return
+        if self._permitted(path):
+            self.paths.add(path)
+        else:
+            self._refuse.add(path)
+
+        # __pycache__ files can show up after 'installed-files.txt' is created,
+        # due to imports
+        if os.path.splitext(path)[1] == '.py' and uses_pycache:
+            self.add(cache_from_source(path))
+
+    def add_pth(self, pth_file, entry):
+        # type: (str, str) -> None
+        pth_file = normalize_path(pth_file)
+        if self._permitted(pth_file):
+            if pth_file not in self.pth:
+                self.pth[pth_file] = UninstallPthEntries(pth_file)
+            self.pth[pth_file].add(entry)
+        else:
+            self._refuse.add(pth_file)
+
+    def remove(self, auto_confirm=False, verbose=False):
+        # type: (bool, bool) -> None
+        """Remove paths in ``self.paths`` with confirmation (unless
+        ``auto_confirm`` is True)."""
+
+        if not self.paths:
+            logger.info(
+                "Can't uninstall '%s'. No files were found to uninstall.",
+                self.dist.project_name,
+            )
+            return
+
+        dist_name_version = (
+            self.dist.project_name + "-" + self.dist.version
+        )
+        logger.info('Uninstalling %s:', dist_name_version)
+
+        with indent_log():
+            if auto_confirm or self._allowed_to_proceed(verbose):
+                moved = self._moved_paths
+
+                for_rename = compress_for_rename(self.paths)
+
+                for path in sorted(compact(for_rename)):
+                    moved.stash(path)
+                    logger.debug('Removing file or directory %s', path)
+
+                for pth in self.pth.values():
+                    pth.remove()
+
+                logger.info('Successfully uninstalled %s', dist_name_version)
+
+    def _allowed_to_proceed(self, verbose):
+        # type: (bool) -> bool
+        """Display which files would be deleted and prompt for confirmation
+        """
+
+        def _display(msg, paths):
+            # type: (str, Iterable[str]) -> None
+            if not paths:
+                return
+
+            logger.info(msg)
+            with indent_log():
+                for path in sorted(compact(paths)):
+                    logger.info(path)
+
+        if not verbose:
+            will_remove, will_skip = compress_for_output_listing(self.paths)
+        else:
+            # In verbose mode, display all the files that are going to be
+            # deleted.
+            will_remove = set(self.paths)
+            will_skip = set()
+
+        _display('Would remove:', will_remove)
+        _display('Would not remove (might be manually added):', will_skip)
+        _display('Would not remove (outside of prefix):', self._refuse)
+        if verbose:
+            _display('Will actually move:', compress_for_rename(self.paths))
+
+        return ask('Proceed (y/n)? ', ('y', 'n')) == 'y'
+
+    def rollback(self):
+        # type: () -> None
+        """Rollback the changes previously made by remove()."""
+        if not self._moved_paths.can_rollback:
+            logger.error(
+                "Can't roll back %s; was not uninstalled",
+                self.dist.project_name,
+            )
+            return
+        logger.info('Rolling back uninstall of %s', self.dist.project_name)
+        self._moved_paths.rollback()
+        for pth in self.pth.values():
+            pth.rollback()
+
+    def commit(self):
+        # type: () -> None
+        """Remove temporary save dir: rollback will no longer be possible."""
+        self._moved_paths.commit()
+
+    @classmethod
+    def from_dist(cls, dist):
+        # type: (Distribution) -> UninstallPathSet
+        dist_path = normalize_path(dist.location)
+        if not dist_is_local(dist):
+            logger.info(
+                "Not uninstalling %s at %s, outside environment %s",
+                dist.key,
+                dist_path,
+                sys.prefix,
+            )
+            return cls(dist)
+
+        if dist_path in {p for p in {sysconfig.get_path("stdlib"),
+                                     sysconfig.get_path("platstdlib")}
+                         if p}:
+            logger.info(
+                "Not uninstalling %s at %s, as it is in the standard library.",
+                dist.key,
+                dist_path,
+            )
+            return cls(dist)
+
+        paths_to_remove = cls(dist)
+        develop_egg_link = egg_link_path(dist)
+        develop_egg_link_egg_info = '{}.egg-info'.format(
+            pkg_resources.to_filename(dist.project_name))
+        egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info)
+        # Special case for distutils installed package
+        distutils_egg_info = getattr(dist._provider, 'path', None)
+
+        # The order of the uninstall cases matters: given two installs of
+        # the same package, pip needs to uninstall the currently detected
+        # version.
+        if (egg_info_exists and dist.egg_info.endswith('.egg-info') and
+                not dist.egg_info.endswith(develop_egg_link_egg_info)):
+            # if dist.egg_info.endswith(develop_egg_link_egg_info), we
+            # are in fact in the develop_egg_link case
+            paths_to_remove.add(dist.egg_info)
+            if dist.has_metadata('installed-files.txt'):
+                for installed_file in dist.get_metadata(
+                        'installed-files.txt').splitlines():
+                    path = os.path.normpath(
+                        os.path.join(dist.egg_info, installed_file)
+                    )
+                    paths_to_remove.add(path)
+            # FIXME: need a test for this elif block
+            # occurs with --single-version-externally-managed/--record outside
+            # of pip
+            elif dist.has_metadata('top_level.txt'):
+                if dist.has_metadata('namespace_packages.txt'):
+                    namespaces = dist.get_metadata('namespace_packages.txt')
+                else:
+                    namespaces = []
+                for top_level_pkg in [
+                        p for p
+                        in dist.get_metadata('top_level.txt').splitlines()
+                        if p and p not in namespaces]:
+                    path = os.path.join(dist.location, top_level_pkg)
+                    paths_to_remove.add(path)
+                    paths_to_remove.add(path + '.py')
+                    paths_to_remove.add(path + '.pyc')
+                    paths_to_remove.add(path + '.pyo')
+
+        elif distutils_egg_info:
+            raise UninstallationError(
+                "Cannot uninstall {!r}. It is a distutils installed project "
+                "and thus we cannot accurately determine which files belong "
+                "to it which would lead to only a partial uninstall.".format(
+                    dist.project_name,
+                )
+            )
+
+        elif dist.location.endswith('.egg'):
+            # package installed by easy_install
+            # We cannot match on dist.egg_name because it can slightly vary,
+            # e.g. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
+            paths_to_remove.add(dist.location)
+            easy_install_egg = os.path.split(dist.location)[1]
+            easy_install_pth = os.path.join(os.path.dirname(dist.location),
+                                            'easy-install.pth')
+            paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
+
+        elif egg_info_exists and dist.egg_info.endswith('.dist-info'):
+            for path in uninstallation_paths(dist):
+                paths_to_remove.add(path)
+
+        elif develop_egg_link:
+            # develop egg
+            with open(develop_egg_link, 'r') as fh:
+                link_pointer = os.path.normcase(fh.readline().strip())
+            assert (link_pointer == dist.location), (
+                'Egg-link %s does not match installed location of %s '
+                '(at %s)' % (link_pointer, dist.project_name, dist.location)
+            )
+            paths_to_remove.add(develop_egg_link)
+            easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
+                                            'easy-install.pth')
+            paths_to_remove.add_pth(easy_install_pth, dist.location)
+
+        else:
+            logger.debug(
+                'Not sure how to uninstall: %s - Check: %s',
+                dist, dist.location,
+            )
+
+        # find distutils scripts= scripts
+        if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
+            for script in dist.metadata_listdir('scripts'):
+                if dist_in_usersite(dist):
+                    bin_dir = bin_user
+                else:
+                    bin_dir = bin_py
+                paths_to_remove.add(os.path.join(bin_dir, script))
+                if WINDOWS:
+                    paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
+
+        # find console_scripts
+        _scripts_to_remove = []
+        console_scripts = dist.get_entry_map(group='console_scripts')
+        for name in console_scripts.keys():
+            _scripts_to_remove.extend(_script_names(dist, name, False))
+        # find gui_scripts
+        gui_scripts = dist.get_entry_map(group='gui_scripts')
+        for name in gui_scripts.keys():
+            _scripts_to_remove.extend(_script_names(dist, name, True))
+
+        for s in _scripts_to_remove:
+            paths_to_remove.add(s)
+
+        return paths_to_remove
+
+
+class UninstallPthEntries(object):
+    def __init__(self, pth_file):
+        # type: (str) -> None
+        if not os.path.isfile(pth_file):
+            raise UninstallationError(
+                "Cannot remove entries from nonexistent file %s" % pth_file
+            )
+        self.file = pth_file
+        self.entries = set()  # type: Set[str]
+        self._saved_lines = None  # type: Optional[List[bytes]]
+
+    def add(self, entry):
+        # type: (str) -> None
+        entry = os.path.normcase(entry)
+        # On Windows, os.path.normcase converts the entry to use
+        # backslashes.  This is correct for entries that describe absolute
+        # paths outside of site-packages, but all the others use forward
+        # slashes.
+        if WINDOWS and not os.path.splitdrive(entry)[0]:
+            entry = entry.replace('\\', '/')
+        self.entries.add(entry)
+
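+    # Illustrative sketch (not part of pip; paths hypothetical): on Windows,
+    # an entry without a drive letter keeps forward slashes after
+    # normcasing, while one with a drive keeps backslashes:
+    #
+    #     entries.add('./Demo-1.0-py3.7.egg')  # stored as './demo-1.0-py3.7.egg'
+    #     entries.add('C:\\Tools\\Demo')       # stored as 'c:\\tools\\demo'
+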
+    def remove(self):
+        # type: () -> None
+        logger.debug('Removing pth entries from %s:', self.file)
+        with open(self.file, 'rb') as fh:
+            # windows uses '\r\n' with py3k, but uses '\n' with py2.x
+            lines = fh.readlines()
+            self._saved_lines = lines
+        if any(b'\r\n' in line for line in lines):
+            endline = '\r\n'
+        else:
+            endline = '\n'
+        # handle missing trailing newline
+        if lines and not lines[-1].endswith(endline.encode("utf-8")):
+            lines[-1] = lines[-1] + endline.encode("utf-8")
+        for entry in self.entries:
+            try:
+                logger.debug('Removing entry: %s', entry)
+                lines.remove((entry + endline).encode("utf-8"))
+            except ValueError:
+                pass
+        with open(self.file, 'wb') as fh:
+            fh.writelines(lines)
+
+    def rollback(self):
+        # type: () -> bool
+        if self._saved_lines is None:
+            logger.error(
+                'Cannot roll back changes to %s, none were made', self.file
+            )
+            return False
+        logger.debug('Rolling %s back to previous state', self.file)
+        with open(self.file, 'wb') as fh:
+            fh.writelines(self._saved_lines)
+        return True
diff --git a/lib/python3.7/site-packages/pip/_internal/resolve.py b/lib/python3.7/site-packages/pip/_internal/resolve.py
new file mode 100644
index 0000000000000000000000000000000000000000..f49667bb8b7a2aba4ae21bf7764807fa8adad1ca
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/resolve.py
@@ -0,0 +1,393 @@
+"""Dependency Resolution
+
+The dependency resolution in pip is performed as follows:
+
+for top-level requirements:
+    a. only one spec is allowed per project, whether it conflicts or not;
+       otherwise a "double requirement" exception is raised
+    b. they override sub-dependency requirements.
+for sub-dependencies
+    a. "first found, wins" (where the order is breadth first)
+"""
+
+import logging
+from collections import defaultdict
+from itertools import chain
+
+from pip._internal.exceptions import (
+    BestVersionAlreadyInstalled, DistributionNotFound, HashError, HashErrors,
+    UnsupportedPythonVersion,
+)
+from pip._internal.req.constructors import install_req_from_req_string
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import dist_in_usersite, ensure_dir
+from pip._internal.utils.packaging import check_dist_requires_python
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Optional, DefaultDict, List, Set
+    from pip._internal.download import PipSession
+    from pip._internal.req.req_install import InstallRequirement
+    from pip._internal.index import PackageFinder
+    from pip._internal.req.req_set import RequirementSet
+    from pip._internal.operations.prepare import (
+        DistAbstraction, RequirementPreparer
+    )
+    from pip._internal.cache import WheelCache
+
+logger = logging.getLogger(__name__)
+
+
+class Resolver(object):
+    """Resolves which packages need to be installed/uninstalled to perform \
+    the requested operation without breaking the requirements of any package.
+    """
+
+    _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
+
+    def __init__(
+        self,
+        preparer,  # type: RequirementPreparer
+        session,  # type: PipSession
+        finder,  # type: PackageFinder
+        wheel_cache,  # type: Optional[WheelCache]
+        use_user_site,  # type: bool
+        ignore_dependencies,  # type: bool
+        ignore_installed,  # type: bool
+        ignore_requires_python,  # type: bool
+        force_reinstall,  # type: bool
+        isolated,  # type: bool
+        upgrade_strategy,  # type: str
+        use_pep517=None  # type: Optional[bool]
+    ):
+        # type: (...) -> None
+        super(Resolver, self).__init__()
+        assert upgrade_strategy in self._allowed_strategies
+
+        self.preparer = preparer
+        self.finder = finder
+        self.session = session
+
+        # NOTE: This would eventually be replaced with a cache that can give
+        #       information about both sdist and wheels transparently.
+        self.wheel_cache = wheel_cache
+
+        # This is set in resolve
+        self.require_hashes = None  # type: Optional[bool]
+
+        self.upgrade_strategy = upgrade_strategy
+        self.force_reinstall = force_reinstall
+        self.isolated = isolated
+        self.ignore_dependencies = ignore_dependencies
+        self.ignore_installed = ignore_installed
+        self.ignore_requires_python = ignore_requires_python
+        self.use_user_site = use_user_site
+        self.use_pep517 = use_pep517
+
+        self._discovered_dependencies = \
+            defaultdict(list)  # type: DefaultDict[str, List]
+
+    def resolve(self, requirement_set):
+        # type: (RequirementSet) -> None
+        """Resolve what operations need to be done
+
+        As a side-effect of this method, the packages (and their dependencies)
+        are downloaded, unpacked and prepared for installation. This
+        preparation is done by ``pip.operations.prepare``.
+
+        Once PyPI has static dependency metadata available, it would be
+        possible to move the preparation to become a step separated from
+        dependency resolution.
+        """
+        # make the wheelhouse
+        if self.preparer.wheel_download_dir:
+            ensure_dir(self.preparer.wheel_download_dir)
+
+        # If any top-level requirement has a hash specified, enter
+        # hash-checking mode, which requires hashes from all.
+        root_reqs = (
+            requirement_set.unnamed_requirements +
+            list(requirement_set.requirements.values())
+        )
+        self.require_hashes = (
+            requirement_set.require_hashes or
+            any(req.has_hash_options for req in root_reqs)
+        )
+
+        # Display where finder is looking for packages
+        locations = self.finder.get_formatted_locations()
+        if locations:
+            logger.info(locations)
+
+        # Actually prepare the files, and collect any exceptions. Most hash
+        # exceptions cannot be checked ahead of time, because
+        # req.populate_link() needs to be called before we can make decisions
+        # based on link type.
+        discovered_reqs = []  # type: List[InstallRequirement]
+        hash_errors = HashErrors()
+        for req in chain(root_reqs, discovered_reqs):
+            try:
+                discovered_reqs.extend(
+                    self._resolve_one(requirement_set, req)
+                )
+            except HashError as exc:
+                exc.req = req
+                hash_errors.append(exc)
+
+        if hash_errors:
+            raise hash_errors
+
+    def _is_upgrade_allowed(self, req):
+        # type: (InstallRequirement) -> bool
+        if self.upgrade_strategy == "to-satisfy-only":
+            return False
+        elif self.upgrade_strategy == "eager":
+            return True
+        else:
+            assert self.upgrade_strategy == "only-if-needed"
+            return req.is_direct
+
+    def _set_req_to_reinstall(self, req):
+        # type: (InstallRequirement) -> None
+        """
+        Set a requirement to be reinstalled.
+        """
+        # Don't uninstall the conflict if doing a user install and the
+        # conflict is not a user install.
+        if not self.use_user_site or dist_in_usersite(req.satisfied_by):
+            req.conflicts_with = req.satisfied_by
+        req.satisfied_by = None
+
+    # XXX: Stop passing requirement_set for options
+    def _check_skip_installed(self, req_to_install):
+        # type: (InstallRequirement) -> Optional[str]
+        """Check if req_to_install should be skipped.
+
+        This will check if the req is installed, and whether we should upgrade
+        or reinstall it, taking into account all the relevant user options.
+
+        After calling this, req_to_install will have satisfied_by set to
+        None only if it is to be upgraded/reinstalled etc. Any other value
+        will be the dist recording the currently installed distribution
+        that satisfies the requirement.
+
+        Note that for VCS URLs and the like we can't assess skipping in
+        this routine - we simply identify that we need to pull the thing
+        down, and later on it is pulled down and introspected to assess
+        upgrades/reinstalls etc.
+
+        :return: A text reason for why it was skipped, or None.
+        """
+        if self.ignore_installed:
+            return None
+
+        req_to_install.check_if_exists(self.use_user_site)
+        if not req_to_install.satisfied_by:
+            return None
+
+        if self.force_reinstall:
+            self._set_req_to_reinstall(req_to_install)
+            return None
+
+        if not self._is_upgrade_allowed(req_to_install):
+            if self.upgrade_strategy == "only-if-needed":
+                return 'already satisfied, skipping upgrade'
+            return 'already satisfied'
+
+        # Check for the possibility of an upgrade.  For link-based
+        # requirements we have to pull the tree down and inspect it to
+        # assess the version, so that is handled further down.
+        if not req_to_install.link:
+            try:
+                self.finder.find_requirement(req_to_install, upgrade=True)
+            except BestVersionAlreadyInstalled:
+                # Then the best version is installed.
+                return 'already up-to-date'
+            except DistributionNotFound:
+                # No distribution found, so we squash the error.  It will
+                # be raised later when we retry the install.
+                # Why don't we just raise here?
+                pass
+
+        self._set_req_to_reinstall(req_to_install)
+        return None
+
+    def _get_abstract_dist_for(self, req):
+        # type: (InstallRequirement) -> DistAbstraction
+        """Takes a InstallRequirement and returns a single AbstractDist \
+        representing a prepared variant of the same.
+        """
+        assert self.require_hashes is not None, (
+            "require_hashes should have been set in Resolver.resolve()"
+        )
+
+        if req.editable:
+            return self.preparer.prepare_editable_requirement(
+                req, self.require_hashes, self.use_user_site, self.finder,
+            )
+
+        # satisfied_by is only evaluated by calling _check_skip_installed,
+        # so it must be None here.
+        assert req.satisfied_by is None
+        skip_reason = self._check_skip_installed(req)
+
+        if req.satisfied_by:
+            return self.preparer.prepare_installed_requirement(
+                req, self.require_hashes, skip_reason
+            )
+
+        upgrade_allowed = self._is_upgrade_allowed(req)
+        abstract_dist = self.preparer.prepare_linked_requirement(
+            req, self.session, self.finder, upgrade_allowed,
+            self.require_hashes
+        )
+
+        # NOTE
+        # The following portion is for determining if a certain package is
+        # going to be re-installed/upgraded or not and reporting to the user.
+        # This should probably get cleaned up in a future refactor.
+
+        # req.req is only available after unpacking for URL packages, so we
+        # repeat check_if_exists to support uninstall-on-upgrade (#14)
+        if not self.ignore_installed:
+            req.check_if_exists(self.use_user_site)
+
+        if req.satisfied_by:
+            should_modify = (
+                self.upgrade_strategy != "to-satisfy-only" or
+                self.force_reinstall or
+                self.ignore_installed or
+                req.link.scheme == 'file'
+            )
+            if should_modify:
+                self._set_req_to_reinstall(req)
+            else:
+                logger.info(
+                    'Requirement already satisfied (use --upgrade to upgrade):'
+                    ' %s', req,
+                )
+
+        return abstract_dist
+
+    def _resolve_one(
+        self,
+        requirement_set,  # type: RequirementSet
+        req_to_install  # type: InstallRequirement
+    ):
+        # type: (...) -> List[InstallRequirement]
+        """Prepare a single requirements file.
+
+        :return: A list of additional InstallRequirements to also install.
+        """
+        # Tell user what we are doing for this requirement:
+        # obtain (editable), skipping, processing (local url), collecting
+        # (remote url or package name)
+        if req_to_install.constraint or req_to_install.prepared:
+            return []
+
+        req_to_install.prepared = True
+
+        # register tmp src for cleanup in case something goes wrong
+        requirement_set.reqs_to_cleanup.append(req_to_install)
+
+        abstract_dist = self._get_abstract_dist_for(req_to_install)
+
+        # Parse and return dependencies
+        dist = abstract_dist.dist()
+        try:
+            check_dist_requires_python(dist)
+        except UnsupportedPythonVersion as err:
+            if self.ignore_requires_python:
+                logger.warning(err.args[0])
+            else:
+                raise
+
+        more_reqs = []  # type: List[InstallRequirement]
+
+        def add_req(subreq, extras_requested):
+            sub_install_req = install_req_from_req_string(
+                str(subreq),
+                req_to_install,
+                isolated=self.isolated,
+                wheel_cache=self.wheel_cache,
+                use_pep517=self.use_pep517
+            )
+            parent_req_name = req_to_install.name
+            to_scan_again, add_to_parent = requirement_set.add_requirement(
+                sub_install_req,
+                parent_req_name=parent_req_name,
+                extras_requested=extras_requested,
+            )
+            if parent_req_name and add_to_parent:
+                self._discovered_dependencies[parent_req_name].append(
+                    add_to_parent
+                )
+            more_reqs.extend(to_scan_again)
+
+        with indent_log():
+            # We add req_to_install before its dependencies, so that we
+            # can refer to it when adding dependencies.
+            if not requirement_set.has_requirement(req_to_install.name):
+                # 'unnamed' requirements will get added here
+                req_to_install.is_direct = True
+                requirement_set.add_requirement(
+                    req_to_install, parent_req_name=None,
+                )
+
+            if not self.ignore_dependencies:
+                if req_to_install.extras:
+                    logger.debug(
+                        "Installing extra requirements: %r",
+                        ','.join(req_to_install.extras),
+                    )
+                missing_requested = sorted(
+                    set(req_to_install.extras) - set(dist.extras)
+                )
+                for missing in missing_requested:
+                    logger.warning(
+                        '%s does not provide the extra \'%s\'',
+                        dist, missing
+                    )
+
+                available_requested = sorted(
+                    set(dist.extras) & set(req_to_install.extras)
+                )
+                for subreq in dist.requires(available_requested):
+                    add_req(subreq, extras_requested=available_requested)
+
+            if not req_to_install.editable and not req_to_install.satisfied_by:
+                # XXX: --no-install leads this to report 'Successfully
+                # downloaded' for only non-editable reqs, even though we took
+                # action on them.
+                requirement_set.successfully_downloaded.append(req_to_install)
+
+        return more_reqs
+
+    def get_installation_order(self, req_set):
+        # type: (RequirementSet) -> List[InstallRequirement]
+        """Create the installation order.
+
+        The installation order is topological - requirements are installed
+        before the requiring thing. We break cycles at an arbitrary point,
+        and make no other guarantees.
+        """
+        # The current implementation, which we may change at any point,
+        # installs the user-specified things in the order given, except when
+        # dependencies must come earlier to achieve topological order.
+        order = []
+        ordered_reqs = set()  # type: Set[InstallRequirement]
+
+        def schedule(req):
+            if req.satisfied_by or req in ordered_reqs:
+                return
+            if req.constraint:
+                return
+            ordered_reqs.add(req)
+            for dep in self._discovered_dependencies[req.name]:
+                schedule(dep)
+            order.append(req)
+
+        for install_req in req_set.requirements.values():
+            schedule(install_req)
+        return order
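+
+
+# Illustrative sketch (not part of pip; names hypothetical): if "app" was
+# discovered to depend on "lib", get_installation_order() schedules the
+# dependency first:
+#
+#     resolver._discovered_dependencies = {"app": [lib_req]}
+#     resolver.get_installation_order(req_set)  # -> [lib_req, app_req]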
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__init__.py b/lib/python3.7/site-packages/pip/_internal/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5210a8ee0b69db90c7acc7c33d0bfa9132032284
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2df3f3603f7b2456897b27e2ac8cfddeb3e9593
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/appdirs.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/compat.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9981956376ab1513e0f9bef33291c891d0e822e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..841502b1d43a8a12ca23d5b4813a262e3148331e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/deprecation.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/encoding.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/encoding.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f0741f574159ddfaced452ddc1a926630b31fa1d
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/encoding.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..feb1caba695207182f1a7bf3fedb073fad8e8f0b
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/filesystem.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d33664c6153236d51ad006cfcdb6ec418228987
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/glibc.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..697285aa22f706eb84a0eb47ab1c58c844d96a1c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/hashes.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/logging.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/logging.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67b376ce5d437dd8fb302c45de06e0cfaf28fd23
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/logging.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/misc.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/misc.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eea5a76865a35c6e4ae3d1a9bdf8d4ad690390f1
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/misc.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/models.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/models.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6536a33ef35e9ca5060d8a05b1ef49a945d8d9eb
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/models.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/outdated.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/outdated.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64a2109c03c56148916b5eef78b967d4e3ac9f7e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/outdated.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..966be25e88176e96e5e384b93f903e43a95e8774
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/packaging.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/setuptools_build.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/setuptools_build.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47fb7a47572caebaab77c6ad10de316edd08714a
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/setuptools_build.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9dda2350f15e04a76c335ac909ee347b95d81e10
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/temp_dir.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/typing.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/typing.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..06afa2c2901aec0b24b72c9dbe6726edf04e47f0
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/typing.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/ui.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/ui.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a0e8dcc6f349bc7a791fca65c3765323e8428f28
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/utils/__pycache__/ui.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/appdirs.py b/lib/python3.7/site-packages/pip/_internal/utils/appdirs.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb2611104c8544840667e676fe0d42890e0bff05
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/appdirs.py
@@ -0,0 +1,268 @@
+"""
+This code was taken from https://github.com/ActiveState/appdirs and modified
+to suit our purposes.
+"""
+from __future__ import absolute_import
+
+import os
+import sys
+
+from pip._vendor.six import PY2, text_type
+
+from pip._internal.utils.compat import WINDOWS, expanduser
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import List
+
+
+def user_cache_dir(appname):
+    # type: (str) -> str
+    r"""
+    Return full path to the user-specific cache dir for this application.
+
+        "appname" is the name of application.
+
+    Typical user cache directories are:
+        macOS:      ~/Library/Caches/<AppName>
+        Unix:       ~/.cache/<AppName> (XDG default)
+        Windows:    C:\Users\<username>\AppData\Local\<AppName>\Cache
+
+    On Windows the only suggestion in the MSDN docs is that local settings go
+    in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the
+    non-roaming app data dir (the default returned by `user_data_dir`). Apps
+    typically put cache data somewhere *under* the given dir here. Some
+    examples:
+        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
+        ...\Acme\SuperApp\Cache\1.0
+
+    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
+    """
+    if WINDOWS:
+        # Get the base path
+        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
+
+        # When using Python 2, return paths as bytes on Windows like we do on
+        # other operating systems. See helper function docs for more details.
+        if PY2 and isinstance(path, text_type):
+            path = _win_path_to_bytes(path)
+
+        # Add our app name and Cache directory to it
+        path = os.path.join(path, appname, "Cache")
+    elif sys.platform == "darwin":
+        # Get the base path
+        path = expanduser("~/Library/Caches")
+
+        # Add our app name to it
+        path = os.path.join(path, appname)
+    else:
+        # Get the base path
+        path = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache"))
+
+        # Add our app name to it
+        path = os.path.join(path, appname)
+
+    return path
+
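+# Hedged examples of the mapping described above (username hypothetical):
+#
+#     user_cache_dir("pip")
+#     # Linux:   /home/alice/.cache/pip
+#     # macOS:   /Users/alice/Library/Caches/pip
+#     # Windows: C:\Users\alice\AppData\Local\pip\Cache
+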
+
+def user_data_dir(appname, roaming=False):
+    # type: (str, bool) -> str
+    r"""
+    Return full path to the user-specific data dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "roaming" (boolean, default False) can be set True to use the Windows
+            roaming appdata directory. That means that for users on a Windows
+            network setup for roaming profiles, this user data will be
+            sync'd on login. See
+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+            for a discussion of issues.
+
+    Typical user data directories are:
+        macOS:                  ~/Library/Application Support/<AppName>
+                                if it exists, else ~/.config/<AppName>
+        Unix:                   ~/.local/share/<AppName>    # or in
+                                $XDG_DATA_HOME, if defined
+        Win XP (not roaming):   C:\Documents and Settings\<username>\ ...
+                                ...Application Data\<AppName>
+        Win XP (roaming):       C:\Documents and Settings\<username>\Local ...
+                                ...Settings\Application Data\<AppName>
+        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppName>
+        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppName>
+
+    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
+    That means, by default "~/.local/share/<AppName>".
+    """
+    if WINDOWS:
+        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
+        path = os.path.join(os.path.normpath(_get_win_folder(const)), appname)
+    elif sys.platform == "darwin":
+        path = os.path.join(
+            expanduser('~/Library/Application Support/'),
+            appname,
+        ) if os.path.isdir(os.path.join(
+            expanduser('~/Library/Application Support/'),
+            appname,
+        )
+        ) else os.path.join(
+            expanduser('~/.config/'),
+            appname,
+        )
+    else:
+        path = os.path.join(
+            os.getenv('XDG_DATA_HOME', expanduser("~/.local/share")),
+            appname,
+        )
+
+    return path
+
+
+def user_config_dir(appname, roaming=True):
+    # type: (str, bool) -> str
+    """Return full path to the user-specific config dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "roaming" (boolean, default True) can be set False to not use the
+            Windows roaming appdata directory. That means that for users on a
+            Windows network setup for roaming profiles, this user data will be
+            sync'd on login. See
+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+            for a discussion of issues.
+
+    Typical user config directories are:
+        macOS:                  same as user_data_dir
+        Unix:                   ~/.config/<AppName>
+        Win *:                  same as user_data_dir
+
+    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
+    That means, by default "~/.config/<AppName>".
+    """
+    if WINDOWS:
+        path = user_data_dir(appname, roaming=roaming)
+    elif sys.platform == "darwin":
+        path = user_data_dir(appname)
+    else:
+        path = os.getenv('XDG_CONFIG_HOME', expanduser("~/.config"))
+        path = os.path.join(path, appname)
+
+    return path
+
+
+# for the discussion regarding site_config_dirs locations
+# see <https://github.com/pypa/pip/issues/1733>
+def site_config_dirs(appname):
+    # type: (str) -> List[str]
+    r"""Return a list of potential user-shared config dirs for this application.
+
+        "appname" is the name of application.
+
+    Typical user config directories are:
+        macOS:      /Library/Application Support/<AppName>/
+        Unix:       /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in
+                    $XDG_CONFIG_DIRS
+        Win XP:     C:\Documents and Settings\All Users\Application ...
+                    ...Data\<AppName>\
+        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory
+                    on Vista.)
+        Win 7:      Hidden, but writeable on Win 7:
+                    C:\ProgramData\<AppName>\
+    """
+    if WINDOWS:
+        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
+        pathlist = [os.path.join(path, appname)]
+    elif sys.platform == 'darwin':
+        pathlist = [os.path.join('/Library/Application Support', appname)]
+    else:
+        # try looking in $XDG_CONFIG_DIRS
+        xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
+        if xdg_config_dirs:
+            pathlist = [
+                os.path.join(expanduser(x), appname)
+                for x in xdg_config_dirs.split(os.pathsep)
+            ]
+        else:
+            pathlist = []
+
+        # always look in /etc directly as well
+        pathlist.append('/etc')
+
+    return pathlist
+
+
+# -- Windows support functions --
+
+def _get_win_folder_from_registry(csidl_name):
+    # type: (str) -> str
+    """
+    This is a fallback technique at best. I'm not sure if using the
+    registry for this guarantees us the correct answer for all CSIDL_*
+    names.
+    """
+    import _winreg
+
+    shell_folder_name = {
+        "CSIDL_APPDATA": "AppData",
+        "CSIDL_COMMON_APPDATA": "Common AppData",
+        "CSIDL_LOCAL_APPDATA": "Local AppData",
+    }[csidl_name]
+
+    key = _winreg.OpenKey(
+        _winreg.HKEY_CURRENT_USER,
+        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
+    )
+    directory, _type = _winreg.QueryValueEx(key, shell_folder_name)
+    return directory
+
+
+def _get_win_folder_with_ctypes(csidl_name):
+    # type: (str) -> str
+    csidl_const = {
+        "CSIDL_APPDATA": 26,
+        "CSIDL_COMMON_APPDATA": 35,
+        "CSIDL_LOCAL_APPDATA": 28,
+    }[csidl_name]
+
+    buf = ctypes.create_unicode_buffer(1024)
+    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+    # Downgrade to the short path name if it has high-bit chars. See
+    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+    has_high_char = False
+    for c in buf:
+        if ord(c) > 255:
+            has_high_char = True
+            break
+    if has_high_char:
+        buf2 = ctypes.create_unicode_buffer(1024)
+        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+            buf = buf2
+
+    return buf.value
+
+
+if WINDOWS:
+    try:
+        import ctypes
+        _get_win_folder = _get_win_folder_with_ctypes
+    except ImportError:
+        _get_win_folder = _get_win_folder_from_registry
+
+
+def _win_path_to_bytes(path):
+    """Encode Windows paths to bytes. Only used on Python 2.
+
+    Motivation is to be consistent with other operating systems where paths
+    are also returned as bytes. This avoids problems mixing bytes and Unicode
+    elsewhere in the codebase. For more details and discussion see
+    <https://github.com/pypa/pip/issues/3463>.
+
+    If encoding using ASCII and MBCS fails, return the original Unicode path.
+    """
+    for encoding in ('ASCII', 'MBCS'):
+        try:
+            return path.encode(encoding)
+        except (UnicodeEncodeError, LookupError):
+            pass
+    return path
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/compat.py b/lib/python3.7/site-packages/pip/_internal/utils/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..845436e48943685060beda47c7ac8bf08b82ad66
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/compat.py
@@ -0,0 +1,264 @@
+"""Stuff that differs in different Python versions and platform
+distributions."""
+from __future__ import absolute_import, division
+
+import codecs
+import locale
+import logging
+import os
+import shutil
+import sys
+
+from pip._vendor.six import text_type
+
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Tuple, Text
+
+try:
+    import ipaddress
+except ImportError:
+    try:
+        from pip._vendor import ipaddress  # type: ignore
+    except ImportError:
+        import ipaddr as ipaddress  # type: ignore
+        ipaddress.ip_address = ipaddress.IPAddress  # type: ignore
+        ipaddress.ip_network = ipaddress.IPNetwork  # type: ignore
+
+
+__all__ = [
+    "ipaddress", "uses_pycache", "console_to_str", "native_str",
+    "get_path_uid", "stdlib_pkgs", "WINDOWS", "samefile", "get_terminal_size",
+    "get_extension_suffixes",
+]
+
+
+logger = logging.getLogger(__name__)
+
+if sys.version_info >= (3, 4):
+    uses_pycache = True
+    from importlib.util import cache_from_source
+else:
+    import imp
+
+    try:
+        cache_from_source = imp.cache_from_source  # type: ignore
+    except AttributeError:
+        # does not use __pycache__
+        cache_from_source = None
+
+    uses_pycache = cache_from_source is not None
+
+
+if sys.version_info >= (3, 5):
+    backslashreplace_decode = "backslashreplace"
+else:
+    # In version 3.4 and older, backslashreplace exists
+    # but does not support use for decoding.
+    # We implement our own replace handler for this
+    # situation, so that we can consistently use
+    # backslash replacement for all versions.
+    def backslashreplace_decode_fn(err):
+        raw_bytes = (err.object[i] for i in range(err.start, err.end))
+        if sys.version_info[0] == 2:
+            # Python 2 gave us characters - convert to numeric bytes
+            raw_bytes = (ord(b) for b in raw_bytes)
+        return u"".join(u"\\x%x" % c for c in raw_bytes), err.end
+    codecs.register_error(
+        "backslashreplace_decode",
+        backslashreplace_decode_fn,
+    )
+    backslashreplace_decode = "backslashreplace_decode"
+
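+# Illustrative sketch (not part of pip): either branch above yields an
+# error-handler name usable with bytes.decode():
+#
+#     b"caf\xe9".decode("ascii", errors=backslashreplace_decode)
+#     # -> 'caf\\xe9'
+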
+
+def console_to_str(data):
+    # type: (bytes) -> Text
+    """Return a string, safe for output, of subprocess output.
+
+    We assume the data is in the locale preferred encoding.
+    If it won't decode properly, we warn the user but decode as
+    best we can.
+
+    We also ensure that the output can be safely written to
+    standard output without encoding errors.
+    """
+
+    # First, get the encoding we assume. This is the preferred
+    # encoding for the locale, unless that is not found, or
+    # it is ASCII, in which case assume UTF-8
+    encoding = locale.getpreferredencoding()
+    if (not encoding) or codecs.lookup(encoding).name == "ascii":
+        encoding = "utf-8"
+
+    # Now try to decode the data - if we fail, warn the user and
+    # decode with replacement.
+    try:
+        decoded_data = data.decode(encoding)
+    except UnicodeDecodeError:
+        logger.warning(
+            "Subprocess output does not appear to be encoded as %s",
+            encoding,
+        )
+        decoded_data = data.decode(encoding, errors=backslashreplace_decode)
+
+    # Make sure we can print the output, by encoding it to the output
+    # encoding with replacement of unencodable characters, and then
+    # decoding again.
+    # We use stderr's encoding because it's less likely to be
+    # redirected and if we don't find an encoding we skip this
+    # step (on the assumption that output is wrapped by something
+    # that won't fail).
+    # The double getattr is to deal with the possibility that we're
+    # being called in a situation where sys.__stderr__ doesn't exist,
+    # or doesn't have an encoding attribute. Neither of these cases
+    # should occur in normal pip use, but there's no harm in checking
+    # in case people use pip in (unsupported) unusual situations.
+    output_encoding = getattr(getattr(sys, "__stderr__", None),
+                              "encoding", None)
+
+    if output_encoding:
+        output_encoded = decoded_data.encode(
+            output_encoding,
+            errors="backslashreplace"
+        )
+        decoded_data = output_encoded.decode(output_encoding)
+
+    return decoded_data
+
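+# Illustrative sketch (not part of pip; assumes a UTF-8 locale): clean
+# output decodes as-is, while undecodable bytes become escapes rather
+# than raising:
+#
+#     console_to_str(b"ok\n")        # -> 'ok\n'
+#     console_to_str(b"bad \xff\n")  # -> 'bad \\xff\n' (plus a warning)
+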
+
+if sys.version_info >= (3,):
+    def native_str(s, replace=False):
+        # type: (str, bool) -> str
+        if isinstance(s, bytes):
+            return s.decode('utf-8', 'replace' if replace else 'strict')
+        return s
+
+else:
+    def native_str(s, replace=False):
+        # type: (str, bool) -> str
+        # Replace is ignored -- unicode to UTF-8 can't fail
+        if isinstance(s, text_type):
+            return s.encode('utf-8')
+        return s
+
+
+def get_path_uid(path):
+    # type: (str) -> int
+    """
+    Return path's uid.
+
+    Does not follow symlinks:
+        https://github.com/pypa/pip/pull/935#discussion_r5307003
+
+    Placed this function in compat due to differences on AIX and
+    Jython that should eventually go away.
+
+    :raises OSError: When path is a symlink or can't be read.
+    """
+    if hasattr(os, 'O_NOFOLLOW'):
+        fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
+        file_uid = os.fstat(fd).st_uid
+        os.close(fd)
+    else:  # AIX and Jython
+        # WARNING: time-of-check/time-of-use race, but the best we can do
+        # without O_NOFOLLOW
+        if not os.path.islink(path):
+            # older versions of Jython don't have `os.fstat`
+            file_uid = os.stat(path).st_uid
+        else:
+            # raise OSError for parity with os.O_NOFOLLOW above
+            raise OSError(
+                "%s is a symlink; Will not return uid for symlinks" % path
+            )
+    return file_uid
+
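+# Illustrative sketch (not part of pip; paths hypothetical):
+#
+#     get_path_uid('/tmp/owned_by_me')   # -> e.g. 1000
+#     get_path_uid('/tmp/some_symlink')  # raises OSError
+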
+
+if sys.version_info >= (3, 4):
+    from importlib.machinery import EXTENSION_SUFFIXES
+
+    def get_extension_suffixes():
+        return EXTENSION_SUFFIXES
+else:
+    from imp import get_suffixes
+
+    def get_extension_suffixes():
+        return [suffix[0] for suffix in get_suffixes()]
+
+
+def expanduser(path):
+    # type: (str) -> str
+    """
+    Expand ~ and ~user constructions.
+
+    Includes a workaround for https://bugs.python.org/issue14768
+    """
+    expanded = os.path.expanduser(path)
+    if path.startswith('~/') and expanded.startswith('//'):
+        expanded = expanded[1:]
+    return expanded
+
+
+# Packages in the stdlib that may have installation metadata, but should not
+# be considered 'installed'.  This could theoretically be determined from
+# dist.location (py27: `sysconfig.get_paths()['stdlib']`,
+# py26: sysconfig.get_config_vars('LIBDEST')), but platform variation may
+# make that ineffective, so we hard-code the list.
+stdlib_pkgs = {"python", "wsgiref", "argparse"}
+
+
+# windows detection, covers cpython and ironpython
+WINDOWS = (sys.platform.startswith("win") or
+           (sys.platform == 'cli' and os.name == 'nt'))
+
+
+def samefile(file1, file2):
+    # type: (str, str) -> bool
+    """Provide an alternative for os.path.samefile on Windows/Python2"""
+    if hasattr(os.path, 'samefile'):
+        return os.path.samefile(file1, file2)
+    else:
+        path1 = os.path.normcase(os.path.abspath(file1))
+        path2 = os.path.normcase(os.path.abspath(file2))
+        return path1 == path2
+
+
+if hasattr(shutil, 'get_terminal_size'):
+    def get_terminal_size():
+        # type: () -> Tuple[int, int]
+        """
+        Returns a tuple (x, y) representing the width (x) and the height (y)
+        in characters of the terminal window.
+        """
+        return tuple(shutil.get_terminal_size())  # type: ignore
+else:
+    def get_terminal_size():
+        # type: () -> Tuple[int, int]
+        """
+        Returns a tuple (x, y) representing the width (x) and the height (y)
+        in characters of the terminal window.
+        """
+        def ioctl_GWINSZ(fd):
+            try:
+                import fcntl
+                import termios
+                import struct
+                cr = struct.unpack_from(
+                    'hh',
+                    fcntl.ioctl(fd, termios.TIOCGWINSZ, '12345678')
+                )
+            except Exception:
+                return None
+            if cr == (0, 0):
+                return None
+            return cr
+        cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
+        if not cr:
+            try:
+                fd = os.open(os.ctermid(), os.O_RDONLY)
+                cr = ioctl_GWINSZ(fd)
+                os.close(fd)
+            except Exception:
+                pass
+        if not cr:
+            cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
+        return int(cr[1]), int(cr[0])
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/deprecation.py b/lib/python3.7/site-packages/pip/_internal/utils/deprecation.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c896f8cee5eaacd9447c43ab8191d80aa36895b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/deprecation.py
@@ -0,0 +1,93 @@
+"""
+A module that implements tooling to enable easy warnings about deprecations.
+"""
+from __future__ import absolute_import
+
+import logging
+import warnings
+
+from pip._vendor.packaging.version import parse
+
+from pip import __version__ as current_version
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Any, Optional
+
+
+DEPRECATION_MSG_PREFIX = "DEPRECATION: "
+
+
+class PipDeprecationWarning(Warning):
+    pass
+
+
+_original_showwarning = None  # type: Any
+
+
+# Warnings <-> Logging Integration
+def _showwarning(message, category, filename, lineno, file=None, line=None):
+    if file is not None:
+        if _original_showwarning is not None:
+            _original_showwarning(
+                message, category, filename, lineno, file, line,
+            )
+    elif issubclass(category, PipDeprecationWarning):
+        # We use a specially named logger which will handle all of the
+        # deprecation messages for pip.
+        logger = logging.getLogger("pip._internal.deprecations")
+        logger.warning(message)
+    else:
+        _original_showwarning(
+            message, category, filename, lineno, file, line,
+        )
+
+
+def install_warning_logger():
+    # type: () -> None
+    # Enable our Deprecation Warnings
+    warnings.simplefilter("default", PipDeprecationWarning, append=True)
+
+    global _original_showwarning
+
+    if _original_showwarning is None:
+        _original_showwarning = warnings.showwarning
+        warnings.showwarning = _showwarning
+
+
+def deprecated(reason, replacement, gone_in, issue=None):
+    # type: (str, Optional[str], Optional[str], Optional[int]) -> None
+    """Helper to deprecate existing functionality.
+
+    reason:
+        Textual reason shown to the user about why this functionality has
+        been deprecated.
+    replacement:
+        Textual suggestion shown to the user about what alternative
+        functionality they can use.
+    gone_in:
+        The version of pip in which this functionality should be removed.
+        Raises an error if pip's current version is greater than or equal to
+        this.
+    issue:
+        Issue number on the tracker that would serve as a useful place for
+        users to find related discussion and provide feedback.
+
+    Always pass replacement, gone_in and issue as keyword arguments for clarity
+    at the call site.
+    """
+
+    # Construct a nice message.
+    # This is purposely eagerly formatted as we want it to appear as if someone
+    # typed this entire message out.
+    message = DEPRECATION_MSG_PREFIX + reason
+    if replacement is not None:
+        message += " A possible replacement is {}.".format(replacement)
+    if issue is not None:
+        url = "https://github.com/pypa/pip/issues/" + str(issue)
+        message += " You can find discussion regarding this at {}.".format(url)
+
+    # Raise as an error if it has to be removed.
+    if gone_in is not None and parse(current_version) >= parse(gone_in):
+        raise PipDeprecationWarning(message)
+    warnings.warn(message, category=PipDeprecationWarning, stacklevel=2)
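
A usage sketch for the helper above; the option name, version and issue number are made up for illustration. install_warning_logger() routes PipDeprecationWarning through the "pip._internal.deprecations" logger, and deprecated() warns, or raises once the running pip reaches gone_in:

    from pip._internal.utils import deprecation

    deprecation.install_warning_logger()

    deprecation.deprecated(
        reason="The --frobnicate option does nothing anymore.",  # hypothetical
        replacement="the default behaviour",
        gone_in="99.0",  # far-future version, so this call only warns
        issue=1234,      # hypothetical tracker issue
    )
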
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/encoding.py b/lib/python3.7/site-packages/pip/_internal/utils/encoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..30139f2e591c89659dd87c9b3fbf1427285418f6
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/encoding.py
@@ -0,0 +1,39 @@
+import codecs
+import locale
+import re
+import sys
+
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import List, Tuple, Text
+
+BOMS = [
+    (codecs.BOM_UTF8, 'utf-8'),
+    (codecs.BOM_UTF16, 'utf-16'),
+    (codecs.BOM_UTF16_BE, 'utf-16-be'),
+    (codecs.BOM_UTF16_LE, 'utf-16-le'),
+    (codecs.BOM_UTF32, 'utf-32'),
+    (codecs.BOM_UTF32_BE, 'utf-32-be'),
+    (codecs.BOM_UTF32_LE, 'utf-32-le'),
+]  # type: List[Tuple[bytes, Text]]
+
+ENCODING_RE = re.compile(br'coding[:=]\s*([-\w.]+)')
+
+
+def auto_decode(data):
+    # type: (bytes) -> Text
+    """Check a bytes string for a BOM to correctly detect the encoding
+
+    Falls back to locale.getpreferredencoding(False), like open() does on
+    Python 3."""
+    for bom, encoding in BOMS:
+        if data.startswith(bom):
+            return data[len(bom):].decode(encoding)
+    # Let's check the first two lines, as in PEP 263
+    for line in data.split(b'\n')[:2]:
+        if line[0:1] == b'#' and ENCODING_RE.search(line):
+            encoding = ENCODING_RE.search(line).groups()[0].decode('ascii')
+            return data.decode(encoding)
+    return data.decode(
+        locale.getpreferredencoding(False) or sys.getdefaultencoding(),
+    )
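
A quick sketch of auto_decode()'s two detection paths, using made-up byte strings: a UTF-8 BOM and a PEP 263 coding cookie.

    import codecs

    from pip._internal.utils.encoding import auto_decode

    # Path 1: a BOM wins immediately and is stripped from the result.
    bom_data = codecs.BOM_UTF8 + u'x = 1'.encode('utf-8')
    assert auto_decode(bom_data) == u'x = 1'

    # Path 2: no BOM, so the cookie on the first line picks the codec.
    cookie_data = b'# -*- coding: latin-1 -*-\nname = "caf\xe9"\n'
    assert u'caf\xe9' in auto_decode(cookie_data)
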
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/filesystem.py b/lib/python3.7/site-packages/pip/_internal/utils/filesystem.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e6b0338581a170391c2f86c4ef26fbb554fc4c5
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/filesystem.py
@@ -0,0 +1,30 @@
+import os
+import os.path
+
+from pip._internal.utils.compat import get_path_uid
+
+
+def check_path_owner(path):
+    # type: (str) -> bool
+    # If we don't have a way to check the effective uid of this process, then
+    # we'll just assume that we own the directory.
+    if not hasattr(os, "geteuid"):
+        return True
+
+    previous = None
+    while path != previous:
+        if os.path.lexists(path):
+            # Check if path is writable by current user.
+            if os.geteuid() == 0:
+                # Special handling for root user in order to handle properly
+                # cases where users use sudo without -H flag.
+                try:
+                    path_uid = get_path_uid(path)
+                except OSError:
+                    return False
+                return path_uid == 0
+            else:
+                return os.access(path, os.W_OK)
+        else:
+            previous, path = path, os.path.dirname(path)
+    return False  # assume we don't own the path
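
check_path_owner() walks upward until it finds an existing ancestor, so it gives the same answer for a directory and for a child of it that does not exist yet. A small sketch with hypothetical paths:

    import os
    import tempfile

    from pip._internal.utils.filesystem import check_path_owner

    existing = tempfile.mkdtemp()
    not_yet_created = os.path.join(existing, 'cache', 'wheels')

    # The nearest existing ancestor decides both answers.
    assert check_path_owner(not_yet_created) == check_path_owner(existing)
    print(check_path_owner(not_yet_created))  # True for a fresh temp dir
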
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/glibc.py b/lib/python3.7/site-packages/pip/_internal/utils/glibc.py
new file mode 100644
index 0000000000000000000000000000000000000000..5bea655ebcc8c4634a7bfbdf2ea05cf09051bc83
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/glibc.py
@@ -0,0 +1,93 @@
+from __future__ import absolute_import
+
+import ctypes
+import re
+import warnings
+
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Optional, Tuple
+
+
+def glibc_version_string():
+    # type: () -> Optional[str]
+    "Returns glibc version string, or None if not using glibc."
+
+    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+    # manpage says, "If filename is NULL, then the returned handle is for the
+    # main program". This way we can let the linker do the work to figure out
+    # which libc our process is actually using.
+    process_namespace = ctypes.CDLL(None)
+    try:
+        gnu_get_libc_version = process_namespace.gnu_get_libc_version
+    except AttributeError:
+        # Symbol doesn't exist -> therefore, we are not linked to
+        # glibc.
+        return None
+
+    # Call gnu_get_libc_version, which returns a string like "2.5"
+    gnu_get_libc_version.restype = ctypes.c_char_p
+    version_str = gnu_get_libc_version()
+    # py2 / py3 compatibility:
+    if not isinstance(version_str, str):
+        version_str = version_str.decode("ascii")
+
+    return version_str
+
+
+# Separated out from have_compatible_glibc for easier unit testing
+def check_glibc_version(version_str, required_major, minimum_minor):
+    # type: (str, int, int) -> bool
+    # Parse string and check against requested version.
+    #
+    # We use a regexp instead of str.split because we want to discard any
+    # random junk that might come after the minor version -- this might happen
+    # in patched/forked versions of glibc (e.g. Linaro's version of glibc
+    # uses version strings like "2.20-2014.11"). See gh-3588.
+    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+    if not m:
+        warnings.warn("Expected glibc version with 2 components major.minor,"
+                      " got: %s" % version_str, RuntimeWarning)
+        return False
+    return (int(m.group("major")) == required_major and
+            int(m.group("minor")) >= minimum_minor)
+
+
+def have_compatible_glibc(required_major, minimum_minor):
+    # type: (int, int) -> bool
+    version_str = glibc_version_string()  # type: Optional[str]
+    if version_str is None:
+        return False
+    return check_glibc_version(version_str, required_major, minimum_minor)
+
+
+# platform.libc_ver regularly returns completely nonsensical glibc
+# versions. E.g. on my computer, platform says:
+#
+#   ~$ python2.7 -c 'import platform; print(platform.libc_ver())'
+#   ('glibc', '2.7')
+#   ~$ python3.5 -c 'import platform; print(platform.libc_ver())'
+#   ('glibc', '2.9')
+#
+# But the truth is:
+#
+#   ~$ ldd --version
+#   ldd (Debian GLIBC 2.22-11) 2.22
+#
+# This is unfortunate, because it means that the linehaul data on libc
+# versions that was generated by pip 8.1.2 and earlier is useless and
+# misleading. Solution: instead of using platform, use our code that actually
+# works.
+def libc_ver():
+    # type: () -> Tuple[str, str]
+    """Try to determine the glibc version
+
+    Returns a tuple of strings (lib, version) which default to empty strings
+    in case the lookup fails.
+    """
+    glibc_version = glibc_version_string()
+    if glibc_version is None:
+        return ("", "")
+    else:
+        return ("glibc", glibc_version)
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/hashes.py b/lib/python3.7/site-packages/pip/_internal/utils/hashes.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7142069fab65e98bda1bc2f86ddf7902c5911e3
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/hashes.py
@@ -0,0 +1,115 @@
+from __future__ import absolute_import
+
+import hashlib
+
+from pip._vendor.six import iteritems, iterkeys, itervalues
+
+from pip._internal.exceptions import (
+    HashMismatch, HashMissing, InstallationError,
+)
+from pip._internal.utils.misc import read_chunks
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import (
+        Dict, List, BinaryIO, NoReturn, Iterator
+    )
+    from pip._vendor.six import PY3
+    if PY3:
+        from hashlib import _Hash
+    else:
+        from hashlib import _hash as _Hash
+
+
+# The recommended hash algo of the moment. Change this whenever the state of
+# the art changes; it won't hurt backward compatibility.
+FAVORITE_HASH = 'sha256'
+
+
+# Names of hashlib algorithms allowed by the --hash option and ``pip hash``.
+# Currently, those are the ones at least as collision-resistant as sha256.
+STRONG_HASHES = ['sha256', 'sha384', 'sha512']
+
+
+class Hashes(object):
+    """A wrapper that builds multiple hashes at once and checks them against
+    known-good values
+
+    """
+    def __init__(self, hashes=None):
+        # type: (Dict[str, List[str]]) -> None
+        """
+        :param hashes: A dict of algorithm names pointing to lists of allowed
+            hex digests
+        """
+        self._allowed = {} if hashes is None else hashes
+
+    def check_against_chunks(self, chunks):
+        # type: (Iterator[bytes]) -> None
+        """Check good hashes against ones built from iterable of chunks of
+        data.
+
+        Raise HashMismatch if none match.
+
+        """
+        gots = {}
+        for hash_name in iterkeys(self._allowed):
+            try:
+                gots[hash_name] = hashlib.new(hash_name)
+            except (ValueError, TypeError):
+                raise InstallationError('Unknown hash name: %s' % hash_name)
+
+        for chunk in chunks:
+            for hash in itervalues(gots):
+                hash.update(chunk)
+
+        for hash_name, got in iteritems(gots):
+            if got.hexdigest() in self._allowed[hash_name]:
+                return
+        self._raise(gots)
+
+    def _raise(self, gots):
+        # type: (Dict[str, _Hash]) -> NoReturn
+        raise HashMismatch(self._allowed, gots)
+
+    def check_against_file(self, file):
+        # type: (BinaryIO) -> None
+        """Check good hashes against a file-like object
+
+        Raise HashMismatch if none match.
+
+        """
+        return self.check_against_chunks(read_chunks(file))
+
+    def check_against_path(self, path):
+        # type: (str) -> None
+        with open(path, 'rb') as file:
+            return self.check_against_file(file)
+
+    def __nonzero__(self):
+        # type: () -> bool
+        """Return whether I know any known-good hashes."""
+        return bool(self._allowed)
+
+    def __bool__(self):
+        # type: () -> bool
+        return self.__nonzero__()
+
+
+class MissingHashes(Hashes):
+    """A workalike for Hashes used when we're missing a hash for a requirement
+
+    It computes the actual hash of the requirement and raises a HashMissing
+    exception showing it to the user.
+
+    """
+    def __init__(self):
+        # type: () -> None
+        """Don't offer the ``hashes`` kwarg."""
+        # Pass our favorite hash in to generate a "gotten hash". With the
+        # empty list, it will never match, so an error will always raise.
+        super(MissingHashes, self).__init__(hashes={FAVORITE_HASH: []})
+
+    def _raise(self, gots):
+        # type: (Dict[str, _Hash]) -> NoReturn
+        raise HashMissing(gots[FAVORITE_HASH].hexdigest())
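
A usage sketch for Hashes with a made-up payload: any single matching digest passes, and no match raises HashMismatch via _raise().

    import hashlib

    from pip._internal.exceptions import HashMismatch
    from pip._internal.utils.hashes import FAVORITE_HASH, Hashes

    payload = b'example payload'
    good = hashlib.sha256(payload).hexdigest()

    # One matching digest is enough; this returns silently.
    Hashes({FAVORITE_HASH: [good]}).check_against_chunks(iter([payload]))

    # A wrong digest raises HashMismatch.
    try:
        Hashes({FAVORITE_HASH: ['0' * 64]}).check_against_chunks(
            iter([payload]))
    except HashMismatch:
        print('mismatch detected, as expected')
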
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/logging.py b/lib/python3.7/site-packages/pip/_internal/utils/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..a28e88cac3545de6836e63fc7703bf69da78d034
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/logging.py
@@ -0,0 +1,371 @@
+from __future__ import absolute_import
+
+import contextlib
+import errno
+import logging
+import logging.config
+import logging.handlers
+import os
+import sys
+from logging import Filter
+
+from pip._vendor.six import PY2
+
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.deprecation import DEPRECATION_MSG_PREFIX
+from pip._internal.utils.misc import ensure_dir, subprocess_logger
+
+try:
+    import threading
+except ImportError:
+    import dummy_threading as threading  # type: ignore
+
+
+try:
+    from pip._vendor import colorama
+# Lots of different errors can come from this, including SystemError and
+# ImportError.
+except Exception:
+    colorama = None
+
+
+_log_state = threading.local()
+_log_state.indentation = 0
+
+
+class BrokenStdoutLoggingError(Exception):
+    """
+    Raised if BrokenPipeError occurs for the stdout stream while logging.
+    """
+    pass
+
+
+# BrokenPipeError does not exist in Python 2 and, in addition, manifests
+# differently in Windows and non-Windows.
+if WINDOWS:
+    # In Windows, a broken pipe can show up as EINVAL rather than EPIPE:
+    # https://bugs.python.org/issue19612
+    # https://bugs.python.org/issue30418
+    if PY2:
+        def _is_broken_pipe_error(exc_class, exc):
+            """See the docstring for non-Windows Python 3 below."""
+            return (exc_class is IOError and
+                    exc.errno in (errno.EINVAL, errno.EPIPE))
+    else:
+        # In Windows, a broken pipe IOError became OSError in Python 3.
+        def _is_broken_pipe_error(exc_class, exc):
+            """See the docstring for non-Windows Python 3 below."""
+            return ((exc_class is BrokenPipeError) or  # noqa: F821
+                    (exc_class is OSError and
+                     exc.errno in (errno.EINVAL, errno.EPIPE)))
+elif PY2:
+    def _is_broken_pipe_error(exc_class, exc):
+        """See the docstring for non-Windows Python 3 below."""
+        return (exc_class is IOError and exc.errno == errno.EPIPE)
+else:
+    # Then we are in the non-Windows Python 3 case.
+    def _is_broken_pipe_error(exc_class, exc):
+        """
+        Return whether an exception is a broken pipe error.
+
+        Args:
+          exc_class: an exception class.
+          exc: an exception instance.
+        """
+        return (exc_class is BrokenPipeError)  # noqa: F821
+
+
+@contextlib.contextmanager
+def indent_log(num=2):
+    """
+    A context manager which will cause the log output to be indented for any
+    log messages emitted inside it.
+    """
+    _log_state.indentation += num
+    try:
+        yield
+    finally:
+        _log_state.indentation -= num
+
+
+def get_indentation():
+    return getattr(_log_state, 'indentation', 0)
+
+
+class IndentingFormatter(logging.Formatter):
+    def __init__(self, *args, **kwargs):
+        """
+        A logging.Formatter that obeys the indent_log() context manager.
+
+        :param add_timestamp: A bool indicating output lines should be prefixed
+            with their record's timestamp.
+        """
+        self.add_timestamp = kwargs.pop("add_timestamp", False)
+        super(IndentingFormatter, self).__init__(*args, **kwargs)
+
+    def get_message_start(self, formatted, levelno):
+        """
+        Return the start of the formatted log message (not counting the
+        prefix to add to each line).
+        """
+        if levelno < logging.WARNING:
+            return ''
+        if formatted.startswith(DEPRECATION_MSG_PREFIX):
+            # Then the message already has a prefix.  We don't want it to
+            # look like "WARNING: DEPRECATION: ...."
+            return ''
+        if levelno < logging.ERROR:
+            return 'WARNING: '
+
+        return 'ERROR: '
+
+    def format(self, record):
+        """
+        Calls the standard formatter, but will indent all of the log messages
+        by our current indentation level.
+        """
+        formatted = super(IndentingFormatter, self).format(record)
+        message_start = self.get_message_start(formatted, record.levelno)
+        formatted = message_start + formatted
+
+        prefix = ''
+        if self.add_timestamp:
+            prefix = self.formatTime(record, "%Y-%m-%dT%H:%M:%S ")
+        prefix += " " * get_indentation()
+        formatted = "".join([
+            prefix + line
+            for line in formatted.splitlines(True)
+        ])
+        return formatted
+
+
+def _color_wrap(*colors):
+    def wrapped(inp):
+        return "".join(list(colors) + [inp, colorama.Style.RESET_ALL])
+    return wrapped
+
+
+class ColorizedStreamHandler(logging.StreamHandler):
+
+    # Don't build up a list of colors if we don't have colorama
+    if colorama:
+        COLORS = [
+            # This needs to be in order from highest logging level to lowest.
+            (logging.ERROR, _color_wrap(colorama.Fore.RED)),
+            (logging.WARNING, _color_wrap(colorama.Fore.YELLOW)),
+        ]
+    else:
+        COLORS = []
+
+    def __init__(self, stream=None, no_color=None):
+        logging.StreamHandler.__init__(self, stream)
+        self._no_color = no_color
+
+        if WINDOWS and colorama:
+            self.stream = colorama.AnsiToWin32(self.stream)
+
+    def _using_stdout(self):
+        """
+        Return whether the handler is using sys.stdout.
+        """
+        if WINDOWS and colorama:
+            # Then self.stream is an AnsiToWin32 object.
+            return self.stream.wrapped is sys.stdout
+
+        return self.stream is sys.stdout
+
+    def should_color(self):
+        # Don't colorize things if we do not have colorama or if told not to
+        if not colorama or self._no_color:
+            return False
+
+        real_stream = (
+            self.stream if not isinstance(self.stream, colorama.AnsiToWin32)
+            else self.stream.wrapped
+        )
+
+        # If the stream is a tty we should color it
+        if hasattr(real_stream, "isatty") and real_stream.isatty():
+            return True
+
+        # If we have an ANSI term we should color it
+        if os.environ.get("TERM") == "ANSI":
+            return True
+
+        # If anything else we should not color it
+        return False
+
+    def format(self, record):
+        msg = logging.StreamHandler.format(self, record)
+
+        if self.should_color():
+            for level, color in self.COLORS:
+                if record.levelno >= level:
+                    msg = color(msg)
+                    break
+
+        return msg
+
+    # The logging module says handleError() can be customized.
+    def handleError(self, record):
+        exc_class, exc = sys.exc_info()[:2]
+        # If a broken pipe occurred while calling write() or flush() on the
+        # stdout stream in logging's Handler.emit(), then raise our special
+        # exception so we can handle it in main() instead of logging the
+        # broken pipe error and continuing.
+        if (exc_class and self._using_stdout() and
+                _is_broken_pipe_error(exc_class, exc)):
+            raise BrokenStdoutLoggingError()
+
+        return super(ColorizedStreamHandler, self).handleError(record)
+
+
+class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler):
+
+    def _open(self):
+        ensure_dir(os.path.dirname(self.baseFilename))
+        return logging.handlers.RotatingFileHandler._open(self)
+
+
+class MaxLevelFilter(Filter):
+
+    def __init__(self, level):
+        self.level = level
+
+    def filter(self, record):
+        return record.levelno < self.level
+
+
+class ExcludeLoggerFilter(Filter):
+
+    """
+    A logging Filter that excludes records from a logger (or its children).
+    """
+
+    def filter(self, record):
+        # The base Filter class allows only records from a logger (or its
+        # children).
+        return not super(ExcludeLoggerFilter, self).filter(record)
+
+
+def setup_logging(verbosity, no_color, user_log_file):
+    """Configures and sets up all of the logging
+
+    Returns the requested logging level, as its integer value.
+    """
+
+    # Determine the level to be logging at.
+    if verbosity >= 1:
+        level = "DEBUG"
+    elif verbosity == -1:
+        level = "WARNING"
+    elif verbosity == -2:
+        level = "ERROR"
+    elif verbosity <= -3:
+        level = "CRITICAL"
+    else:
+        level = "INFO"
+
+    level_number = getattr(logging, level)
+
+    # The "root" logger should match the "console" level *unless* we also need
+    # to log to a user log file.
+    include_user_log = user_log_file is not None
+    if include_user_log:
+        additional_log_file = user_log_file
+        root_level = "DEBUG"
+    else:
+        additional_log_file = "/dev/null"
+        root_level = level
+
+    # For vendored libraries, disable any logging besides WARNING unless
+    # we have DEBUG level logging enabled.
+    vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG"
+
+    # Shorthands for clarity
+    log_streams = {
+        "stdout": "ext://sys.stdout",
+        "stderr": "ext://sys.stderr",
+    }
+    handler_classes = {
+        "stream": "pip._internal.utils.logging.ColorizedStreamHandler",
+        "file": "pip._internal.utils.logging.BetterRotatingFileHandler",
+    }
+    handlers = ["console", "console_errors", "console_subprocess"] + (
+        ["user_log"] if include_user_log else []
+    )
+
+    logging.config.dictConfig({
+        "version": 1,
+        "disable_existing_loggers": False,
+        "filters": {
+            "exclude_warnings": {
+                "()": "pip._internal.utils.logging.MaxLevelFilter",
+                "level": logging.WARNING,
+            },
+            "restrict_to_subprocess": {
+                "()": "logging.Filter",
+                "name": subprocess_logger.name,
+            },
+            "exclude_subprocess": {
+                "()": "pip._internal.utils.logging.ExcludeLoggerFilter",
+                "name": subprocess_logger.name,
+            },
+        },
+        "formatters": {
+            "indent": {
+                "()": IndentingFormatter,
+                "format": "%(message)s",
+            },
+            "indent_with_timestamp": {
+                "()": IndentingFormatter,
+                "format": "%(message)s",
+                "add_timestamp": True,
+            },
+        },
+        "handlers": {
+            "console": {
+                "level": level,
+                "class": handler_classes["stream"],
+                "no_color": no_color,
+                "stream": log_streams["stdout"],
+                "filters": ["exclude_subprocess", "exclude_warnings"],
+                "formatter": "indent",
+            },
+            "console_errors": {
+                "level": "WARNING",
+                "class": handler_classes["stream"],
+                "no_color": no_color,
+                "stream": log_streams["stderr"],
+                "filters": ["exclude_subprocess"],
+                "formatter": "indent",
+            },
+            # A handler responsible for logging messages from the
+            # "subprocessor" logger to the console.
+            "console_subprocess": {
+                "level": level,
+                "class": handler_classes["stream"],
+                "no_color": no_color,
+                "stream": log_streams["stderr"],
+                "filters": ["restrict_to_subprocess"],
+                "formatter": "indent",
+            },
+            "user_log": {
+                "level": "DEBUG",
+                "class": handler_classes["file"],
+                "filename": additional_log_file,
+                "delay": True,
+                "formatter": "indent_with_timestamp",
+            },
+        },
+        "root": {
+            "level": root_level,
+            "handlers": handlers,
+        },
+        "loggers": {
+            "pip._vendor": {
+                "level": vendored_log_level
+            }
+        },
+    })
+
+    return level_number
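
The indentation machinery above can be seen in isolation with a hand-built handler (a sketch; the logger name is arbitrary): indent_log() bumps the thread-local level, and IndentingFormatter prefixes every emitted line accordingly.

    import logging

    from pip._internal.utils.logging import IndentingFormatter, indent_log

    handler = logging.StreamHandler()
    handler.setFormatter(IndentingFormatter(fmt="%(message)s"))
    logger = logging.getLogger("demo")  # arbitrary name for the sketch
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)

    logger.info("collecting things")
    with indent_log():
        logger.info("indented two spaces")
        with indent_log():
            logger.info("indented four spaces")
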
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/misc.py b/lib/python3.7/site-packages/pip/_internal/utils/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca7a529387c40e416acf4fd1cb1b8a5557d1adfd
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/misc.py
@@ -0,0 +1,1011 @@
+from __future__ import absolute_import
+
+import contextlib
+import errno
+import io
+# we have a submodule named 'logging' which would shadow this if we used the
+# regular name:
+import logging as std_logging
+import os
+import posixpath
+import re
+import shutil
+import stat
+import subprocess
+import sys
+import tarfile
+import zipfile
+from collections import deque
+
+from pip._vendor import pkg_resources
+# NOTE: retrying is not annotated in typeshed as of 2017-07-17, which is
+#       why we ignore the type on this import.
+from pip._vendor.retrying import retry  # type: ignore
+from pip._vendor.six import PY2
+from pip._vendor.six.moves import input, shlex_quote
+from pip._vendor.six.moves.urllib import parse as urllib_parse
+from pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote
+
+from pip._internal.exceptions import CommandError, InstallationError
+from pip._internal.locations import (
+    running_under_virtualenv, site_packages, user_site, virtualenv_no_global,
+    write_delete_marker_file,
+)
+from pip._internal.utils.compat import (
+    WINDOWS, console_to_str, expanduser, stdlib_pkgs,
+)
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if PY2:
+    from io import BytesIO as StringIO
+else:
+    from io import StringIO
+
+if MYPY_CHECK_RUNNING:
+    from typing import (
+        Optional, Tuple, Iterable, List, Match, Union, Any, Mapping, Text,
+        AnyStr, Container
+    )
+    from pip._vendor.pkg_resources import Distribution
+    from pip._internal.models.link import Link
+    from pip._internal.utils.ui import SpinnerInterface
+
+
+__all__ = ['rmtree', 'display_path', 'backup_dir',
+           'ask', 'splitext',
+           'format_size', 'is_installable_dir',
+           'is_svn_page', 'file_contents',
+           'split_leading_dir', 'has_leading_dir',
+           'normalize_path',
+           'renames', 'get_prog',
+           'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
+           'captured_stdout', 'ensure_dir',
+           'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS', 'WHEEL_EXTENSION',
+           'get_installed_version', 'remove_auth_from_url']
+
+
+logger = std_logging.getLogger(__name__)
+subprocess_logger = std_logging.getLogger('pip.subprocessor')
+
+LOG_DIVIDER = '----------------------------------------'
+
+WHEEL_EXTENSION = '.whl'
+BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
+XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')
+ZIP_EXTENSIONS = ('.zip', WHEEL_EXTENSION)
+TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
+ARCHIVE_EXTENSIONS = (
+    ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)
+SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
+
+try:
+    import bz2  # noqa
+    SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
+except ImportError:
+    logger.debug('bz2 module is not available')
+
+try:
+    # Only for Python 3.3+
+    import lzma  # noqa
+    SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
+except ImportError:
+    logger.debug('lzma module is not available')
+
+
+def ensure_dir(path):
+    # type: (AnyStr) -> None
+    """os.path.makedirs without EEXIST."""
+    try:
+        os.makedirs(path)
+    except OSError as e:
+        if e.errno != errno.EEXIST:
+            raise
+
+
+def get_prog():
+    # type: () -> str
+    try:
+        prog = os.path.basename(sys.argv[0])
+        if prog in ('__main__.py', '-c'):
+            return "%s -m pip" % sys.executable
+        else:
+            return prog
+    except (AttributeError, TypeError, IndexError):
+        pass
+    return 'pip'
+
+
+# Retry every half second for up to 3 seconds
+@retry(stop_max_delay=3000, wait_fixed=500)
+def rmtree(dir, ignore_errors=False):
+    # type: (str, bool) -> None
+    shutil.rmtree(dir, ignore_errors=ignore_errors,
+                  onerror=rmtree_errorhandler)
+
+
+def rmtree_errorhandler(func, path, exc_info):
+    """On Windows, the files in .svn are read-only, so when rmtree() tries to
+    remove them, an exception is thrown.  We catch that here, remove the
+    read-only attribute, and hopefully continue without problems."""
+    # if the file is currently read-only
+    if os.stat(path).st_mode & stat.S_IREAD:
+        # convert to read/write
+        os.chmod(path, stat.S_IWRITE)
+        # use the original function to repeat the operation
+        func(path)
+        return
+    else:
+        raise
+
+
+def display_path(path):
+    # type: (Union[str, Text]) -> str
+    """Gives the display value for a given path, making it relative to cwd
+    if possible."""
+    path = os.path.normcase(os.path.abspath(path))
+    if sys.version_info[0] == 2:
+        path = path.decode(sys.getfilesystemencoding(), 'replace')
+        path = path.encode(sys.getdefaultencoding(), 'replace')
+    if path.startswith(os.getcwd() + os.path.sep):
+        path = '.' + path[len(os.getcwd()):]
+    return path
+
+
+def backup_dir(dir, ext='.bak'):
+    # type: (str, str) -> str
+    """Figure out the name of a directory to back up the given dir to
+    (adding .bak, .bak2, etc)"""
+    n = 1
+    extension = ext
+    while os.path.exists(dir + extension):
+        n += 1
+        extension = ext + str(n)
+    return dir + extension
+
+
+def ask_path_exists(message, options):
+    # type: (str, Iterable[str]) -> str
+    for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
+        if action in options:
+            return action
+    return ask(message, options)
+
+
+def ask(message, options):
+    # type: (str, Iterable[str]) -> str
+    """Ask the message interactively, with the given possible responses"""
+    while 1:
+        if os.environ.get('PIP_NO_INPUT'):
+            raise Exception(
+                'No input was expected ($PIP_NO_INPUT set); question: %s' %
+                message
+            )
+        response = input(message)
+        response = response.strip().lower()
+        if response not in options:
+            print(
+                'Your response (%r) was not one of the expected responses: '
+                '%s' % (response, ', '.join(options))
+            )
+        else:
+            return response
+
+
+def format_size(bytes):
+    # type: (float) -> str
+    if bytes > 1000 * 1000:
+        return '%.1fMB' % (bytes / 1000.0 / 1000)
+    elif bytes > 10 * 1000:
+        return '%ikB' % (bytes / 1000)
+    elif bytes > 1000:
+        return '%.1fkB' % (bytes / 1000.0)
+    else:
+        return '%ibytes' % bytes
+
+
+def is_installable_dir(path):
+    # type: (str) -> bool
+    """Is path is a directory containing setup.py or pyproject.toml?
+    """
+    if not os.path.isdir(path):
+        return False
+    setup_py = os.path.join(path, 'setup.py')
+    if os.path.isfile(setup_py):
+        return True
+    pyproject_toml = os.path.join(path, 'pyproject.toml')
+    if os.path.isfile(pyproject_toml):
+        return True
+    return False
+
+
+def is_svn_page(html):
+    # type: (Union[str, Text]) -> Optional[Match[Union[str, Text]]]
+    """
+    Returns true if the page appears to be the index page of an svn repository
+    """
+    return (re.search(r'<title>[^<]*Revision \d+:', html) and
+            re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
+
+
+def file_contents(filename):
+    # type: (str) -> Text
+    with open(filename, 'rb') as fp:
+        return fp.read().decode('utf-8')
+
+
+def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
+    """Yield pieces of data from a file-like object until EOF."""
+    while True:
+        chunk = file.read(size)
+        if not chunk:
+            break
+        yield chunk
+
+
+def split_leading_dir(path):
+    # type: (Union[str, Text]) -> List[Union[str, Text]]
+    path = path.lstrip('/').lstrip('\\')
+    if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
+                        '\\' not in path):
+        return path.split('/', 1)
+    elif '\\' in path:
+        return path.split('\\', 1)
+    else:
+        return [path, '']
+
+
+def has_leading_dir(paths):
+    # type: (Iterable[Union[str, Text]]) -> bool
+    """Returns true if all the paths have the same leading path name
+    (i.e., everything is in one subdirectory in an archive)"""
+    common_prefix = None
+    for path in paths:
+        prefix, rest = split_leading_dir(path)
+        if not prefix:
+            return False
+        elif common_prefix is None:
+            common_prefix = prefix
+        elif prefix != common_prefix:
+            return False
+    return True
+
+
+def normalize_path(path, resolve_symlinks=True):
+    # type: (str, bool) -> str
+    """
+    Convert a path to its canonical, case-normalized, absolute version.
+
+    """
+    path = expanduser(path)
+    if resolve_symlinks:
+        path = os.path.realpath(path)
+    else:
+        path = os.path.abspath(path)
+    return os.path.normcase(path)
+
+
+def splitext(path):
+    # type: (str) -> Tuple[str, str]
+    """Like os.path.splitext, but take off .tar too"""
+    base, ext = posixpath.splitext(path)
+    if base.lower().endswith('.tar'):
+        ext = base[-4:] + ext
+        base = base[:-4]
+    return base, ext
+
+
+def renames(old, new):
+    # type: (str, str) -> None
+    """Like os.renames(), but handles renaming across devices."""
+    # Implementation borrowed from os.renames().
+    head, tail = os.path.split(new)
+    if head and tail and not os.path.exists(head):
+        os.makedirs(head)
+
+    shutil.move(old, new)
+
+    head, tail = os.path.split(old)
+    if head and tail:
+        try:
+            os.removedirs(head)
+        except OSError:
+            pass
+
+
+def is_local(path):
+    # type: (str) -> bool
+    """
+    Return True if path is within sys.prefix, if we're running in a virtualenv.
+
+    If we're not in a virtualenv, all paths are considered "local."
+
+    """
+    if not running_under_virtualenv():
+        return True
+    return normalize_path(path).startswith(normalize_path(sys.prefix))
+
+
+def dist_is_local(dist):
+    # type: (Distribution) -> bool
+    """
+    Return True if given Distribution object is installed locally
+    (i.e. within current virtualenv).
+
+    Always True if we're not in a virtualenv.
+
+    """
+    return is_local(dist_location(dist))
+
+
+def dist_in_usersite(dist):
+    # type: (Distribution) -> bool
+    """
+    Return True if given Distribution is installed in user site.
+    """
+    norm_path = normalize_path(dist_location(dist))
+    return norm_path.startswith(normalize_path(user_site))
+
+
+def dist_in_site_packages(dist):
+    # type: (Distribution) -> bool
+    """
+    Return True if given Distribution is installed in
+    sysconfig.get_python_lib().
+    """
+    return normalize_path(
+        dist_location(dist)
+    ).startswith(normalize_path(site_packages))
+
+
+def dist_is_editable(dist):
+    # type: (Distribution) -> bool
+    """
+    Return True if given Distribution is an editable install.
+    """
+    for path_item in sys.path:
+        egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
+        if os.path.isfile(egg_link):
+            return True
+    return False
+
+
+def get_installed_distributions(local_only=True,
+                                skip=stdlib_pkgs,
+                                include_editables=True,
+                                editables_only=False,
+                                user_only=False):
+    # type: (bool, Container[str], bool, bool, bool) -> List[Distribution]
+    """
+    Return a list of installed Distribution objects.
+
+    If ``local_only`` is True (default), only return installations
+    local to the current virtualenv, if in a virtualenv.
+
+    ``skip`` argument is an iterable of lower-case project names to
+    ignore; defaults to stdlib_pkgs
+
+    If ``include_editables`` is False, don't report editables.
+
+    If ``editables_only`` is True, only report editables.
+
+    If ``user_only`` is True, only report installations in the user
+    site directory.
+
+    """
+    if local_only:
+        local_test = dist_is_local
+    else:
+        def local_test(d):
+            return True
+
+    if include_editables:
+        def editable_test(d):
+            return True
+    else:
+        def editable_test(d):
+            return not dist_is_editable(d)
+
+    if editables_only:
+        def editables_only_test(d):
+            return dist_is_editable(d)
+    else:
+        def editables_only_test(d):
+            return True
+
+    if user_only:
+        user_test = dist_in_usersite
+    else:
+        def user_test(d):
+            return True
+
+    # because of pkg_resources vendoring, mypy cannot find stub in typeshed
+    return [d for d in pkg_resources.working_set  # type: ignore
+            if local_test(d) and
+            d.key not in skip and
+            editable_test(d) and
+            editables_only_test(d) and
+            user_test(d)
+            ]
+
+
+def egg_link_path(dist):
+    # type: (Distribution) -> Optional[str]
+    """
+    Return the path for the .egg-link file if it exists, otherwise, None.
+
+    There are 3 scenarios:
+    1) not in a virtualenv
+       try to find in site.USER_SITE, then site_packages
+    2) in a no-global virtualenv
+       try to find in site_packages
+    3) in a yes-global virtualenv
+       try to find in site_packages, then site.USER_SITE
+       (don't look in global location)
+
+    For #1 and #3, there could be odd cases, where there's an egg-link in 2
+    locations.
+
+    This method will just return the first one found.
+    """
+    sites = []
+    if running_under_virtualenv():
+        if virtualenv_no_global():
+            sites.append(site_packages)
+        else:
+            sites.append(site_packages)
+            if user_site:
+                sites.append(user_site)
+    else:
+        if user_site:
+            sites.append(user_site)
+        sites.append(site_packages)
+
+    for site in sites:
+        egglink = os.path.join(site, dist.project_name) + '.egg-link'
+        if os.path.isfile(egglink):
+            return egglink
+    return None
+
+
+def dist_location(dist):
+    # type: (Distribution) -> str
+    """
+    Get the site-packages location of this distribution. Generally
+    this is dist.location, except in the case of develop-installed
+    packages, where dist.location is the source code location, and we
+    want to know where the egg-link file is.
+
+    """
+    egg_link = egg_link_path(dist)
+    if egg_link:
+        return egg_link
+    return dist.location
+
+
+def current_umask():
+    """Get the current umask which involves having to set it temporarily."""
+    mask = os.umask(0)
+    os.umask(mask)
+    return mask
+
+
+def unzip_file(filename, location, flatten=True):
+    # type: (str, str, bool) -> None
+    """
+    Unzip the file (with path `filename`) to the destination `location`.  All
+    files are written based on system defaults and umask (i.e. permissions are
+    not preserved), except that regular file members with any execute
+    permissions (user, group, or world) have "chmod +x" applied after being
+    written. Note that for windows, any execute changes using os.chmod are
+    no-ops per the python docs.
+    """
+    ensure_dir(location)
+    zipfp = open(filename, 'rb')
+    try:
+        zip = zipfile.ZipFile(zipfp, allowZip64=True)
+        leading = has_leading_dir(zip.namelist()) and flatten
+        for info in zip.infolist():
+            name = info.filename
+            fn = name
+            if leading:
+                fn = split_leading_dir(name)[1]
+            fn = os.path.join(location, fn)
+            dir = os.path.dirname(fn)
+            if fn.endswith('/') or fn.endswith('\\'):
+                # A directory
+                ensure_dir(fn)
+            else:
+                ensure_dir(dir)
+                # Don't use read() to avoid allocating an arbitrarily large
+                # chunk of memory for the file's content
+                fp = zip.open(name)
+                try:
+                    with open(fn, 'wb') as destfp:
+                        shutil.copyfileobj(fp, destfp)
+                finally:
+                    fp.close()
+                    mode = info.external_attr >> 16
+                    # if mode and regular file and any execute permissions for
+                    # user/group/world?
+                    if mode and stat.S_ISREG(mode) and mode & 0o111:
+                        # make dest file have execute for user/group/world
+                        # (chmod +x) no-op on windows per python docs
+                        os.chmod(fn, (0o777 - current_umask() | 0o111))
+    finally:
+        zipfp.close()
+
+
+def untar_file(filename, location):
+    # type: (str, str) -> None
+    """
+    Untar the file (with path `filename`) to the destination `location`.
+    All files are written based on system defaults and umask (i.e. permissions
+    are not preserved), except that regular file members with any execute
+    permissions (user, group, or world) have "chmod +x" applied after being
+    written.  Note that for windows, any execute changes using os.chmod are
+    no-ops per the python docs.
+    """
+    ensure_dir(location)
+    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
+        mode = 'r:gz'
+    elif filename.lower().endswith(BZ2_EXTENSIONS):
+        mode = 'r:bz2'
+    elif filename.lower().endswith(XZ_EXTENSIONS):
+        mode = 'r:xz'
+    elif filename.lower().endswith('.tar'):
+        mode = 'r'
+    else:
+        logger.warning(
+            'Cannot determine compression type for file %s', filename,
+        )
+        mode = 'r:*'
+    tar = tarfile.open(filename, mode)
+    try:
+        leading = has_leading_dir([
+            member.name for member in tar.getmembers()
+        ])
+        for member in tar.getmembers():
+            fn = member.name
+            if leading:
+                # https://github.com/python/mypy/issues/1174
+                fn = split_leading_dir(fn)[1]  # type: ignore
+            path = os.path.join(location, fn)
+            if member.isdir():
+                ensure_dir(path)
+            elif member.issym():
+                try:
+                    # https://github.com/python/typeshed/issues/2673
+                    tar._extract_member(member, path)  # type: ignore
+                except Exception as exc:
+                    # Some corrupt tar files seem to produce this
+                    # (specifically bad symlinks)
+                    logger.warning(
+                        'In the tar file %s the member %s is invalid: %s',
+                        filename, member.name, exc,
+                    )
+                    continue
+            else:
+                try:
+                    fp = tar.extractfile(member)
+                except (KeyError, AttributeError) as exc:
+                    # Some corrupt tar files seem to produce this
+                    # (specifically bad symlinks)
+                    logger.warning(
+                        'In the tar file %s the member %s is invalid: %s',
+                        filename, member.name, exc,
+                    )
+                    continue
+                ensure_dir(os.path.dirname(path))
+                with open(path, 'wb') as destfp:
+                    shutil.copyfileobj(fp, destfp)
+                fp.close()
+                # Update the timestamp (useful for cython compiled files)
+                # https://github.com/python/typeshed/issues/2673
+                tar.utime(member, path)  # type: ignore
+                # does the member have any execute permissions for
+                # user/group/world?
+                if member.mode & 0o111:
+                    # make dest file have execute for user/group/world
+                    # no-op on windows per python docs
+                    os.chmod(path, (0o777 - current_umask() | 0o111))
+    finally:
+        tar.close()
+
+
+def unpack_file(
+    filename,  # type: str
+    location,  # type: str
+    content_type,  # type: Optional[str]
+    link  # type: Optional[Link]
+):
+    # type: (...) -> None
+    filename = os.path.realpath(filename)
+    if (content_type == 'application/zip' or
+            filename.lower().endswith(ZIP_EXTENSIONS) or
+            zipfile.is_zipfile(filename)):
+        unzip_file(
+            filename,
+            location,
+            flatten=not filename.endswith('.whl')
+        )
+    elif (content_type == 'application/x-gzip' or
+            tarfile.is_tarfile(filename) or
+            filename.lower().endswith(
+                TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):
+        untar_file(filename, location)
+    elif (content_type and content_type.startswith('text/html') and
+            is_svn_page(file_contents(filename))):
+        # We don't really care about this
+        from pip._internal.vcs.subversion import Subversion
+        Subversion('svn+' + link.url).unpack(location)
+    else:
+        # FIXME: handle?
+        # FIXME: magic signatures?
+        logger.critical(
+            'Cannot unpack file %s (downloaded from %s, content-type: %s); '
+            'cannot detect archive format',
+            filename, location, content_type,
+        )
+        raise InstallationError(
+            'Cannot determine archive format of %s' % location
+        )
+
+
+def format_command_args(args):
+    # type: (List[str]) -> str
+    """
+    Format command arguments for display.
+    """
+    return ' '.join(shlex_quote(arg) for arg in args)
+
+
+def call_subprocess(
+    cmd,  # type: List[str]
+    show_stdout=False,  # type: bool
+    cwd=None,  # type: Optional[str]
+    on_returncode='raise',  # type: str
+    extra_ok_returncodes=None,  # type: Optional[Iterable[int]]
+    command_desc=None,  # type: Optional[str]
+    extra_environ=None,  # type: Optional[Mapping[str, Any]]
+    unset_environ=None,  # type: Optional[Iterable[str]]
+    spinner=None  # type: Optional[SpinnerInterface]
+):
+    # type: (...) -> Optional[Text]
+    """
+    Args:
+      show_stdout: if true, use INFO to log the subprocess's stderr and
+        stdout streams.  Otherwise, use DEBUG.  Defaults to False.
+      extra_ok_returncodes: an iterable of integer return codes that are
+        acceptable, in addition to 0. Defaults to None, which means [].
+      unset_environ: an iterable of environment variable names to unset
+        prior to calling subprocess.Popen().
+    """
+    if extra_ok_returncodes is None:
+        extra_ok_returncodes = []
+    if unset_environ is None:
+        unset_environ = []
+    # Most places in pip use show_stdout=False. What this means is--
+    #
+    # - We connect the child's output (combined stderr and stdout) to a
+    #   single pipe, which we read.
+    # - We log this output to stderr at DEBUG level as it is received.
+    # - If DEBUG logging isn't enabled (e.g. if --verbose logging wasn't
+    #   requested), then we show a spinner so the user can still see the
+    #   subprocess is in progress.
+    # - If the subprocess exits with an error, we log the output to stderr
+    #   at ERROR level if it hasn't already been displayed to the console
+    #   (e.g. if --verbose logging wasn't enabled).  This way we don't log
+    #   the output to the console twice.
+    #
+    # If show_stdout=True, then the above is still done, but with DEBUG
+    # replaced by INFO.
+    if show_stdout:
+        # Then log the subprocess output at INFO level.
+        log_subprocess = subprocess_logger.info
+        used_level = std_logging.INFO
+    else:
+        # Then log the subprocess output using DEBUG.  This also ensures
+        # it will be logged to the log file (aka user_log), if enabled.
+        log_subprocess = subprocess_logger.debug
+        used_level = std_logging.DEBUG
+
+    # Whether the subprocess will be visible in the console.
+    showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level
+
+    # Only use the spinner if we're not showing the subprocess output
+    # and we have a spinner.
+    use_spinner = not showing_subprocess and spinner is not None
+
+    if command_desc is None:
+        command_desc = format_command_args(cmd)
+
+    log_subprocess("Running command %s", command_desc)
+    env = os.environ.copy()
+    if extra_environ:
+        env.update(extra_environ)
+    for name in unset_environ:
+        env.pop(name, None)
+    try:
+        proc = subprocess.Popen(
+            cmd, stderr=subprocess.STDOUT, stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE, cwd=cwd, env=env,
+        )
+        proc.stdin.close()
+    except Exception as exc:
+        subprocess_logger.critical(
+            "Error %s while executing command %s", exc, command_desc,
+        )
+        raise
+    all_output = []
+    while True:
+        line = console_to_str(proc.stdout.readline())
+        if not line:
+            break
+        line = line.rstrip()
+        all_output.append(line + '\n')
+
+        # Show the line immediately.
+        log_subprocess(line)
+        # Update the spinner.
+        if use_spinner:
+            spinner.spin()
+    try:
+        proc.wait()
+    finally:
+        if proc.stdout:
+            proc.stdout.close()
+    proc_had_error = (
+        proc.returncode and proc.returncode not in extra_ok_returncodes
+    )
+    if use_spinner:
+        if proc_had_error:
+            spinner.finish("error")
+        else:
+            spinner.finish("done")
+    if proc_had_error:
+        if on_returncode == 'raise':
+            if not showing_subprocess:
+                # Then the subprocess streams haven't been logged to the
+                # console yet.
+                subprocess_logger.error(
+                    'Complete output from command %s:', command_desc,
+                )
+                # The all_output value already ends in a newline.
+                subprocess_logger.error(''.join(all_output) + LOG_DIVIDER)
+            raise InstallationError(
+                'Command "%s" failed with error code %s in %s'
+                % (command_desc, proc.returncode, cwd))
+        elif on_returncode == 'warn':
+            subprocess_logger.warning(
+                'Command "%s" had error code %s in %s',
+                command_desc, proc.returncode, cwd,
+            )
+        elif on_returncode == 'ignore':
+            pass
+        else:
+            raise ValueError('Invalid value: on_returncode=%s' %
+                             repr(on_returncode))
+    return ''.join(all_output)
+
+
+def _make_build_dir(build_dir):
+    os.makedirs(build_dir)
+    write_delete_marker_file(build_dir)
+
+
+class FakeFile(object):
+    """Wrap a list of lines in an object with readline() to make
+    ConfigParser happy."""
+    def __init__(self, lines):
+        self._gen = (l for l in lines)
+
+    def readline(self):
+        try:
+            try:
+                return next(self._gen)
+            except NameError:
+                return self._gen.next()
+        except StopIteration:
+            return ''
+
+    def __iter__(self):
+        return self._gen
+
+
+class StreamWrapper(StringIO):
+
+    @classmethod
+    def from_stream(cls, orig_stream):
+        cls.orig_stream = orig_stream
+        return cls()
+
+    # compileall.compile_dir() needs stdout.encoding to print to stdout
+    @property
+    def encoding(self):
+        return self.orig_stream.encoding
+
+
+@contextlib.contextmanager
+def captured_output(stream_name):
+    """Return a context manager used by captured_stdout/stdin/stderr
+    that temporarily replaces the sys stream *stream_name* with a StringIO.
+
+    Taken from Lib/support/__init__.py in the CPython repo.
+    """
+    orig_stdout = getattr(sys, stream_name)
+    setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
+    try:
+        yield getattr(sys, stream_name)
+    finally:
+        setattr(sys, stream_name, orig_stdout)
+
+
+def captured_stdout():
+    """Capture the output of sys.stdout:
+
+       with captured_stdout() as stdout:
+           print('hello')
+       self.assertEqual(stdout.getvalue(), 'hello\n')
+
+    Taken from Lib/support/__init__.py in the CPython repo.
+    """
+    return captured_output('stdout')
+
+
+def captured_stderr():
+    """
+    See captured_stdout().
+    """
+    return captured_output('stderr')
+
+
+class cached_property(object):
+    """A property that is only computed once per instance and then replaces
+       itself with an ordinary attribute. Deleting the attribute resets the
+       property.
+
+       Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
+    """
+
+    def __init__(self, func):
+        self.__doc__ = getattr(func, '__doc__')
+        self.func = func
+
+    def __get__(self, obj, cls):
+        if obj is None:
+            # We're being accessed from the class itself, not from an object
+            return self
+        value = obj.__dict__[self.func.__name__] = self.func(obj)
+        return value
+
+
+def get_installed_version(dist_name, working_set=None):
+    """Get the installed version of dist_name avoiding pkg_resources cache"""
+    # Create a requirement that we'll look for inside of setuptools.
+    req = pkg_resources.Requirement.parse(dist_name)
+
+    if working_set is None:
+        # We want to avoid having this cached, so we need to construct a new
+        # working set each time.
+        working_set = pkg_resources.WorkingSet()
+
+    # Get the installed distribution from our working set
+    dist = working_set.find(req)
+
+    # Check to see if we got an installed distribution or not; if we did,
+    # we want to return its version.
+    return dist.version if dist else None
+
+
+def consume(iterator):
+    """Consume an iterable at C speed."""
+    deque(iterator, maxlen=0)
+
+
+# Simulates an enum
+def enum(*sequential, **named):
+    enums = dict(zip(sequential, range(len(sequential))), **named)
+    reverse = {value: key for key, value in enums.items()}
+    enums['reverse_mapping'] = reverse
+    return type('Enum', (), enums)
+
+
+def split_auth_from_netloc(netloc):
+    """
+    Parse out and remove the auth information from a netloc.
+
+    Returns: (netloc, (username, password)).
+    """
+    if '@' not in netloc:
+        return netloc, (None, None)
+
+    # Split from the right because that's how urllib.parse.urlsplit()
+    # behaves if more than one @ is present (which can be checked using
+    # the password attribute of urlsplit()'s return value).
+    auth, netloc = netloc.rsplit('@', 1)
+    if ':' in auth:
+        # Split from the left because that's how urllib.parse.urlsplit()
+        # behaves if more than one : is present (which again can be checked
+        # using the password attribute of the return value)
+        user_pass = auth.split(':', 1)
+    else:
+        user_pass = auth, None
+
+    user_pass = tuple(
+        None if x is None else urllib_unquote(x) for x in user_pass
+    )
+
+    return netloc, user_pass
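+
+# A minimal usage sketch (the credentials are illustrative):
+#
+#     split_auth_from_netloc('user:s3cret@example.com')
+#     # -> ('example.com', ('user', 's3cret'))
+#     split_auth_from_netloc('example.com')
+#     # -> ('example.com', (None, None))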
+
+
+def redact_netloc(netloc):
+    # type: (str) -> str
+    """
+    Replace the password in a netloc with "****", if it exists.
+
+    For example, "user:pass@example.com" returns "user:****@example.com".
+    """
+    netloc, (user, password) = split_auth_from_netloc(netloc)
+    if user is None:
+        return netloc
+    password = '' if password is None else ':****'
+    return '{user}{password}@{netloc}'.format(user=urllib_parse.quote(user),
+                                              password=password,
+                                              netloc=netloc)
+
+
+def _transform_url(url, transform_netloc):
+    purl = urllib_parse.urlsplit(url)
+    netloc = transform_netloc(purl.netloc)
+    # stripped url
+    url_pieces = (
+        purl.scheme, netloc, purl.path, purl.query, purl.fragment
+    )
+    surl = urllib_parse.urlunsplit(url_pieces)
+    return surl
+
+
+def _get_netloc(netloc):
+    return split_auth_from_netloc(netloc)[0]
+
+
+def remove_auth_from_url(url):
+    # type: (str) -> str
+    # Return a copy of url with 'username:password@' removed.
+    # username/pass params are passed to subversion through flags
+    # and are not recognized in the url.
+    return _transform_url(url, _get_netloc)
+
+
+def redact_password_from_url(url):
+    # type: (str) -> str
+    """Replace the password in a given url with ****."""
+    return _transform_url(url, redact_netloc)
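+
+# A minimal usage sketch (the URL is illustrative):
+#
+#     redact_password_from_url('https://user:s3cret@example.com/simple')
+#     # -> 'https://user:****@example.com/simple'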
+
+
+def protect_pip_from_modification_on_windows(modifying_pip):
+    """Protection of pip.exe from modification on Windows
+
+    On Windows, any operation modifying pip should be run as:
+        python -m pip ...
+    """
+    pip_names = [
+        "pip.exe",
+        "pip{}.exe".format(sys.version_info[0]),
+        "pip{}.{}.exe".format(*sys.version_info[:2])
+    ]
+
+    # See https://github.com/pypa/pip/issues/1299 for more discussion
+    should_show_use_python_msg = (
+        modifying_pip and
+        WINDOWS and
+        os.path.basename(sys.argv[0]) in pip_names
+    )
+
+    if should_show_use_python_msg:
+        new_command = [
+            sys.executable, "-m", "pip"
+        ] + sys.argv[1:]
+        raise CommandError(
+            'To modify pip, please run the following command:\n{}'
+            .format(" ".join(new_command))
+        )
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/models.py b/lib/python3.7/site-packages/pip/_internal/utils/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5cb80a7cbdfc14a0f4726d3d416b3dd3c47e9a7
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/models.py
@@ -0,0 +1,40 @@
+"""Utilities for defining models
+"""
+
+import operator
+
+
+class KeyBasedCompareMixin(object):
+    """Provides comparision capabilities that is based on a key
+    """
+
+    def __init__(self, key, defining_class):
+        self._compare_key = key
+        self._defining_class = defining_class
+
+    def __hash__(self):
+        return hash(self._compare_key)
+
+    def __lt__(self, other):
+        return self._compare(other, operator.__lt__)
+
+    def __le__(self, other):
+        return self._compare(other, operator.__le__)
+
+    def __gt__(self, other):
+        return self._compare(other, operator.__gt__)
+
+    def __ge__(self, other):
+        return self._compare(other, operator.__ge__)
+
+    def __eq__(self, other):
+        return self._compare(other, operator.__eq__)
+
+    def __ne__(self, other):
+        return self._compare(other, operator.__ne__)
+
+    def _compare(self, other, method):
+        if not isinstance(other, self._defining_class):
+            return NotImplemented
+
+        return method(self._compare_key, other._compare_key)
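+
+
+# A minimal usage sketch (the Candidate class below is a hypothetical
+# example, not part of pip):
+#
+#     class Candidate(KeyBasedCompareMixin):
+#         def __init__(self, version):
+#             super(Candidate, self).__init__(
+#                 key=version, defining_class=Candidate)
+#
+#     Candidate((1, 0)) < Candidate((2, 0))   # True: compares the keys
+#     Candidate((1, 0)) == (1, 0)             # False: not a Candidate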
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/outdated.py b/lib/python3.7/site-packages/pip/_internal/utils/outdated.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b58cd5e2ffa5a24ea6abbb1f0207d703ac32238
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/outdated.py
@@ -0,0 +1,162 @@
+from __future__ import absolute_import
+
+import datetime
+import json
+import logging
+import os.path
+import sys
+
+from pip._vendor import lockfile, pkg_resources
+from pip._vendor.packaging import version as packaging_version
+
+from pip._internal.index import PackageFinder
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.filesystem import check_path_owner
+from pip._internal.utils.misc import ensure_dir, get_installed_version
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    import optparse
+    from typing import Any, Dict
+    from pip._internal.download import PipSession
+
+
+SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
+
+
+logger = logging.getLogger(__name__)
+
+
+class SelfCheckState(object):
+    def __init__(self, cache_dir):
+        # type: (str) -> None
+        self.state = {}  # type: Dict[str, Any]
+        self.statefile_path = None
+
+        # Try to load the existing state
+        if cache_dir:
+            self.statefile_path = os.path.join(cache_dir, "selfcheck.json")
+            try:
+                with open(self.statefile_path) as statefile:
+                    self.state = json.load(statefile)[sys.prefix]
+            except (IOError, ValueError, KeyError):
+                # Explicitly suppressing exceptions, since we don't want to
+                # error out if the cache file is invalid.
+                pass
+
+    def save(self, pypi_version, current_time):
+        # type: (str, datetime.datetime) -> None
+        # If we do not have a path to cache in, don't bother saving.
+        if not self.statefile_path:
+            return
+
+        # Check to make sure that we own the directory
+        if not check_path_owner(os.path.dirname(self.statefile_path)):
+            return
+
+        # Now that we've ensured the directory is owned by this user, we'll go
+        # ahead and make sure that all our directories are created.
+        ensure_dir(os.path.dirname(self.statefile_path))
+
+        # Attempt to write out our version check file
+        with lockfile.LockFile(self.statefile_path):
+            if os.path.exists(self.statefile_path):
+                with open(self.statefile_path) as statefile:
+                    state = json.load(statefile)
+            else:
+                state = {}
+
+            state[sys.prefix] = {
+                "last_check": current_time.strftime(SELFCHECK_DATE_FMT),
+                "pypi_version": pypi_version,
+            }
+
+            with open(self.statefile_path, "w") as statefile:
+                json.dump(state, statefile, sort_keys=True,
+                          separators=(",", ":"))
+
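+# An illustrative sketch of the selfcheck.json layout that
+# SelfCheckState.save() produces (the values are made up):
+#
+#     {"/usr": {"last_check": "2019-01-01T00:00:00Z",
+#               "pypi_version": "19.0.1"}}
+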
+
+def was_installed_by_pip(pkg):
+    # type: (str) -> bool
+    """Checks whether pkg was installed by pip
+
+    This is used to avoid displaying the upgrade message when pip was in
+    fact installed by a system package manager, such as dnf on Fedora.
+    """
+    try:
+        dist = pkg_resources.get_distribution(pkg)
+        return (dist.has_metadata('INSTALLER') and
+                'pip' in dist.get_metadata_lines('INSTALLER'))
+    except pkg_resources.DistributionNotFound:
+        return False
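+
+# A minimal usage sketch (the package names are illustrative):
+#
+#     was_installed_by_pip('pip')       # True when the INSTALLER
+#                                       # metadata names pip
+#     was_installed_by_pip('no-such')   # False (DistributionNotFound)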
+
+
+def pip_version_check(session, options):
+    # type: (PipSession, optparse.Values) -> None
+    """Check for an update for pip.
+
+    Limit the frequency of checks to once per week. State is stored either in
+    the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
+    of the pip script path.
+    """
+    installed_version = get_installed_version("pip")
+    if not installed_version:
+        return
+
+    pip_version = packaging_version.parse(installed_version)
+    pypi_version = None
+
+    try:
+        state = SelfCheckState(cache_dir=options.cache_dir)
+
+        current_time = datetime.datetime.utcnow()
+        # Determine if we need to refresh the state
+        if "last_check" in state.state and "pypi_version" in state.state:
+            last_check = datetime.datetime.strptime(
+                state.state["last_check"],
+                SELFCHECK_DATE_FMT
+            )
+            if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:
+                pypi_version = state.state["pypi_version"]
+
+        # Refresh the version if we need to or just see if we need to warn
+        if pypi_version is None:
+            # Let's use PackageFinder to see what the latest pip version is
+            finder = PackageFinder(
+                find_links=options.find_links,
+                index_urls=[options.index_url] + options.extra_index_urls,
+                allow_all_prereleases=False,  # Explicitly set to False
+                trusted_hosts=options.trusted_hosts,
+                session=session,
+            )
+            candidate = finder.find_candidates("pip").get_best()
+            if candidate is None:
+                return
+            pypi_version = str(candidate.version)
+
+            # save that we've performed a check
+            state.save(pypi_version, current_time)
+
+        remote_version = packaging_version.parse(pypi_version)
+
+        # Determine if our installed pip version is older than pypi_version
+        if (pip_version < remote_version and
+                pip_version.base_version != remote_version.base_version and
+                was_installed_by_pip('pip')):
+            # Advise "python -m pip" on Windows to avoid issues
+            # with overwriting pip.exe.
+            if WINDOWS:
+                pip_cmd = "python -m pip"
+            else:
+                pip_cmd = "pip"
+            logger.warning(
+                "You are using pip version %s, however version %s is "
+                "available.\nYou should consider upgrading via the "
+                "'%s install --upgrade pip' command.",
+                pip_version, pypi_version, pip_cmd
+            )
+    except Exception:
+        logger.debug(
+            "There was an error checking the latest version of pip",
+            exc_info=True,
+        )
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/packaging.py b/lib/python3.7/site-packages/pip/_internal/utils/packaging.py
new file mode 100644
index 0000000000000000000000000000000000000000..449f3fd6be94f39ab3159faf2c6e3db0e0dd96c3
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/packaging.py
@@ -0,0 +1,85 @@
+from __future__ import absolute_import
+
+import logging
+import sys
+from email.parser import FeedParser
+
+from pip._vendor import pkg_resources
+from pip._vendor.packaging import specifiers, version
+
+from pip._internal import exceptions
+from pip._internal.utils.misc import display_path
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Optional
+    from email.message import Message
+    from pip._vendor.pkg_resources import Distribution
+
+
+logger = logging.getLogger(__name__)
+
+
+def check_requires_python(requires_python):
+    # type: (Optional[str]) -> bool
+    """
+    Check if the python version in use matches the `requires_python`
+    specifier.
+
+    Returns `True` if the version of python in use matches the requirement.
+    Returns `False` if the version of python in use does not match the
+    requirement.
+
+    Raises an InvalidSpecifier if `requires_python` has an invalid format.
+    """
+    if requires_python is None:
+        # The package provides no information
+        return True
+    requires_python_specifier = specifiers.SpecifierSet(requires_python)
+
+    # We only use major.minor.micro
+    python_version = version.parse('.'.join(map(str, sys.version_info[:3])))
+    return python_version in requires_python_specifier
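+
+# A minimal usage sketch (results assume a Python 3.7 interpreter):
+#
+#     check_requires_python('>=3.6')   # True on Python 3.7
+#     check_requires_python('<3')      # False on Python 3.7
+#     check_requires_python(None)      # True: no restriction declared
+#     check_requires_python('!!')      # raises InvalidSpecifier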
+
+
+def get_metadata(dist):
+    # type: (Distribution) -> Message
+    if (isinstance(dist, pkg_resources.DistInfoDistribution) and
+            dist.has_metadata('METADATA')):
+        metadata = dist.get_metadata('METADATA')
+    elif dist.has_metadata('PKG-INFO'):
+        metadata = dist.get_metadata('PKG-INFO')
+    else:
+        logger.warning("No metadata found in %s", display_path(dist.location))
+        metadata = ''
+
+    feed_parser = FeedParser()
+    feed_parser.feed(metadata)
+    return feed_parser.close()
+
+
+def check_dist_requires_python(dist):
+    pkg_info_dict = get_metadata(dist)
+    requires_python = pkg_info_dict.get('Requires-Python')
+    try:
+        if not check_requires_python(requires_python):
+            raise exceptions.UnsupportedPythonVersion(
+                "%s requires Python '%s' but the running Python is %s" % (
+                    dist.project_name,
+                    requires_python,
+                    '.'.join(map(str, sys.version_info[:3])),)
+            )
+    except specifiers.InvalidSpecifier as e:
+        logger.warning(
+            "Package %s has an invalid Requires-Python entry %s - %s",
+            dist.project_name, requires_python, e,
+        )
+        return
+
+
+def get_installer(dist):
+    # type: (Distribution) -> str
+    if dist.has_metadata('INSTALLER'):
+        for line in dist.get_metadata_lines('INSTALLER'):
+            if line.strip():
+                return line.strip()
+    return ''
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/setuptools_build.py b/lib/python3.7/site-packages/pip/_internal/utils/setuptools_build.py
new file mode 100644
index 0000000000000000000000000000000000000000..03973e976cad1b9f3363fafb9f3513dffa1b2a5e
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/setuptools_build.py
@@ -0,0 +1,8 @@
+# Shim to wrap setup.py invocation with setuptools
+SETUPTOOLS_SHIM = (
+    "import setuptools, tokenize;__file__=%r;"
+    "f=getattr(tokenize, 'open', open)(__file__);"
+    "code=f.read().replace('\\r\\n', '\\n');"
+    "f.close();"
+    "exec(compile(code, __file__, 'exec'))"
+)
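+
+# A sketch of how the shim is typically interpolated and run (the setup.py
+# path and the 'egg_info' argument are illustrative assumptions):
+#
+#     import subprocess
+#     import sys
+#
+#     args = [sys.executable, '-c', SETUPTOOLS_SHIM % '/path/to/setup.py',
+#             'egg_info']
+#     subprocess.check_call(args)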
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/temp_dir.py b/lib/python3.7/site-packages/pip/_internal/utils/temp_dir.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c81ad554c8316e587106b9a0b3b490d3fd119fb
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/temp_dir.py
@@ -0,0 +1,155 @@
+from __future__ import absolute_import
+
+import errno
+import itertools
+import logging
+import os.path
+import tempfile
+
+from pip._internal.utils.misc import rmtree
+
+logger = logging.getLogger(__name__)
+
+
+class TempDirectory(object):
+    """Helper class that owns and cleans up a temporary directory.
+
+    This class can be used as a context manager or as an OO representation of a
+    temporary directory.
+
+    Attributes:
+        path
+            Path of the created temporary directory, or None
+        delete
+            Whether the directory should be deleted when exiting
+            (when used as a contextmanager)
+
+    Methods:
+        create()
+            Creates a temporary directory and stores its path in the path
+            attribute.
+        cleanup()
+            Deletes the temporary directory and sets the path attribute to
+            None
+
+    When used as a context manager, a temporary directory is created on
+    entering the context and, if the delete attribute is True, on exiting the
+    context the created directory is deleted.
+    """
+
+    def __init__(self, path=None, delete=None, kind="temp"):
+        super(TempDirectory, self).__init__()
+
+        if path is None and delete is None:
+            # If we were not given an explicit directory, and we were not given
+            # an explicit delete option, then we'll default to deleting.
+            delete = True
+
+        self.path = path
+        self.delete = delete
+        self.kind = kind
+
+    def __repr__(self):
+        return "<{} {!r}>".format(self.__class__.__name__, self.path)
+
+    def __enter__(self):
+        self.create()
+        return self
+
+    def __exit__(self, exc, value, tb):
+        if self.delete:
+            self.cleanup()
+
+    def create(self):
+        """Create a temporary directory and store its path in self.path
+        """
+        if self.path is not None:
+            logger.debug(
+                "Skipped creation of temporary directory: {}".format(self.path)
+            )
+            return
+        # We realpath here because some systems have their default tmpdir
+        # symlinked to another directory.  This tends to confuse build
+        # scripts, so we canonicalize the path by traversing potential
+        # symlinks here.
+        self.path = os.path.realpath(
+            tempfile.mkdtemp(prefix="pip-{}-".format(self.kind))
+        )
+        logger.debug("Created temporary directory: {}".format(self.path))
+
+    def cleanup(self):
+        """Remove the temporary directory created and reset state
+        """
+        if self.path is not None and os.path.exists(self.path):
+            rmtree(self.path)
+        self.path = None
+
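+# A minimal usage sketch of TempDirectory:
+#
+#     with TempDirectory(kind="unpack") as tmp_dir:
+#         ...  # use tmp_dir.path; the directory is deleted on exit
+#
+#     tmp_dir = TempDirectory(delete=False)
+#     tmp_dir.create()   # the caller must call tmp_dir.cleanup() itself
+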
+
+class AdjacentTempDirectory(TempDirectory):
+    """Helper class that creates a temporary directory adjacent to a real one.
+
+    Attributes:
+        original
+            The original directory to create a temp directory for.
+        path
+            After calling create() or entering, contains the full
+            path to the temporary directory.
+        delete
+            Whether the directory should be deleted when exiting
+            (when used as a contextmanager)
+
+    """
+    # The characters that may be used to name the temp directory
+    # We always prepend a ~ and then rotate through these until
+    # a usable name is found.
+    # pkg_resources raises a different error for .dist-info folder
+    # with leading '-' and invalid metadata
+    LEADING_CHARS = "-~.=%0123456789"
+
+    def __init__(self, original, delete=None):
+        super(AdjacentTempDirectory, self).__init__(delete=delete)
+        self.original = original.rstrip('/\\')
+
+    @classmethod
+    def _generate_names(cls, name):
+        """Generates a series of temporary names.
+
+        The algorithm replaces the leading characters in the name
+        with ones that are valid filesystem characters, but are not
+        valid package names (for both Python and pip definitions of
+        package).
+        """
+        for i in range(1, len(name)):
+            for candidate in itertools.combinations_with_replacement(
+                    cls.LEADING_CHARS, i - 1):
+                new_name = '~' + ''.join(candidate) + name[i:]
+                if new_name != name:
+                    yield new_name
+
+        # If we make it this far, we will have to make a longer name
+        for i in range(len(cls.LEADING_CHARS)):
+            for candidate in itertools.combinations_with_replacement(
+                    cls.LEADING_CHARS, i):
+                new_name = '~' + ''.join(candidate) + name
+                if new_name != name:
+                    yield new_name
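+
+    # For instance, _generate_names('package') yields '~ackage' first,
+    # then '~-ckage', '~~ckage', '~.ckage', ... replacing progressively
+    # more leading characters, before falling back to longer names such
+    # as '~package', '~-package', and so on.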
+
+    def create(self):
+        root, name = os.path.split(self.original)
+        for candidate in self._generate_names(name):
+            path = os.path.join(root, candidate)
+            try:
+                os.mkdir(path)
+            except OSError as ex:
+                # Continue if the name exists already
+                if ex.errno != errno.EEXIST:
+                    raise
+            else:
+                self.path = os.path.realpath(path)
+                break
+
+        if not self.path:
+            # Final fallback on the default behavior.
+            self.path = os.path.realpath(
+                tempfile.mkdtemp(prefix="pip-{}-".format(self.kind))
+            )
+        logger.debug("Created temporary directory: {}".format(self.path))
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/typing.py b/lib/python3.7/site-packages/pip/_internal/utils/typing.py
new file mode 100644
index 0000000000000000000000000000000000000000..10170ce296b1099312d0c2a86a5ed248c21745d6
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/typing.py
@@ -0,0 +1,29 @@
+"""For neatly implementing static typing in pip.
+
+`mypy` - the static type analysis tool we use - uses the `typing` module, which
+provides core functionality fundamental to mypy's functioning.
+
+Generally, `typing` would be imported at runtime and used in that fashion -
+it acts as a no-op at runtime and does not have any run-time overhead by
+design.
+
+As it turns out, `typing` is not vendorable - it uses separate sources for
+Python 2/Python 3. Thus, this codebase cannot expect it to be present.
+To work around this, mypy allows the typing import to be placed behind a
+False-y conditional, which prevents it from running at runtime, and
+type-comments can be used so that the types never need to be accessible
+at runtime.
+
+This module provides the False-y guard in a nicely named fashion so that a
+curious maintainer can reach here to read this.
+
+In pip, all static-typing related imports should be guarded as follows:
+
+    from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+    if MYPY_CHECK_RUNNING:
+        from typing import ...
+
+Ref: https://github.com/python/mypy/issues/3216
+"""
+
+MYPY_CHECK_RUNNING = False
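+
+# A minimal sketch of the accompanying type-comment style (the function
+# below is a hypothetical example, not part of pip):
+#
+#     from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+#
+#     if MYPY_CHECK_RUNNING:
+#         from typing import List, Optional
+#
+#     def first(items):
+#         # type: (List[str]) -> Optional[str]
+#         return items[0] if items else None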
diff --git a/lib/python3.7/site-packages/pip/_internal/utils/ui.py b/lib/python3.7/site-packages/pip/_internal/utils/ui.py
new file mode 100644
index 0000000000000000000000000000000000000000..0902ce71e2a3da6ad076f0fdbe9fa193633cdb8d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/utils/ui.py
@@ -0,0 +1,424 @@
+from __future__ import absolute_import, division
+
+import contextlib
+import itertools
+import logging
+import sys
+import time
+from signal import SIGINT, default_int_handler, signal
+
+from pip._vendor import six
+from pip._vendor.progress import HIDE_CURSOR, SHOW_CURSOR
+from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar
+from pip._vendor.progress.spinner import Spinner
+
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.logging import get_indentation
+from pip._internal.utils.misc import format_size
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import Any, Iterator, IO
+
+try:
+    from pip._vendor import colorama
+# Lots of different errors can come from this, including SystemError and
+# ImportError.
+except Exception:
+    colorama = None
+
+logger = logging.getLogger(__name__)
+
+
+def _select_progress_class(preferred, fallback):
+    encoding = getattr(preferred.file, "encoding", None)
+
+    # If we don't know what encoding this file is in, then we'll just assume
+    # that it doesn't support unicode and use the ASCII bar.
+    if not encoding:
+        return fallback
+
+    # Collect all of the possible characters we want to use with the preferred
+    # bar.
+    characters = [
+        getattr(preferred, "empty_fill", six.text_type()),
+        getattr(preferred, "fill", six.text_type()),
+    ]
+    characters += list(getattr(preferred, "phases", []))
+
+    # Try to decode the characters we're using for the bar using the encoding
+    # of the given file, if this works then we'll assume that we can use the
+    # fancier bar and if not we'll fall back to the plaintext bar.
+    try:
+        six.text_type().join(characters).encode(encoding)
+    except UnicodeEncodeError:
+        return fallback
+    else:
+        return preferred
+
+
+_BaseBar = _select_progress_class(IncrementalBar, Bar)  # type: Any
+
+
+class InterruptibleMixin(object):
+    """
+    Helper to ensure that self.finish() gets called on keyboard interrupt.
+
+    This allows downloads to be interrupted without leaving temporary state
+    (like hidden cursors) behind.
+
+    This class is similar to the progress library's existing SigIntMixin
+    helper, but as of version 1.2, that helper has the following problems:
+
+    1. It calls sys.exit().
+    2. It discards the existing SIGINT handler completely.
+    3. It leaves its own handler in place even after an uninterrupted finish,
+       which will have unexpected delayed effects if the user triggers an
+       unrelated keyboard interrupt some time after a progress-displaying
+       download has already completed, for example.
+    """
+
+    def __init__(self, *args, **kwargs):
+        """
+        Save the original SIGINT handler for later.
+        """
+        super(InterruptibleMixin, self).__init__(*args, **kwargs)
+
+        self.original_handler = signal(SIGINT, self.handle_sigint)
+
+        # If signal() returns None, the previous handler was not installed from
+        # Python, and we cannot restore it. This probably should not happen,
+        # but if it does, we must restore something sensible instead, at least.
+        # The least bad option should be Python's default SIGINT handler, which
+        # just raises KeyboardInterrupt.
+        if self.original_handler is None:
+            self.original_handler = default_int_handler
+
+    def finish(self):
+        """
+        Restore the original SIGINT handler after finishing.
+
+        This should happen regardless of whether the progress display finishes
+        normally, or gets interrupted.
+        """
+        super(InterruptibleMixin, self).finish()
+        signal(SIGINT, self.original_handler)
+
+    def handle_sigint(self, signum, frame):
+        """
+        Call self.finish() before delegating to the original SIGINT handler.
+
+        This handler should only be in place while the progress display is
+        active.
+        """
+        self.finish()
+        self.original_handler(signum, frame)
+
+
+class SilentBar(Bar):
+
+    def update(self):
+        pass
+
+
+class BlueEmojiBar(IncrementalBar):
+
+    suffix = "%(percent)d%%"
+    bar_prefix = " "
+    bar_suffix = " "
+    phases = (u"\U0001F539", u"\U0001F537", u"\U0001F535")  # type: Any
+
+
+class DownloadProgressMixin(object):
+
+    def __init__(self, *args, **kwargs):
+        super(DownloadProgressMixin, self).__init__(*args, **kwargs)
+        self.message = (" " * (get_indentation() + 2)) + self.message
+
+    @property
+    def downloaded(self):
+        return format_size(self.index)
+
+    @property
+    def download_speed(self):
+        # Avoid zero division errors...
+        if self.avg == 0.0:
+            return "..."
+        return format_size(1 / self.avg) + "/s"
+
+    @property
+    def pretty_eta(self):
+        if self.eta:
+            return "eta %s" % self.eta_td
+        return ""
+
+    def iter(self, it, n=1):
+        for x in it:
+            yield x
+            self.next(n)
+        self.finish()
+
+
+class WindowsMixin(object):
+
+    def __init__(self, *args, **kwargs):
+        # The Windows terminal does not support the hide/show cursor ANSI codes
+        # even with colorama. So we'll ensure that hide_cursor is False on
+        # Windows.
+        # This call needs to go before the super() call, so that hide_cursor
+        # is set in time. The base progress bar class writes the "hide cursor"
+        # code to the terminal in its init, so if we don't set this soon
+        # enough, we get a "hide" with no corresponding "show"...
+        if WINDOWS and self.hide_cursor:
+            self.hide_cursor = False
+
+        super(WindowsMixin, self).__init__(*args, **kwargs)
+
+        # Check if we are running on Windows and we have the colorama module,
+        # if we do then wrap our file with it.
+        if WINDOWS and colorama:
+            self.file = colorama.AnsiToWin32(self.file)
+            # The progress code expects to be able to call self.file.isatty()
+            # but the colorama.AnsiToWin32() object doesn't have that, so we'll
+            # add it.
+            self.file.isatty = lambda: self.file.wrapped.isatty()
+            # The progress code expects to be able to call self.file.flush()
+            # but the colorama.AnsiToWin32() object doesn't have that, so we'll
+            # add it.
+            self.file.flush = lambda: self.file.wrapped.flush()
+
+
+class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin,
+                              DownloadProgressMixin):
+
+    file = sys.stdout
+    message = "%(percent)d%%"
+    suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
+
+# NOTE: The "type: ignore" comments on the following classes are there to
+#       work around https://github.com/python/typing/issues/241
+
+
+class DefaultDownloadProgressBar(BaseDownloadProgressBar,
+                                 _BaseBar):
+    pass
+
+
+class DownloadSilentBar(BaseDownloadProgressBar, SilentBar):  # type: ignore
+    pass
+
+
+class DownloadIncrementalBar(BaseDownloadProgressBar,  # type: ignore
+                             IncrementalBar):
+    pass
+
+
+class DownloadFillingCirclesBar(BaseDownloadProgressBar,  # type: ignore
+                                FillingCirclesBar):
+    pass
+
+
+class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar,  # type: ignore
+                                   BlueEmojiBar):
+    pass
+
+
+class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin,
+                              DownloadProgressMixin, Spinner):
+
+    file = sys.stdout
+    suffix = "%(downloaded)s %(download_speed)s"
+
+    def next_phase(self):
+        if not hasattr(self, "_phaser"):
+            self._phaser = itertools.cycle(self.phases)
+        return next(self._phaser)
+
+    def update(self):
+        message = self.message % self
+        phase = self.next_phase()
+        suffix = self.suffix % self
+        line = ''.join([
+            message,
+            " " if message else "",
+            phase,
+            " " if suffix else "",
+            suffix,
+        ])
+
+        self.writeln(line)
+
+
+BAR_TYPES = {
+    "off": (DownloadSilentBar, DownloadSilentBar),
+    "on": (DefaultDownloadProgressBar, DownloadProgressSpinner),
+    "ascii": (DownloadIncrementalBar, DownloadProgressSpinner),
+    "pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner),
+    "emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner)
+}
+
+
+def DownloadProgressProvider(progress_bar, max=None):
+    if max is None or max == 0:
+        return BAR_TYPES[progress_bar][1]().iter
+    else:
+        return BAR_TYPES[progress_bar][0](max=max).iter
+
+
+################################################################
+# Generic "something is happening" spinners
+#
+# We don't even try using progress.spinner.Spinner here because it's actually
+# simpler to reimplement from scratch than to coerce their code into doing
+# what we need.
+################################################################
+
+@contextlib.contextmanager
+def hidden_cursor(file):
+    # type: (IO) -> Iterator[None]
+    # The Windows terminal does not support the hide/show cursor ANSI codes,
+    # even via colorama. So don't even try.
+    if WINDOWS:
+        yield
+    # We don't want to clutter the output with control characters if we're
+    # writing to a file, or if the user is running with --quiet.
+    # See https://github.com/pypa/pip/issues/3418
+    elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO:
+        yield
+    else:
+        file.write(HIDE_CURSOR)
+        try:
+            yield
+        finally:
+            file.write(SHOW_CURSOR)
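+
+# A minimal usage sketch (render_progress is a hypothetical callable):
+#
+#     with hidden_cursor(sys.stdout):
+#         render_progress()   # cursor stays hidden while this runs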
+
+
+class RateLimiter(object):
+    def __init__(self, min_update_interval_seconds):
+        # type: (float) -> None
+        self._min_update_interval_seconds = min_update_interval_seconds
+        self._last_update = 0  # type: float
+
+    def ready(self):
+        # type: () -> bool
+        now = time.time()
+        delta = now - self._last_update
+        return delta >= self._min_update_interval_seconds
+
+    def reset(self):
+        # type: () -> None
+        self._last_update = time.time()
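+
+# A minimal usage sketch (do_update is a hypothetical callable):
+#
+#     limiter = RateLimiter(min_update_interval_seconds=0.125)
+#     if limiter.ready():   # enough time since the last reset()?
+#         do_update()
+#         limiter.reset()   # record when this update happened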
+
+
+class SpinnerInterface(object):
+    def spin(self):
+        # type: () -> None
+        raise NotImplementedError()
+
+    def finish(self, final_status):
+        # type: (str) -> None
+        raise NotImplementedError()
+
+
+class InteractiveSpinner(SpinnerInterface):
+    def __init__(self, message, file=None, spin_chars="-\\|/",
+                 # Empirically, 8 updates/second looks nice
+                 min_update_interval_seconds=0.125):
+        self._message = message
+        if file is None:
+            file = sys.stdout
+        self._file = file
+        self._rate_limiter = RateLimiter(min_update_interval_seconds)
+        self._finished = False
+
+        self._spin_cycle = itertools.cycle(spin_chars)
+
+        self._file.write(" " * get_indentation() + self._message + " ... ")
+        self._width = 0
+
+    def _write(self, status):
+        assert not self._finished
+        # Erase what we wrote before by backspacing to the beginning, writing
+        # spaces to overwrite the old text, and then backspacing again
+        backup = "\b" * self._width
+        self._file.write(backup + " " * self._width + backup)
+        # Now we have a blank slate to add our status
+        self._file.write(status)
+        self._width = len(status)
+        self._file.flush()
+        self._rate_limiter.reset()
+
+    def spin(self):
+        # type: () -> None
+        if self._finished:
+            return
+        if not self._rate_limiter.ready():
+            return
+        self._write(next(self._spin_cycle))
+
+    def finish(self, final_status):
+        # type: (str) -> None
+        if self._finished:
+            return
+        self._write(final_status)
+        self._file.write("\n")
+        self._file.flush()
+        self._finished = True
+
+
+# Used for dumb terminals, non-interactive installs (no tty), etc.
+# We still print updates occasionally (once every 60 seconds by default) to
+# act as a keep-alive for systems like Travis-CI that take lack-of-output as
+# an indication that a task has frozen.
+class NonInteractiveSpinner(SpinnerInterface):
+    def __init__(self, message, min_update_interval_seconds=60):
+        # type: (str, float) -> None
+        self._message = message
+        self._finished = False
+        self._rate_limiter = RateLimiter(min_update_interval_seconds)
+        self._update("started")
+
+    def _update(self, status):
+        assert not self._finished
+        self._rate_limiter.reset()
+        logger.info("%s: %s", self._message, status)
+
+    def spin(self):
+        # type: () -> None
+        if self._finished:
+            return
+        if not self._rate_limiter.ready():
+            return
+        self._update("still running...")
+
+    def finish(self, final_status):
+        # type: (str) -> None
+        if self._finished:
+            return
+        self._update("finished with status '%s'" % (final_status,))
+        self._finished = True
+
+
+@contextlib.contextmanager
+def open_spinner(message):
+    # type: (str) -> Iterator[SpinnerInterface]
+    # Interactive spinner goes directly to sys.stdout rather than being routed
+    # through the logging system, but it acts like it has level INFO,
+    # i.e. it's only displayed if we're at level INFO or better.
+    # Non-interactive spinner goes through the logging system, so it is always
+    # in sync with logging configuration.
+    if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
+        spinner = InteractiveSpinner(message)  # type: SpinnerInterface
+    else:
+        spinner = NonInteractiveSpinner(message)
+    try:
+        with hidden_cursor(sys.stdout):
+            yield spinner
+    except KeyboardInterrupt:
+        spinner.finish("canceled")
+        raise
+    except Exception:
+        spinner.finish("error")
+        raise
+    else:
+        spinner.finish("done")
diff --git a/lib/python3.7/site-packages/pip/_internal/vcs/__init__.py b/lib/python3.7/site-packages/pip/_internal/vcs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fca079300866e0ede0d7eb18c46912e3c5921c0
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/vcs/__init__.py
@@ -0,0 +1,604 @@
+"""Handles all VCS (version control) support"""
+from __future__ import absolute_import
+
+import errno
+import logging
+import os
+import shutil
+import sys
+
+from pip._vendor import pkg_resources
+from pip._vendor.six.moves.urllib import parse as urllib_parse
+
+from pip._internal.exceptions import BadCommand
+from pip._internal.utils.misc import (
+    ask_path_exists, backup_dir, call_subprocess, display_path, rmtree,
+)
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+
+if MYPY_CHECK_RUNNING:
+    from typing import (
+        Any, Dict, Iterable, List, Mapping, Optional, Text, Tuple, Type
+    )
+    from pip._internal.utils.ui import SpinnerInterface
+
+    AuthInfo = Tuple[Optional[str], Optional[str]]
+
+__all__ = ['vcs']
+
+
+logger = logging.getLogger(__name__)
+
+
+def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None):
+    """
+    Return the URL for a VCS requirement.
+
+    Args:
+      repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+").
+      project_name: the (unescaped) project name.
+    """
+    egg_project_name = pkg_resources.to_filename(project_name)
+    req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name)
+    if subdir:
+        req += '&subdirectory={}'.format(subdir)
+
+    return req
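+
+# A minimal usage sketch (URL, revision and names are illustrative):
+#
+#     make_vcs_requirement_url(
+#         'git+https://example.com/proj.git', 'abc123', 'proj-name')
+#     # -> 'git+https://example.com/proj.git@abc123#egg=proj_name'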
+
+
+class RemoteNotFoundError(Exception):
+    pass
+
+
+class RevOptions(object):
+
+    """
+    Encapsulates a VCS-specific revision to install, along with any VCS
+    install options.
+
+    Instances of this class should be treated as if immutable.
+    """
+
+    def __init__(
+        self,
+        vc_class,  # type: Type[VersionControl]
+        rev=None,  # type: Optional[str]
+        extra_args=None,  # type: Optional[List[str]]
+    ):
+        # type: (...) -> None
+        """
+        Args:
+          vc_class: a VersionControl subclass.
+          rev: the name of the revision to install.
+          extra_args: a list of extra options.
+        """
+        if extra_args is None:
+            extra_args = []
+
+        self.extra_args = extra_args
+        self.rev = rev
+        self.vc_class = vc_class
+
+    def __repr__(self):
+        return '<RevOptions {}: rev={!r}>'.format(self.vc_class.name, self.rev)
+
+    @property
+    def arg_rev(self):
+        # type: () -> Optional[str]
+        if self.rev is None:
+            return self.vc_class.default_arg_rev
+
+        return self.rev
+
+    def to_args(self):
+        # type: () -> List[str]
+        """
+        Return the VCS-specific command arguments.
+        """
+        args = []  # type: List[str]
+        rev = self.arg_rev
+        if rev is not None:
+            args += self.vc_class.get_base_rev_args(rev)
+        args += self.extra_args
+
+        return args
+
+    def to_display(self):
+        # type: () -> str
+        if not self.rev:
+            return ''
+
+        return ' (to revision {})'.format(self.rev)
+
+    def make_new(self, rev):
+        # type: (str) -> RevOptions
+        """
+        Make a copy of the current instance, but with a new rev.
+
+        Args:
+          rev: the name of the revision for the new object.
+        """
+        return self.vc_class.make_rev_options(rev, extra_args=self.extra_args)
+
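+# A minimal usage sketch, using the Bazaar backend defined in
+# pip._internal.vcs.bazaar (its get_base_rev_args() returns ['-r', rev]):
+#
+#     rev_options = Bazaar.make_rev_options('123')
+#     rev_options.to_args()      # -> ['-r', '123']
+#     rev_options.to_display()   # -> ' (to revision 123)'
+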
+
+class VcsSupport(object):
+    _registry = {}  # type: Dict[str, Type[VersionControl]]
+    schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn']
+
+    def __init__(self):
+        # type: () -> None
+        # Register more schemes with urlparse for various version control
+        # systems
+        urllib_parse.uses_netloc.extend(self.schemes)
+        # Python >= 2.7.4 and >= 3.3 no longer provide uses_fragment
+        if getattr(urllib_parse, 'uses_fragment', None):
+            urllib_parse.uses_fragment.extend(self.schemes)
+        super(VcsSupport, self).__init__()
+
+    def __iter__(self):
+        return self._registry.__iter__()
+
+    @property
+    def backends(self):
+        # type: () -> List[Type[VersionControl]]
+        return list(self._registry.values())
+
+    @property
+    def dirnames(self):
+        # type: () -> List[str]
+        return [backend.dirname for backend in self.backends]
+
+    @property
+    def all_schemes(self):
+        # type: () -> List[str]
+        schemes = []  # type: List[str]
+        for backend in self.backends:
+            schemes.extend(backend.schemes)
+        return schemes
+
+    def register(self, cls):
+        # type: (Type[VersionControl]) -> None
+        if not hasattr(cls, 'name'):
+            logger.warning('Cannot register VCS %s', cls.__name__)
+            return
+        if cls.name not in self._registry:
+            self._registry[cls.name] = cls
+            logger.debug('Registered VCS backend: %s', cls.name)
+
+    def unregister(self, cls=None, name=None):
+        # type: (Optional[Type[VersionControl]], Optional[str]) -> None
+        if name in self._registry:
+            del self._registry[name]
+        elif cls in self._registry.values():
+            del self._registry[cls.name]
+        else:
+            logger.warning('Cannot unregister because no class or name given')
+
+    def get_backend_type(self, location):
+        # type: (str) -> Optional[Type[VersionControl]]
+        """
+        Return the type of the version control backend if found at given
+        location, e.g. vcs.get_backend_type('/path/to/vcs/checkout')
+        """
+        for vc_type in self._registry.values():
+            if vc_type.controls_location(location):
+                logger.debug('Determined that %s uses VCS: %s',
+                             location, vc_type.name)
+                return vc_type
+        return None
+
+    def get_backend(self, name):
+        # type: (str) -> Optional[Type[VersionControl]]
+        name = name.lower()
+        if name in self._registry:
+            return self._registry[name]
+        return None
+
+
+vcs = VcsSupport()
+
+
+class VersionControl(object):
+    name = ''
+    dirname = ''
+    repo_name = ''
+    # List of supported schemes for this Version Control
+    schemes = ()  # type: Tuple[str, ...]
+    # Iterable of environment variable names to pass to call_subprocess().
+    unset_environ = ()  # type: Tuple[str, ...]
+    default_arg_rev = None  # type: Optional[str]
+
+    @classmethod
+    def should_add_vcs_url_prefix(cls, remote_url):
+        """
+        Return whether the vcs prefix (e.g. "git+") should be added to a
+        repository's remote url when used in a requirement.
+        """
+        return not remote_url.lower().startswith('{}:'.format(cls.name))
+
+    @classmethod
+    def get_subdirectory(cls, repo_dir):
+        """
+        Return the path to setup.py, relative to the repo root.
+        """
+        return None
+
+    @classmethod
+    def get_requirement_revision(cls, repo_dir):
+        """
+        Return the revision string that should be used in a requirement.
+        """
+        return cls.get_revision(repo_dir)
+
+    @classmethod
+    def get_src_requirement(cls, repo_dir, project_name):
+        """
+        Return the requirement string to use to redownload the files
+        currently at the given repository directory.
+
+        Args:
+          project_name: the (unescaped) project name.
+
+        The return value has a form similar to the following:
+
+            {repository_url}@{revision}#egg={project_name}
+        """
+        repo_url = cls.get_remote_url(repo_dir)
+        if repo_url is None:
+            return None
+
+        if cls.should_add_vcs_url_prefix(repo_url):
+            repo_url = '{}+{}'.format(cls.name, repo_url)
+
+        revision = cls.get_requirement_revision(repo_dir)
+        subdir = cls.get_subdirectory(repo_dir)
+        req = make_vcs_requirement_url(repo_url, revision, project_name,
+                                       subdir=subdir)
+
+        return req
+
+    def __init__(self, url=None, *args, **kwargs):
+        self.url = url
+        super(VersionControl, self).__init__(*args, **kwargs)
+
+    @staticmethod
+    def get_base_rev_args(rev):
+        """
+        Return the base revision arguments for a vcs command.
+
+        Args:
+          rev: the name of a revision to install.  Cannot be None.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def make_rev_options(cls, rev=None, extra_args=None):
+        # type: (Optional[str], Optional[List[str]]) -> RevOptions
+        """
+        Return a RevOptions object.
+
+        Args:
+          rev: the name of a revision to install.
+          extra_args: a list of extra options.
+        """
+        return RevOptions(cls, rev, extra_args=extra_args)
+
+    @classmethod
+    def _is_local_repository(cls, repo):
+        # type: (str) -> bool
+        """
+           posix absolute paths start with os.path.sep,
+           win32 ones start with drive (like c:\\folder)
+        """
+        drive, tail = os.path.splitdrive(repo)
+        return repo.startswith(os.path.sep) or bool(drive)
+
+    def export(self, location):
+        """
+        Export the repository at the url to the destination location,
+        i.e. only download the files, without vcs information
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def get_netloc_and_auth(cls, netloc, scheme):
+        """
+        Parse the repository URL's netloc, and return the new netloc to use
+        along with auth information.
+
+        Args:
+          netloc: the original repository URL netloc.
+          scheme: the repository URL's scheme without the vcs prefix.
+
+        This is mainly for the Subversion class to override, so that auth
+        information can be provided via the --username and --password options
+        instead of through the URL.  For other subclasses like Git without
+        such an option, auth information must stay in the URL.
+
+        Returns: (netloc, (username, password)).
+        """
+        return netloc, (None, None)
+
+    @classmethod
+    def get_url_rev_and_auth(cls, url):
+        # type: (str) -> Tuple[str, Optional[str], AuthInfo]
+        """
+        Parse the repository URL to use, and return the URL, revision,
+        and auth info to use.
+
+        Returns: (url, rev, (username, password)).
+        """
+        scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
+        if '+' not in scheme:
+            raise ValueError(
+                "Sorry, {!r} is a malformed VCS url. "
+                "The format is <vcs>+<protocol>://<url>, "
+                "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url)
+            )
+        # Remove the vcs prefix.
+        scheme = scheme.split('+', 1)[1]
+        netloc, user_pass = cls.get_netloc_and_auth(netloc, scheme)
+        rev = None
+        if '@' in path:
+            path, rev = path.rsplit('@', 1)
+        url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
+        return url, rev, user_pass
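+
+    # For example (the URL is illustrative):
+    #
+    #     VersionControl.get_url_rev_and_auth(
+    #         'git+https://example.com/repo.git@v1.0')
+    #     # -> ('https://example.com/repo.git', 'v1.0', (None, None))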
+
+    @staticmethod
+    def make_rev_args(username, password):
+        """
+        Return the RevOptions "extra arguments" to use in obtain().
+        """
+        return []
+
+    def get_url_rev_options(self, url):
+        # type: (str) -> Tuple[str, RevOptions]
+        """
+        Return the URL and RevOptions object to use in obtain() and in
+        some cases export(), as a tuple (url, rev_options).
+        """
+        url, rev, user_pass = self.get_url_rev_and_auth(url)
+        username, password = user_pass
+        extra_args = self.make_rev_args(username, password)
+        rev_options = self.make_rev_options(rev, extra_args=extra_args)
+
+        return url, rev_options
+
+    @staticmethod
+    def normalize_url(url):
+        # type: (str) -> str
+        """
+        Normalize a URL for comparison by unquoting it and removing any
+        trailing slash.
+        """
+        return urllib_parse.unquote(url).rstrip('/')
+
+    @classmethod
+    def compare_urls(cls, url1, url2):
+        # type: (str, str) -> bool
+        """
+        Compare two repo URLs for identity, ignoring incidental differences.
+        """
+        return (cls.normalize_url(url1) == cls.normalize_url(url2))
+
+    @classmethod
+    def fetch_new(cls, dest, url, rev_options):
+        """
+        Fetch a revision from a repository, in the case that this is the
+        first fetch from the repository.
+
+        Args:
+          dest: the directory to fetch the repository to.
+          rev_options: a RevOptions object.
+        """
+        raise NotImplementedError
+
+    def switch(self, dest, url, rev_options):
+        """
+        Switch the repo at ``dest`` to point to ``url``.
+
+        Args:
+          rev_options: a RevOptions object.
+        """
+        raise NotImplementedError
+
+    def update(self, dest, url, rev_options):
+        """
+        Update an already-existing repo to the given ``rev_options``.
+
+        Args:
+          rev_options: a RevOptions object.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def is_commit_id_equal(cls, dest, name):
+        """
+        Return whether the id of the current commit equals the given name.
+
+        Args:
+          dest: the repository directory.
+          name: a string name.
+        """
+        raise NotImplementedError
+
+    def obtain(self, dest):
+        # type: (str) -> None
+        """
+        Install or update in editable mode the package represented by this
+        VersionControl object.
+
+        Args:
+          dest: the repository directory in which to install or update.
+        """
+        url, rev_options = self.get_url_rev_options(self.url)
+
+        if not os.path.exists(dest):
+            self.fetch_new(dest, url, rev_options)
+            return
+
+        rev_display = rev_options.to_display()
+        if self.is_repository_directory(dest):
+            existing_url = self.get_remote_url(dest)
+            if self.compare_urls(existing_url, url):
+                logger.debug(
+                    '%s in %s exists, and has correct URL (%s)',
+                    self.repo_name.title(),
+                    display_path(dest),
+                    url,
+                )
+                if not self.is_commit_id_equal(dest, rev_options.rev):
+                    logger.info(
+                        'Updating %s %s%s',
+                        display_path(dest),
+                        self.repo_name,
+                        rev_display,
+                    )
+                    self.update(dest, url, rev_options)
+                else:
+                    logger.info('Skipping because already up-to-date.')
+                return
+
+            logger.warning(
+                '%s %s in %s exists with URL %s',
+                self.name,
+                self.repo_name,
+                display_path(dest),
+                existing_url,
+            )
+            prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ',
+                      ('s', 'i', 'w', 'b'))
+        else:
+            logger.warning(
+                'Directory %s already exists, and is not a %s %s.',
+                dest,
+                self.name,
+                self.repo_name,
+            )
+            # https://github.com/python/mypy/issues/1174
+            prompt = ('(i)gnore, (w)ipe, (b)ackup ',  # type: ignore
+                      ('i', 'w', 'b'))
+
+        logger.warning(
+            'The plan is to install the %s repository %s',
+            self.name,
+            url,
+        )
+        response = ask_path_exists('What to do?  %s' % prompt[0], prompt[1])
+
+        if response == 'a':
+            sys.exit(-1)
+
+        if response == 'w':
+            logger.warning('Deleting %s', display_path(dest))
+            rmtree(dest)
+            self.fetch_new(dest, url, rev_options)
+            return
+
+        if response == 'b':
+            dest_dir = backup_dir(dest)
+            logger.warning(
+                'Backing up %s to %s', display_path(dest), dest_dir,
+            )
+            shutil.move(dest, dest_dir)
+            self.fetch_new(dest, url, rev_options)
+            return
+
+        # Do nothing if the response is "i".
+        if response == 's':
+            logger.info(
+                'Switching %s %s to %s%s',
+                self.repo_name,
+                display_path(dest),
+                url,
+                rev_display,
+            )
+            self.switch(dest, url, rev_options)
+
+    def unpack(self, location):
+        # type: (str) -> None
+        """
+        Clean up the current location and download the repository at
+        self.url (including vcs info) into location
+        """
+        if os.path.exists(location):
+            rmtree(location)
+        self.obtain(location)
+
+    @classmethod
+    def get_remote_url(cls, location):
+        """
+        Return the url used at location
+
+        Raises RemoteNotFoundError if the repository does not have a remote
+        url configured.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def get_revision(cls, location):
+        """
+        Return the current commit id of the files at the given location.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def run_command(
+        cls,
+        cmd,  # type: List[str]
+        show_stdout=True,  # type: bool
+        cwd=None,  # type: Optional[str]
+        on_returncode='raise',  # type: str
+        extra_ok_returncodes=None,  # type: Optional[Iterable[int]]
+        command_desc=None,  # type: Optional[str]
+        extra_environ=None,  # type: Optional[Mapping[str, Any]]
+        spinner=None  # type: Optional[SpinnerInterface]
+    ):
+        # type: (...) -> Optional[Text]
+        """
+        Run a VCS subcommand.
+
+        This is simply a wrapper around call_subprocess that adds the VCS
+        command name and checks that the VCS is available.
+        """
+        cmd = [cls.name] + cmd
+        try:
+            return call_subprocess(cmd, show_stdout, cwd,
+                                   on_returncode=on_returncode,
+                                   extra_ok_returncodes=extra_ok_returncodes,
+                                   command_desc=command_desc,
+                                   extra_environ=extra_environ,
+                                   unset_environ=cls.unset_environ,
+                                   spinner=spinner)
+        except OSError as e:
+            # errno.ENOENT = no such file or directory
+            # In other words, the VCS executable isn't available
+            if e.errno == errno.ENOENT:
+                raise BadCommand(
+                    'Cannot find command %r - do you have '
+                    '%r installed and in your '
+                    'PATH?' % (cls.name, cls.name))
+            else:
+                raise  # re-raise exception if a different error occurred
+
+    @classmethod
+    def is_repository_directory(cls, path):
+        # type: (str) -> bool
+        """
+        Return whether a directory path is a repository directory.
+        """
+        logger.debug('Checking in %s for %s (%s)...',
+                     path, cls.dirname, cls.name)
+        return os.path.exists(os.path.join(path, cls.dirname))
+
+    @classmethod
+    def controls_location(cls, location):
+        # type: (str) -> bool
+        """
+        Check if a location is controlled by the vcs.
+        It is meant to be overridden to implement smarter detection
+        mechanisms for specific vcs.
+
+        This can do more than is_repository_directory() alone.  For example,
+        the Git override checks that Git is actually available.
+        """
+        return cls.is_repository_directory(location)
diff --git a/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..02a4ad9053e6f3dbbd0b73ec48916b8ae7852273
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..688679404736ca9639d81e30a1dec84428e00e6f
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/bazaar.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/git.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/git.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2be49287f6979c90ba9cb6099ce4e90f0ad6a47a
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/git.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..738317f3d976da039b71957b366115fbb942d3f9
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/mercurial.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-37.pyc b/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eda141321da564843cc75224944d86ea1ee67266
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_internal/vcs/__pycache__/subversion.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_internal/vcs/bazaar.py b/lib/python3.7/site-packages/pip/_internal/vcs/bazaar.py
new file mode 100644
index 0000000000000000000000000000000000000000..3587b3deb69974237eeecb2bc0ad584dbc41e84f
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/vcs/bazaar.py
@@ -0,0 +1,103 @@
+from __future__ import absolute_import
+
+import logging
+import os
+
+from pip._vendor.six.moves.urllib import parse as urllib_parse
+
+from pip._internal.download import path_to_url
+from pip._internal.utils.misc import display_path, rmtree
+from pip._internal.vcs import VersionControl, vcs
+
+logger = logging.getLogger(__name__)
+
+
+class Bazaar(VersionControl):
+    name = 'bzr'
+    dirname = '.bzr'
+    repo_name = 'branch'
+    schemes = (
+        'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp',
+        'bzr+lp',
+    )
+
+    def __init__(self, url=None, *args, **kwargs):
+        super(Bazaar, self).__init__(url, *args, **kwargs)
+        # This is only needed for python <2.7.5
+        # Register lp but do not expose as a scheme to support bzr+lp.
+        if getattr(urllib_parse, 'uses_fragment', None):
+            urllib_parse.uses_fragment.extend(['lp'])
+
+    @staticmethod
+    def get_base_rev_args(rev):
+        return ['-r', rev]
+
+    def export(self, location):
+        """
+        Export the Bazaar repository at the url to the destination location
+        """
+        # Remove the location to make sure Bazaar can export it correctly
+        if os.path.exists(location):
+            rmtree(location)
+
+        url, rev_options = self.get_url_rev_options(self.url)
+        self.run_command(
+            ['export', location, url] + rev_options.to_args(),
+            show_stdout=False,
+        )
+
+    @classmethod
+    def fetch_new(cls, dest, url, rev_options):
+        rev_display = rev_options.to_display()
+        logger.info(
+            'Checking out %s%s to %s',
+            url,
+            rev_display,
+            display_path(dest),
+        )
+        cmd_args = ['branch', '-q'] + rev_options.to_args() + [url, dest]
+        cls.run_command(cmd_args)
+
+    def switch(self, dest, url, rev_options):
+        self.run_command(['switch', url], cwd=dest)
+
+    def update(self, dest, url, rev_options):
+        cmd_args = ['pull', '-q'] + rev_options.to_args()
+        self.run_command(cmd_args, cwd=dest)
+
+    @classmethod
+    def get_url_rev_and_auth(cls, url):
+        # hotfix the URL scheme: the superclass strips 'bzr+' from
+        # 'bzr+ssh://' URLs, so re-add it here
+        url, rev, user_pass = super(Bazaar, cls).get_url_rev_and_auth(url)
+        if url.startswith('ssh://'):
+            url = 'bzr+' + url
+        return url, rev, user_pass
+
+    @classmethod
+    def get_remote_url(cls, location):
+        urls = cls.run_command(['info'], show_stdout=False, cwd=location)
+        for line in urls.splitlines():
+            line = line.strip()
+            for x in ('checkout of branch: ',
+                      'parent branch: '):
+                if line.startswith(x):
+                    repo = line.split(x)[1]
+                    if cls._is_local_repository(repo):
+                        return path_to_url(repo)
+                    return repo
+        return None
+
+    @classmethod
+    def get_revision(cls, location):
+        revision = cls.run_command(
+            ['revno'], show_stdout=False, cwd=location,
+        )
+        return revision.splitlines()[-1]
+
+    @classmethod
+    def is_commit_id_equal(cls, dest, name):
+        """Always assume the versions don't match"""
+        return False
+
+
+vcs.register(Bazaar)
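+
+# Registration sketch (illustration only): once registered, the backend can
+# be looked up through the shared registry, e.g. something like
+# ``vcs.get_backend('bzr')`` in pip's internals.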
diff --git a/lib/python3.7/site-packages/pip/_internal/vcs/git.py b/lib/python3.7/site-packages/pip/_internal/vcs/git.py
new file mode 100644
index 0000000000000000000000000000000000000000..35ea9301944a756f125e7fbc6857c53e28d6f0bd
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/vcs/git.py
@@ -0,0 +1,362 @@
+from __future__ import absolute_import
+
+import logging
+import os.path
+import re
+
+from pip._vendor.packaging.version import parse as parse_version
+from pip._vendor.six.moves.urllib import parse as urllib_parse
+from pip._vendor.six.moves.urllib import request as urllib_request
+
+from pip._internal.exceptions import BadCommand
+from pip._internal.utils.compat import samefile
+from pip._internal.utils.misc import display_path, redact_password_from_url
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.vcs import RemoteNotFoundError, VersionControl, vcs
+
+urlsplit = urllib_parse.urlsplit
+urlunsplit = urllib_parse.urlunsplit
+
+
+logger = logging.getLogger(__name__)
+
+
+HASH_REGEX = re.compile('[a-fA-F0-9]{40}')
+
+
+def looks_like_hash(sha):
+    return bool(HASH_REGEX.match(sha))
+
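+# A minimal illustration (not pip code): HASH_REGEX accepts a full
+# 40-character hex digest, so looks_like_hash('1f2e3d4c' * 5) is True while
+# looks_like_hash('v1.0') is False.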
+
+class Git(VersionControl):
+    name = 'git'
+    dirname = '.git'
+    repo_name = 'clone'
+    schemes = (
+        'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
+    )
+    # Prevent the user's environment variables from interfering with pip:
+    # https://github.com/pypa/pip/issues/1130
+    unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')
+    default_arg_rev = 'HEAD'
+
+    def __init__(self, url=None, *args, **kwargs):
+
+        # Works around an apparent Git bug
+        # (see https://article.gmane.org/gmane.comp.version-control.git/146500)
+        if url:
+            scheme, netloc, path, query, fragment = urlsplit(url)
+            if scheme.endswith('file'):
+                initial_slashes = path[:-len(path.lstrip('/'))]
+                newpath = (
+                    initial_slashes +
+                    urllib_request.url2pathname(path)
+                    .replace('\\', '/').lstrip('/')
+                )
+                url = urlunsplit((scheme, netloc, newpath, query, fragment))
+                after_plus = scheme.find('+') + 1
+                url = scheme[:after_plus] + urlunsplit(
+                    (scheme[after_plus:], netloc, newpath, query, fragment),
+                )
+
+        super(Git, self).__init__(url, *args, **kwargs)
+
+    @staticmethod
+    def get_base_rev_args(rev):
+        return [rev]
+
+    def get_git_version(self):
+        VERSION_PFX = 'git version '
+        version = self.run_command(['version'], show_stdout=False)
+        if version.startswith(VERSION_PFX):
+            version = version[len(VERSION_PFX):].split()[0]
+        else:
+            version = ''
+        # Keep only the first 3 components of the git version because on
+        # Windows it is x.y.z.windows.t, which parses as a LegacyVersion
+        # that always compares smaller than a Version.
+        version = '.'.join(version.split('.')[:3])
+        return parse_version(version)
+
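+    # For example (hedged sketch, assumed output): on Windows 'git version
+    # 2.20.1.windows.1' yields '2.20.1.windows.1', which is trimmed to
+    # '2.20.1' so parse_version() returns a proper Version.
+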
+    @classmethod
+    def get_current_branch(cls, location):
+        """
+        Return the current branch, or None if HEAD isn't at a branch
+        (e.g. detached HEAD).
+        """
+        # git-symbolic-ref exits with empty stdout if "HEAD" is a detached
+        # HEAD rather than a symbolic ref.  In addition, the -q causes the
+        # command to exit with status code 1 instead of 128 in this case
+        # and to suppress the message to stderr.
+        args = ['symbolic-ref', '-q', 'HEAD']
+        output = cls.run_command(
+            args, extra_ok_returncodes=(1, ), show_stdout=False, cwd=location,
+        )
+        ref = output.strip()
+
+        if ref.startswith('refs/heads/'):
+            return ref[len('refs/heads/'):]
+
+        return None
+
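+    # Sketch of the expected mapping (assumed git output):
+    #   'refs/heads/master'   -> 'master'
+    #   '' (detached HEAD)    -> None
+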
+    def export(self, location):
+        """Export the Git repository at the url to the destination location"""
+        if not location.endswith('/'):
+            location = location + '/'
+
+        with TempDirectory(kind="export") as temp_dir:
+            self.unpack(temp_dir.path)
+            self.run_command(
+                ['checkout-index', '-a', '-f', '--prefix', location],
+                show_stdout=False, cwd=temp_dir.path
+            )
+
+    @classmethod
+    def get_revision_sha(cls, dest, rev):
+        """
+        Return (sha_or_none, is_branch), where sha_or_none is a commit hash
+        if the revision names a remote branch or tag, otherwise None.
+
+        Args:
+          dest: the repository directory.
+          rev: the revision name.
+        """
+        # Pass rev to pre-filter the list.
+        output = cls.run_command(['show-ref', rev], cwd=dest,
+                                 show_stdout=False, on_returncode='ignore')
+        refs = {}
+        for line in output.strip().splitlines():
+            try:
+                sha, ref = line.split()
+            except ValueError:
+                # Include the offending line to simplify troubleshooting if
+                # this error ever occurs.
+                raise ValueError('unexpected show-ref line: {!r}'.format(line))
+
+            refs[ref] = sha
+
+        branch_ref = 'refs/remotes/origin/{}'.format(rev)
+        tag_ref = 'refs/tags/{}'.format(rev)
+
+        sha = refs.get(branch_ref)
+        if sha is not None:
+            return (sha, True)
+
+        sha = refs.get(tag_ref)
+
+        return (sha, False)
+
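+    # Sketch (made-up output): for rev 'v1.0' a show-ref line
+    #   '<sha> refs/tags/v1.0'          -> (sha, False)   # tag
+    # while for rev 'dev' a line
+    #   '<sha> refs/remotes/origin/dev' -> (sha, True)    # remote branch
+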
+    @classmethod
+    def resolve_revision(cls, dest, url, rev_options):
+        """
+        Resolve a revision to a new RevOptions object with the SHA1 of the
+        branch, tag, or ref if found.
+
+        Args:
+          rev_options: a RevOptions object.
+        """
+        rev = rev_options.arg_rev
+        sha, is_branch = cls.get_revision_sha(dest, rev)
+
+        if sha is not None:
+            rev_options = rev_options.make_new(sha)
+            rev_options.branch_name = rev if is_branch else None
+
+            return rev_options
+
+        # Do not show a warning for the common case of something that has
+        # the form of a Git commit hash.
+        if not looks_like_hash(rev):
+            logger.warning(
+                "Did not find branch or tag '%s', assuming revision or ref.",
+                rev,
+            )
+
+        if not rev.startswith('refs/'):
+            return rev_options
+
+        # If it looks like a ref, we have to fetch it explicitly.
+        cls.run_command(
+            ['fetch', '-q', url] + rev_options.to_args(),
+            cwd=dest,
+        )
+        # Change the revision to the SHA of the ref we fetched
+        sha = cls.get_revision(dest, rev='FETCH_HEAD')
+        rev_options = rev_options.make_new(sha)
+
+        return rev_options
+
+    @classmethod
+    def is_commit_id_equal(cls, dest, name):
+        """
+        Return whether the current commit hash equals the given name.
+
+        Args:
+          dest: the repository directory.
+          name: a string name.
+        """
+        if not name:
+            # Then avoid an unnecessary subprocess call.
+            return False
+
+        return cls.get_revision(dest) == name
+
+    @classmethod
+    def fetch_new(cls, dest, url, rev_options):
+        rev_display = rev_options.to_display()
+        logger.info(
+            'Cloning %s%s to %s', redact_password_from_url(url),
+            rev_display, display_path(dest),
+        )
+        cls.run_command(['clone', '-q', url, dest])
+
+        if rev_options.rev:
+            # Then a specific revision was requested.
+            rev_options = cls.resolve_revision(dest, url, rev_options)
+            branch_name = getattr(rev_options, 'branch_name', None)
+            if branch_name is None:
+                # Only do a checkout if the current commit id doesn't match
+                # the requested revision.
+                if not cls.is_commit_id_equal(dest, rev_options.rev):
+                    cmd_args = ['checkout', '-q'] + rev_options.to_args()
+                    cls.run_command(cmd_args, cwd=dest)
+            elif cls.get_current_branch(dest) != branch_name:
+                # Then a specific branch was requested, and that branch
+                # is not yet checked out.
+                track_branch = 'origin/{}'.format(branch_name)
+                cmd_args = [
+                    'checkout', '-b', branch_name, '--track', track_branch,
+                ]
+                cls.run_command(cmd_args, cwd=dest)
+
+        #: repo may contain submodules
+        cls.update_submodules(dest)
+
+    def switch(self, dest, url, rev_options):
+        self.run_command(['config', 'remote.origin.url', url], cwd=dest)
+        cmd_args = ['checkout', '-q'] + rev_options.to_args()
+        self.run_command(cmd_args, cwd=dest)
+
+        self.update_submodules(dest)
+
+    def update(self, dest, url, rev_options):
+        # First fetch changes from the default remote
+        if self.get_git_version() >= parse_version('1.9.0'):
+            # fetch tags in addition to everything else
+            self.run_command(['fetch', '-q', '--tags'], cwd=dest)
+        else:
+            self.run_command(['fetch', '-q'], cwd=dest)
+        # Then reset to wanted revision (maybe even origin/master)
+        rev_options = self.resolve_revision(dest, url, rev_options)
+        cmd_args = ['reset', '--hard', '-q'] + rev_options.to_args()
+        self.run_command(cmd_args, cwd=dest)
+        #: update submodules
+        self.update_submodules(dest)
+
+    @classmethod
+    def get_remote_url(cls, location):
+        """
+        Return URL of the first remote encountered.
+
+        Raises RemoteNotFoundError if the repository does not have a remote
+        url configured.
+        """
+        # We need to pass 1 for extra_ok_returncodes since the command
+        # exits with return code 1 if there are no matching lines.
+        stdout = cls.run_command(
+            ['config', '--get-regexp', r'remote\..*\.url'],
+            extra_ok_returncodes=(1, ), show_stdout=False, cwd=location,
+        )
+        remotes = stdout.splitlines()
+        try:
+            found_remote = remotes[0]
+        except IndexError:
+            raise RemoteNotFoundError
+
+        for remote in remotes:
+            if remote.startswith('remote.origin.url '):
+                found_remote = remote
+                break
+        url = found_remote.split(' ')[1]
+        return url.strip()
+
+    @classmethod
+    def get_revision(cls, location, rev=None):
+        if rev is None:
+            rev = 'HEAD'
+        current_rev = cls.run_command(
+            ['rev-parse', rev], show_stdout=False, cwd=location,
+        )
+        return current_rev.strip()
+
+    @classmethod
+    def get_subdirectory(cls, location):
+        # find the repo root
+        git_dir = cls.run_command(['rev-parse', '--git-dir'],
+                                  show_stdout=False, cwd=location).strip()
+        if not os.path.isabs(git_dir):
+            git_dir = os.path.join(location, git_dir)
+        root_dir = os.path.join(git_dir, '..')
+        # find setup.py
+        orig_location = location
+        while not os.path.exists(os.path.join(location, 'setup.py')):
+            last_location = location
+            location = os.path.dirname(location)
+            if location == last_location:
+                # We've traversed up to the root of the filesystem without
+                # finding setup.py
+                logger.warning(
+                    "Could not find setup.py for directory %s (tried all "
+                    "parent directories)",
+                    orig_location,
+                )
+                return None
+        # relative path of setup.py to repo root
+        if samefile(root_dir, location):
+            return None
+        return os.path.relpath(location, root_dir)
+
+    @classmethod
+    def get_url_rev_and_auth(cls, url):
+        """
+        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
+        That's required because although they use SSH they sometimes don't
+        work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
+        parsing, so we remove it again afterwards and return the URL as a
+        stub.
+        """
+        if '://' not in url:
+            assert 'file:' not in url
+            url = url.replace('git+', 'git+ssh://')
+            url, rev, user_pass = super(Git, cls).get_url_rev_and_auth(url)
+            url = url.replace('ssh://', '')
+        else:
+            url, rev, user_pass = super(Git, cls).get_url_rev_and_auth(url)
+
+        return url, rev, user_pass
+
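+    # Worked example (values assumed): 'git+git@github.com:user/repo.git'
+    # is temporarily rewritten to 'git+ssh://git@github.com:user/repo.git'
+    # so the superclass can parse it, after which 'ssh://' is stripped and
+    # the original stub form is returned.
+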
+    @classmethod
+    def update_submodules(cls, location):
+        if not os.path.exists(os.path.join(location, '.gitmodules')):
+            return
+        cls.run_command(
+            ['submodule', 'update', '--init', '--recursive', '-q'],
+            cwd=location,
+        )
+
+    @classmethod
+    def controls_location(cls, location):
+        if super(Git, cls).controls_location(location):
+            return True
+        try:
+            r = cls.run_command(['rev-parse'],
+                                cwd=location,
+                                show_stdout=False,
+                                on_returncode='ignore')
+            return not r
+        except BadCommand:
+            logger.debug("could not determine if %s is under git control "
+                         "because git is not available", location)
+            return False
+
+
+vcs.register(Git)
diff --git a/lib/python3.7/site-packages/pip/_internal/vcs/mercurial.py b/lib/python3.7/site-packages/pip/_internal/vcs/mercurial.py
new file mode 100644
index 0000000000000000000000000000000000000000..81edc4a06e5b2b56ab6e2bcf992f6321721031d4
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/vcs/mercurial.py
@@ -0,0 +1,105 @@
+from __future__ import absolute_import
+
+import logging
+import os
+
+from pip._vendor.six.moves import configparser
+
+from pip._internal.download import path_to_url
+from pip._internal.utils.misc import display_path
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.vcs import VersionControl, vcs
+
+logger = logging.getLogger(__name__)
+
+
+class Mercurial(VersionControl):
+    name = 'hg'
+    dirname = '.hg'
+    repo_name = 'clone'
+    schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
+
+    @staticmethod
+    def get_base_rev_args(rev):
+        return [rev]
+
+    def export(self, location):
+        """Export the Hg repository at the url to the destination location"""
+        with TempDirectory(kind="export") as temp_dir:
+            self.unpack(temp_dir.path)
+
+            self.run_command(
+                ['archive', location], show_stdout=False, cwd=temp_dir.path
+            )
+
+    @classmethod
+    def fetch_new(cls, dest, url, rev_options):
+        rev_display = rev_options.to_display()
+        logger.info(
+            'Cloning hg %s%s to %s',
+            url,
+            rev_display,
+            display_path(dest),
+        )
+        cls.run_command(['clone', '--noupdate', '-q', url, dest])
+        cmd_args = ['update', '-q'] + rev_options.to_args()
+        cls.run_command(cmd_args, cwd=dest)
+
+    def switch(self, dest, url, rev_options):
+        repo_config = os.path.join(dest, self.dirname, 'hgrc')
+        config = configparser.SafeConfigParser()
+        try:
+            config.read(repo_config)
+            config.set('paths', 'default', url)
+            with open(repo_config, 'w') as config_file:
+                config.write(config_file)
+        except (OSError, configparser.NoSectionError) as exc:
+            logger.warning(
+                'Could not switch Mercurial repository to %s: %s', url, exc,
+            )
+        else:
+            cmd_args = ['update', '-q'] + rev_options.to_args()
+            self.run_command(cmd_args, cwd=dest)
+
+    def update(self, dest, url, rev_options):
+        self.run_command(['pull', '-q'], cwd=dest)
+        cmd_args = ['update', '-q'] + rev_options.to_args()
+        self.run_command(cmd_args, cwd=dest)
+
+    @classmethod
+    def get_remote_url(cls, location):
+        url = cls.run_command(
+            ['showconfig', 'paths.default'],
+            show_stdout=False, cwd=location).strip()
+        if cls._is_local_repository(url):
+            url = path_to_url(url)
+        return url.strip()
+
+    @classmethod
+    def get_revision(cls, location):
+        """
+        Return the repository-local changeset revision number (an integer,
+        returned as a string).
+        """
+        current_revision = cls.run_command(
+            ['parents', '--template={rev}'],
+            show_stdout=False, cwd=location).strip()
+        return current_revision
+
+    @classmethod
+    def get_requirement_revision(cls, location):
+        """
+        Return the changeset identification hash, as a 40-character
+        hexadecimal string
+        """
+        current_rev_hash = cls.run_command(
+            ['parents', '--template={node}'],
+            show_stdout=False, cwd=location).strip()
+        return current_rev_hash
+
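+    # Template sketch (assumed hg output): '--template={rev}' prints a
+    # repository-local number such as '42', while '--template={node}' prints
+    # the full 40-character hex changeset hash, which, unlike the local
+    # number, is stable across clones.
+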
+    @classmethod
+    def is_commit_id_equal(cls, dest, name):
+        """Always assume the versions don't match"""
+        return False
+
+
+vcs.register(Mercurial)
diff --git a/lib/python3.7/site-packages/pip/_internal/vcs/subversion.py b/lib/python3.7/site-packages/pip/_internal/vcs/subversion.py
new file mode 100644
index 0000000000000000000000000000000000000000..01bb16122f73d98b73a071b5105a56cc8871b879
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/vcs/subversion.py
@@ -0,0 +1,234 @@
+from __future__ import absolute_import
+
+import logging
+import os
+import re
+
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import (
+    display_path, rmtree, split_auth_from_netloc,
+)
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.vcs import VersionControl, vcs
+
+_svn_xml_url_re = re.compile('url="([^"]+)"')
+_svn_rev_re = re.compile(r'committed-rev="(\d+)"')
+_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
+_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
+
+
+if MYPY_CHECK_RUNNING:
+    from typing import Optional, Tuple
+
+logger = logging.getLogger(__name__)
+
+
+class Subversion(VersionControl):
+    name = 'svn'
+    dirname = '.svn'
+    repo_name = 'checkout'
+    schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
+
+    @classmethod
+    def should_add_vcs_url_prefix(cls, remote_url):
+        return True
+
+    @staticmethod
+    def get_base_rev_args(rev):
+        return ['-r', rev]
+
+    def get_vcs_version(self):
+        # type: () -> Optional[Tuple[int, ...]]
+        """Return the version of the currently installed Subversion client.
+
+        :return: A tuple containing the parts of the version information or
+            ``None`` if the version returned from ``svn`` could not be parsed.
+        :raises: BadCommand: If ``svn`` is not installed.
+        """
+        # Example versions:
+        #   svn, version 1.10.3 (r1842928)
+        #      compiled Feb 25 2019, 14:20:39 on x86_64-apple-darwin17.0.0
+        #   svn, version 1.7.14 (r1542130)
+        #      compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu
+        version_prefix = 'svn, version '
+        version = self.run_command(['--version'], show_stdout=False)
+        if not version.startswith(version_prefix):
+            return None
+
+        version = version[len(version_prefix):].split()[0]
+        version_list = version.split('.')
+        try:
+            parsed_version = tuple(map(int, version_list))
+        except ValueError:
+            return None
+
+        if not parsed_version:
+            return None
+
+        return parsed_version
+
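+    # e.g. (sketch): 'svn, version 1.10.3 (r1842928)' -> '1.10.3'
+    # -> (1, 10, 3); output that can't be parsed this way yields None.
+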
+    def export(self, location):
+        """Export the svn repository at the url to the destination location"""
+        url, rev_options = self.get_url_rev_options(self.url)
+
+        logger.info('Exporting svn repository %s to %s', url, location)
+        with indent_log():
+            if os.path.exists(location):
+                # Subversion doesn't like to check out over an existing
+                # directory --force fixes this, but was only added in svn 1.5
+                rmtree(location)
+            cmd_args = ['export'] + rev_options.to_args() + [url, location]
+            self.run_command(cmd_args, show_stdout=False)
+
+    @classmethod
+    def fetch_new(cls, dest, url, rev_options):
+        rev_display = rev_options.to_display()
+        logger.info(
+            'Checking out %s%s to %s',
+            url,
+            rev_display,
+            display_path(dest),
+        )
+        cmd_args = ['checkout', '-q'] + rev_options.to_args() + [url, dest]
+        cls.run_command(cmd_args)
+
+    def switch(self, dest, url, rev_options):
+        cmd_args = ['switch'] + rev_options.to_args() + [url, dest]
+        self.run_command(cmd_args)
+
+    def update(self, dest, url, rev_options):
+        cmd_args = ['update'] + rev_options.to_args() + [dest]
+        self.run_command(cmd_args)
+
+    @classmethod
+    def get_revision(cls, location):
+        """
+        Return the maximum revision for all files under a given location
+        """
+        # Note: taken from setuptools.command.egg_info
+        revision = 0
+
+        for base, dirs, files in os.walk(location):
+            if cls.dirname not in dirs:
+                dirs[:] = []
+                continue    # no sense walking uncontrolled subdirs
+            dirs.remove(cls.dirname)
+            entries_fn = os.path.join(base, cls.dirname, 'entries')
+            if not os.path.exists(entries_fn):
+                # FIXME: should we warn?
+                continue
+
+            dirurl, localrev = cls._get_svn_url_rev(base)
+
+            if base == location:
+                base = dirurl + '/'   # save the root url
+            elif not dirurl or not dirurl.startswith(base):
+                dirs[:] = []
+                continue    # not part of the same svn tree, skip it
+            revision = max(revision, localrev)
+        return revision
+
+    @classmethod
+    def get_netloc_and_auth(cls, netloc, scheme):
+        """
+        This override allows the auth information to be passed to svn via the
+        --username and --password options instead of via the URL.
+        """
+        if scheme == 'ssh':
+            # The --username and --password options can't be used for
+            # svn+ssh URLs, so keep the auth information in the URL.
+            return super(Subversion, cls).get_netloc_and_auth(netloc, scheme)
+
+        return split_auth_from_netloc(netloc)
+
+    @classmethod
+    def get_url_rev_and_auth(cls, url):
+        # hotfix the URL scheme: the superclass strips 'svn+' from
+        # 'svn+ssh://' URLs, so re-add it here
+        url, rev, user_pass = super(Subversion, cls).get_url_rev_and_auth(url)
+        if url.startswith('ssh://'):
+            url = 'svn+' + url
+        return url, rev, user_pass
+
+    @staticmethod
+    def make_rev_args(username, password):
+        extra_args = []
+        if username:
+            extra_args += ['--username', username]
+        if password:
+            extra_args += ['--password', password]
+
+        return extra_args
+
+    @classmethod
+    def get_remote_url(cls, location):
+        # In cases where the source is in a subdirectory (i.e. not alongside
+        # setup.py), we have to walk up from the location until we find a
+        # real setup.py
+        orig_location = location
+        while not os.path.exists(os.path.join(location, 'setup.py')):
+            last_location = location
+            location = os.path.dirname(location)
+            if location == last_location:
+                # We've traversed up to the root of the filesystem without
+                # finding setup.py
+                logger.warning(
+                    "Could not find setup.py for directory %s (tried all "
+                    "parent directories)",
+                    orig_location,
+                )
+                return None
+
+        return cls._get_svn_url_rev(location)[0]
+
+    @classmethod
+    def _get_svn_url_rev(cls, location):
+        from pip._internal.exceptions import InstallationError
+
+        entries_path = os.path.join(location, cls.dirname, 'entries')
+        if os.path.exists(entries_path):
+            with open(entries_path) as f:
+                data = f.read()
+        else:  # subversion >= 1.7 does not have the 'entries' file
+            data = ''
+
+        if (data.startswith('8') or
+                data.startswith('9') or
+                data.startswith('10')):
+            data = list(map(str.splitlines, data.split('\n\x0c\n')))
+            del data[0][0]  # get rid of the '8'
+            url = data[0][3]
+            revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
+        elif data.startswith('<?xml'):
+            match = _svn_xml_url_re.search(data)
+            if not match:
+                raise ValueError('Badly formatted data: %r' % data)
+            url = match.group(1)    # get repository URL
+            revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
+        else:
+            try:
+                # subversion >= 1.7
+                xml = cls.run_command(
+                    ['info', '--xml', location],
+                    show_stdout=False,
+                )
+                url = _svn_info_xml_url_re.search(xml).group(1)
+                revs = [
+                    int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)
+                ]
+            except InstallationError:
+                url, revs = None, []
+
+        if revs:
+            rev = max(revs)
+        else:
+            rev = 0
+
+        return url, rev
+
+    @classmethod
+    def is_commit_id_equal(cls, dest, name):
+        """Always assume the versions don't match"""
+        return False
+
+
+vcs.register(Subversion)
diff --git a/lib/python3.7/site-packages/pip/_internal/wheel.py b/lib/python3.7/site-packages/pip/_internal/wheel.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bdbe93ab7e829a6fdfe24b20c4ab3c614bad32d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_internal/wheel.py
@@ -0,0 +1,1098 @@
+"""
+Support for installing and building the "wheel" binary package format.
+"""
+from __future__ import absolute_import
+
+import collections
+import compileall
+import csv
+import hashlib
+import logging
+import os.path
+import re
+import shutil
+import stat
+import sys
+import warnings
+from base64 import urlsafe_b64encode
+from email.parser import Parser
+
+from pip._vendor import pkg_resources
+from pip._vendor.distlib.scripts import ScriptMaker
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.six import StringIO
+
+from pip._internal import pep425tags
+from pip._internal.download import path_to_url, unpack_url
+from pip._internal.exceptions import (
+    InstallationError, InvalidWheelFilename, UnsupportedWheel,
+)
+from pip._internal.locations import (
+    PIP_DELETE_MARKER_FILENAME, distutils_scheme,
+)
+from pip._internal.models.link import Link
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import (
+    LOG_DIVIDER, call_subprocess, captured_stdout, ensure_dir,
+    format_command_args, read_chunks,
+)
+from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.typing import MYPY_CHECK_RUNNING
+from pip._internal.utils.ui import open_spinner
+
+if MYPY_CHECK_RUNNING:
+    from typing import (
+        Dict, List, Optional, Sequence, Mapping, Tuple, IO, Text, Any, Iterable
+    )
+    from pip._vendor.packaging.requirements import Requirement
+    from pip._internal.req.req_install import InstallRequirement
+    from pip._internal.download import PipSession
+    from pip._internal.index import FormatControl, PackageFinder
+    from pip._internal.operations.prepare import (
+        RequirementPreparer
+    )
+    from pip._internal.cache import WheelCache
+    from pip._internal.pep425tags import Pep425Tag
+
+    InstalledCSVRow = Tuple[str, ...]
+
+
+VERSION_COMPATIBLE = (1, 0)
+
+
+logger = logging.getLogger(__name__)
+
+
+def normpath(src, p):
+    return os.path.relpath(src, p).replace(os.path.sep, '/')
+
+
+def rehash(path, blocksize=1 << 20):
+    # type: (str, int) -> Tuple[str, str]
+    """Return (hash, length) for path using hashlib.sha256()"""
+    h = hashlib.sha256()
+    length = 0
+    with open(path, 'rb') as f:
+        for block in read_chunks(f, size=blocksize):
+            length += len(block)
+            h.update(block)
+    digest = 'sha256=' + urlsafe_b64encode(
+        h.digest()
+    ).decode('latin1').rstrip('=')
+    # coerce length to str to avoid unicode/str issues on Python 2
+    return (digest, str(length))  # type: ignore
+
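+# Illustration (computed for an empty file, not pip code): rehash() would
+# return ('sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU', '0'), i.e.
+# the urlsafe base64 of the SHA-256 digest with '=' padding stripped, paired
+# with the file length as a string.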
+
+def open_for_csv(name, mode):
+    # type: (str, Text) -> IO
+    if sys.version_info[0] < 3:
+        nl = {}  # type: Dict[str, Any]
+        bin = 'b'
+    else:
+        nl = {'newline': ''}  # type: Dict[str, Any]
+        bin = ''
+    return open(name, mode + bin, **nl)
+
+
+def replace_python_tag(wheelname, new_tag):
+    # type: (str, str) -> str
+    """Replace the Python tag in a wheel file name with a new value.
+    """
+    parts = wheelname.split('-')
+    parts[-3] = new_tag
+    return '-'.join(parts)
+
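+# For instance (sketch): replace_python_tag('pip-19.0-py2.py3-none-any.whl',
+# 'py37') returns 'pip-19.0-py37-none-any.whl'; split('-') places the python
+# tag at parts[-3], just before the abi and platform tags.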
+
+def fix_script(path):
+    # type: (str) -> Optional[bool]
+    """Replace #!python with #!/path/to/python
+    Return True if file was changed."""
+    # XXX RECORD hashes will need to be updated
+    if os.path.isfile(path):
+        with open(path, 'rb') as script:
+            firstline = script.readline()
+            if not firstline.startswith(b'#!python'):
+                return False
+            exename = sys.executable.encode(sys.getfilesystemencoding())
+            firstline = b'#!' + exename + os.linesep.encode("ascii")
+            rest = script.read()
+        with open(path, 'wb') as script:
+            script.write(firstline)
+            script.write(rest)
+        return True
+    return None
+
+
+dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>.+?))?)
+                                \.dist-info$""", re.VERBOSE)
+
+
+def root_is_purelib(name, wheeldir):
+    # type: (str, str) -> bool
+    """
+    Return True if the extracted wheel in wheeldir should go into purelib.
+    """
+    name_folded = name.replace("-", "_")
+    for item in os.listdir(wheeldir):
+        match = dist_info_re.match(item)
+        if match and match.group('name') == name_folded:
+            with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel:
+                for line in wheel:
+                    line = line.lower().rstrip()
+                    if line == "root-is-purelib: true":
+                        return True
+    return False
+
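+# e.g. (sketch): for name 'my-pkg', a 'my_pkg-1.0.dist-info/WHEEL' file
+# containing the line 'Root-Is-Purelib: true' makes this return True.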
+
+def get_entrypoints(filename):
+    # type: (str) -> Tuple[Dict[str, str], Dict[str, str]]
+    if not os.path.exists(filename):
+        return {}, {}
+
+    # This is done because you can pass a string to entry_points wrappers,
+    # which means that they may or may not be valid INI files. The attempt
+    # here is to strip leading and trailing whitespace in order to make them
+    # valid INI files.
+    with open(filename) as fp:
+        data = StringIO()
+        for line in fp:
+            data.write(line.strip())
+            data.write("\n")
+        data.seek(0)
+
+    # get the entry points and then the script names
+    entry_points = pkg_resources.EntryPoint.parse_map(data)
+    console = entry_points.get('console_scripts', {})
+    gui = entry_points.get('gui_scripts', {})
+
+    def _split_ep(s):
+        """get the string representation of EntryPoint, remove space and split
+        on '='"""
+        return str(s).replace(" ", "").split("=")
+
+    # convert the EntryPoint objects into strings with module:function
+    console = dict(_split_ep(v) for v in console.values())
+    gui = dict(_split_ep(v) for v in gui.values())
+    return console, gui
+
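+# e.g. (sketch): an entry_points.txt containing '[console_scripts]' and
+# 'pip = pip:main' yields console == {'pip': 'pip:main'} and an empty gui
+# mapping.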
+
+def message_about_scripts_not_on_PATH(scripts):
+    # type: (Sequence[str]) -> Optional[str]
+    """Determine if any scripts are not on PATH and format a warning.
+
+    Returns a warning message if one or more scripts are not on PATH,
+    otherwise None.
+    """
+    if not scripts:
+        return None
+
+    # Group scripts by the path they were installed in
+    grouped_by_dir = collections.defaultdict(set)  # type: Dict[str, set]
+    for destfile in scripts:
+        parent_dir = os.path.dirname(destfile)
+        script_name = os.path.basename(destfile)
+        grouped_by_dir[parent_dir].add(script_name)
+
+    # We don't want to warn for directories that are on PATH.
+    not_warn_dirs = [
+        os.path.normcase(i).rstrip(os.sep) for i in
+        os.environ.get("PATH", "").split(os.pathsep)
+    ]
+    # If an executable sits with sys.executable, we don't warn for it.
+    #     This covers the case of venv invocations without activating the venv.
+    not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable)))
+    warn_for = {
+        parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items()
+        if os.path.normcase(parent_dir) not in not_warn_dirs
+    }
+    if not warn_for:
+        return None
+
+    # Format a message
+    msg_lines = []
+    for parent_dir, scripts in warn_for.items():
+        sorted_scripts = sorted(scripts)  # type: List[str]
+        if len(sorted_scripts) == 1:
+            start_text = "script {} is".format(sorted_scripts[0])
+        else:
+            start_text = "scripts {} are".format(
+                ", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1]
+            )
+
+        msg_lines.append(
+            "The {} installed in '{}' which is not on PATH."
+            .format(start_text, parent_dir)
+        )
+
+    last_line_fmt = (
+        "Consider adding {} to PATH or, if you prefer "
+        "to suppress this warning, use --no-warn-script-location."
+    )
+    if len(msg_lines) == 1:
+        msg_lines.append(last_line_fmt.format("this directory"))
+    else:
+        msg_lines.append(last_line_fmt.format("these directories"))
+
+    # Returns the formatted multiline message
+    return "\n".join(msg_lines)
+
+
+def sorted_outrows(outrows):
+    # type: (Iterable[InstalledCSVRow]) -> List[InstalledCSVRow]
+    """
+    Return the given rows of a RECORD file in sorted order.
+
+    Each row is a 3-tuple (path, hash, size) and corresponds to a record of
+    a RECORD file (see PEP 376 and PEP 427 for details).  For the rows
+    passed to this function, the size can be an integer as an int or string,
+    or the empty string.
+    """
+    # Normally, there should only be one row per path, in which case the
+    # second and third elements don't come into play when sorting.
+    # However, in cases in the wild where a path might happen to occur twice,
+    # we don't want the sort operation to trigger an error (but still want
+    # determinism).  Since the third element can be an int or string, we
+    # coerce each element to a string to avoid a TypeError in this case.
+    # For additional background, see--
+    # https://github.com/pypa/pip/issues/5868
+    return sorted(outrows, key=lambda row: tuple(str(x) for x in row))
+
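+# A quick illustration (not pip code): duplicate-path rows such as
+# [('a', 'h', 10), ('a', 'h', '9')] would make plain sorted() compare 10
+# with '9' and raise TypeError on Python 3; stringifying every element
+# first keeps the sort deterministic instead.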
+
+def get_csv_rows_for_installed(
+    old_csv_rows,  # type: Iterable[List[str]]
+    installed,  # type: Dict[str, str]
+    changed,  # type: set
+    generated,  # type: List[str]
+    lib_dir,  # type: str
+):
+    # type: (...) -> List[InstalledCSVRow]
+    """
+    :param installed: A map from archive RECORD path to installation RECORD
+        path.
+    """
+    installed_rows = []  # type: List[InstalledCSVRow]
+    for row in old_csv_rows:
+        if len(row) > 3:
+            logger.warning(
+                'RECORD line has more than three elements: {}'.format(row)
+            )
+        # Make a copy because we are mutating the row.
+        row = list(row)
+        old_path = row[0]
+        new_path = installed.pop(old_path, old_path)
+        row[0] = new_path
+        if new_path in changed:
+            digest, length = rehash(new_path)
+            row[1] = digest
+            row[2] = length
+        installed_rows.append(tuple(row))
+    for f in generated:
+        digest, length = rehash(f)
+        installed_rows.append((normpath(f, lib_dir), digest, str(length)))
+    for f in installed:
+        installed_rows.append((installed[f], '', ''))
+    return installed_rows
+
+
+def move_wheel_files(
+    name,  # type: str
+    req,  # type: Requirement
+    wheeldir,  # type: str
+    user=False,  # type: bool
+    home=None,  # type: Optional[str]
+    root=None,  # type: Optional[str]
+    pycompile=True,  # type: bool
+    scheme=None,  # type: Optional[Mapping[str, str]]
+    isolated=False,  # type: bool
+    prefix=None,  # type: Optional[str]
+    warn_script_location=True  # type: bool
+):
+    # type: (...) -> None
+    """Install a wheel"""
+    # TODO: Investigate and break this up.
+    # TODO: Look into moving this into a dedicated class for representing an
+    #       installation.
+
+    if not scheme:
+        scheme = distutils_scheme(
+            name, user=user, home=home, root=root, isolated=isolated,
+            prefix=prefix,
+        )
+
+    if root_is_purelib(name, wheeldir):
+        lib_dir = scheme['purelib']
+    else:
+        lib_dir = scheme['platlib']
+
+    info_dir = []  # type: List[str]
+    data_dirs = []
+    source = wheeldir.rstrip(os.path.sep) + os.path.sep
+
+    # Record details of the files moved
+    #   installed = files copied from the wheel to the destination
+    #   changed = files changed while installing (scripts #! line typically)
+    #   generated = files newly generated during the install (script wrappers)
+    installed = {}  # type: Dict[str, str]
+    changed = set()
+    generated = []  # type: List[str]
+
+    # Compile all of the pyc files that we're going to be installing
+    if pycompile:
+        with captured_stdout() as stdout:
+            with warnings.catch_warnings():
+                warnings.filterwarnings('ignore')
+                compileall.compile_dir(source, force=True, quiet=True)
+        logger.debug(stdout.getvalue())
+
+    def record_installed(srcfile, destfile, modified=False):
+        """Map archive RECORD paths to installation RECORD paths."""
+        oldpath = normpath(srcfile, wheeldir)
+        newpath = normpath(destfile, lib_dir)
+        installed[oldpath] = newpath
+        if modified:
+            changed.add(destfile)
+
+    def clobber(source, dest, is_base, fixer=None, filter=None):
+        ensure_dir(dest)  # common for the 'include' path
+
+        for dir, subdirs, files in os.walk(source):
+            basedir = dir[len(source):].lstrip(os.path.sep)
+            destdir = os.path.join(dest, basedir)
+            if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
+                continue
+            for s in subdirs:
+                destsubdir = os.path.join(dest, basedir, s)
+                if is_base and basedir == '' and destsubdir.endswith('.data'):
+                    data_dirs.append(s)
+                    continue
+                elif (is_base and
+                        s.endswith('.dist-info') and
+                        canonicalize_name(s).startswith(
+                            canonicalize_name(req.name))):
+                    assert not info_dir, ('Multiple .dist-info directories: ' +
+                                          destsubdir + ', ' +
+                                          ', '.join(info_dir))
+                    info_dir.append(destsubdir)
+            for f in files:
+                # Skip unwanted files
+                if filter and filter(f):
+                    continue
+                srcfile = os.path.join(dir, f)
+                destfile = os.path.join(dest, basedir, f)
+                # directory creation is lazy and after the file filtering above
+                # to ensure we don't install empty dirs; empty dirs can't be
+                # uninstalled.
+                ensure_dir(destdir)
+
+                # copyfile (called below) truncates the destination if it
+                # exists and then writes the new contents. This is fine in most
+                # cases, but can cause a segfault if pip has loaded a shared
+                # object (e.g. from pyopenssl through its vendored urllib3)
+                # Since the shared object is mmap'd an attempt to call a
+                # symbol in it will then cause a segfault. Unlinking the file
+                # allows writing of new contents while allowing the process to
+                # continue to use the old copy.
+                if os.path.exists(destfile):
+                    os.unlink(destfile)
+
+                # We use copyfile (not move, copy, or copy2) to be extra sure
+                # that we are not moving directories over (copyfile fails for
+                # directories) as well as to ensure that we are not copying
+                # over any metadata because we want more control over what
+                # metadata we actually copy over.
+                shutil.copyfile(srcfile, destfile)
+
+                # Copy over the metadata for the file, currently this only
+                # includes the atime and mtime.
+                st = os.stat(srcfile)
+                if hasattr(os, "utime"):
+                    os.utime(destfile, (st.st_atime, st.st_mtime))
+
+                # If our file is executable, then make our destination file
+                # executable.
+                if os.access(srcfile, os.X_OK):
+                    st = os.stat(srcfile)
+                    permissions = (
+                        st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+                    )
+                    os.chmod(destfile, permissions)
+
+                changed = False
+                if fixer:
+                    changed = fixer(destfile)
+                record_installed(srcfile, destfile, changed)
+
+    clobber(source, lib_dir, True)
+
+    assert info_dir, "%s .dist-info directory not found" % req
+
+    # Get the defined entry points
+    ep_file = os.path.join(info_dir[0], 'entry_points.txt')
+    console, gui = get_entrypoints(ep_file)
+
+    def is_entrypoint_wrapper(name):
+        # EP, EP.exe and EP-script.py are scripts generated for
+        # entry point EP by setuptools
+        if name.lower().endswith('.exe'):
+            matchname = name[:-4]
+        elif name.lower().endswith('-script.py'):
+            matchname = name[:-10]
+        elif name.lower().endswith(".pya"):
+            matchname = name[:-4]
+        else:
+            matchname = name
+        # Ignore setuptools-generated scripts
+        return (matchname in console or matchname in gui)
+
+    for datadir in data_dirs:
+        fixer = None
+        filter = None
+        for subdir in os.listdir(os.path.join(wheeldir, datadir)):
+            fixer = None
+            if subdir == 'scripts':
+                fixer = fix_script
+                filter = is_entrypoint_wrapper
+            source = os.path.join(wheeldir, datadir, subdir)
+            dest = scheme[subdir]
+            clobber(source, dest, False, fixer=fixer, filter=filter)
+
+    maker = ScriptMaker(None, scheme['scripts'])
+
+    # Ensure old scripts are overwritten.
+    # See https://github.com/pypa/pip/issues/1800
+    maker.clobber = True
+
+    # Ensure we don't generate any variants for scripts because this is almost
+    # never what somebody wants.
+    # See https://bitbucket.org/pypa/distlib/issue/35/
+    maker.variants = {''}
+
+    # This is required because otherwise distlib creates scripts that are not
+    # executable.
+    # See https://bitbucket.org/pypa/distlib/issue/32/
+    maker.set_mode = True
+
+    # Simplify the script and fix the fact that the default script swallows
+    # every single stack trace.
+    # See https://bitbucket.org/pypa/distlib/issue/34/
+    # See https://bitbucket.org/pypa/distlib/issue/33/
+    def _get_script_text(entry):
+        if entry.suffix is None:
+            raise InstallationError(
+                "Invalid script entry point: %s for req: %s - A callable "
+                "suffix is required. Cf https://packaging.python.org/en/"
+                "latest/distributing.html#console-scripts for more "
+                "information." % (entry, req)
+            )
+        return maker.script_template % {
+            "module": entry.prefix,
+            "import_name": entry.suffix.split(".")[0],
+            "func": entry.suffix,
+        }
+    # ignore type, because mypy disallows assigning to a method,
+    # see https://github.com/python/mypy/issues/2427
+    maker._get_script_text = _get_script_text  # type: ignore
+    maker.script_template = r"""# -*- coding: utf-8 -*-
+import re
+import sys
+
+from %(module)s import %(import_name)s
+
+if __name__ == '__main__':
+    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+    sys.exit(%(func)s())
+"""
+
+    # Special case pip and setuptools to generate versioned wrappers
+    #
+    # The issue is that some projects (specifically, pip and setuptools) use
+    # code in setup.py to create "versioned" entry points - pip2.7 on Python
+    # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
+    # the wheel metadata at build time, and so if the wheel is installed with
+    # a *different* version of Python the entry points will be wrong. The
+    # correct fix for this is to enhance the metadata to be able to describe
+    # such versioned entry points, but that won't happen till Metadata 2.0 is
+    # available.
+    # In the meantime, projects using versioned entry points will either have
+    # incorrect versioned entry points, or they will not be able to distribute
+    # "universal" wheels (i.e., they will need a wheel per Python version).
+    #
+    # Because setuptools and pip are bundled with _ensurepip and virtualenv,
+    # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
+    # override the versioned entry points in the wheel and generate the
+    # correct ones. This code is purely a short-term measure until Metadata 2.0
+    # is available.
+    #
+    # Adding to the level of hack in this section of code: in order to
+    # support ensurepip, this code will look for an ``ENSUREPIP_OPTIONS``
+    # environment variable which controls which versioned scripts get
+    # installed.
+    #
+    # ENSUREPIP_OPTIONS=altinstall
+    #   - Only pipX.Y and easy_install-X.Y will be generated and installed
+    # ENSUREPIP_OPTIONS=install
+    #   - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
+    #     that this option technically applies whenever ENSUREPIP_OPTIONS is
+    #     set and is not altinstall
+    # DEFAULT
+    #   - The default behavior is to install pip, pipX, pipX.Y, easy_install
+    #     and easy_install-X.Y.
+    pip_script = console.pop('pip', None)
+    if pip_script:
+        if "ENSUREPIP_OPTIONS" not in os.environ:
+            spec = 'pip = ' + pip_script
+            generated.extend(maker.make(spec))
+
+        if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
+            spec = 'pip%s = %s' % (sys.version[:1], pip_script)
+            generated.extend(maker.make(spec))
+
+        spec = 'pip%s = %s' % (sys.version[:3], pip_script)
+        generated.extend(maker.make(spec))
+        # Delete any other versioned pip entry points
+        pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
+        for k in pip_ep:
+            del console[k]
+    easy_install_script = console.pop('easy_install', None)
+    if easy_install_script:
+        if "ENSUREPIP_OPTIONS" not in os.environ:
+            spec = 'easy_install = ' + easy_install_script
+            generated.extend(maker.make(spec))
+
+        spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
+        generated.extend(maker.make(spec))
+        # Delete any other versioned easy_install entry points
+        easy_install_ep = [
+            k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
+        ]
+        for k in easy_install_ep:
+            del console[k]
+
+    # Generate the console and GUI entry points specified in the wheel
+    if len(console) > 0:
+        generated_console_scripts = maker.make_multiple(
+            ['%s = %s' % kv for kv in console.items()]
+        )
+        generated.extend(generated_console_scripts)
+
+        if warn_script_location:
+            msg = message_about_scripts_not_on_PATH(generated_console_scripts)
+            if msg is not None:
+                logger.warning(msg)
+
+    if len(gui) > 0:
+        generated.extend(
+            maker.make_multiple(
+                ['%s = %s' % kv for kv in gui.items()],
+                {'gui': True}
+            )
+        )
+
+    # Record pip as the installer
+    installer = os.path.join(info_dir[0], 'INSTALLER')
+    temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip')
+    with open(temp_installer, 'wb') as installer_file:
+        installer_file.write(b'pip\n')
+    shutil.move(temp_installer, installer)
+    generated.append(installer)
+
+    # Record details of all files installed
+    record = os.path.join(info_dir[0], 'RECORD')
+    temp_record = os.path.join(info_dir[0], 'RECORD.pip')
+    with open_for_csv(record, 'r') as record_in:
+        with open_for_csv(temp_record, 'w+') as record_out:
+            reader = csv.reader(record_in)
+            outrows = get_csv_rows_for_installed(
+                reader, installed=installed, changed=changed,
+                generated=generated, lib_dir=lib_dir,
+            )
+            writer = csv.writer(record_out)
+            # Sort to simplify testing.
+            for row in sorted_outrows(outrows):
+                writer.writerow(row)
+    shutil.move(temp_record, record)
+
+
+def wheel_version(source_dir):
+    # type: (Optional[str]) -> Optional[Tuple[int, ...]]
+    """
+    Return the Wheel-Version of an extracted wheel, if possible.
+
+    Otherwise, return None if we couldn't parse / extract it.
+    """
+    try:
+        dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
+
+        wheel_data = dist.get_metadata('WHEEL')
+        wheel_data = Parser().parsestr(wheel_data)
+
+        version = wheel_data['Wheel-Version'].strip()
+        version = tuple(map(int, version.split('.')))
+        return version
+    except Exception:
+        return None
+
+
+def check_compatibility(version, name):
+    # type: (Optional[Tuple[int, ...]], str) -> None
+    """
+    Raises errors or warns if called with an incompatible Wheel-Version.
+
+    Pip should refuse to install a Wheel-Version that's a major series
+    ahead of what it's compatible with (e.g. 2.0 > 1.1); and warn when
+    installing a version that is only a minor version ahead (e.g. 1.2 > 1.1).
+
+    version: a 2-tuple representing a Wheel-Version (Major, Minor)
+    name: name of wheel or package to raise exception about
+
+    :raises UnsupportedWheel: when an incompatible Wheel-Version is given
+    """
+    if not version:
+        raise UnsupportedWheel(
+            "%s is in an unsupported or invalid wheel" % name
+        )
+    if version[0] > VERSION_COMPATIBLE[0]:
+        raise UnsupportedWheel(
+            "%s's Wheel-Version (%s) is not compatible with this version "
+            "of pip" % (name, '.'.join(map(str, version)))
+        )
+    elif version > VERSION_COMPATIBLE:
+        logger.warning(
+            'Installing from a newer Wheel-Version (%s)',
+            '.'.join(map(str, version)),
+        )
+
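+# A hedged illustration, assuming VERSION_COMPATIBLE == (1, 0) as defined
+# elsewhere in this module: check_compatibility((1, 0), "pkg") returns
+# quietly, check_compatibility((1, 2), "pkg") logs the newer-minor warning,
+# and check_compatibility((2, 0), "pkg") raises UnsupportedWheel.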
+
+class Wheel(object):
+    """A wheel file"""
+
+    # TODO: Maybe move the class into the models sub-package
+    # TODO: Maybe move the install code into this class
+
+    wheel_file_re = re.compile(
+        r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.*?))
+        ((-(?P<build>\d[^-]*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
+        \.whl|\.dist-info)$""",
+        re.VERBOSE
+    )
+
+    def __init__(self, filename):
+        # type: (str) -> None
+        """
+        :raises InvalidWheelFilename: when the filename is invalid for a wheel
+        """
+        wheel_info = self.wheel_file_re.match(filename)
+        if not wheel_info:
+            raise InvalidWheelFilename(
+                "%s is not a valid wheel filename." % filename
+            )
+        self.filename = filename
+        self.name = wheel_info.group('name').replace('_', '-')
+        # we'll assume "_" means "-" due to wheel naming scheme
+        # (https://github.com/pypa/pip/issues/1150)
+        self.version = wheel_info.group('ver').replace('_', '-')
+        self.build_tag = wheel_info.group('build')
+        self.pyversions = wheel_info.group('pyver').split('.')
+        self.abis = wheel_info.group('abi').split('.')
+        self.plats = wheel_info.group('plat').split('.')
+
+        # All the tag combinations from this file
+        self.file_tags = {
+            (x, y, z) for x in self.pyversions
+            for y in self.abis for z in self.plats
+        }
+
+    def support_index_min(self, tags=None):
+        # type: (Optional[List[Pep425Tag]]) -> Optional[int]
+        """
+        Return the lowest index that one of the wheel's file_tag combinations
+        achieves in the supported_tags list, e.g. if there are 8 supported
+        tags and one of the file tags is first in the list, then return 0.
+        Returns None if the wheel is not supported.
+        """
+        if tags is None:  # for mock
+            tags = pep425tags.get_supported()
+        indexes = [tags.index(c) for c in self.file_tags if c in tags]
+        return min(indexes) if indexes else None
+
+    def supported(self, tags=None):
+        # type: (Optional[List[Pep425Tag]]) -> bool
+        """Is this wheel supported on this system?"""
+        if tags is None:  # for mock
+            tags = pep425tags.get_supported()
+        return bool(set(tags).intersection(self.file_tags))
+
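+# A hedged doctest-style sketch of the class above:
+#
+#     >>> w = Wheel("pip-19.0.3-py2.py3-none-any.whl")
+#     >>> (w.name, w.version)
+#     ('pip', '19.0.3')
+#     >>> sorted(w.file_tags)
+#     [('py2', 'none', 'any'), ('py3', 'none', 'any')]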
+
+def _contains_egg_info(
+        s, _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
+    """Determine whether the string looks like an egg_info.
+
+    :param s: The string to parse. E.g. foo-2.1
+    """
+    return bool(_egg_info_re.search(s))
+
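+# A hedged sketch: the regex above looks for a "name-version" shape, so
+# _contains_egg_info("foo-2.1") is True while _contains_egg_info("foo") is
+# False. This is what lets pinned sdists use the persistent cache below.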
+
+def should_use_ephemeral_cache(
+    req,  # type: InstallRequirement
+    format_control,  # type: FormatControl
+    autobuilding,  # type: bool
+    cache_available  # type: bool
+):
+    # type: (...) -> Optional[bool]
+    """
+    Return whether to build an InstallRequirement object using the
+    ephemeral cache.
+
+    :param cache_available: whether a cache directory is available for the
+        autobuilding=True case.
+
+    :return: True or False to build the requirement with ephem_cache=True
+        or False, respectively; or None to skip building the requirement.
+    """
+    if req.constraint:
+        return None
+    if req.is_wheel:
+        if not autobuilding:
+            logger.info(
+                'Skipping %s, due to already being a wheel.', req.name,
+            )
+        return None
+    if not autobuilding:
+        return False
+
+    if req.editable or not req.source_dir:
+        return None
+
+    if req.link and not req.link.is_artifact:
+        # VCS checkout. Build wheel just for this run.
+        return True
+
+    if "binary" not in format_control.get_allowed_formats(
+            canonicalize_name(req.name)):
+        logger.info(
+            "Skipping bdist_wheel for %s, due to binaries "
+            "being disabled for it.", req.name,
+        )
+        return None
+
+    link = req.link
+    base, ext = link.splitext()
+    if cache_available and _contains_egg_info(base):
+        return False
+
+    # Otherwise, build the wheel just for this run using the ephemeral
+    # cache since we are either in the case of e.g. a local directory, or
+    # no cache directory is available to use.
+    return True
+
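+# A hedged summary of the decision above, under assumed inputs: a VCS
+# checkout builds into the ephemeral cache (True); a pinned download such as
+# "foo-2.1.tar.gz" with a cache directory available uses the persistent
+# cache (False); constraints, editables and already-built wheels are not
+# built at all (None).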
+
+def format_command_result(
+    command_args,  # type: List[str]
+    command_output,  # type: str
+):
+    # type: (...) -> str
+    """
+    Format command information for logging.
+    """
+    command_desc = format_command_args(command_args)
+    text = 'Command arguments: {}\n'.format(command_desc)
+
+    if not command_output:
+        text += 'Command output: None'
+    elif logger.getEffectiveLevel() > logging.DEBUG:
+        text += 'Command output: [use --verbose to show]'
+    else:
+        if not command_output.endswith('\n'):
+            command_output += '\n'
+        text += 'Command output:\n{}{}'.format(command_output, LOG_DIVIDER)
+
+    return text
+
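+# A hedged sketch, assuming format_command_args() shell-joins the list and
+# the log level is above DEBUG:
+#
+#     >>> print(format_command_result(["setup.py", "bdist_wheel"], "..."))
+#     Command arguments: setup.py bdist_wheel
+#     Command output: [use --verbose to show]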
+
+def get_legacy_build_wheel_path(
+    names,  # type: List[str]
+    temp_dir,  # type: str
+    req,  # type: InstallRequirement
+    command_args,  # type: List[str]
+    command_output,  # type: str
+):
+    # type: (...) -> Optional[str]
+    """
+    Return the path to the wheel in the temporary build directory.
+    """
+    # Sort for determinism.
+    names = sorted(names)
+    if not names:
+        msg = (
+            'Legacy build of wheel for {!r} created no files.\n'
+        ).format(req.name)
+        msg += format_command_result(command_args, command_output)
+        logger.warning(msg)
+        return None
+
+    if len(names) > 1:
+        msg = (
+            'Legacy build of wheel for {!r} created more than one file.\n'
+            'Filenames (choosing first): {}\n'
+        ).format(req.name, names)
+        msg += format_command_result(command_args, command_output)
+        logger.warning(msg)
+
+    return os.path.join(temp_dir, names[0])
+
+
+class WheelBuilder(object):
+    """Build wheels from a RequirementSet."""
+
+    def __init__(
+        self,
+        finder,  # type: PackageFinder
+        preparer,  # type: RequirementPreparer
+        wheel_cache,  # type: WheelCache
+        build_options=None,  # type: Optional[List[str]]
+        global_options=None,  # type: Optional[List[str]]
+        no_clean=False  # type: bool
+    ):
+        # type: (...) -> None
+        self.finder = finder
+        self.preparer = preparer
+        self.wheel_cache = wheel_cache
+
+        self._wheel_dir = preparer.wheel_download_dir
+
+        self.build_options = build_options or []
+        self.global_options = global_options or []
+        self.no_clean = no_clean
+
+    def _build_one(self, req, output_dir, python_tag=None):
+        """Build one wheel.
+
+        :return: The filename of the built wheel, or None if the build failed.
+        """
+        # Install build deps into temporary directory (PEP 518)
+        with req.build_env:
+            return self._build_one_inside_env(req, output_dir,
+                                              python_tag=python_tag)
+
+    def _build_one_inside_env(self, req, output_dir, python_tag=None):
+        with TempDirectory(kind="wheel") as temp_dir:
+            if req.use_pep517:
+                builder = self._build_one_pep517
+            else:
+                builder = self._build_one_legacy
+            wheel_path = builder(req, temp_dir.path, python_tag=python_tag)
+            if wheel_path is not None:
+                wheel_name = os.path.basename(wheel_path)
+                dest_path = os.path.join(output_dir, wheel_name)
+                try:
+                    shutil.move(wheel_path, dest_path)
+                    logger.info('Stored in directory: %s', output_dir)
+                    return dest_path
+                except Exception:
+                    pass
+            # Ignore return, we can't do anything else useful.
+            self._clean_one(req)
+            return None
+
+    def _base_setup_args(self, req):
+        # NOTE: Eventually, we'd also want to add -S to the flags here, when
+        # we're
+        # isolating. Currently, it breaks Python in virtualenvs, because it
+        # relies on site.py to find parts of the standard library outside the
+        # virtualenv.
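+        # The assembled command is roughly (a sketch; SETUPTOOLS_SHIM is
+        # defined elsewhere in pip):
+        #     python -u -c "<setuptools shim running req.setup_py>" <opts...>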
+        return [
+            sys.executable, '-u', '-c',
+            SETUPTOOLS_SHIM % req.setup_py
+        ] + list(self.global_options)
+
+    def _build_one_pep517(self, req, tempd, python_tag=None):
+        """Build one InstallRequirement using the PEP 517 build process.
+
+        Returns path to wheel if successfully built. Otherwise, returns None.
+        """
+        assert req.metadata_directory is not None
+        if self.build_options:
+            # PEP 517 does not support --build-options
+            logger.error('Cannot build wheel for %s using PEP 517 when '
+                         '--build-options is present', req.name)
+            return None
+        try:
+            req.spin_message = 'Building wheel for %s (PEP 517)' % (req.name,)
+            logger.debug('Destination directory: %s', tempd)
+            wheel_name = req.pep517_backend.build_wheel(
+                tempd,
+                metadata_directory=req.metadata_directory
+            )
+            if python_tag:
+                # General PEP 517 backends don't necessarily support
+                # a "--python-tag" option, so we rename the wheel
+                # file directly.
+                new_name = replace_python_tag(wheel_name, python_tag)
+                os.rename(
+                    os.path.join(tempd, wheel_name),
+                    os.path.join(tempd, new_name)
+                )
+                # Reassign to simplify the return at the end of function
+                wheel_name = new_name
+        except Exception:
+            logger.error('Failed building wheel for %s', req.name)
+            return None
+        return os.path.join(tempd, wheel_name)
+
+    def _build_one_legacy(self, req, tempd, python_tag=None):
+        """Build one InstallRequirement using the "legacy" build process.
+
+        Returns path to wheel if successfully built. Otherwise, returns None.
+        """
+        base_args = self._base_setup_args(req)
+
+        spin_message = 'Building wheel for %s (setup.py)' % (req.name,)
+        with open_spinner(spin_message) as spinner:
+            logger.debug('Destination directory: %s', tempd)
+            wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
+                + self.build_options
+
+            if python_tag is not None:
+                wheel_args += ["--python-tag", python_tag]
+
+            try:
+                output = call_subprocess(wheel_args, cwd=req.setup_py_dir,
+                                         spinner=spinner)
+            except Exception:
+                spinner.finish("error")
+                logger.error('Failed building wheel for %s', req.name)
+                return None
+            names = os.listdir(tempd)
+            wheel_path = get_legacy_build_wheel_path(
+                names=names,
+                temp_dir=tempd,
+                req=req,
+                command_args=wheel_args,
+                command_output=output,
+            )
+            return wheel_path
+
+    def _clean_one(self, req):
+        base_args = self._base_setup_args(req)
+
+        logger.info('Running setup.py clean for %s', req.name)
+        clean_args = base_args + ['clean', '--all']
+        try:
+            call_subprocess(clean_args, cwd=req.source_dir)
+            return True
+        except Exception:
+            logger.error('Failed cleaning build dir for %s', req.name)
+            return False
+
+    def build(
+        self,
+        requirements,  # type: Iterable[InstallRequirement]
+        session,  # type: PipSession
+        autobuilding=False  # type: bool
+    ):
+        # type: (...) -> List[InstallRequirement]
+        """Build wheels.
+
+        :return: The list of InstallRequirement objects that failed to
+            build.
+        """
+        buildset = []
+        format_control = self.finder.format_control
+        # Whether a cache directory is available for autobuilding=True.
+        cache_available = bool(self._wheel_dir or self.wheel_cache.cache_dir)
+
+        for req in requirements:
+            ephem_cache = should_use_ephemeral_cache(
+                req, format_control=format_control, autobuilding=autobuilding,
+                cache_available=cache_available,
+            )
+            if ephem_cache is None:
+                continue
+
+            buildset.append((req, ephem_cache))
+
+        if not buildset:
+            return []
+
+        # Is any wheel build not using the ephemeral cache?
+        if any(not ephem_cache for _, ephem_cache in buildset):
+            have_directory_for_build = self._wheel_dir or (
+                autobuilding and self.wheel_cache.cache_dir
+            )
+            assert have_directory_for_build
+
+        # TODO by @pradyunsg
+        # Should break up this method into 2 separate methods.
+
+        # Build the wheels.
+        logger.info(
+            'Building wheels for collected packages: %s',
+            ', '.join([req.name for (req, _) in buildset]),
+        )
+        _cache = self.wheel_cache  # shorter name
+        with indent_log():
+            build_success, build_failure = [], []
+            for req, ephem in buildset:
+                python_tag = None
+                if autobuilding:
+                    python_tag = pep425tags.implementation_tag
+                    if ephem:
+                        output_dir = _cache.get_ephem_path_for_link(req.link)
+                    else:
+                        output_dir = _cache.get_path_for_link(req.link)
+                    try:
+                        ensure_dir(output_dir)
+                    except OSError as e:
+                        logger.warning("Building wheel for %s failed: %s",
+                                       req.name, e)
+                        build_failure.append(req)
+                        continue
+                else:
+                    output_dir = self._wheel_dir
+                wheel_file = self._build_one(
+                    req, output_dir,
+                    python_tag=python_tag,
+                )
+                if wheel_file:
+                    build_success.append(req)
+                    if autobuilding:
+                        # XXX: This is mildly duplicative with prepare_files,
+                        # but not close enough to pull out to a single common
+                        # method.
+                        # The code below assumes temporary source dirs -
+                        # prevent it doing bad things.
+                        if req.source_dir and not os.path.exists(os.path.join(
+                                req.source_dir, PIP_DELETE_MARKER_FILENAME)):
+                            raise AssertionError(
+                                "bad source dir - missing marker")
+                        # Delete the source we built the wheel from
+                        req.remove_temporary_source()
+                        # set the build directory again - name is known from
+                        # the work prepare_files did.
+                        req.source_dir = req.build_location(
+                            self.preparer.build_dir
+                        )
+                        # Update the link for this.
+                        req.link = Link(path_to_url(wheel_file))
+                        assert req.link.is_wheel
+                        # extract the wheel into the dir
+                        unpack_url(
+                            req.link, req.source_dir, None, False,
+                            session=session,
+                        )
+                else:
+                    build_failure.append(req)
+
+        # notify success/failure
+        if build_success:
+            logger.info(
+                'Successfully built %s',
+                ' '.join([req.name for req in build_success]),
+            )
+        if build_failure:
+            logger.info(
+                'Failed to build %s',
+                ' '.join([req.name for req in build_failure]),
+            )
+        # Return a list of requirements that failed to build
+        return build_failure
diff --git a/lib/python3.7/site-packages/pip/_vendor/__init__.py b/lib/python3.7/site-packages/pip/_vendor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1d9508dd4ceb943402785d1b2b99c2e3db3d5c2
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/__init__.py
@@ -0,0 +1,109 @@
+"""
+pip._vendor is for vendoring dependencies of pip to prevent needing pip to
+depend on something external.
+
+Files inside of pip._vendor should be considered immutable and should only be
+updated to versions from upstream.
+"""
+from __future__ import absolute_import
+
+import glob
+import os.path
+import sys
+
+# Downstream redistributors which have debundled our dependencies should also
+# patch this value to be true. This will trigger the additional patching
+# to cause things like "six" to be available as pip._vendor.six.
+DEBUNDLED = False
+
+# By default, look in this directory for a bunch of .whl files which we will
+# add to the beginning of sys.path before attempting to import anything. This
+# is done to support downstream re-distributors like Debian and Fedora who
+# wish to create their own Wheels for our dependencies to aid in debundling.
+WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
+
+
+# Define a small helper function to alias our vendored modules to the real ones
+# if the vendored ones do not exist. The idea for this was taken from
+# https://github.com/kennethreitz/requests/pull/2567.
+def vendored(modulename):
+    vendored_name = "{0}.{1}".format(__name__, modulename)
+
+    try:
+        __import__(modulename, globals(), locals(), level=0)
+    except ImportError:
+        # We can just silently allow import failures to pass here. If we
+        # got to this point it means that ``import pip._vendor.whatever``
+        # failed and so did ``import whatever``. Since we're importing this
+        # upfront in an attempt to alias imports, not erroring here will
+        # just mean we get a regular import error whenever pip *actually*
+        # tries to import one of these modules to use it, which actually
+        # gives us a better error message than we would have otherwise
+        # gotten.
+        pass
+    else:
+        sys.modules[vendored_name] = sys.modules[modulename]
+        base, head = vendored_name.rsplit(".", 1)
+        setattr(sys.modules[base], head, sys.modules[modulename])
+
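+# A hedged illustration of the aliasing above: after vendored("six")
+# succeeds in a debundled install, both names refer to one module object,
+# i.e. sys.modules["pip._vendor.six"] is sys.modules["six"].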
+
+# If we're operating in a debundled setup, then we want to go ahead and trigger
+# the aliasing of our vendored libraries as well as looking for wheels to add
+# to our sys.path. This will cause all of this code to be a no-op typically
+# however downstream redistributors can enable it in a consistent way across
+# all platforms.
+if DEBUNDLED:
+    # Actually look inside of WHEEL_DIR to find .whl files and add them to the
+    # front of our sys.path.
+    sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
+
+    # Actually alias all of our vendored dependencies.
+    vendored("cachecontrol")
+    vendored("colorama")
+    vendored("distlib")
+    vendored("distro")
+    vendored("html5lib")
+    vendored("lockfile")
+    vendored("six")
+    vendored("six.moves")
+    vendored("six.moves.urllib")
+    vendored("six.moves.urllib.parse")
+    vendored("packaging")
+    vendored("packaging.version")
+    vendored("packaging.specifiers")
+    vendored("pep517")
+    vendored("pkg_resources")
+    vendored("progress")
+    vendored("pytoml")
+    vendored("retrying")
+    vendored("requests")
+    vendored("requests.exceptions")
+    vendored("requests.packages")
+    vendored("requests.packages.urllib3")
+    vendored("requests.packages.urllib3._collections")
+    vendored("requests.packages.urllib3.connection")
+    vendored("requests.packages.urllib3.connectionpool")
+    vendored("requests.packages.urllib3.contrib")
+    vendored("requests.packages.urllib3.contrib.ntlmpool")
+    vendored("requests.packages.urllib3.contrib.pyopenssl")
+    vendored("requests.packages.urllib3.exceptions")
+    vendored("requests.packages.urllib3.fields")
+    vendored("requests.packages.urllib3.filepost")
+    vendored("requests.packages.urllib3.packages")
+    vendored("requests.packages.urllib3.packages.ordered_dict")
+    vendored("requests.packages.urllib3.packages.six")
+    vendored("requests.packages.urllib3.packages.ssl_match_hostname")
+    vendored("requests.packages.urllib3.packages.ssl_match_hostname."
+             "_implementation")
+    vendored("requests.packages.urllib3.poolmanager")
+    vendored("requests.packages.urllib3.request")
+    vendored("requests.packages.urllib3.response")
+    vendored("requests.packages.urllib3.util")
+    vendored("requests.packages.urllib3.util.connection")
+    vendored("requests.packages.urllib3.util.request")
+    vendored("requests.packages.urllib3.util.response")
+    vendored("requests.packages.urllib3.util.retry")
+    vendored("requests.packages.urllib3.util.ssl_")
+    vendored("requests.packages.urllib3.util.timeout")
+    vendored("requests.packages.urllib3.util.url")
+    vendored("urllib3")
diff --git a/lib/python3.7/site-packages/pip/_vendor/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f57bfa27790861c00cbd64b5b62b6a242bbf598
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/__pycache__/appdirs.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/__pycache__/appdirs.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79edb16417835e0c37f85ad9fe5ddb526aecbc8c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/__pycache__/appdirs.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/__pycache__/distro.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/__pycache__/distro.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ae7763ebb983968f285d50ccb564bd833a81723
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/__pycache__/distro.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/__pycache__/ipaddress.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/__pycache__/ipaddress.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19bcede24c01253c13b17780b1a31329d5fdb024
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/__pycache__/ipaddress.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/__pycache__/pyparsing.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/__pycache__/pyparsing.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9460b6e33e5f043a3b416f78f472a6068105f5ed
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/__pycache__/pyparsing.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/__pycache__/retrying.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/__pycache__/retrying.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b81c857e878e8bbfb1bdd96587c0cae2aa2932e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/__pycache__/retrying.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/__pycache__/six.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/__pycache__/six.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c5ef454d9f33207ad8df5f78e8586f9591a53b6
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/__pycache__/six.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/appdirs.py b/lib/python3.7/site-packages/pip/_vendor/appdirs.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bd3911028102c3108dd843a0443ae5f3249be3a
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/appdirs.py
@@ -0,0 +1,604 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2005-2010 ActiveState Software Inc.
+# Copyright (c) 2013 Eddy Petrișor
+
+"""Utilities for determining application-specific dirs.
+
+See <http://github.com/ActiveState/appdirs> for details and usage.
+"""
+# Dev Notes:
+# - MSDN on where to store app data files:
+#   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
+# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
+# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+
+__version_info__ = (1, 4, 3)
+__version__ = '.'.join(map(str, __version_info__))
+
+
+import sys
+import os
+
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    unicode = str
+
+if sys.platform.startswith('java'):
+    import platform
+    os_name = platform.java_ver()[3][0]
+    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
+        system = 'win32'
+    elif os_name.startswith('Mac'): # "Mac OS X", etc.
+        system = 'darwin'
+    else: # "Linux", "SunOS", "FreeBSD", etc.
+        # Setting this to "linux2" is not ideal, but only Windows or Mac
+        # are actually checked for and the rest of the module expects
+        # *sys.platform* style strings.
+        system = 'linux2'
+else:
+    system = sys.platform
+
+
+
+def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
+    r"""Return full path to the user-specific data dir for this application.
+
+        "appname" is the name of the application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "roaming" (boolean, default False) can be set True to use the Windows
+            roaming appdata directory. That means that for users on a Windows
+            network setup for roaming profiles, this user data will be
+            sync'd on login. See
+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+            for a discussion of issues.
+
+    Typical user data directories are:
+        Mac OS X:               ~/Library/Application Support/<AppName>
+        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
+        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
+        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
+        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
+        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
+
+    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
+    That means, by default "~/.local/share/<AppName>".
+    """
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
+        path = os.path.normpath(_get_win_folder(const))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+    elif system == 'darwin':
+        path = os.path.expanduser('~/Library/Application Support/')
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
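+# A hedged sketch (not part of upstream appdirs): on a Linux host with
+# XDG_DATA_HOME unset, user_data_dir("MyApp", version="1.0") expands to
+# something like "/home/<user>/.local/share/MyApp/1.0".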
+
+def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
+    r"""Return full path to the user-shared data dir for this application.
+
+        "appname" is the name of the application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "multipath" is an optional parameter, only applicable to *nix,
+            which indicates that the entire list of data dirs should be
+            returned. By default, the first item from XDG_DATA_DIRS is
+            returned, or '/usr/local/share/<AppName>'
+            if XDG_DATA_DIRS is not set.
+
+    Typical site data directories are:
+        Mac OS X:   /Library/Application Support/<AppName>
+        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
+        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
+        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.
+
+    For Unix, this uses the $XDG_DATA_DIRS[0] default.
+
+    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+    """
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+    elif system == 'darwin':
+        path = os.path.expanduser('/Library/Application Support')
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        # XDG default for $XDG_DATA_DIRS
+        # only first, if multipath is False
+        path = os.getenv('XDG_DATA_DIRS',
+                         os.pathsep.join(['/usr/local/share', '/usr/share']))
+        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+        if appname:
+            if version:
+                appname = os.path.join(appname, version)
+            pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+        if multipath:
+            path = os.pathsep.join(pathlist)
+        else:
+            path = pathlist[0]
+        return path
+
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
+    r"""Return full path to the user-specific config dir for this application.
+
+        "appname" is the name of the application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "roaming" (boolean, default False) can be set True to use the Windows
+            roaming appdata directory. That means that for users on a Windows
+            network setup for roaming profiles, this user data will be
+            sync'd on login. See
+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+            for a discussion of issues.
+
+    Typical user config directories are:
+        Mac OS X:               same as user_data_dir
+        Unix:                   ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
+        Win *:                  same as user_data_dir
+
+    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
+    That means, by default "~/.config/<AppName>".
+    """
+    if system in ["win32", "darwin"]:
+        path = user_data_dir(appname, appauthor, None, roaming)
+    else:
+        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
+    r"""Return full path to the user-shared config dir for this application.
+
+        "appname" is the name of the application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "multipath" is an optional parameter, only applicable to *nix,
+            which indicates that the entire list of config dirs should be
+            returned. By default, the first item from XDG_CONFIG_DIRS is
+            returned, or '/etc/xdg/<AppName>' if XDG_CONFIG_DIRS is not set.
+
+    Typical site config directories are:
+        Mac OS X:   same as site_data_dir
+        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
+                    $XDG_CONFIG_DIRS
+        Win *:      same as site_data_dir
+        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+
+    For Unix, this uses the $XDG_CONFIG_DIRS[0] default, if multipath=False.
+
+    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+    """
+    if system in ["win32", "darwin"]:
+        path = site_data_dir(appname, appauthor)
+        if appname and version:
+            path = os.path.join(path, version)
+    else:
+        # XDG default for $XDG_CONFIG_DIRS
+        # only first, if multipath is False
+        path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
+        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+        if appname:
+            if version:
+                appname = os.path.join(appname, version)
+            pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+        if multipath:
+            path = os.pathsep.join(pathlist)
+        else:
+            path = pathlist[0]
+    return path
+
+
+def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
+    r"""Return full path to the user-specific cache dir for this application.
+
+        "appname" is the name of the application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "opinion" (boolean) can be False to disable the appending of
+            "Cache" to the base app data dir for Windows. See
+            discussion below.
+
+    Typical user cache directories are:
+        Mac OS X:   ~/Library/Caches/<AppName>
+        Unix:       ~/.cache/<AppName> (XDG default)
+        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
+        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
+
+    On Windows the only suggestion in the MSDN docs is that local settings go in
+    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
+    app data dir (the default returned by `user_data_dir` above). Apps typically
+    put cache data somewhere *under* the given dir here. Some examples:
+        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
+        ...\Acme\SuperApp\Cache\1.0
+    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
+    This can be disabled with the `opinion=False` option.
+    """
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+            if opinion:
+                path = os.path.join(path, "Cache")
+    elif system == 'darwin':
+        path = os.path.expanduser('~/Library/Caches')
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
+    r"""Return full path to the user-specific state dir for this application.
+
+        "appname" is the name of the application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "roaming" (boolean, default False) can be set True to use the Windows
+            roaming appdata directory. That means that for users on a Windows
+            network setup for roaming profiles, this user data will be
+            sync'd on login. See
+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+            for a discussion of issues.
+
+    Typical user state directories are:
+        Mac OS X:  same as user_data_dir
+        Unix:      ~/.local/state/<AppName>   # or in $XDG_STATE_HOME, if defined
+        Win *:     same as user_data_dir
+
+    For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
+    to extend the XDG spec and support $XDG_STATE_HOME.
+
+    That means, by default "~/.local/state/<AppName>".
+    """
+    if system in ["win32", "darwin"]:
+        path = user_data_dir(appname, appauthor, None, roaming)
+    else:
+        path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
+    r"""Return full path to the user-specific log dir for this application.
+
+        "appname" is the name of the application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "opinion" (boolean) can be False to disable the appending of
+            "Logs" to the base app data dir for Windows, and "log" to the
+            base cache dir for Unix. See discussion below.
+
+    Typical user log directories are:
+        Mac OS X:   ~/Library/Logs/<AppName>
+        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
+        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
+        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
+
+    On Windows the only suggestion in the MSDN docs is that local settings
+    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
+    examples of what some Windows apps use for a logs dir.)
+
+    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
+    value for Windows and appends "log" to the user cache dir for Unix.
+    This can be disabled with the `opinion=False` option.
+    """
+    if system == "darwin":
+        path = os.path.join(
+            os.path.expanduser('~/Library/Logs'),
+            appname)
+    elif system == "win32":
+        path = user_data_dir(appname, appauthor, version)
+        version = False
+        if opinion:
+            path = os.path.join(path, "Logs")
+    else:
+        path = user_cache_dir(appname, appauthor, version)
+        version = False
+        if opinion:
+            path = os.path.join(path, "log")
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+class AppDirs(object):
+    """Convenience wrapper for getting application dirs."""
+    def __init__(self, appname=None, appauthor=None, version=None,
+            roaming=False, multipath=False):
+        self.appname = appname
+        self.appauthor = appauthor
+        self.version = version
+        self.roaming = roaming
+        self.multipath = multipath
+
+    @property
+    def user_data_dir(self):
+        return user_data_dir(self.appname, self.appauthor,
+                             version=self.version, roaming=self.roaming)
+
+    @property
+    def site_data_dir(self):
+        return site_data_dir(self.appname, self.appauthor,
+                             version=self.version, multipath=self.multipath)
+
+    @property
+    def user_config_dir(self):
+        return user_config_dir(self.appname, self.appauthor,
+                               version=self.version, roaming=self.roaming)
+
+    @property
+    def site_config_dir(self):
+        return site_config_dir(self.appname, self.appauthor,
+                             version=self.version, multipath=self.multipath)
+
+    @property
+    def user_cache_dir(self):
+        return user_cache_dir(self.appname, self.appauthor,
+                              version=self.version)
+
+    @property
+    def user_state_dir(self):
+        return user_state_dir(self.appname, self.appauthor,
+                              version=self.version)
+
+    @property
+    def user_log_dir(self):
+        return user_log_dir(self.appname, self.appauthor,
+                            version=self.version)
+
+
+#---- internal support stuff
+
+def _get_win_folder_from_registry(csidl_name):
+    """This is a fallback technique at best. I'm not sure if using the
+    registry for this guarantees us the correct answer for all CSIDL_*
+    names.
+    """
+    if PY3:
+        import winreg as _winreg
+    else:
+        import _winreg
+
+    shell_folder_name = {
+        "CSIDL_APPDATA": "AppData",
+        "CSIDL_COMMON_APPDATA": "Common AppData",
+        "CSIDL_LOCAL_APPDATA": "Local AppData",
+    }[csidl_name]
+
+    key = _winreg.OpenKey(
+        _winreg.HKEY_CURRENT_USER,
+        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
+    )
+    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
+    return dir
+
+
+def _get_win_folder_with_pywin32(csidl_name):
+    from win32com.shell import shellcon, shell
+    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
+    # Try to make this a unicode path because SHGetFolderPath does
+    # not return unicode strings when there is unicode data in the
+    # path.
+    try:
+        dir = unicode(dir)
+
+        # Downgrade to short path name if have highbit chars. See
+        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+        has_high_char = False
+        for c in dir:
+            if ord(c) > 255:
+                has_high_char = True
+                break
+        if has_high_char:
+            try:
+                import win32api
+                dir = win32api.GetShortPathName(dir)
+            except ImportError:
+                pass
+    except UnicodeError:
+        pass
+    return dir
+
+
+def _get_win_folder_with_ctypes(csidl_name):
+    import ctypes
+
+    csidl_const = {
+        "CSIDL_APPDATA": 26,
+        "CSIDL_COMMON_APPDATA": 35,
+        "CSIDL_LOCAL_APPDATA": 28,
+    }[csidl_name]
+
+    buf = ctypes.create_unicode_buffer(1024)
+    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+    # Downgrade to short path name if have highbit chars. See
+    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+    has_high_char = False
+    for c in buf:
+        if ord(c) > 255:
+            has_high_char = True
+            break
+    if has_high_char:
+        buf2 = ctypes.create_unicode_buffer(1024)
+        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+            buf = buf2
+
+    return buf.value
+
+def _get_win_folder_with_jna(csidl_name):
+    import array
+    from com.sun import jna
+    from com.sun.jna.platform import win32
+
+    buf_size = win32.WinDef.MAX_PATH * 2
+    buf = array.zeros('c', buf_size)
+    shell = win32.Shell32.INSTANCE
+    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
+    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+    # Downgrade to short path name if have highbit chars. See
+    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+    has_high_char = False
+    for c in dir:
+        if ord(c) > 255:
+            has_high_char = True
+            break
+    if has_high_char:
+        buf = array.zeros('c', buf_size)
+        kernel = win32.Kernel32.INSTANCE
+        if kernel.GetShortPathName(dir, buf, buf_size):
+            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+    return dir
+
+if system == "win32":
+    try:
+        from ctypes import windll
+        _get_win_folder = _get_win_folder_with_ctypes
+    except ImportError:
+        try:
+            import com.sun.jna
+            _get_win_folder = _get_win_folder_with_jna
+        except ImportError:
+            _get_win_folder = _get_win_folder_from_registry
+
+
+#---- self test code
+
+if __name__ == "__main__":
+    appname = "MyApp"
+    appauthor = "MyCompany"
+
+    props = ("user_data_dir",
+             "user_config_dir",
+             "user_cache_dir",
+             "user_state_dir",
+             "user_log_dir",
+             "site_data_dir",
+             "site_config_dir")
+
+    print("-- app dirs %s --" % __version__)
+
+    print("-- app dirs (with optional 'version')")
+    dirs = AppDirs(appname, appauthor, version="1.0")
+    for prop in props:
+        print("%s: %s" % (prop, getattr(dirs, prop)))
+
+    print("\n-- app dirs (without optional 'version')")
+    dirs = AppDirs(appname, appauthor)
+    for prop in props:
+        print("%s: %s" % (prop, getattr(dirs, prop)))
+
+    print("\n-- app dirs (without optional 'appauthor')")
+    dirs = AppDirs(appname)
+    for prop in props:
+        print("%s: %s" % (prop, getattr(dirs, prop)))
+
+    print("\n-- app dirs (with disabled 'appauthor')")
+    dirs = AppDirs(appname, appauthor=False)
+    for prop in props:
+        print("%s: %s" % (prop, getattr(dirs, prop)))
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__init__.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fdee66ffe66d7ca8bb6545587e21039cd8b25cb
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__init__.py
@@ -0,0 +1,11 @@
+"""CacheControl import Interface.
+
+Make it easy to import from cachecontrol without long namespaces.
+"""
+__author__ = "Eric Larson"
+__email__ = "eric@ionrock.org"
+__version__ = "0.12.5"
+
+from .wrapper import CacheControl
+from .adapter import CacheControlAdapter
+from .controller import CacheController
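+
+# A hedged usage sketch (not part of upstream CacheControl): wrap a requests
+# session so that cacheable GET responses are stored, by default in an
+# in-memory DictCache:
+#
+#     from pip._vendor import requests
+#     from pip._vendor.cachecontrol import CacheControl
+#
+#     sess = CacheControl(requests.Session())
+#     resp = sess.get("https://example.com/")  # a repeat GET may be served
+#                                              # from the cache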
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd53da1f8715eae1c6858266eadfe15f6d2de391
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..352fb2a0e77a275450803827157e03ea472bb8a4
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/_cmd.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..512b2061f89d162c7f1fc3197a4a2d27e5e81d48
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/adapter.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c42b0407d219e9a1ac28f8b02865591721fdf9e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/cache.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/compat.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d6b9c6f2c5a1fd3bc5480208983b2b9722ea97b
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c6dadbbd96e4957b65ae6ab48d1443fb208df6f
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/controller.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1137435044ce7cca37fbd71c324a44f90787704
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/filewrapper.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..705148060ea1c88a4e632f6c3046cb93cd0b118d
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/heuristics.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e84c753bf2f7b124cebdebfaf5e276d81ddabc2f
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/serialize.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6a60e78efb8b949783c9e549803afd3183312740
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/__pycache__/wrapper.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/_cmd.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/_cmd.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1e0ad94a1ce2cd06bf381845fafbecbd9846f88
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/_cmd.py
@@ -0,0 +1,57 @@
+import logging
+
+from pip._vendor import requests
+
+from pip._vendor.cachecontrol.adapter import CacheControlAdapter
+from pip._vendor.cachecontrol.cache import DictCache
+from pip._vendor.cachecontrol.controller import logger
+
+from argparse import ArgumentParser
+
+
+def setup_logging():
+    logger.setLevel(logging.DEBUG)
+    handler = logging.StreamHandler()
+    logger.addHandler(handler)
+
+
+def get_session():
+    adapter = CacheControlAdapter(
+        DictCache(), cache_etags=True, serializer=None, heuristic=None
+    )
+    sess = requests.Session()
+    sess.mount("http://", adapter)
+    sess.mount("https://", adapter)
+
+    sess.cache_controller = adapter.controller
+    return sess
+
+
+def get_args():
+    parser = ArgumentParser()
+    parser.add_argument("url", help="The URL to try and cache")
+    return parser.parse_args()
+
+
+def main(args=None):
+    args = get_args()
+    sess = get_session()
+
+    # Make a request to get a response
+    resp = sess.get(args.url)
+
+    # Turn on logging
+    setup_logging()
+
+    # try setting the cache
+    sess.cache_controller.cache_response(resp.request, resp.raw)
+
+    # Now try to get it
+    if sess.cache_controller.cached_request(resp.request):
+        print("Cached!")
+    else:
+        print("Not cached :(")
+
+
+if __name__ == "__main__":
+    main()
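+
+# A hedged note: run as a module (assuming this vendored layout), e.g.
+#
+#     python -m pip._vendor.cachecontrol._cmd https://example.com/
+#
+# which prints "Cached!" or "Not cached :(" for the given URL.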
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/adapter.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..780eb2883b942dcf49467d80528b8c074d76fb7b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/adapter.py
@@ -0,0 +1,133 @@
+import types
+import functools
+import zlib
+
+from pip._vendor.requests.adapters import HTTPAdapter
+
+from .controller import CacheController
+from .cache import DictCache
+from .filewrapper import CallbackFileWrapper
+
+
+class CacheControlAdapter(HTTPAdapter):
+    invalidating_methods = {"PUT", "DELETE"}
+
+    def __init__(
+        self,
+        cache=None,
+        cache_etags=True,
+        controller_class=None,
+        serializer=None,
+        heuristic=None,
+        cacheable_methods=None,
+        *args,
+        **kw
+    ):
+        super(CacheControlAdapter, self).__init__(*args, **kw)
+        self.cache = cache or DictCache()
+        self.heuristic = heuristic
+        self.cacheable_methods = cacheable_methods or ("GET",)
+
+        controller_factory = controller_class or CacheController
+        self.controller = controller_factory(
+            self.cache, cache_etags=cache_etags, serializer=serializer
+        )
+
+    def send(self, request, cacheable_methods=None, **kw):
+        """
+        Send a request. Use the request information to see if it
+        exists in the cache, and cache the response if we need to and can.
+        """
+        cacheable = cacheable_methods or self.cacheable_methods
+        if request.method in cacheable:
+            try:
+                cached_response = self.controller.cached_request(request)
+            except zlib.error:
+                cached_response = None
+            if cached_response:
+                return self.build_response(request, cached_response, from_cache=True)
+
+            # check for etags and add headers if appropriate
+            request.headers.update(self.controller.conditional_headers(request))
+
+        resp = super(CacheControlAdapter, self).send(request, **kw)
+
+        return resp
+
+    def build_response(
+        self, request, response, from_cache=False, cacheable_methods=None
+    ):
+        """
+        Build a response by making a request or using the cache.
+
+        This will end up calling send and returning a potentially
+        cached response.
+        """
+        cacheable = cacheable_methods or self.cacheable_methods
+        if not from_cache and request.method in cacheable:
+            # Check for any heuristics that might update headers
+            # before trying to cache.
+            if self.heuristic:
+                response = self.heuristic.apply(response)
+
+            # apply any expiration heuristics
+            if response.status == 304:
+                # We must have sent an ETag request. This could mean
+                # that we've been expired already or that we simply
+                # have an etag. In either case, we want to try to
+                # update the cache.
+                cached_response = self.controller.update_cached_response(
+                    request, response
+                )
+
+                if cached_response is not response:
+                    from_cache = True
+
+                # We are done with the server response, read a
+                # possible response body (compliant servers will
+                # not return one, but we cannot be 100% sure) and
+                # release the connection back to the pool.
+                response.read(decode_content=False)
+                response.release_conn()
+
+                response = cached_response
+
+            # We always cache the 301 responses
+            elif response.status == 301:
+                self.controller.cache_response(request, response)
+            else:
+                # Wrap the response file with a wrapper that will cache
+                # the response when the stream has been consumed.
+                response._fp = CallbackFileWrapper(
+                    response._fp,
+                    functools.partial(
+                        self.controller.cache_response, request, response
+                    ),
+                )
+                if response.chunked:
+                    super_update_chunk_length = response._update_chunk_length
+
+                    def _update_chunk_length(self):
+                        super_update_chunk_length()
+                        if self.chunk_left == 0:
+                            self._fp._close()
+
+                    response._update_chunk_length = types.MethodType(
+                        _update_chunk_length, response
+                    )
+
+        resp = super(CacheControlAdapter, self).build_response(request, response)
+
+        # See if we should invalidate the cache.
+        if request.method in self.invalidating_methods and resp.ok:
+            cache_url = self.controller.cache_url(request.url)
+            self.cache.delete(cache_url)
+
+        # Give the response a from_cache attr to let people use it
+        resp.from_cache = from_cache
+
+        return resp
+
+    def close(self):
+        self.cache.close()
+        super(CacheControlAdapter, self).close()
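A minimal usage sketch for the adapter above, mounted on the vendored requests session with the in-memory DictCache. example.com is a placeholder, and whether the second request is actually served from the cache depends on the response's caching headers:

    from pip._vendor import requests
    from pip._vendor.cachecontrol.adapter import CacheControlAdapter
    from pip._vendor.cachecontrol.cache import DictCache

    sess = requests.Session()
    sess.mount("https://", CacheControlAdapter(DictCache(), cache_etags=True))

    first = sess.get("https://example.com")   # network hit; may be cached
    second = sess.get("https://example.com")  # possibly served from cache
    print(second.from_cache)                  # attribute set in build_response()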
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/cache.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..94e07732d91fdf38f4559da04db4221d26553d40
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/cache.py
@@ -0,0 +1,39 @@
+"""
+The cache object API for implementing caches. The default is a
+thread-safe in-memory dictionary.
+"""
+from threading import Lock
+
+
+class BaseCache(object):
+
+    def get(self, key):
+        raise NotImplementedError()
+
+    def set(self, key, value):
+        raise NotImplementedError()
+
+    def delete(self, key):
+        raise NotImplementedError()
+
+    def close(self):
+        pass
+
+
+class DictCache(BaseCache):
+
+    def __init__(self, init_dict=None):
+        self.lock = Lock()
+        self.data = init_dict or {}
+
+    def get(self, key):
+        return self.data.get(key, None)
+
+    def set(self, key, value):
+        with self.lock:
+            self.data.update({key: value})
+
+    def delete(self, key):
+        with self.lock:
+            if key in self.data:
+                self.data.pop(key)
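BaseCache above keeps the contract deliberately small (get/set/delete, plus an optional close), so any key-value store can back it. A sketch of a custom backend on the standard-library shelve module; ShelveCache is illustrative and not part of the vendored package:

    import shelve

    from pip._vendor.cachecontrol.cache import BaseCache

    class ShelveCache(BaseCache):
        # Persist cache entries in a shelve database at the given path.
        def __init__(self, path):
            self.db = shelve.open(path)

        def get(self, key):
            return self.db.get(key)

        def set(self, key, value):
            self.db[key] = value

        def delete(self, key):
            self.db.pop(key, None)

        def close(self):
            self.db.close()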
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/__init__.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e1658fa5e5e498bcfa84702ed1a53dfcec071ff
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/__init__.py
@@ -0,0 +1,2 @@
+from .file_cache import FileCache  # noqa
+from .redis_cache import RedisCache  # noqa
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aec2975a6cb93e8c1ea430bc54636c26cf8c8d2d
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d4f43061a02c4a328799054a88d5bee20ba0c11
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/file_cache.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..85d124891f683a0c49cb2b2aa62e0fbab99e763b
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/__pycache__/redis_cache.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ba00806cc31cd92e44d022aac4d8a59f0260d84
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
@@ -0,0 +1,146 @@
+import hashlib
+import os
+from textwrap import dedent
+
+from ..cache import BaseCache
+from ..controller import CacheController
+
+try:
+    FileNotFoundError
+except NameError:
+    # py2.X
+    FileNotFoundError = (IOError, OSError)
+
+
+def _secure_open_write(filename, fmode):
+    # We only want to write to this file, so open it in write-only mode
+    flags = os.O_WRONLY
+
+    # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we
+    # will only open *new* files.
+    # We specify this because we want to ensure that the mode we pass is the
+    # mode of the file.
+    flags |= os.O_CREAT | os.O_EXCL
+
+    # Do not follow symlinks to prevent someone from making a symlink that
+    # we follow and insecurely open a cache file.
+    if hasattr(os, "O_NOFOLLOW"):
+        flags |= os.O_NOFOLLOW
+
+    # On Windows we'll mark this file as binary
+    if hasattr(os, "O_BINARY"):
+        flags |= os.O_BINARY
+
+    # Before we open our file, we want to delete any existing file that is
+    # there
+    try:
+        os.remove(filename)
+    except (IOError, OSError):
+        # The file apparently does not exist, so we can just skip ahead to opening
+        pass
+
+    # Open our file; the use of os.O_CREAT | os.O_EXCL will ensure that if
+    # a race condition happens between the os.remove and this line, an
+    # error will be raised. Because we utilize a lockfile, this should only
+    # happen if someone is attempting to attack us.
+    fd = os.open(filename, flags, fmode)
+    try:
+        return os.fdopen(fd, "wb")
+
+    except BaseException:
+        # An error occurred while wrapping our FD in a file object
+        os.close(fd)
+        raise
+
+
+class FileCache(BaseCache):
+
+    def __init__(
+        self,
+        directory,
+        forever=False,
+        filemode=0o0600,
+        dirmode=0o0700,
+        use_dir_lock=None,
+        lock_class=None,
+    ):
+
+        if use_dir_lock is not None and lock_class is not None:
+            raise ValueError("Cannot use use_dir_lock and lock_class together")
+
+        try:
+            from pip._vendor.lockfile import LockFile
+            from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile
+        except ImportError:
+            notice = dedent(
+                """
+            NOTE: In order to use the FileCache you must have
+            lockfile installed. You can install it via pip:
+              pip install lockfile
+            """
+            )
+            raise ImportError(notice)
+
+        else:
+            if use_dir_lock:
+                lock_class = MkdirLockFile
+
+            elif lock_class is None:
+                lock_class = LockFile
+
+        self.directory = directory
+        self.forever = forever
+        self.filemode = filemode
+        self.dirmode = dirmode
+        self.lock_class = lock_class
+
+    @staticmethod
+    def encode(x):
+        return hashlib.sha224(x.encode()).hexdigest()
+
+    def _fn(self, name):
+        # NOTE: This method should not change as some may depend on it.
+        #       See: https://github.com/ionrock/cachecontrol/issues/63
+        hashed = self.encode(name)
+        parts = list(hashed[:5]) + [hashed]
+        return os.path.join(self.directory, *parts)
+
+    def get(self, key):
+        name = self._fn(key)
+        try:
+            with open(name, "rb") as fh:
+                return fh.read()
+
+        except FileNotFoundError:
+            return None
+
+    def set(self, key, value):
+        name = self._fn(key)
+
+        # Make sure the directory exists
+        try:
+            os.makedirs(os.path.dirname(name), self.dirmode)
+        except (IOError, OSError):
+            pass
+
+        with self.lock_class(name) as lock:
+            # Write our actual file
+            with _secure_open_write(lock.path, self.filemode) as fh:
+                fh.write(value)
+
+    def delete(self, key):
+        name = self._fn(key)
+        if not self.forever:
+            try:
+                os.remove(name)
+            except FileNotFoundError:
+                pass
+
+
+def url_to_file_path(url, filecache):
+    """Return the file cache path based on the URL.
+
+    This does not ensure the file exists!
+    """
+    key = CacheController.cache_url(url)
+    return filecache._fn(key)
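A sketch of FileCache in use, assuming the vendored lockfile package imported by the constructor is available. The _fn() scheme above shards each sha224-hashed key across five single-character directories, and url_to_file_path() exposes the resulting on-disk location; the cache directory name here is a placeholder:

    from pip._vendor import requests
    from pip._vendor.cachecontrol.caches.file_cache import (
        FileCache,
        url_to_file_path,
    )
    from pip._vendor.cachecontrol.wrapper import CacheControl

    cache = FileCache(".webcache")
    sess = CacheControl(requests.Session(), cache=cache)
    sess.get("https://example.com")  # placeholder URL

    print(url_to_file_path("https://example.com", cache))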
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed705ce7df63140ec54a8eee1ccae2b1dcd41950
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py
@@ -0,0 +1,33 @@
+from __future__ import division
+
+from datetime import datetime
+from pip._vendor.cachecontrol.cache import BaseCache
+
+
+class RedisCache(BaseCache):
+
+    def __init__(self, conn):
+        self.conn = conn
+
+    def get(self, key):
+        return self.conn.get(key)
+
+    def set(self, key, value, expires=None):
+        if not expires:
+            self.conn.set(key, value)
+        else:
+            expires = expires - datetime.utcnow()
+            self.conn.setex(key, int(expires.total_seconds()), value)
+
+    def delete(self, key):
+        self.conn.delete(key)
+
+    def clear(self):
+        """Helper for clearing all the keys in a database. Use with
+        caution!"""
+        for key in self.conn.keys():
+            self.conn.delete(key)
+
+    def close(self):
+        """Redis uses connection pooling, no need to close the connection."""
+        pass
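A sketch of wiring RedisCache to a real connection. The redis client is not vendored with pip, so this assumes a separately installed redis package and a server on localhost. Note that the controller calls set() without an expires argument, so entries persist until explicitly deleted:

    import redis

    from pip._vendor import requests
    from pip._vendor.cachecontrol.adapter import CacheControlAdapter
    from pip._vendor.cachecontrol.caches.redis_cache import RedisCache

    cache = RedisCache(redis.Redis(host="localhost", port=6379, db=0))
    sess = requests.Session()
    sess.mount("https://", CacheControlAdapter(cache))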
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/compat.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..33b5aed0a322d9dfdf35daffeb293bc56ff39c54
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/compat.py
@@ -0,0 +1,29 @@
+try:
+    from urllib.parse import urljoin
+except ImportError:
+    from urlparse import urljoin
+
+
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+
+
+# Handle the case where the requests module has been patched to not have
+# urllib3 bundled as part of its source.
+try:
+    from pip._vendor.requests.packages.urllib3.response import HTTPResponse
+except ImportError:
+    from pip._vendor.urllib3.response import HTTPResponse
+
+try:
+    from pip._vendor.requests.packages.urllib3.util import is_fp_closed
+except ImportError:
+    from pip._vendor.urllib3.util import is_fp_closed
+
+# Replicate some six behaviour
+try:
+    text_type = unicode
+except NameError:
+    text_type = str
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/controller.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/controller.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b2b943cb9b53b063396e39f6db3ff93788824a1
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/controller.py
@@ -0,0 +1,367 @@
+"""
+The httplib2 algorithms ported for use with requests.
+"""
+import logging
+import re
+import calendar
+import time
+from email.utils import parsedate_tz
+
+from pip._vendor.requests.structures import CaseInsensitiveDict
+
+from .cache import DictCache
+from .serialize import Serializer
+
+
+logger = logging.getLogger(__name__)
+
+URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
+
+
+def parse_uri(uri):
+    """Parses a URI using the regex given in Appendix B of RFC 3986.
+
+        (scheme, authority, path, query, fragment) = parse_uri(uri)
+    """
+    groups = URI.match(uri).groups()
+    return (groups[1], groups[3], groups[4], groups[6], groups[8])
+
+
+class CacheController(object):
+    """An interface to see if a request should be cached or not.
+    """
+
+    def __init__(
+        self, cache=None, cache_etags=True, serializer=None, status_codes=None
+    ):
+        self.cache = cache or DictCache()
+        self.cache_etags = cache_etags
+        self.serializer = serializer or Serializer()
+        self.cacheable_status_codes = status_codes or (200, 203, 300, 301)
+
+    @classmethod
+    def _urlnorm(cls, uri):
+        """Normalize the URL to create a safe key for the cache"""
+        (scheme, authority, path, query, fragment) = parse_uri(uri)
+        if not scheme or not authority:
+            raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
+
+        scheme = scheme.lower()
+        authority = authority.lower()
+
+        if not path:
+            path = "/"
+
+        # Could do syntax-based normalization of the URI before
+        # computing the digest. See Section 6.2.2 of Std 66.
+        request_uri = "?".join([path, query]) if query else path
+        defrag_uri = scheme + "://" + authority + request_uri
+
+        return defrag_uri
+
+    @classmethod
+    def cache_url(cls, uri):
+        return cls._urlnorm(uri)
+
+    def parse_cache_control(self, headers):
+        known_directives = {
+            # https://tools.ietf.org/html/rfc7234#section-5.2
+            "max-age": (int, True),
+            "max-stale": (int, False),
+            "min-fresh": (int, True),
+            "no-cache": (None, False),
+            "no-store": (None, False),
+            "no-transform": (None, False),
+            "only-if-cached": (None, False),
+            "must-revalidate": (None, False),
+            "public": (None, False),
+            "private": (None, False),
+            "proxy-revalidate": (None, False),
+            "s-maxage": (int, True),
+        }
+
+        cc_headers = headers.get("cache-control", headers.get("Cache-Control", ""))
+
+        retval = {}
+
+        for cc_directive in cc_headers.split(","):
+            if not cc_directive.strip():
+                continue
+
+            parts = cc_directive.split("=", 1)
+            directive = parts[0].strip()
+
+            try:
+                typ, required = known_directives[directive]
+            except KeyError:
+                logger.debug("Ignoring unknown cache-control directive: %s", directive)
+                continue
+
+            if not typ or not required:
+                retval[directive] = None
+            if typ:
+                try:
+                    retval[directive] = typ(parts[1].strip())
+                except IndexError:
+                    if required:
+                        logger.debug(
+                            "Missing value for cache-control directive: %s",
+                            directive,
+                        )
+                except ValueError:
+                    logger.debug(
+                        "Invalid value for cache-control directive %s, must be %s",
+                        directive,
+                        typ.__name__,
+                    )
+
+        return retval
+
+    def cached_request(self, request):
+        """
+        Return a cached response if it exists in the cache, otherwise
+        return False.
+        """
+        cache_url = self.cache_url(request.url)
+        logger.debug('Looking up "%s" in the cache', cache_url)
+        cc = self.parse_cache_control(request.headers)
+
+        # Bail out if the request insists on fresh data
+        if "no-cache" in cc:
+            logger.debug('Request header has "no-cache", cache bypassed')
+            return False
+
+        if "max-age" in cc and cc["max-age"] == 0:
+            logger.debug('Request header has "max-age" as 0, cache bypassed')
+            return False
+
+        # Request allows serving from the cache, let's see if we find something
+        cache_data = self.cache.get(cache_url)
+        if cache_data is None:
+            logger.debug("No cache entry available")
+            return False
+
+        # Check whether it can be deserialized
+        resp = self.serializer.loads(request, cache_data)
+        if not resp:
+            logger.warning("Cache entry deserialization failed, entry ignored")
+            return False
+
+        # If we have a cached 301, return it immediately. We don't
+        # need to test our response for other headers b/c it is
+        # intrinsically "cacheable" as it is Permanent.
+        # See:
+        #   https://tools.ietf.org/html/rfc7231#section-6.4.2
+        #
+        # Client can try to refresh the value by repeating the request
+        # with cache busting headers as usual (ie no-cache).
+        if resp.status == 301:
+            msg = (
+                'Returning cached "301 Moved Permanently" response '
+                "(ignoring date and etag information)"
+            )
+            logger.debug(msg)
+            return resp
+
+        headers = CaseInsensitiveDict(resp.headers)
+        if not headers or "date" not in headers:
+            if "etag" not in headers:
+                # Without date or etag, the cached response can never be used
+                # and should be deleted.
+                logger.debug("Purging cached response: no date or etag")
+                self.cache.delete(cache_url)
+            logger.debug("Ignoring cached response: no date")
+            return False
+
+        now = time.time()
+        date = calendar.timegm(parsedate_tz(headers["date"]))
+        current_age = max(0, now - date)
+        logger.debug("Current age based on date: %i", current_age)
+
+        # TODO: There is an assumption that the result will be a
+        #       urllib3 response object. This may not be best since we
+        #       could probably avoid instantiating or constructing the
+        #       response until we know we need it.
+        resp_cc = self.parse_cache_control(headers)
+
+        # determine freshness
+        freshness_lifetime = 0
+
+        # Check the max-age pragma in the cache control header
+        if "max-age" in resp_cc:
+            freshness_lifetime = resp_cc["max-age"]
+            logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime)
+
+        # If there isn't a max-age, check for an expires header
+        elif "expires" in headers:
+            expires = parsedate_tz(headers["expires"])
+            if expires is not None:
+                expire_time = calendar.timegm(expires) - date
+                freshness_lifetime = max(0, expire_time)
+                logger.debug("Freshness lifetime from expires: %i", freshness_lifetime)
+
+        # Determine if we are setting freshness limit in the
+        # request. Note, this overrides what was in the response.
+        if "max-age" in cc:
+            freshness_lifetime = cc["max-age"]
+            logger.debug(
+                "Freshness lifetime from request max-age: %i", freshness_lifetime
+            )
+
+        if "min-fresh" in cc:
+            min_fresh = cc["min-fresh"]
+            # adjust our current age by our min fresh
+            current_age += min_fresh
+            logger.debug("Adjusted current age from min-fresh: %i", current_age)
+
+        # Return entry if it is fresh enough
+        if freshness_lifetime > current_age:
+            logger.debug('The response is "fresh", returning cached response')
+            logger.debug("%i > %i", freshness_lifetime, current_age)
+            return resp
+
+        # we're not fresh. If we don't have an Etag, clear it out
+        if "etag" not in headers:
+            logger.debug('The cached response is "stale" with no etag, purging')
+            self.cache.delete(cache_url)
+
+        # return the original handler
+        return False
+
+    def conditional_headers(self, request):
+        cache_url = self.cache_url(request.url)
+        resp = self.serializer.loads(request, self.cache.get(cache_url))
+        new_headers = {}
+
+        if resp:
+            headers = CaseInsensitiveDict(resp.headers)
+
+            if "etag" in headers:
+                new_headers["If-None-Match"] = headers["ETag"]
+
+            if "last-modified" in headers:
+                new_headers["If-Modified-Since"] = headers["Last-Modified"]
+
+        return new_headers
+
+    def cache_response(self, request, response, body=None, status_codes=None):
+        """
+        Algorithm for caching requests.
+
+        This assumes a requests Response object.
+        """
+        # From httplib2: Don't cache 206's since we aren't going to
+        #                handle byte range requests
+        cacheable_status_codes = status_codes or self.cacheable_status_codes
+        if response.status not in cacheable_status_codes:
+            logger.debug(
+                "Status code %s not in %s", response.status, cacheable_status_codes
+            )
+            return
+
+        response_headers = CaseInsensitiveDict(response.headers)
+
+        # If we've been given a body, our response has a Content-Length, and
+        # that Content-Length is valid, then we can check to see if the body
+        # we've been given matches the expected size; if it doesn't, we'll
+        # just skip trying to cache it.
+        if (
+            body is not None
+            and "content-length" in response_headers
+            and response_headers["content-length"].isdigit()
+            and int(response_headers["content-length"]) != len(body)
+        ):
+            return
+
+        cc_req = self.parse_cache_control(request.headers)
+        cc = self.parse_cache_control(response_headers)
+
+        cache_url = self.cache_url(request.url)
+        logger.debug('Updating cache with response from "%s"', cache_url)
+
+        # Delete it from the cache if we happen to have it stored there
+        no_store = False
+        if "no-store" in cc:
+            no_store = True
+            logger.debug('Response header has "no-store"')
+        if "no-store" in cc_req:
+            no_store = True
+            logger.debug('Request header has "no-store"')
+        if no_store and self.cache.get(cache_url):
+            logger.debug('Purging existing cache entry to honor "no-store"')
+            self.cache.delete(cache_url)
+        if no_store:
+            return
+
+        # If we've been given an etag, then keep the response
+        if self.cache_etags and "etag" in response_headers:
+            logger.debug("Caching due to etag")
+            self.cache.set(
+                cache_url, self.serializer.dumps(request, response, body=body)
+            )
+
+        # Add to the cache any 301s. We do this before looking at
+        # the Date headers.
+        elif response.status == 301:
+            logger.debug("Caching permanent redirect")
+            self.cache.set(cache_url, self.serializer.dumps(request, response))
+
+        # Add to the cache if the response headers demand it. If there
+        # is no date header then we can't do anything about expiring
+        # the cache.
+        elif "date" in response_headers:
+            # cache when there is a max-age > 0
+            if "max-age" in cc and cc["max-age"] > 0:
+                logger.debug("Caching b/c date exists and max-age > 0")
+                self.cache.set(
+                    cache_url, self.serializer.dumps(request, response, body=body)
+                )
+
+            # If the response can expire, it means we should cache it
+            # in the meantime.
+            elif "expires" in response_headers:
+                if response_headers["expires"]:
+                    logger.debug("Caching b/c of expires header")
+                    self.cache.set(
+                        cache_url, self.serializer.dumps(request, response, body=body)
+                    )
+
+    def update_cached_response(self, request, response):
+        """On a 304 we will get a new set of headers that we want to
+        update our cached value with, assuming we have one.
+
+        This should only ever be called when we've sent an ETag and
+        gotten a 304 as the response.
+        """
+        cache_url = self.cache_url(request.url)
+
+        cached_response = self.serializer.loads(request, self.cache.get(cache_url))
+
+        if not cached_response:
+            # we didn't have a cached response
+            return response
+
+        # Let's update our headers with the headers from the new response:
+        # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
+        #
+        # The server isn't supposed to send headers that would make
+        # the cached body invalid. But... just in case, we'll be sure
+        # to strip out ones we know might be problematic due to
+        # typical assumptions.
+        excluded_headers = ["content-length"]
+
+        cached_response.headers.update(
+            dict(
+                (k, v)
+                for k, v in response.headers.items()
+                if k.lower() not in excluded_headers
+            )
+        )
+
+        # we want a 200 b/c we have content via the cache
+        cached_response.status = 200
+
+        # update our cache
+        self.cache.set(cache_url, self.serializer.dumps(request, cached_response))
+
+        return cached_response
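A sketch of the directive parsing above: parse_cache_control() returns a dict keyed by directive, coercing values where the table requires a type and silently dropping directives it does not know:

    from pip._vendor.cachecontrol.controller import CacheController

    cc = CacheController()
    parsed = cc.parse_cache_control(
        {"cache-control": "max-age=3600, no-store, x-unknown=1"}
    )
    print(parsed)  # {'max-age': 3600, 'no-store': None}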
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..30ed4c5a62a99906ab662a8acfba2ab75e82af2e
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/filewrapper.py
@@ -0,0 +1,80 @@
+from io import BytesIO
+
+
+class CallbackFileWrapper(object):
+    """
+    Small wrapper around an fp object which will tee everything read into a
+    buffer, and when that file is closed it will execute a callback with the
+    contents of that buffer.
+
+    All attributes are proxied to the underlying file object.
+
+    This class uses members with a double underscore (__) leading prefix so as
+    not to accidentally shadow an attribute.
+    """
+
+    def __init__(self, fp, callback):
+        self.__buf = BytesIO()
+        self.__fp = fp
+        self.__callback = callback
+
+    def __getattr__(self, name):
+        # The vagaries of garbage collection mean that self.__fp is
+        # not always set.  Using __getattribute__ and the private
+        # name[0] allows looking up the attribute value and raising an
+        # AttributeError when it doesn't exist. This stops things from
+        # infinitely recursing calls to getattr in the case where
+        # self.__fp hasn't been set.
+        #
+        # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
+        fp = self.__getattribute__("_CallbackFileWrapper__fp")
+        return getattr(fp, name)
+
+    def __is_fp_closed(self):
+        try:
+            return self.__fp.fp is None
+
+        except AttributeError:
+            pass
+
+        try:
+            return self.__fp.closed
+
+        except AttributeError:
+            pass
+
+        # We just don't cache it then.
+        # TODO: Add some logging here...
+        return False
+
+    def _close(self):
+        if self.__callback:
+            self.__callback(self.__buf.getvalue())
+
+        # We assign this to None here, because otherwise we can get into
+        # really tricky problems where the CPython interpreter deadlocks
+        # because the callback is holding a reference to something which
+        # has a __del__ method. Setting this to None breaks the cycle
+        # and allows the garbage collector to do its thing normally.
+        self.__callback = None
+
+    def read(self, amt=None):
+        data = self.__fp.read(amt)
+        self.__buf.write(data)
+        if self.__is_fp_closed():
+            self._close()
+
+        return data
+
+    def _safe_read(self, amt):
+        data = self.__fp._safe_read(amt)
+        if amt == 2 and data == b"\r\n":
+            # urllib executes this read to toss the CRLF at the end
+            # of the chunk.
+            return data
+
+        self.__buf.write(data)
+        if self.__is_fp_closed():
+            self._close()
+
+        return data
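A sketch of the tee behaviour above. urllib3 signals exhaustion by setting its wrapped socket file's fp attribute to None, which __is_fp_closed() checks, so the stand-in below mimics that; FakeSocketFile is purely illustrative:

    from io import BytesIO

    from pip._vendor.cachecontrol.filewrapper import CallbackFileWrapper

    class FakeSocketFile(object):
        # Minimal stand-in for urllib3's wrapped socket file.
        def __init__(self, data):
            self._buf = BytesIO(data)
            self.fp = self  # non-None while the stream is open

        def read(self, amt=None):
            data = self._buf.read(amt)
            if not data:
                self.fp = None  # mimic urllib3 marking EOF
            return data

    captured = []
    wrapped = CallbackFileWrapper(FakeSocketFile(b"hello"), captured.append)
    while wrapped.read(2):
        pass
    print(captured)  # [b'hello'] once the fp reports closed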
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/heuristics.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/heuristics.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c0e9790d5d0764f2ca005ae955d644bc2098d75
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/heuristics.py
@@ -0,0 +1,135 @@
+import calendar
+import time
+
+from email.utils import formatdate, parsedate, parsedate_tz
+
+from datetime import datetime, timedelta
+
+TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
+
+
+def expire_after(delta, date=None):
+    date = date or datetime.utcnow()
+    return date + delta
+
+
+def datetime_to_header(dt):
+    return formatdate(calendar.timegm(dt.timetuple()))
+
+
+class BaseHeuristic(object):
+
+    def warning(self, response):
+        """
+        Return a valid 1xx warning header value describing the cache
+        adjustments.
+
+        The response is provided to allow warnings like 113
+        http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
+        to explicitly say the response is over 24 hours old.
+        """
+        return '110 - "Response is Stale"'
+
+    def update_headers(self, response):
+        """Update the response headers with any new headers.
+
+        NOTE: This SHOULD always include some Warning header to
+              signify that the response was cached by the client, not
+              by way of the provided headers.
+        """
+        return {}
+
+    def apply(self, response):
+        updated_headers = self.update_headers(response)
+
+        if updated_headers:
+            response.headers.update(updated_headers)
+            warning_header_value = self.warning(response)
+            if warning_header_value is not None:
+                response.headers.update({"Warning": warning_header_value})
+
+        return response
+
+
+class OneDayCache(BaseHeuristic):
+    """
+    Cache the response by providing an Expires header 1 day in the
+    future.
+    """
+
+    def update_headers(self, response):
+        headers = {}
+
+        if "expires" not in response.headers:
+            date = parsedate(response.headers["date"])
+            expires = expire_after(timedelta(days=1), date=datetime(*date[:6]))
+            headers["expires"] = datetime_to_header(expires)
+            headers["cache-control"] = "public"
+        return headers
+
+
+class ExpiresAfter(BaseHeuristic):
+    """
+    Cache **all** requests for a defined time period.
+    """
+
+    def __init__(self, **kw):
+        self.delta = timedelta(**kw)
+
+    def update_headers(self, response):
+        expires = expire_after(self.delta)
+        return {"expires": datetime_to_header(expires), "cache-control": "public"}
+
+    def warning(self, response):
+        tmpl = "110 - Automatically cached for %s. Response might be stale"
+        return tmpl % self.delta
+
+
+class LastModified(BaseHeuristic):
+    """
+    If there is no Expires header already, fall back on Last-Modified
+    using the heuristic from
+    http://tools.ietf.org/html/rfc7234#section-4.2.2
+    to calculate a reasonable value.
+
+    Firefox also does something like this per
+    https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
+    http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
+    Unlike Mozilla, we limit this to 24 hours.
+    """
+    cacheable_by_default_statuses = {
+        200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501
+    }
+
+    def update_headers(self, resp):
+        headers = resp.headers
+
+        if "expires" in headers:
+            return {}
+
+        if "cache-control" in headers and headers["cache-control"] != "public":
+            return {}
+
+        if resp.status not in self.cacheable_by_default_statuses:
+            return {}
+
+        if "date" not in headers or "last-modified" not in headers:
+            return {}
+
+        # Parse first, then convert: calendar.timegm(None) would raise a
+        # TypeError before the None check below could run.
+        date = parsedate_tz(headers["date"])
+        last_modified = parsedate(headers["last-modified"])
+        if date is None or last_modified is None:
+            return {}
+        date = calendar.timegm(date)
+
+        now = time.time()
+        current_age = max(0, now - date)
+        delta = date - calendar.timegm(last_modified)
+        freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
+        if freshness_lifetime <= current_age:
+            return {}
+
+        expires = date + freshness_lifetime
+        return {"expires": time.strftime(TIME_FMT, time.gmtime(expires))}
+
+    def warning(self, resp):
+        return None
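A sketch of attaching a heuristic: ExpiresAfter above stamps an Expires header onto every response, so responses that carry no caching headers are still kept for a fixed window, and its warning() supplies the 110 header value:

    from pip._vendor import requests
    from pip._vendor.cachecontrol.adapter import CacheControlAdapter
    from pip._vendor.cachecontrol.heuristics import ExpiresAfter

    sess = requests.Session()
    sess.mount("https://", CacheControlAdapter(heuristic=ExpiresAfter(hours=1)))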
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/serialize.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/serialize.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec43ff27a873e96c3cb5f0964fedbb110e57b1f3
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/serialize.py
@@ -0,0 +1,186 @@
+import base64
+import io
+import json
+import zlib
+
+from pip._vendor import msgpack
+from pip._vendor.requests.structures import CaseInsensitiveDict
+
+from .compat import HTTPResponse, pickle, text_type
+
+
+def _b64_decode_bytes(b):
+    return base64.b64decode(b.encode("ascii"))
+
+
+def _b64_decode_str(s):
+    return _b64_decode_bytes(s).decode("utf8")
+
+
+class Serializer(object):
+
+    def dumps(self, request, response, body=None):
+        response_headers = CaseInsensitiveDict(response.headers)
+
+        if body is None:
+            body = response.read(decode_content=False)
+
+            # NOTE: 99% sure this is dead code. I'm only leaving it
+            #       here b/c I don't have a test yet to prove
+            #       it. Basically, before using
+            #       `cachecontrol.filewrapper.CallbackFileWrapper`,
+            #       this made an effort to reset the file handle. The
+            #       `CallbackFileWrapper` short circuits this code by
+            #       setting the body as the content is consumed, with the
+            #       result that a `body` argument is *always* passed
+            #       into cache_response, and in turn, into
+            #       `Serializer.dumps`.
+            response._fp = io.BytesIO(body)
+
+        # NOTE: This is all a bit weird, but it's really important that on
+        #       Python 2.x these objects are unicode and not str, even when
+        #       they contain only ascii. The problem here is that msgpack
+        #       understands the difference between unicode and bytes and we
+        #       have it set to differentiate between them, however Python 2
+        #       doesn't know the difference. Forcing these to unicode will be
+        #       enough to have msgpack know the difference.
+        data = {
+            u"response": {
+                u"body": body,
+                u"headers": dict(
+                    (text_type(k), text_type(v)) for k, v in response.headers.items()
+                ),
+                u"status": response.status,
+                u"version": response.version,
+                u"reason": text_type(response.reason),
+                u"strict": response.strict,
+                u"decode_content": response.decode_content,
+            }
+        }
+
+        # Construct our vary headers
+        data[u"vary"] = {}
+        if u"vary" in response_headers:
+            varied_headers = response_headers[u"vary"].split(",")
+            for header in varied_headers:
+                header = text_type(header).strip()
+                header_value = request.headers.get(header, None)
+                if header_value is not None:
+                    header_value = text_type(header_value)
+                data[u"vary"][header] = header_value
+
+        return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)])
+
+    def loads(self, request, data):
+        # Short circuit if we've been given an empty set of data
+        if not data:
+            return
+
+        # Determine what version of the serializer the data was serialized
+        # with
+        try:
+            ver, data = data.split(b",", 1)
+        except ValueError:
+            ver = b"cc=0"
+
+        # Make sure that our "ver" is actually a version and isn't a false
+        # positive from a comma being in the data stream.
+        if ver[:3] != b"cc=":
+            data = ver + data
+            ver = b"cc=0"
+
+        # Get the version number out of the cc=N
+        ver = ver.split(b"=", 1)[-1].decode("ascii")
+
+        # Dispatch to the actual load method for the given version
+        try:
+            return getattr(self, "_loads_v{}".format(ver))(request, data)
+
+        except AttributeError:
+            # This is a version we don't have a loads function for, so we'll
+            # just treat it as a miss and return None
+            return
+
+    def prepare_response(self, request, cached):
+        """Verify our vary headers match and construct a real urllib3
+        HTTPResponse object.
+        """
+        # Special case the '*' Vary value as it means we cannot actually
+        # determine if the cached response is suitable for this request.
+        if "*" in cached.get("vary", {}):
+            return
+
+        # Ensure that the Vary headers for the cached response match our
+        # request
+        for header, value in cached.get("vary", {}).items():
+            if request.headers.get(header, None) != value:
+                return
+
+        body_raw = cached["response"].pop("body")
+
+        headers = CaseInsensitiveDict(data=cached["response"]["headers"])
+        if headers.get("transfer-encoding", "") == "chunked":
+            headers.pop("transfer-encoding")
+
+        cached["response"]["headers"] = headers
+
+        try:
+            body = io.BytesIO(body_raw)
+        except TypeError:
+            # This can happen if cachecontrol serialized to v1 format (pickle)
+            # using Python 2. A Python 2 str(byte string) will be unpickled as
+            # a Python 3 str (unicode string), which will cause the above to
+            # fail with:
+            #
+            #     TypeError: 'str' does not support the buffer interface
+            body = io.BytesIO(body_raw.encode("utf8"))
+
+        return HTTPResponse(body=body, preload_content=False, **cached["response"])
+
+    def _loads_v0(self, request, data):
+        # The original legacy cache data. This doesn't contain enough
+        # information to construct everything we need, so we'll treat this as
+        # a miss.
+        return
+
+    def _loads_v1(self, request, data):
+        try:
+            cached = pickle.loads(data)
+        except ValueError:
+            return
+
+        return self.prepare_response(request, cached)
+
+    def _loads_v2(self, request, data):
+        try:
+            cached = json.loads(zlib.decompress(data).decode("utf8"))
+        except (ValueError, zlib.error):
+            return
+
+        # We need to decode the items that we've base64 encoded
+        cached["response"]["body"] = _b64_decode_bytes(cached["response"]["body"])
+        cached["response"]["headers"] = dict(
+            (_b64_decode_str(k), _b64_decode_str(v))
+            for k, v in cached["response"]["headers"].items()
+        )
+        cached["response"]["reason"] = _b64_decode_str(cached["response"]["reason"])
+        cached["vary"] = dict(
+            (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v)
+            for k, v in cached["vary"].items()
+        )
+
+        return self.prepare_response(request, cached)
+
+    def _loads_v3(self, request, data):
+        # Due to Python 2 encoding issues, it's impossible to know for sure
+        # exactly how to load v3 entries, thus we'll treat these as a miss so
+        # that they get rewritten out as v4 entries.
+        return
+
+    def _loads_v4(self, request, data):
+        try:
+            cached = msgpack.loads(data, encoding="utf-8")
+        except ValueError:
+            return
+
+        return self.prepare_response(request, cached)
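A sketch of the version dispatch in loads() above: entries carry a b"cc=N," prefix that routes to the matching _loads_vN method, and anything unrecognized degrades to a cache miss:

    from pip._vendor.cachecontrol.serialize import Serializer

    s = Serializer()
    print(s.loads(None, b""))           # None: empty data short-circuits
    print(s.loads(None, b"cc=9,x"))     # None: no _loads_v9 exists
    print(s.loads(None, b"no-prefix"))  # None: treated as legacy v0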
diff --git a/lib/python3.7/site-packages/pip/_vendor/cachecontrol/wrapper.py b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..265bfc8bc1ee4328465f88d894a43e3cdfac11e0
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/cachecontrol/wrapper.py
@@ -0,0 +1,29 @@
+from .adapter import CacheControlAdapter
+from .cache import DictCache
+
+
+def CacheControl(
+    sess,
+    cache=None,
+    cache_etags=True,
+    serializer=None,
+    heuristic=None,
+    controller_class=None,
+    adapter_class=None,
+    cacheable_methods=None,
+):
+
+    cache = cache or DictCache()
+    adapter_class = adapter_class or CacheControlAdapter
+    adapter = adapter_class(
+        cache,
+        cache_etags=cache_etags,
+        serializer=serializer,
+        heuristic=heuristic,
+        controller_class=controller_class,
+        cacheable_methods=cacheable_methods,
+    )
+    sess.mount("http://", adapter)
+    sess.mount("https://", adapter)
+
+    return sess
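CacheControl() above is the convenience entry point: it builds an adapter and mounts it for both schemes, exactly what the earlier adapter example did by hand. A sketch with a placeholder URL:

    from pip._vendor import requests
    from pip._vendor.cachecontrol.wrapper import CacheControl

    sess = CacheControl(requests.Session())
    resp = sess.get("https://example.com")
    print(resp.from_cache)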
diff --git a/lib/python3.7/site-packages/pip/_vendor/certifi/__init__.py b/lib/python3.7/site-packages/pip/_vendor/certifi/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..632db8e132546269b7aa946494a0552b31fc8217
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/certifi/__init__.py
@@ -0,0 +1,3 @@
+from .core import where
+
+__version__ = "2019.03.09"
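certifi's where() returns the filesystem path of the bundled cacert.pem below, which can be handed to any TLS API that accepts a CA bundle; a minimal sketch:

    import ssl

    from pip._vendor.certifi import where

    ctx = ssl.create_default_context(cafile=where())
    print(where())  # .../pip/_vendor/certifi/cacert.pem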
diff --git a/lib/python3.7/site-packages/pip/_vendor/certifi/__main__.py b/lib/python3.7/site-packages/pip/_vendor/certifi/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae2aff5c804e56e500ad1e7657c73f856beb9a58
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/certifi/__main__.py
@@ -0,0 +1,2 @@
+from pip._vendor.certifi import where
+print(where())
diff --git a/lib/python3.7/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a8735aed1afac3e45ad1c513e8399b11a54f04ff
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/certifi/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f283d09451216568c6f97103ad2d340743350ce
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/certifi/__pycache__/__main__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..169f7d264be8f30d6e43de195dea58506e0244c5
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/certifi/__pycache__/core.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/certifi/cacert.pem b/lib/python3.7/site-packages/pip/_vendor/certifi/cacert.pem
new file mode 100644
index 0000000000000000000000000000000000000000..84636dde7d854e0cbca11092e897a4eab9d127f7
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/certifi/cacert.pem
@@ -0,0 +1,4658 @@
+
+# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Label: "GlobalSign Root CA"
+# Serial: 4835703278459707669005204
+# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a
+# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c
+# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
+A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
+b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
+MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
+YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
+jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
+xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
+1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
+snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
+U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
+9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
+AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
+yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
+38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
+AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
+DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
+HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2
+# Label: "GlobalSign Root CA - R2"
+# Serial: 4835703278459682885658125
+# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30
+# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe
+# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e
+-----BEGIN CERTIFICATE-----
+MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1
+MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL
+v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8
+eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq
+tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd
+C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa
+zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB
+mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH
+V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n
+bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG
+3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs
+J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO
+291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS
+ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd
+AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7
+TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only
+# Label: "Verisign Class 3 Public Primary Certification Authority - G3"
+# Serial: 206684696279472310254277870180966723415
+# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09
+# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6
+# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44
+-----BEGIN CERTIFICATE-----
+MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl
+cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu
+LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT
+aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp
+dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD
+VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT
+aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ
+bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu
+IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b
+N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t
+KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu
+kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm
+CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ
+Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu
+imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te
+2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe
+DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC
+/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p
+F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt
+TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Label: "Entrust.net Premium 2048 Secure Server CA"
+# Serial: 946069240
+# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90
+# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31
+# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3
+MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
+sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
+XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
+HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
+4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub
+j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo
+U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
+zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b
+u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+
+bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er
+fF6adulZkMV8gzURZVE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Label: "Baltimore CyberTrust Root"
+# Serial: 33554617
+# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4
+# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74
+# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
+RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
+VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
+DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
+ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
+VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
+mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
+IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
+mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
+XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
+dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
+jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
+BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
+DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
+9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
+jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
+R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
+-----END CERTIFICATE-----
+
+# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
+# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network
+# Label: "AddTrust External Root"
+# Serial: 1
+# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f
+# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68
+# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2
+-----BEGIN CERTIFICATE-----
+MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs
+IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290
+MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux
+FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h
+bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v
+dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt
+H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9
+uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX
+mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX
+a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN
+E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0
+WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD
+VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0
+Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU
+cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx
+IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN
+AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH
+YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5
+6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC
+Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX
+c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a
+mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Label: "Entrust Root Certification Authority"
+# Serial: 1164660820
+# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4
+# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9
+# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
+Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
+KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
+NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
+NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
+ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
+4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
+KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
+rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
+94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
+sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
+gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
+kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
+vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
+O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
+AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
+eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
+0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc.
+# Subject: CN=GeoTrust Global CA O=GeoTrust Inc.
+# Label: "GeoTrust Global CA"
+# Serial: 144470
+# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5
+# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12
+# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a
+-----BEGIN CERTIFICATE-----
+MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i
+YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG
+EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg
+R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9
+9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq
+fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv
+iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU
+1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+
+bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW
+MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA
+ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l
+uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn
+Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS
+tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF
+PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un
+hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV
+5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc.
+# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc.
+# Label: "GeoTrust Universal CA"
+# Serial: 1
+# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48
+# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79
+# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12
+-----BEGIN CERTIFICATE-----
+MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy
+c2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE
+BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0
+IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV
+VaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8
+cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT
+QjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh
+F7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v
+c7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w
+mZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd
+VHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX
+teGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ
+f9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe
+Bi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+
+nhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB
+/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY
+MBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG
+9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc
+aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX
+IwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn
+ANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z
+uzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN
+Pnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja
+QI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW
+koRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9
+ER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt
+DF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm
+bJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
+# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.
+# Label: "GeoTrust Universal CA 2"
+# Serial: 1
+# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7
+# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79
+# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b
+-----BEGIN CERTIFICATE-----
+MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW
+MBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy
+c2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD
+VQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1
+c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81
+WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG
+FF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq
+XbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL
+se4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb
+KNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd
+IgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73
+y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt
+hAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc
+QIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4
+Lt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV
+HSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ
+KoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z
+dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ
+L1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr
+Fg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo
+ag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY
+T1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz
+GDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m
+1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV
+OCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH
+6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX
+QMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS
+-----END CERTIFICATE-----
+
+# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
+# Subject: CN=AAA Certificate Services O=Comodo CA Limited
+# Label: "Comodo AAA Services root"
+# Serial: 1
+# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0
+# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49
+# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4
+-----BEGIN CERTIFICATE-----
+MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
+YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
+GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
+BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
+3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
+rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
+ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
+oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
+QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
+b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
+AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
+GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
+Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
+G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
+l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
+smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority
+# Subject: CN=QuoVadis Root Certification Authority O=QuoVadis Limited OU=Root Certification Authority
+# Label: "QuoVadis Root CA"
+# Serial: 985026699
+# MD5 Fingerprint: 27:de:36:fe:72:b7:00:03:00:9d:f4:f0:1e:6c:04:24
+# SHA1 Fingerprint: de:3f:40:bd:50:93:d3:9b:6c:60:f6:da:bc:07:62:01:00:89:76:c9
+# SHA256 Fingerprint: a4:5e:de:3b:bb:f0:9c:8a:e1:5c:72:ef:c0:72:68:d6:93:a2:1c:99:6f:d5:1e:67:ca:07:94:60:fd:6d:88:73
+-----BEGIN CERTIFICATE-----
+MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0
+aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz
+MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw
+IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR
+dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp
+li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D
+rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ
+WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug
+F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU
+xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC
+Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv
+dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw
+ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl
+IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh
+c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy
+ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh
+Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI
+KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T
+KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq
+y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p
+dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD
+VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL
+MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk
+fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8
+7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R
+cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y
+mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW
+xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK
+SnQ2+Q==
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2"
+# Serial: 1289
+# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b
+# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7
+# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86
+-----BEGIN CERTIFICATE-----
+MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa
+GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg
+Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J
+WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB
+rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp
++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1
+ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i
+Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz
+PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og
+/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH
+oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI
+yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud
+EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2
+A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL
+MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
+ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f
+BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn
+g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl
+fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K
+WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha
+B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc
+hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR
+TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD
+mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z
+ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y
+4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza
+8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3"
+# Serial: 1478
+# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf
+# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85
+# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35
+-----BEGIN CERTIFICATE-----
+MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM
+V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB
+4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr
+H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd
+8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv
+vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT
+mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe
+btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc
+T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt
+WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ
+c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A
+4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD
+VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG
+CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0
+aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
+aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu
+dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw
+czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G
+A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg
+Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0
+7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem
+d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd
++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B
+4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN
+t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x
+DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57
+k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s
+zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j
+Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT
+mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK
+4SVhM7JZG+Ju1zdXtg2pEto=
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1
+# Subject: O=SECOM Trust.net OU=Security Communication RootCA1
+# Label: "Security Communication Root CA"
+# Serial: 0
+# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a
+# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7
+# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c
+-----BEGIN CERTIFICATE-----
+MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY
+MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t
+dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5
+WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD
+VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8
+9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ
+DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9
+Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N
+QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ
+xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G
+A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG
+kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr
+Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5
+Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU
+JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot
+RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Sonera Class2 CA O=Sonera
+# Subject: CN=Sonera Class2 CA O=Sonera
+# Label: "Sonera Class 2 Root CA"
+# Serial: 29
+# MD5 Fingerprint: a3:ec:75:0f:2e:88:df:fa:48:01:4e:0b:5c:48:6f:fb
+# SHA1 Fingerprint: 37:f7:6d:e6:07:7c:90:c5:b1:3e:93:1a:b7:41:10:b4:f2:e4:9a:27
+# SHA256 Fingerprint: 79:08:b4:03:14:c1:38:10:0b:51:8d:07:35:80:7f:fb:fc:f8:51:8a:00:95:33:71:05:ba:38:6b:15:3d:d9:27
+-----BEGIN CERTIFICATE-----
+MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP
+MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx
+MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV
+BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o
+Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt
+5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s
+3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej
+vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu
+8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw
+DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG
+MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil
+zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/
+3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD
+FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6
+Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2
+ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M
+-----END CERTIFICATE-----
+
+# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Label: "XRamp Global CA Root"
+# Serial: 107108908803651509692980124233745014957
+# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1
+# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6
+# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
+gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
+MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
+UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
+NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
+dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
+dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
+38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
+KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
+DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
+qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
+JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
+BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
+eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
+ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
+vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
+qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
+IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
+i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
+O+7ETPTsJ3xCwnR8gooJybQDJbw=
+-----END CERTIFICATE-----
+
+# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Label: "Go Daddy Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67
+# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4
+# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4
+-----BEGIN CERTIFICATE-----
+MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
+MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
+YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
+MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
+ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
+MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
+ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
+PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
+wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
+EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
+avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
+sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
+/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
+IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
+ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
+OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
+TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
+dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
+ReYNnyicsbkqWletNw+vHX/bvZ8=
+-----END CERTIFICATE-----
+
+# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Label: "Starfield Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24
+# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a
+# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
+MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
+U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
+NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
+ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
+ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
+DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
+X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
+K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
+1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
+A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
+zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
+YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
+bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
+L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
+eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
+VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
+WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
+-----END CERTIFICATE-----
+
+# Issuer: O=Government Root Certification Authority
+# Subject: O=Government Root Certification Authority
+# Label: "Taiwan GRCA"
+# Serial: 42023070807708724159991140556527066870
+# MD5 Fingerprint: 37:85:44:53:32:45:1f:20:f0:f3:95:e1:25:c4:43:4e
+# SHA1 Fingerprint: f4:8b:11:bf:de:ab:be:94:54:20:71:e6:41:de:6b:be:88:2b:40:b9
+# SHA256 Fingerprint: 76:00:29:5e:ef:e8:5b:9e:1f:d6:24:db:76:06:2a:aa:ae:59:81:8a:54:d2:77:4c:d4:c0:b2:c0:11:31:e1:b3
+-----BEGIN CERTIFICATE-----
+MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/
+MQswCQYDVQQGEwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5MB4XDTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1ow
+PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
+AJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qNw8XR
+IePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1q
+gQdW8or5BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKy
+yhwOeYHWtXBiCAEuTk8O1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAts
+F/tnyMKtsc2AtJfcdgEWFelq16TheEfOhtX7MfP6Mb40qij7cEwdScevLJ1tZqa2
+jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wovJ5pGfaENda1UhhXcSTvx
+ls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7Q3hub/FC
+VGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHK
+YS1tB6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoH
+EgKXTiCQ8P8NHuJBO9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThN
+Xo+EHWbNxWCWtFJaBYmOlXqYwZE8lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1Ud
+DgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNVHRMEBTADAQH/MDkGBGcqBwAE
+MTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg209yewDL7MTqK
+UWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ
+TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyf
+qzvS/3WXy6TjZwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaK
+ZEk9GhiHkASfQlK3T8v+R0F2Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFE
+JPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlUD7gsL0u8qV1bYH+Mh6XgUmMqvtg7
+hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6QzDxARvBMB1uUO07+1
+EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+HbkZ6Mm
+nD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WX
+udpVBrkk7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44Vbnz
+ssQwmSNOXfJIoRIM3BKQCZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDe
+LMDDav7v3Aun+kbfYNucpllQdSNpc5Oy+fwC00fmcc4QAu4njIT/rEUNE1yDMuAl
+pYYsfPQS
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root CA"
+# Serial: 17154717934120587862167794914071425081
+# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72
+# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43
+# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
+JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
+mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
+VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
+AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
+AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
+pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
+dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
+fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
+H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root CA"
+# Serial: 10944719598952040374951832963794454346
+# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e
+# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36
+# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
+CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
+nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
+43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
+T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
+gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
+DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
+hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
+06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
+PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
+YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
+CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert High Assurance EV Root CA"
+# Serial: 3553400076410547919724730734378100087
+# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a
+# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25
+# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
+ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
+LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
+RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
+PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
+xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
+Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
+hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
+EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
+FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
+nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
+eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
+hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
+Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
+vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
++OkuE6N36B9K
+-----END CERTIFICATE-----
+
+# Issuer: CN=Class 2 Primary CA O=Certplus
+# Subject: CN=Class 2 Primary CA O=Certplus
+# Label: "Certplus Class 2 Primary CA"
+# Serial: 177770208045934040241468760488327595043
+# MD5 Fingerprint: 88:2c:8c:52:b8:a2:3c:f3:f7:bb:03:ea:ae:ac:42:0b
+# SHA1 Fingerprint: 74:20:74:41:72:9c:dd:92:ec:79:31:d8:23:10:8d:c2:81:92:e2:bb
+# SHA256 Fingerprint: 0f:99:3c:8a:ef:97:ba:af:56:87:14:0e:d5:9a:d1:82:1b:b4:af:ac:f0:aa:9a:58:b5:d5:7a:33:8a:3a:fb:cb
+-----BEGIN CERTIFICATE-----
+MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw
+PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz
+cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9
+MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz
+IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ
+ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR
+VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL
+kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd
+EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas
+H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0
+HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud
+DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4
+QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu
+Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/
+AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8
+yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR
+FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA
+ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB
+kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7
+l7+ijrRU
+-----END CERTIFICATE-----
+
+# Issuer: CN=DST Root CA X3 O=Digital Signature Trust Co.
+# Subject: CN=DST Root CA X3 O=Digital Signature Trust Co.
+# Label: "DST Root CA X3"
+# Serial: 91299735575339953335919266965803778155
+# MD5 Fingerprint: 41:03:52:dc:0f:f7:50:1b:16:f0:02:8e:ba:6f:45:c5
+# SHA1 Fingerprint: da:c9:02:4f:54:d8:f6:df:94:93:5f:b1:73:26:38:ca:6a:d7:7c:13
+# SHA256 Fingerprint: 06:87:26:03:31:a7:24:03:d9:09:f1:05:e6:9b:cf:0d:32:e1:bd:24:93:ff:c6:d9:20:6d:11:bc:d6:77:07:39
+-----BEGIN CERTIFICATE-----
+MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/
+MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
+DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow
+PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD
+Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O
+rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq
+OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b
+xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw
+7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD
+aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV
+HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG
+SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69
+ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr
+AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz
+R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5
+JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo
+Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Label: "SwissSign Gold CA - G2"
+# Serial: 13492815561806991280
+# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93
+# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61
+# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95
+-----BEGIN CERTIFICATE-----
+MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
+BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln
+biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF
+MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT
+d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8
+76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+
+bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c
+6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE
+emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd
+MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt
+MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y
+MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y
+FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi
+aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM
+gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB
+qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7
+lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn
+8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
+L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6
+45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO
+UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5
+O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC
+bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv
+GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a
+77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC
+hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3
+92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp
+Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w
+ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt
+Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Label: "SwissSign Silver CA - G2"
+# Serial: 5700383053117599563
+# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13
+# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb
+# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5
+-----BEGIN CERTIFICATE-----
+MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE
+BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu
+IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow
+RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY
+U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
+MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv
+Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br
+YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF
+nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH
+6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt
+eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/
+c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ
+MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH
+HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf
+jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6
+5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB
+rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c
+wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
+cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB
+AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp
+WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9
+xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ
+2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ
+IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8
+aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X
+em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR
+dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/
+OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+
+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
+tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
+# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.
+# Label: "GeoTrust Primary Certification Authority"
+# Serial: 32798226551256963324313806436981982369
+# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf
+# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96
+# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c
+-----BEGIN CERTIFICATE-----
+MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY
+MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo
+R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx
+MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK
+Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9
+AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA
+ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0
+7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W
+kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI
+mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ
+KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1
+6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl
+4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K
+oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj
+UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU
+AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA"
+# Serial: 69529181992039203566298953787712940909
+# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12
+# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81
+# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f
+-----BEGIN CERTIFICATE-----
+MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB
+qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV
+BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw
+NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j
+LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG
+A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
+IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs
+W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta
+3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk
+6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6
+Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J
+NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA
+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP
+r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU
+DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz
+YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX
+xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2
+/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/
+LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7
+jVaMaA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G5"
+# Serial: 33037644167568058970164719475676101450
+# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c
+# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5
+# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df
+-----BEGIN CERTIFICATE-----
+MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB
+yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW
+ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1
+nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex
+t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz
+SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG
+BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+
+rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/
+NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E
+BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH
+BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy
+aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv
+MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE
+p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y
+5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK
+WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ
+4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N
+hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
+# Subject: CN=SecureTrust CA O=SecureTrust Corporation
+# Label: "SecureTrust CA"
+# Serial: 17199774589125277788362757014266862032
+# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1
+# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11
+# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
+MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
+cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
+Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
+wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
+7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
+8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
+D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
+CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
+3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Secure Global CA O=SecureTrust Corporation
+# Subject: CN=Secure Global CA O=SecureTrust Corporation
+# Label: "Secure Global CA"
+# Serial: 9751836167731051554232119481456978597
+# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de
+# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b
+# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69
+-----BEGIN CERTIFICATE-----
+MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
+MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
+Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
+iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
+/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
+jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
+HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
+sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
+gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
+KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
+AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
+URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
+H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
+I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
+iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
+f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO Certification Authority O=COMODO CA Limited
+# Label: "COMODO Certification Authority"
+# Serial: 104350513648249232941998508985834464573
+# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75
+# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b
+# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
+gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
+BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
+YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
+RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
+UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
+2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
+nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
+/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
+PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
+QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
+SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
+zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
+BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ZQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
+# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.
+# Label: "Network Solutions Certificate Authority"
+# Serial: 116697915152937497490437556386812487904
+# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e
+# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce
+# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c
+-----BEGIN CERTIFICATE-----
+MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi
+MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu
+MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp
+dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV
+UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO
+ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz
+c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP
+OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl
+mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF
+BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4
+qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw
+gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB
+BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu
+bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp
+dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8
+6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/
+h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH
+/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv
+wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN
+pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Label: "COMODO ECC Certification Authority"
+# Serial: 41578283867086692638256921589707938090
+# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23
+# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11
+# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
+T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
+FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
+cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
+BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
+fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
+GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GA CA O=WISeKey OU=Copyright (c) 2005/OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GA CA"
+# Serial: 86718877871133159090080555911823548314
+# MD5 Fingerprint: bc:6c:51:33:a7:e9:d3:66:63:54:15:72:1b:21:92:93
+# SHA1 Fingerprint: 59:22:a1:e1:5a:ea:16:35:21:f8:98:39:6a:46:46:b0:44:1b:0f:a9
+# SHA256 Fingerprint: 41:c9:23:86:6a:b4:ca:d6:b7:ad:57:80:81:58:2e:02:07:97:a6:cb:df:4f:ff:78:ce:83:96:b3:89:37:d7:f5
+-----BEGIN CERTIFICATE-----
+MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB
+ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly
+aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl
+ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w
+NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G
+A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD
+VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX
+SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR
+VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2
+w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF
+mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg
+4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9
+4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw
+EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx
+SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2
+ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8
+vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa
+hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi
+Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ
+/L7fCg0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna O=Dhimyotis
+# Subject: CN=Certigna O=Dhimyotis
+# Label: "Certigna"
+# Serial: 18364802974209362175
+# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff
+# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97
+# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d
+-----BEGIN CERTIFICATE-----
+MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV
+BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X
+DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ
+BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny
+gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw
+zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q
+130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2
+JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw
+ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT
+AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj
+AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG
+9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h
+bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc
+fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu
+HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w
+t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
+WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center
+# Subject: CN=Deutsche Telekom Root CA 2 O=Deutsche Telekom AG OU=T-TeleSec Trust Center
+# Label: "Deutsche Telekom Root CA 2"
+# Serial: 38
+# MD5 Fingerprint: 74:01:4a:91:b1:08:c4:58:ce:47:cd:f0:dd:11:53:08
+# SHA1 Fingerprint: 85:a4:08:c0:9c:19:3e:5d:51:58:7d:cd:d6:13:30:fd:8c:de:37:bf
+# SHA256 Fingerprint: b6:19:1a:50:d0:c3:97:7f:7d:a9:9b:cd:aa:c8:6a:22:7d:ae:b9:67:9e:c7:0b:a3:b0:c9:d9:22:71:c1:70:d3
+-----BEGIN CERTIFICATE-----
+MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc
+MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj
+IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB
+IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE
+RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl
+U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290
+IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU
+ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC
+QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr
+rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S
+NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc
+QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH
+txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP
+BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC
+AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp
+tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa
+IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl
+6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+
+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU
+Cm26OWMohpLzGITY+9HPBVZkVw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc
+# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc
+# Label: "Cybertrust Global Root"
+# Serial: 4835703278459682877484360
+# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1
+# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6
+# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3
+-----BEGIN CERTIFICATE-----
+MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG
+A1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh
+bCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE
+ChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS
+b290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5
+7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS
+J8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y
+HLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP
+t3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz
+FtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY
+XSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/
+MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw
+hi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js
+MB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA
+A4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj
+Wqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx
+XOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o
+omcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc
+A06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW
+WL1WMRJOEcgh4LMRkWXbtKaIOM5V
+-----END CERTIFICATE-----
+
+# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Label: "ePKI Root Certification Authority"
+# Serial: 28956088682735189655030529057352760477
+# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3
+# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0
+# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5
+-----BEGIN CERTIFICATE-----
+MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw
+IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL
+SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH
+SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh
+ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X
+DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1
+TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ
+fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA
+sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU
+WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS
+nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH
+dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip
+NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC
+AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF
+MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB
+uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl
+PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP
+JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/
+gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2
+j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6
+5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB
+o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS
+/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+# Issuer: O=certSIGN OU=certSIGN ROOT CA
+# Subject: O=certSIGN OU=certSIGN ROOT CA
+# Label: "certSIGN ROOT CA"
+# Serial: 35210227249154
+# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
+# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
+# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
+QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
+0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
+UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
+RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
+OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
+AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
+BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
+LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
+MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
+44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
+Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
+i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
+9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G3"
+# Serial: 28809105769928564313984085209975885599
+# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05
+# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd
+# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4
+-----BEGIN CERTIFICATE-----
+MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB
+mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT
+MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s
+eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv
+cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ
+BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg
+MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0
+BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg
+LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz
++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm
+hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn
+5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W
+JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL
+DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC
+huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw
+HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB
+AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB
+zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN
+kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD
+AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH
+SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G
+spki4cErx5z481+oghLrGREt
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G2"
+# Serial: 71758320672825410020661621085256472406
+# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f
+# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12
+# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57
+-----BEGIN CERTIFICATE-----
+MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp
+IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi
+BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw
+MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh
+d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig
+YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v
+dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/
+BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6
+papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K
+DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3
+KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox
+XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only
+# Label: "thawte Primary Root CA - G3"
+# Serial: 127614157056681299805556476275995414779
+# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31
+# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2
+# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB
+rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf
+Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw
+MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV
+BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa
+Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl
+LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u
+MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl
+ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm
+gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8
+YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf
+b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9
+9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S
+zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk
+OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV
+HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA
+2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW
+oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu
+t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c
+KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM
+m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu
+MdRAGmI0Nj81Aa6sY6A=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only
+# Label: "GeoTrust Primary Certification Authority - G2"
+# Serial: 80682863203381065782177908751794619243
+# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a
+# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0
+# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66
+-----BEGIN CERTIFICATE-----
+MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL
+MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj
+KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2
+MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0
+eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV
+BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw
+NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV
+BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH
+MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL
+So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal
+tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG
+CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT
+qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz
+rD6ogRLQy7rQkgu2npaqBA+K
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Universal Root Certification Authority"
+# Serial: 85209574734084581917763752644031726877
+# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19
+# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54
+# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c
+-----BEGIN CERTIFICATE-----
+MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB
+vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL
+ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp
+U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W
+ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX
+MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0
+IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y
+IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh
+bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF
+9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH
+H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H
+LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN
+/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT
+rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud
+EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw
+WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs
+exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud
+DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4
+sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+
+seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz
+4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+
+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR
+lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3
+7M2CYfE45k+XmCpajQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only
+# Label: "VeriSign Class 3 Public Primary Certification Authority - G4"
+# Serial: 63143484348153506665311985501458640051
+# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41
+# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a
+# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79
+-----BEGIN CERTIFICATE-----
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL
+MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW
+ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln
+biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp
+U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG
+A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp
+U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg
+SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln
+biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm
+GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve
+fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ
+aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj
+aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW
+kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC
+4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga
+FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services)
+# Subject: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services)
+# Label: "NetLock Arany (Class Gold) Főtanúsítvány"
+# Serial: 80544274841616
+# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88
+# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91
+# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
+EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
+MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
+dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
+pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
+b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
+aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
+IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
+lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
+AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
+VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
+ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
+BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
+AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
+U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
+bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
+uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
+XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden Root CA - G2 O=Staat der Nederlanden
+# Label: "Staat der Nederlanden Root CA - G2"
+# Serial: 10000012
+# MD5 Fingerprint: 7c:a5:0f:f8:5b:9a:7d:6d:30:ae:54:5a:e3:42:a2:8a
+# SHA1 Fingerprint: 59:af:82:79:91:86:c7:b4:75:07:cb:cf:03:57:46:eb:04:dd:b7:16
+# SHA256 Fingerprint: 66:8c:83:94:7d:a6:3b:72:4b:ec:e1:74:3c:31:a0:e6:ae:d0:db:8e:c5:b3:1b:e3:77:bb:78:4f:91:b6:71:6f
+-----BEGIN CERTIFICATE-----
+MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX
+DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291
+qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp
+uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU
+Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE
+pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp
+5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M
+UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN
+GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy
+5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv
+6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK
+eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6
+B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/
+BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov
+L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV
+HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG
+SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS
+CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen
+5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897
+IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK
+gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL
++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL
+vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm
+bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk
+N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC
+Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z
+ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post
+# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post
+# Label: "Hongkong Post Root CA 1"
+# Serial: 1000
+# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca
+# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58
+# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2
+-----BEGIN CERTIFICATE-----
+MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx
+FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg
+Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG
+A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr
+b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ
+jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn
+PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh
+ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9
+nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h
+q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED
+MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC
+mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3
+7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB
+oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs
+EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO
+fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi
+AmvZWg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Label: "SecureSign RootCA11"
+# Serial: 1
+# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26
+# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3
+# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12
+-----BEGIN CERTIFICATE-----
+MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr
+MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG
+A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0
+MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp
+Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD
+QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz
+i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8
+h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV
+MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9
+UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni
+8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC
+h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD
+VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm
+KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ
+X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr
+QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5
+pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN
+QSdJQO7e5iNEOdyhIta6A/I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Label: "Microsec e-Szigno Root CA 2009"
+# Serial: 14014712776195784473
+# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1
+# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e
+# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78
+-----BEGIN CERTIFICATE-----
+MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
+VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0
+ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G
+CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y
+OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx
+FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp
+Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
+dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP
+kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc
+cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U
+fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7
+N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC
+xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1
++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM
+Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG
+SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h
+mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk
+ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
+tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c
+2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t
+HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Label: "GlobalSign Root CA - R3"
+# Serial: 4835703278459759426209954
+# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28
+# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad
+# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
+MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
+RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
+gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
+KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
+QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
+XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
+LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
+RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
+jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
+6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
+mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
+Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
+WD9f
+-----END CERTIFICATE-----
+
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
+# Serial: 6047274297262753887
+# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3
+# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa
+# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE
+BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
+cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy
+MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
+thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
+NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
+X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
+m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
+Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
+EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
+KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
+6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
+OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD
+VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp
+cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv
+ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl
+AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF
+661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9
+am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1
+ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481
+PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS
+3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k
+SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF
+3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM
+ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g
+StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz
+Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB
+jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V
+-----END CERTIFICATE-----
+
+# Issuer: CN=Izenpe.com O=IZENPE S.A.
+# Subject: CN=Izenpe.com O=IZENPE S.A.
+# Label: "Izenpe.com"
+# Serial: 917563065490389241595536686991402621
+# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73
+# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19
+# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f
+-----BEGIN CERTIFICATE-----
+MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4
+MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6
+ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD
+VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j
+b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq
+scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO
+xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H
+LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX
+uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD
+yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+
+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q
+rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN
+BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L
+hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB
+QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+
+HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu
+Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg
+QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB
+BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
+MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA
+A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb
+laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56
+awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo
+JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw
+LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT
+VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk
+LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb
+UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/
+QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+
+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls
+QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
+# Subject: CN=Chambers of Commerce Root - 2008 O=AC Camerfirma S.A.
+# Label: "Chambers of Commerce Root - 2008"
+# Serial: 11806822484801597146
+# MD5 Fingerprint: 5e:80:9e:84:5a:0e:65:0b:17:02:f3:55:18:2a:3e:d7
+# SHA1 Fingerprint: 78:6a:74:ac:76:ab:14:7f:9c:6a:30:50:ba:9e:a8:7e:fe:9a:ce:3c
+# SHA256 Fingerprint: 06:3e:4a:fa:c4:91:df:d3:32:f3:08:9b:85:42:e9:46:17:d8:93:d7:fe:94:4e:10:a7:93:7e:e2:9d:96:93:c0
+-----BEGIN CERTIFICATE-----
+MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD
+VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
+IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
+MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz
+IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz
+MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj
+dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw
+EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp
+MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G
+CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9
+28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq
+VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q
+DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR
+5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL
+ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a
+Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl
+UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s
++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5
+Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj
+ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx
+hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV
+HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1
++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN
+YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t
+L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy
+ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt
+IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV
+HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w
+DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW
+PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF
+5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1
+glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH
+FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2
+pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD
+xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG
+tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq
+jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De
+fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg
+OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ
+d0jQ
+-----END CERTIFICATE-----
+
+# Issuer: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
+# Subject: CN=Global Chambersign Root - 2008 O=AC Camerfirma S.A.
+# Label: "Global Chambersign Root - 2008"
+# Serial: 14541511773111788494
+# MD5 Fingerprint: 9e:80:ff:78:01:0c:2e:c1:36:bd:fe:96:90:6e:08:f3
+# SHA1 Fingerprint: 4a:bd:ee:ec:95:0d:35:9c:89:ae:c7:52:a1:2c:5b:29:f6:d6:aa:0c
+# SHA256 Fingerprint: 13:63:35:43:93:34:a7:69:80:16:a0:d3:24:de:72:28:4e:07:9d:7b:52:20:bb:8f:bd:74:78:16:ee:be:ba:ca
+-----BEGIN CERTIFICATE-----
+MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD
+VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0
+IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3
+MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD
+aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx
+MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy
+cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG
+A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl
+BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI
+hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed
+KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7
+G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2
+zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4
+ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG
+HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2
+Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V
+yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e
+beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r
+6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh
+wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog
+zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW
+BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr
+ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp
+ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk
+cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt
+YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC
+CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow
+KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI
+hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ
+UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz
+X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x
+fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz
+a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd
+Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd
+SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O
+AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso
+M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge
+v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z
+09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B
+-----END CERTIFICATE-----
+
+# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Label: "Go Daddy Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01
+# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b
+# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
+EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
+ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
+NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
+EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
+AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
+E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
+/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
+DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
+GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
+tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
+AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
+WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
+9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
+gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
+2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
+LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
+4uJEvlz36hz1
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96
+# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e
+# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
+ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
+MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
+b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
+aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
+Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
+nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
+HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
+Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
+dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
+HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
+CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
+sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
+4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
+8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
+pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
+mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Services Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2
+# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f
+# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
+ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
+VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
+ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
+dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
+OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
+8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
+Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
+hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
+6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
+AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
+bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
+ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
+qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
+0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
+sSi6
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Commercial O=AffirmTrust
+# Subject: CN=AffirmTrust Commercial O=AffirmTrust
+# Label: "AffirmTrust Commercial"
+# Serial: 8608355977964138876
+# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7
+# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7
+# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
+ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
+MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
+yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
+VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
+nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
+XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
+vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
+Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Networking O=AffirmTrust
+# Subject: CN=AffirmTrust Networking O=AffirmTrust
+# Label: "AffirmTrust Networking"
+# Serial: 8957382827206547757
+# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f
+# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f
+# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
+YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
+kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
+QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
+6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
+yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
+QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
+tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
+QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
+Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
+olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
+x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium O=AffirmTrust
+# Subject: CN=AffirmTrust Premium O=AffirmTrust
+# Label: "AffirmTrust Premium"
+# Serial: 7893706540734352110
+# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57
+# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27
+# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
+dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
+A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
+cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
+JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
+s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
+HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
+70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
+qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
+5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
+C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
+OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
+FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
+KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
+8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
+MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
+0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Label: "AffirmTrust Premium ECC"
+# Serial: 8401224907861490260
+# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d
+# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb
+# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA"
+# Serial: 279744
+# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78
+# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e
+# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
+MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
+ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
+cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
+WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
+Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
+IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
+UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
+TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
+BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
+kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
+AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
+HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
+sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
+I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
+J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
+VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Root Certification Authority"
+# Serial: 1
+# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79
+# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48
+# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES
+MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU
+V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz
+WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO
+LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE
+AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH
+K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX
+RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z
+rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx
+3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq
+hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC
+MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls
+XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D
+lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn
+aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ
+YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Label: "Security Communication RootCA2"
+# Serial: 0
+# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43
+# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74
+# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe
+U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX
+DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy
+dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj
+YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV
+OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr
+zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM
+VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ
+hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO
+ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw
+awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs
+OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF
+coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc
+okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8
+t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
+1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/
+SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2011 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2011"
+# Serial: 0
+# MD5 Fingerprint: 73:9f:4c:4b:73:5b:79:e9:fa:ba:1c:ef:6e:cb:d5:c9
+# SHA1 Fingerprint: fe:45:65:9b:79:03:5b:98:a1:61:b5:51:2e:ac:da:58:09:48:22:4d
+# SHA256 Fingerprint: bc:10:4f:15:a4:8b:e7:09:dc:a5:42:a7:e1:d4:b9:df:6f:05:45:27:e8:02:ea:a9:2d:59:54:44:25:8a:fe:71
+-----BEGIN CERTIFICATE-----
+MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix
+RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p
+YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw
+NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK
+EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl
+cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz
+dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ
+fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns
+bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD
+75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP
+FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV
+HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp
+5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu
+b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA
+A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p
+6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8
+TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7
+dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys
+Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI
+l7WdmplNsDz4SgCbZN2fOUvRJ9e4
+-----END CERTIFICATE-----
+
+# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Label: "Actalis Authentication Root CA"
+# Serial: 6271844772424770508
+# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6
+# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac
+# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66
+-----BEGIN CERTIFICATE-----
+MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
+BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
+MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
+IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
+SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
+ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
+UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
+4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
+KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
+gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
+rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
+51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
+be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
+KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
+v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
+fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
+jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz
+ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL
+e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70
+jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz
+WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V
+SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j
+pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX
+X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok
+fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R
+K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU
+ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU
+LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT
+LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
+-----END CERTIFICATE-----
+
+# Issuer: O=Trustis Limited OU=Trustis FPS Root CA
+# Subject: O=Trustis Limited OU=Trustis FPS Root CA
+# Label: "Trustis FPS Root CA"
+# Serial: 36053640375399034304724988975563710553
+# MD5 Fingerprint: 30:c9:e7:1e:6b:e6:14:eb:65:b2:16:69:20:31:67:4d
+# SHA1 Fingerprint: 3b:c0:38:0b:33:c3:f6:a6:0c:86:15:22:93:d9:df:f5:4b:81:c0:04
+# SHA256 Fingerprint: c1:b4:82:99:ab:a5:20:8f:e9:63:0a:ce:55:ca:68:a0:3e:da:5a:51:9c:88:02:a0:d3:a6:73:be:8f:8e:55:7d
+-----BEGIN CERTIFICATE-----
+MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF
+MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL
+ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx
+MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc
+MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+
+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH
+iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj
+vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA
+0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB
+OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/
+BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E
+FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01
+GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW
+zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4
+1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE
+f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F
+jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN
+ZetX2fNXlrtIzYE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 2 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29
+# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99
+# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr
+6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV
+L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91
+1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx
+MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ
+QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB
+arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr
+Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi
+FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS
+P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN
+9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz
+uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h
+9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s
+A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t
+OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo
++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7
+KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2
+DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us
+H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ
+I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7
+5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h
+3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz
+Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 3 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec
+# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57
+# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y
+ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E
+N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9
+tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX
+0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c
+/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X
+KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY
+zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS
+O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D
+34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP
+K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv
+Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj
+QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV
+cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS
+IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2
+HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa
+O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv
+033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u
+dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE
+kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41
+3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD
+u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq
+4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 3"
+# Serial: 1
+# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef
+# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1
+# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN
+8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/
+RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4
+hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5
+ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM
+EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1
+A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy
+WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30
+6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT
+91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
+e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p
+TpPDpFQUWw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus
+# Subject: CN=EE Certification Centre Root CA O=AS Sertifitseerimiskeskus
+# Label: "EE Certification Centre Root CA"
+# Serial: 112324828676200291871926431888494945866
+# MD5 Fingerprint: 43:5e:88:d4:7d:1a:4a:7e:fd:84:2e:52:eb:01:d4:6f
+# SHA1 Fingerprint: c9:a8:b9:e7:55:80:5e:58:e3:53:77:a7:25:eb:af:c3:7b:27:cc:d7
+# SHA256 Fingerprint: 3e:84:ba:43:42:90:85:16:e7:75:73:c0:99:2f:09:79:ca:08:4e:46:85:68:1f:f1:95:cc:ba:8a:22:9b:8a:76
+-----BEGIN CERTIFICATE-----
+MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1
+MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1
+czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG
+CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy
+MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl
+ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS
+b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy
+euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO
+bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw
+WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d
+MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE
+1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD
+VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/
+zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB
+BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF
+BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV
+v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG
+E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u
+uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW
+iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v
+GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 2009"
+# Serial: 623603
+# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f
+# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0
+# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1
+-----BEGIN CERTIFICATE-----
+MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha
+ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM
+HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03
+UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42
+tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R
+ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM
+lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp
+/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G
+A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G
+A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj
+dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy
+MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl
+cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js
+L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL
+BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni
+acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0
+o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K
+zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8
+PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y
+Johw1+qRzT65ysCQblrGXnRl11z+o+I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 EV 2009"
+# Serial: 623604
+# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6
+# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83
+# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81
+-----BEGIN CERTIFICATE-----
+MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw
+NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV
+BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn
+ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0
+3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z
+qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR
+p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8
+HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw
+ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea
+HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw
+Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh
+c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E
+RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt
+dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku
+Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp
+3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
+nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF
+CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na
+xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX
+KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1
+-----END CERTIFICATE-----
+
+# Issuer: CN=CA Disig Root R2 O=Disig a.s.
+# Subject: CN=CA Disig Root R2 O=Disig a.s.
+# Label: "CA Disig Root R2"
+# Serial: 10572350602393338211
+# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03
+# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71
+# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV
+BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
+MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy
+MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
+EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw
+ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe
+NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH
+PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I
+x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe
+QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR
+yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO
+QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912
+H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ
+QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD
+i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs
+nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1
+rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
+DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI
+hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
+tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf
+GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb
+lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka
++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal
+TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i
+nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3
+gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr
+G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os
+zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x
+L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL
+-----END CERTIFICATE-----
+
+# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Label: "ACCVRAIZ1"
+# Serial: 6828503384748696800
+# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02
+# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17
+# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13
+-----BEGIN CERTIFICATE-----
+MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE
+AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw
+CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ
+BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND
+VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb
+qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY
+HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo
+G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA
+lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr
+IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/
+0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH
+k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47
+4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO
+m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa
+cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl
+uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI
+KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls
+ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG
+AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
+VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT
+VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG
+CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA
+cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA
+QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA
+7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA
+cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA
+QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA
+czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu
+aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt
+aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud
+DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF
+BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp
+D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU
+JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m
+AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD
+vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms
+tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH
+7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
+I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA
+h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF
+d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H
+pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Global Root CA"
+# Serial: 3262
+# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96
+# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65
+# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx
+EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT
+VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5
+NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT
+B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF
+10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz
+0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh
+MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH
+zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc
+46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2
+yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi
+laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP
+oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA
+BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE
+qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm
+4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL
+1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn
+LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF
+H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo
+RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+
+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh
+15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW
+6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW
+nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j
+wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz
+aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy
+KwbQBM0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Label: "TeliaSonera Root CA v1"
+# Serial: 199041966741090107964904287217786801558
+# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c
+# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37
+# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89
+-----BEGIN CERTIFICATE-----
+MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw
+NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv
+b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD
+VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F
+VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1
+7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X
+Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+
+/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs
+81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm
+dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe
+Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu
+sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4
+pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs
+slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ
+arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD
+VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG
+9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl
+dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj
+TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed
+Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7
+Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI
+OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7
+vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW
+t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn
+HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx
+SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi
+# Subject: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi
+# Label: "E-Tugra Certification Authority"
+# Serial: 7667447206703254355
+# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49
+# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39
+# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c
+-----BEGIN CERTIFICATE-----
+MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV
+BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC
+aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV
+BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1
+Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz
+MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+
+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp
+em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN
+ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY
+B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH
+D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF
+Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo
+q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D
+k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH
+fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut
+dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM
+ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8
+zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn
+rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX
+U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6
+Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5
+XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF
+Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR
+HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY
+GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c
+77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3
++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK
+vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6
+FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl
+yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P
+AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD
+y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d
+NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 2"
+# Serial: 1
+# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a
+# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9
+# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd
+AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC
+FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi
+1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq
+jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ
+wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/
+WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy
+NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC
+uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw
+IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6
+g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
+9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP
+BSeOE6Fuwg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Atos TrustedRoot 2011 O=Atos
+# Subject: CN=Atos TrustedRoot 2011 O=Atos
+# Label: "Atos TrustedRoot 2011"
+# Serial: 6643877497813316402
+# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56
+# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21
+# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE
+AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG
+EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM
+FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC
+REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp
+Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM
+VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+
+SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ
+4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L
+cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi
+eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG
+A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3
+DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j
+vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP
+DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc
+maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D
+lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv
+KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 1 G3"
+# Serial: 687049649626669250736271037606554624078720034195
+# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab
+# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67
+# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00
+MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV
+wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe
+rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341
+68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh
+4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp
+UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o
+abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc
+3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G
+KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt
+hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO
+Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt
+zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD
+ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
+MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2
+cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN
+qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5
+YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv
+b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2
+8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k
+NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj
+ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp
+q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt
+nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2 G3"
+# Serial: 390156079458959257446133169266079962026824725800
+# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06
+# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36
+# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3 G3"
+# Serial: 268090761170461462463995952157327242137089239581
+# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7
+# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d
+# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00
+MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR
+/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu
+FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR
+U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c
+ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR
+FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k
+A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw
+eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl
+sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp
+VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q
+A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+
+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD
+ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px
+KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI
+FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv
+oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg
+u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP
+0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf
+3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl
+8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+
+DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN
+PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/
+ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G2"
+# Serial: 15385348160840213938643033620894905419
+# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d
+# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f
+# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85
+-----BEGIN CERTIFICATE-----
+MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA
+n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc
+biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp
+EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA
+bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu
+YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB
+AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW
+BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI
+QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I
+0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni
+lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9
+B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv
+ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo
+IhNzbM8m9Yop5w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G3"
+# Serial: 15459312981008553731928384953135426796
+# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb
+# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89
+# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2
+-----BEGIN CERTIFICATE-----
+MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg
+RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf
+Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q
+RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD
+AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY
+JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv
+6pZjamVFkpUBtA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G2"
+# Serial: 4293743540046975378534879503202253541
+# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44
+# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4
+# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f
+-----BEGIN CERTIFICATE-----
+MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH
+MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI
+2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx
+1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ
+q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz
+tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ
+vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV
+5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY
+1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4
+NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG
+Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91
+8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe
+pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
+MrY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G3"
+# Serial: 7089244469030293291760083333884364146
+# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca
+# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e
+# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0
+-----BEGIN CERTIFICATE-----
+MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe
+Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw
+EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x
+IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG
+fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO
+Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd
+BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx
+AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/
+oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8
+sycX
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Trusted Root G4"
+# Serial: 7451500558977370777930084869016614236
+# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49
+# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4
+# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88
+-----BEGIN CERTIFICATE-----
+MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg
+RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y
+ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If
+xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV
+ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO
+DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ
+jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/
+CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi
+EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM
+fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY
+uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK
+chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t
+9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
+ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2
+SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd
++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc
+fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa
+sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N
+cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N
+0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie
+4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI
+r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1
+/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm
+gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Label: "COMODO RSA Certification Authority"
+# Serial: 101909084537582093308941363524873193117
+# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18
+# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4
+# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34
+-----BEGIN CERTIFICATE-----
+MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB
+hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
+BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
+EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
+Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR
+6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X
+pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC
+9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV
+/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf
+Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z
++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w
+qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah
+SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC
+u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf
+Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq
+crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E
+FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB
+/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl
+wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM
+4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV
+2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna
+FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ
+CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK
+boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke
+jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL
+S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb
+QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl
+0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB
+NVOFBkpdn627G190
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Label: "USERTrust RSA Certification Authority"
+# Serial: 2645093764781058787591871645665788717
+# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5
+# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e
+# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2
+-----BEGIN CERTIFICATE-----
+MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB
+iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl
+cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV
+BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw
+MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV
+BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
+aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B
+3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY
+tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/
+Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2
+VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT
+79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6
+c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT
+Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l
+c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee
+UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE
+Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
+BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF
+Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO
+VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3
+ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs
+8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR
+iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze
+Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ
+XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/
+qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB
+VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB
+L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG
+jjxDah2nGN59PRbxYvnKkKj9
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Label: "USERTrust ECC Certification Authority"
+# Serial: 123013823720199481456569720443997572134
+# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1
+# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0
+# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a
+-----BEGIN CERTIFICATE-----
+MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL
+MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl
+eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT
+JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT
+Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg
+VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo
+I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng
+o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G
+A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB
+zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW
+RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Label: "GlobalSign ECC Root CA - R4"
+# Serial: 14367148294922964480859022125800977897474
+# MD5 Fingerprint: 20:f0:27:68:d1:7e:a0:9d:0e:e6:2a:ca:df:5c:89:8e
+# SHA1 Fingerprint: 69:69:56:2e:40:80:f4:24:a1:e7:19:9f:14:ba:f3:ee:58:ab:6a:bb
+# SHA256 Fingerprint: be:c9:49:11:c2:95:56:76:db:6c:0a:55:09:86:d7:6e:3b:a0:05:66:7c:44:2c:97:62:b4:fb:b7:73:de:22:8c
+-----BEGIN CERTIFICATE-----
+MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ
+FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F
+uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX
+kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs
+ewv4n4Q=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Label: "GlobalSign ECC Root CA - R5"
+# Serial: 32785792099990507226680698011560947931244
+# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08
+# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa
+# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24
+-----BEGIN CERTIFICATE-----
+MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc
+8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke
+hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI
+KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg
+515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO
+xwy8p2Fp8fc74SrL+SvzZpA3
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden Root CA - G3 O=Staat der Nederlanden
+# Label: "Staat der Nederlanden Root CA - G3"
+# Serial: 10003001
+# MD5 Fingerprint: 0b:46:67:07:db:10:2f:19:8c:35:50:60:d1:0b:f4:37
+# SHA1 Fingerprint: d8:eb:6b:41:51:92:59:e0:f3:e7:85:00:c0:3d:b6:88:97:c9:ee:fc
+# SHA256 Fingerprint: 3c:4f:b0:b9:5a:b8:b3:00:32:f4:32:b8:6f:53:5f:e1:72:c1:85:d0:fd:39:86:58:37:cf:36:18:7f:a6:f4:28
+-----BEGIN CERTIFICATE-----
+MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX
+DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl
+ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv
+b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP
+cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW
+IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX
+xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy
+KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR
+9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az
+5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8
+6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7
+Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP
+bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt
+BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt
+XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF
+MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd
+INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD
+U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp
+LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8
+Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp
+gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh
+/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw
+0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A
+fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq
+4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR
+1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/
+QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM
+94B7IWcnMFk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
+# Subject: CN=Staat der Nederlanden EV Root CA O=Staat der Nederlanden
+# Label: "Staat der Nederlanden EV Root CA"
+# Serial: 10000013
+# MD5 Fingerprint: fc:06:af:7b:e8:1a:f1:9a:b4:e8:d2:70:1f:c0:f5:ba
+# SHA1 Fingerprint: 76:e2:7e:c1:4f:db:82:c1:c0:a6:75:b5:05:be:3d:29:b4:ed:db:bb
+# SHA256 Fingerprint: 4d:24:91:41:4c:fe:95:67:46:ec:4c:ef:a6:cf:6f:72:e2:8a:13:29:43:2f:9d:8a:90:7a:c4:cb:5d:ad:c1:5a
+-----BEGIN CERTIFICATE-----
+MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO
+TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh
+dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y
+MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg
+TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS
+b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS
+M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC
+UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d
+Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p
+rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l
+pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb
+j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC
+KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS
+/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X
+cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH
+1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP
+px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7
+MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI
+eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u
+2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS
+v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC
+wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy
+CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e
+vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6
+Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa
+Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL
+eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8
+FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc
+7uzXLg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Label: "IdenTrust Commercial Root CA 1"
+# Serial: 13298821034946342390520003877796839426
+# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7
+# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25
+# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu
+VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw
+MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw
+JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT
+3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU
++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp
+S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1
+bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi
+T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL
+vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK
+Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK
+dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT
+c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv
+l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N
+iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD
+ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH
+6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt
+LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93
+nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3
++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK
+W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT
+AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq
+l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG
+4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ
+mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A
+7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Label: "IdenTrust Public Sector Root CA 1"
+# Serial: 13298821034946342390521976156843933698
+# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba
+# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd
+# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f
+-----BEGIN CERTIFICATE-----
+MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu
+VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN
+MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0
+MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7
+ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy
+RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS
+bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF
+/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R
+3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw
+EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy
+9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V
+GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ
+2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV
+WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD
+W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN
+AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
+DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
+TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
+lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
+mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
+WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
+tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
+GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
+8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G2"
+# Serial: 1246989352
+# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2
+# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4
+# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39
+-----BEGIN CERTIFICATE-----
+MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
+cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
+IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
+dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
+NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
+dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
+dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
+aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
+RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
+cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
+wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
+U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
+jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
+BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
+jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
+Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
+1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
+nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
+VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - EC1"
+# Serial: 51543124481930649114116133369
+# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc
+# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47
+# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
+A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
+d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
+dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
+RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
+MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
+VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
+L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
+Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
+A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
+ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
+Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
+R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
+hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
+-----END CERTIFICATE-----
+
+# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Label: "CFCA EV ROOT"
+# Serial: 407555286
+# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30
+# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83
+# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd
+-----BEGIN CERTIFICATE-----
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
+TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
+MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
+aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
+T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
+sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
+TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
+/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
+7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
+EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
+hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
+a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
+aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
+TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
+PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
+cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
+tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
+BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
+ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
+ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
+jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19
+xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d
+Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN
+5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe
+/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z
+AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
+5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
+# Subject: CN=Certinomis - Root CA O=Certinomis OU=0002 433998903
+# Label: "Certinomis - Root CA"
+# Serial: 1
+# MD5 Fingerprint: 14:0a:fd:8d:a8:28:b5:38:69:db:56:7e:61:22:03:3f
+# SHA1 Fingerprint: 9d:70:bb:01:a5:a4:a0:18:11:2e:f7:1c:01:b9:32:c5:34:e7:88:a8
+# SHA256 Fingerprint: 2a:99:f5:bc:11:74:b7:3c:bb:1d:62:08:84:e0:1c:34:e5:1c:cb:39:78:da:12:5f:0e:33:26:88:83:bf:41:58
+-----BEGIN CERTIFICATE-----
+MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET
+MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb
+BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz
+MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx
+FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g
+Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2
+fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl
+LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV
+WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF
+TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb
+5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc
+CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri
+wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ
+wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG
+m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4
+F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng
+WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB
+BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0
+2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF
+AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/
+0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw
+F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS
+g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj
+qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN
+h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/
+ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V
+btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj
+Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ
+8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW
+gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GB CA"
+# Serial: 157768595616588414422159278966750757568
+# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d
+# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed
+# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6
+-----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt
+MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg
+Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i
+YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x
+CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG
+b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh
+bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3
+HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx
+WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX
+1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk
+u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P
+99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r
+M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB
+BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh
+cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5
+gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO
+ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf
+aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
+Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Label: "SZAFIR ROOT CA2"
+# Serial: 357043034767186914217277344587386743377558296292
+# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99
+# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de
+# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe
+-----BEGIN CERTIFICATE-----
+MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL
+BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6
+ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw
+NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L
+cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg
+Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN
+QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT
+3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw
+3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6
+3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5
+BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN
+XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
+AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF
+AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw
+8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG
+nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP
+oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy
+d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg
+LvWpCz/UXeHPhJ/iGcJfitYgHuNztw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA 2"
+# Serial: 44979900017204383099463764357512596969
+# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2
+# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92
+# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04
+-----BEGIN CERTIFICATE-----
+MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB
+gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu
+QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG
+A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz
+OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ
+VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3
+b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA
+DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn
+0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB
+OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE
+fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E
+Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m
+o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i
+sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW
+OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez
+Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS
+adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n
+3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC
+AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ
+F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf
+CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29
+XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm
+djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/
+WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb
+AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq
+P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko
+b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj
+XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P
+5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi
+DrW5viSP
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce
+# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6
+# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36
+-----BEGIN CERTIFICATE-----
+MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix
+DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k
+IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT
+N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v
+dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG
+A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh
+ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx
+QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA
+4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0
+AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10
+4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C
+ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV
+9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD
+gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6
+Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq
+NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko
+LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc
+Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd
+ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I
+XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI
+M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot
+9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V
+Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea
+j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh
+X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ
+l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf
+bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4
+pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK
+e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0
+vm9qp/UsQu0yrbYhnr68
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef
+# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66
+# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33
+-----BEGIN CERTIFICATE-----
+MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN
+BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl
+bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv
+b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ
+BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj
+YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5
+MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0
+dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg
+QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa
+jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC
+MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi
+C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep
+lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof
+TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR
+-----END CERTIFICATE-----
+
+# Issuer: CN=ISRG Root X1 O=Internet Security Research Group
+# Subject: CN=ISRG Root X1 O=Internet Security Research Group
+# Label: "ISRG Root X1"
+# Serial: 172886928669790476064670243504169061120
+# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e
+# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8
+# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
+
+# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
+# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
+# Label: "AC RAIZ FNMT-RCM"
+# Serial: 485876308206448804701554682760554759
+# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d
+# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20
+# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx
+CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ
+WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ
+BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG
+Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/
+yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf
+BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz
+WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF
+tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z
+374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC
+IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL
+mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7
+wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS
+MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2
+ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet
+UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H
+YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3
+LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD
+nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1
+RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM
+LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf
+77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N
+JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm
+fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp
+6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp
+1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B
+9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok
+RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv
+uu8wd+RU4riEmViAqhOLUTpPSPaLtrM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 1 O=Amazon
+# Subject: CN=Amazon Root CA 1 O=Amazon
+# Label: "Amazon Root CA 1"
+# Serial: 143266978916655856878034712317230054538369994
+# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6
+# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16
+# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 2 O=Amazon
+# Subject: CN=Amazon Root CA 2 O=Amazon
+# Label: "Amazon Root CA 2"
+# Serial: 143266982885963551818349160658925006970653239
+# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66
+# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a
+# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK
+gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ
+W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg
+1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K
+8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r
+2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me
+z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR
+8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj
+mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz
+7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6
++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI
+0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB
+Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm
+UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2
+LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY
++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS
+k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl
+7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm
+btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl
+urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+
+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63
+n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE
+76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H
+9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT
+4PsJYGw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 3 O=Amazon
+# Subject: CN=Amazon Root CA 3 O=Amazon
+# Label: "Amazon Root CA 3"
+# Serial: 143266986699090766294700635381230934788665930
+# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87
+# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e
+# SHA256 Fingerprint: 18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4
+-----BEGIN CERTIFICATE-----
+MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl
+ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr
+ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr
+BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM
+YyRIHN8wfdVoOw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 4 O=Amazon
+# Subject: CN=Amazon Root CA 4 O=Amazon
+# Label: "Amazon Root CA 4"
+# Serial: 143266989758080763974105200630763877849284878
+# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd
+# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be
+# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92
+-----BEGIN CERTIFICATE-----
+MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi
+9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk
+M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB
+MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw
+CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW
+1KyLa2tJElMzrdfkviT8tQp21KW8EA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=LuxTrust Global Root 2 O=LuxTrust S.A.
+# Subject: CN=LuxTrust Global Root 2 O=LuxTrust S.A.
+# Label: "LuxTrust Global Root 2"
+# Serial: 59914338225734147123941058376788110305822489521
+# MD5 Fingerprint: b2:e1:09:00:61:af:f7:f1:91:6f:c4:ad:8d:5e:3b:7c
+# SHA1 Fingerprint: 1e:0e:56:19:0a:d1:8b:25:98:b2:04:44:ff:66:8a:04:17:99:5f:3f
+# SHA256 Fingerprint: 54:45:5f:71:29:c2:0b:14:47:c4:18:f9:97:16:8f:24:c5:8f:c5:02:3b:f5:da:5b:e2:eb:6e:1d:d8:90:2e:d5
+-----BEGIN CERTIFICATE-----
+MIIFwzCCA6ugAwIBAgIUCn6m30tEntpqJIWe5rgV0xZ/u7EwDQYJKoZIhvcNAQEL
+BQAwRjELMAkGA1UEBhMCTFUxFjAUBgNVBAoMDUx1eFRydXN0IFMuQS4xHzAdBgNV
+BAMMFkx1eFRydXN0IEdsb2JhbCBSb290IDIwHhcNMTUwMzA1MTMyMTU3WhcNMzUw
+MzA1MTMyMTU3WjBGMQswCQYDVQQGEwJMVTEWMBQGA1UECgwNTHV4VHJ1c3QgUy5B
+LjEfMB0GA1UEAwwWTHV4VHJ1c3QgR2xvYmFsIFJvb3QgMjCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBANeFl78RmOnwYoNMPIf5U2o3C/IPPIfOb9wmKb3F
+ibrJgz337spbxm1Jc7TJRqMbNBM/wYlFV/TZsfs2ZUv7COJIcRHIbjuend+JZTem
+hfY7RBi2xjcwYkSSl2l9QjAk5A0MiWtj3sXh306pFGxT4GHO9hcvHTy95iJMHZP1
+EMShduxq3sVs35a0VkBCwGKSMKEtFZSg0iAGCW5qbeXrt77U8PEVfIvmTroTzEsn
+Xpk8F12PgX8zPU/TPxvsXD/wPEx1bvKm1Z3aLQdjAsZy6ZS8TEmVT4hSyNvoaYL4
+zDRbIvCGp4m9SAptZoFtyMhk+wHh9OHe2Z7d21vUKpkmFRseTJIpgp7VkoGSQXAZ
+96Tlk0u8d2cx3Rz9MXANF5kM+Qw5GSoXtTBxVdUPrljhPS80m8+f9niFwpN6cj5m
+j5wWEWCPnolvZ77gR1o7DJpni89Gxq44o/KnvObWhWszJHAiS8sIm7vI+AIpHb4g
+DEa/a4ebsypmQjVGbKq6rfmYe+lQVRQxv7HaLe2ArWgk+2mr2HETMOZns4dA/Yl+
+8kPREd8vZS9kzl8UubG/Mb2HeFpZZYiq/FkySIbWTLkpS5XTdvN3JW1CHDiDTf2j
+X5t/Lax5Gw5CMZdjpPuKadUiDTSQMC6otOBttpSsvItO13D8xTiOZCXhTTmQzsmH
+hFhxAgMBAAGjgagwgaUwDwYDVR0TAQH/BAUwAwEB/zBCBgNVHSAEOzA5MDcGByuB
+KwEBAQowLDAqBggrBgEFBQcCARYeaHR0cHM6Ly9yZXBvc2l0b3J5Lmx1eHRydXN0
+Lmx1MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBT/GCh2+UgFLKGu8SsbK7JT
++Et8szAdBgNVHQ4EFgQU/xgodvlIBSyhrvErGyuyU/hLfLMwDQYJKoZIhvcNAQEL
+BQADggIBAGoZFO1uecEsh9QNcH7X9njJCwROxLHOk3D+sFTAMs2ZMGQXvw/l4jP9
+BzZAcg4atmpZ1gDlaCDdLnINH2pkMSCEfUmmWjfrRcmF9dTHF5kH5ptV5AzoqbTO
+jFu1EVzPig4N1qx3gf4ynCSecs5U89BvolbW7MM3LGVYvlcAGvI1+ut7MV3CwRI9
+loGIlonBWVx65n9wNOeD4rHh4bhY79SV5GCc8JaXcozrhAIuZY+kt9J/Z93I055c
+qqmkoCUUBpvsT34tC38ddfEz2O3OuHVtPlu5mB0xDVbYQw8wkbIEa91WvpWAVWe+
+2M2D2RjuLg+GLZKecBPs3lHJQ3gCpU3I+V/EkVhGFndadKpAvAefMLmx9xIX3eP/
+JEAdemrRTxgKqpAd60Ae36EeRJIQmvKN4dFLRp7oRUKX6kWZ8+xm1QL68qZKJKre
+zrnK+T+Tb/mjuuqlPpmt/f97mfVl7vBZKGfXkJWkE4SphMHozs51k2MavDzq1WQf
+LSoSOcbDWjLtR5EWDrw4wVDej8oqkDQc7kGUnF4ZLvhFSZl0kbAEb+MEWrGrKqv+
+x9CWttrhSmQGbmBNvUJO/3jaJMobtNeWOWyu8Q6qp31IiyBMz2TWuJdGsE7RKlY6
+oJO9r4Ak4Ap+58rVyuiFVdw2KuGUaJPHZnJED4AhMmwlxyOAgwrr
+-----END CERTIFICATE-----
+
+# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1"
+# Serial: 1
+# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49
+# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca
+# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16
+-----BEGIN CERTIFICATE-----
+MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx
+GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp
+bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w
+KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0
+BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy
+dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG
+EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll
+IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU
+QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT
+TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg
+LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7
+a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr
+LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr
+N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X
+YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/
+iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f
+AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH
+V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh
+AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf
+IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4
+lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c
+8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf
+lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Label: "GDCA TrustAUTH R5 ROOT"
+# Serial: 9009899650740120186
+# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4
+# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4
+# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93
+-----BEGIN CERTIFICATE-----
+MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE
+BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ
+IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0
+MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV
+BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w
+HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj
+Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj
+TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u
+KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj
+qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm
+MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12
+ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP
+zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk
+L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC
+jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA
+HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC
+AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg
+p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm
+DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5
+COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry
+L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf
+JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg
+IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io
+2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV
+09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ
+XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq
+T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe
+MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Subject: CN=TrustCor RootCert CA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Label: "TrustCor RootCert CA-1"
+# Serial: 15752444095811006489
+# MD5 Fingerprint: 6e:85:f1:dc:1a:00:d3:22:d5:b2:b2:ac:6b:37:05:45
+# SHA1 Fingerprint: ff:bd:cd:e7:82:c8:43:5e:3c:6f:26:86:5c:ca:a8:3a:45:5b:c3:0a
+# SHA256 Fingerprint: d4:0e:9c:86:cd:8f:e4:68:c1:77:69:59:f4:9e:a7:74:fa:54:86:84:b6:c4:06:f3:90:92:61:f4:dc:e2:57:5c
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYD
+VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk
+MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U
+cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29y
+IFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkxMjMxMTcyMzE2WjCB
+pDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFuYW1h
+IENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUG
+A1UECwweVHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZU
+cnVzdENvciBSb290Q2VydCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEAv463leLCJhJrMxnHQFgKq1mqjQCj/IDHUHuO1CAmujIS2CNUSSUQIpid
+RtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4pQa81QBeCQryJ3pS/C3V
+seq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0JEsq1pme
+9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CV
+EY4hgLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorW
+hnAbJN7+KIor0Gqw/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/
+DeOxCbeKyKsZn3MzUOcwHwYDVR0jBBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQAD
+ggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5mDo4Nvu7Zp5I
+/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf
+ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZ
+yonnMlo2HD6CqFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djts
+L1Ac59v2Z3kf9YKVmgenFK+P3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdN
+zl/HHk484IkzlQsPpTLWPFp5LBk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Subject: CN=TrustCor RootCert CA-2 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Label: "TrustCor RootCert CA-2"
+# Serial: 2711694510199101698
+# MD5 Fingerprint: a2:e1:f8:18:0b:ba:45:d5:c7:41:2a:bb:37:52:45:64
+# SHA1 Fingerprint: b8:be:6d:cb:56:f1:55:b9:63:d4:12:ca:4e:06:34:c7:94:b2:1c:c0
+# SHA256 Fingerprint: 07:53:e9:40:37:8c:1b:d5:e3:83:6e:39:5d:ae:a5:cb:83:9e:50:46:f1:bd:0e:ae:19:51:cf:10:fe:c7:c9:65
+-----BEGIN CERTIFICATE-----
+MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNV
+BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw
+IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy
+dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEfMB0GA1UEAwwWVHJ1c3RDb3Ig
+Um9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEyMzExNzI2MzlaMIGk
+MQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEg
+Q2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYD
+VQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRy
+dXN0Q29yIFJvb3RDZXJ0IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCnIG7CKqJiJJWQdsg4foDSq8GbZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+
+QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9NkRvRUqdw6VC0xK5mC8tkq
+1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1oYxOdqHp
+2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nK
+DOObXUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hape
+az6LMvYHL1cEksr1/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF
+3wP+TfSvPd9cW436cOGlfifHhi5qjxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88
+oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQPeSghYA2FFn3XVDjxklb9tTNM
+g9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+CtgrKAmrhQhJ8Z3
+mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh
+8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAd
+BgNVHQ4EFgQU2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6U
+nrybPZx9mCAZ5YwwYrIwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYw
+DQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/hOsh80QA9z+LqBrWyOrsGS2h60COX
+dKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnpkpfbsEZC89NiqpX+
+MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv2wnL
+/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RX
+CI/hOWB3S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYa
+ZH9bDTMJBzN7Bj8RpFxwPIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW
+2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dvDDqPys/cA8GiCcjl/YBeyGBCARsaU1q7
+N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYURpFHmygk71dSTlxCnKr3
+Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANExdqtvArB
+As8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp
+5KeXRKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu
+1uwJ
+-----END CERTIFICATE-----
+
+# Issuer: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Subject: CN=TrustCor ECA-1 O=TrustCor Systems S. de R.L. OU=TrustCor Certificate Authority
+# Label: "TrustCor ECA-1"
+# Serial: 9548242946988625984
+# MD5 Fingerprint: 27:92:23:1d:0a:f5:40:7c:e9:e6:6b:9d:d8:f5:e7:6c
+# SHA1 Fingerprint: 58:d1:df:95:95:67:6b:63:c0:f0:5b:1c:17:4d:8b:84:0b:c8:78:bd
+# SHA256 Fingerprint: 5a:88:5d:b1:9c:01:d9:12:c5:75:93:88:93:8c:af:bb:df:03:1a:b2:d4:8e:91:ee:15:58:9b:42:97:1d:03:9c
+-----BEGIN CERTIFICATE-----
+MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYD
+VQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEk
+MCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U
+cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFzAVBgNVBAMMDlRydXN0Q29y
+IEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3MjgwN1owgZwxCzAJBgNV
+BAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQw
+IgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRy
+dXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3Ig
+RUNBLTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb
+3w9U73NjKYKtR8aja+3+XzP4Q1HpGjORMRegdMTUpwHmspI+ap3tDvl0mEDTPwOA
+BoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23xFUfJ3zSCNV2HykVh0A5
+3ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmcp0yJF4Ou
+owReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/
+wZ0+fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZF
+ZtS6mFjBAgMBAAGjYzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAf
+BgNVHSMEGDAWgBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/
+MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAQEABT41XBVwm8nHc2Fv
+civUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u/ukZMjgDfxT2
+AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F
+hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50
+soIipX1TH0XsJ5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BI
+WJZpTdwHjFGTot+fDz2LYLSCjaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1Wi
+tJ/X5g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Label: "SSL.com Root Certification Authority RSA"
+# Serial: 8875640296558310041
+# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29
+# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb
+# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69
+-----BEGIN CERTIFICATE-----
+MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE
+BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK
+DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz
+OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv
+bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R
+xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX
+qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC
+C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3
+6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh
+/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF
+YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E
+JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc
+US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8
+ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi
+M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G
+A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV
+cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc
+Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs
+PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/
+q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0
+cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr
+a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I
+H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y
+K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu
+nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf
+oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY
+Ic2wBlX7Jz9TkHCpBB5XJ7k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com Root Certification Authority ECC"
+# Serial: 8495723813297216424
+# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e
+# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a
+# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65
+-----BEGIN CERTIFICATE-----
+MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz
+WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0
+b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS
+b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI
+7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg
+CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud
+EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD
+VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T
+kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+
+gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
+# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
+# Label: "SSL.com EV Root Certification Authority RSA R2"
+# Serial: 6248227494352943350
+# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95
+# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a
+# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c
+-----BEGIN CERTIFICATE-----
+MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV
+BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE
+CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy
+MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G
+A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD
+DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf
+OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa
+4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9
+HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR
+aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA
+b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ
+Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV
+PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO
+pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu
+UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY
+MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV
+HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4
+9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW
+s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5
+Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg
+cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM
+79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz
+/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt
+ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm
+Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK
+QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ
+w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi
+S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07
+mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com EV Root Certification Authority ECC"
+# Serial: 3182246526754555285
+# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90
+# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d
+# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8
+-----BEGIN CERTIFICATE-----
+MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx
+NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv
+bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49
+AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA
+VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku
+WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP
+MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX
+5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ
+ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg
+h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6
+# Label: "GlobalSign Root CA - R6"
+# Serial: 1417766617973444989252670301619537
+# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae
+# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1
+# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg
+MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh
+bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx
+MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET
+MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI
+xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k
+ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD
+aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw
+LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw
+1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX
+k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2
+SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h
+bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n
+WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY
+rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce
+MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu
+bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN
+nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt
+Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61
+55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj
+vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf
+cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz
+oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp
+nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs
+pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v
+JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R
+8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4
+5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GC CA"
+# Serial: 44084345621038548146064804565436152554
+# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23
+# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31
+# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d
+-----BEGIN CERTIFICATE-----
+MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw
+CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91
+bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg
+Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ
+BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu
+ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS
+b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni
+eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W
+p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T
+rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV
+57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg
+Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R1 O=Google Trust Services LLC
+# Subject: CN=GTS Root R1 O=Google Trust Services LLC
+# Label: "GTS Root R1"
+# Serial: 146587175971765017618439757810265552097
+# MD5 Fingerprint: 82:1a:ef:d4:d2:4a:f2:9f:e2:3d:97:06:14:70:72:85
+# SHA1 Fingerprint: e1:c9:50:e6:ef:22:f8:4c:56:45:72:8b:92:20:60:d7:d5:a7:a3:e8
+# SHA256 Fingerprint: 2a:57:54:71:e3:13:40:bc:21:58:1c:bd:2c:f1:3e:15:84:63:20:3e:ce:94:bc:f9:d3:cc:19:6b:f0:9a:54:72
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgIQbkepxUtHDA3sM9CJuRz04TANBgkqhkiG9w0BAQwFADBH
+MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
+QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy
+MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl
+cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM
+f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vX
+mX7wCl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7
+zUjwTcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0P
+fyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtc
+vfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4
+Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUsp
+zBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOO
+Rc92wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYW
+k70paDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+
+DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgF
+lQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBADiW
+Cu49tJYeX++dnAsznyvgyv3SjgofQXSlfKqE1OXyHuY3UjKcC9FhHb8owbZEKTV1
+d5iyfNm9dKyKaOOpMQkpAWBz40d8U6iQSifvS9efk+eCNs6aaAyC58/UEBZvXw6Z
+XPYfcX3v73svfuo21pdwCxXu11xWajOl40k4DLh9+42FpLFZXvRq4d2h9mREruZR
+gyFmxhE+885H7pwoHyXa/6xmld01D1zvICxi/ZG6qcz8WpyTgYMpl0p8WnK0OdC3
+d8t5/Wk6kjftbjhlRn7pYL15iJdfOBL07q9bgsiG1eGZbYwE8na6SfZu6W0eX6Dv
+J4J2QPim01hcDyxC2kLGe4g0x8HYRZvBPsVhHdljUEn2NIVq4BjFbkerQUIpm/Zg
+DdIx02OYI5NaAIFItO/Nis3Jz5nu2Z6qNuFoS3FJFDYoOj0dzpqPJeaAcWErtXvM
++SUWgeExX6GjfhaknBZqlxi9dnKlC54dNuYvoS++cJEPqOba+MSSQGwlfnuzCdyy
+F62ARPBopY+Udf90WuioAnwMCeKpSwughQtiue+hMZL77/ZRBIls6Kl0obsXs7X9
+SQ98POyDGCBDTtWTurQ0sR8WNh8M5mQ5Fkzc4P4dyKliPUDqysU0ArSuiYgzNdws
+E3PYJ/HQcu51OyLemGhmW/HGY0dVHLqlCFF1pkgl
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R2 O=Google Trust Services LLC
+# Subject: CN=GTS Root R2 O=Google Trust Services LLC
+# Label: "GTS Root R2"
+# Serial: 146587176055767053814479386953112547951
+# MD5 Fingerprint: 44:ed:9a:0e:a4:09:3b:00:f2:ae:4c:a3:c6:61:b0:8b
+# SHA1 Fingerprint: d2:73:96:2a:2a:5e:39:9f:73:3f:e1:c7:1e:64:3f:03:38:34:fc:4d
+# SHA256 Fingerprint: c4:5d:7b:b0:8e:6d:67:e6:2e:42:35:11:0b:56:4e:5f:78:fd:92:ef:05:8c:84:0a:ea:4e:64:55:d7:58:5c:60
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgIQbkepxlqz5yDFMJo/aFLybzANBgkqhkiG9w0BAQwFADBH
+MQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExM
+QzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIy
+MDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNl
+cnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv
+CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3Kg
+GjSY6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9Bu
+XvAuMC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOd
+re7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXu
+PuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1
+mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K
+8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqj
+x5RWIr9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsR
+nTKaG73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0
+kzCqgc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9Ok
+twIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBALZp
+8KZ3/p7uC4Gt4cCpx/k1HUCCq+YEtN/L9x0Pg/B+E02NjO7jMyLDOfxA325BS0JT
+vhaI8dI4XsRomRyYUpOM52jtG2pzegVATX9lO9ZY8c6DR2Dj/5epnGB3GFW1fgiT
+z9D2PGcDFWEJ+YF59exTpJ/JjwGLc8R3dtyDovUMSRqodt6Sm2T4syzFJ9MHwAiA
+pJiS4wGWAqoC7o87xdFtCjMwc3i5T1QWvwsHoaRc5svJXISPD+AVdyx+Jn7axEvb
+pxZ3B7DNdehyQtaVhJ2Gg/LkkM0JR9SLA3DaWsYDQvTtN6LwG1BUSw7YhN4ZKJmB
+R64JGz9I0cNv4rBgF/XuIwKl2gBbbZCr7qLpGzvpx0QnRY5rn/WkhLx3+WuXrD5R
+RaIRpsyF7gpo8j5QOHokYh4XIDdtak23CZvJ/KRY9bb7nE4Yu5UC56GtmwfuNmsk
+0jmGwZODUNKBRqhfYlcsu2xkiAhu7xNUX90txGdj08+JN7+dIPT7eoOboB6BAFDC
+5AwiWVIQ7UNWhwD4FFKnHYuTjKJNRn8nxnGbJN7k2oaLDX5rIMHAnuFl2GqjpuiF
+izoHCBy69Y9Vmhh1fuXsgWbRIXOhNUQLgD1bnF5vKheW0YMjiGZt5obicDIvUiLn
+yOd/xCxgXS/Dr55FBcOEArf9LAhST4Ldo/DUhgkC
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R3 O=Google Trust Services LLC
+# Subject: CN=GTS Root R3 O=Google Trust Services LLC
+# Label: "GTS Root R3"
+# Serial: 146587176140553309517047991083707763997
+# MD5 Fingerprint: 1a:79:5b:6b:04:52:9c:5d:c7:74:33:1b:25:9a:f9:25
+# SHA1 Fingerprint: 30:d4:24:6f:07:ff:db:91:89:8a:0b:e9:49:66:11:eb:8c:5e:46:e5
+# SHA256 Fingerprint: 15:d5:b8:77:46:19:ea:7d:54:ce:1c:a6:d0:b0:c4:03:e0:37:a9:17:f1:31:e8:a0:4e:1e:6b:7a:71:ba:bc:e5
+-----BEGIN CERTIFICATE-----
+MIICDDCCAZGgAwIBAgIQbkepx2ypcyRAiQ8DVd2NHTAKBggqhkjOPQQDAzBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout
+736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2A
+DDL24CejQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEAgFuk
+fCPAlaUs3L6JbyO5o91lAFJekazInXJ0glMLfalAvWhgxeG4VDvBNhcl2MG9AjEA
+njWSdIUlUfUk7GRSJFClH9voy8l27OyCbvWFGFPouOOaKaqW04MjyaR7YbPMAuhd
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R4 O=Google Trust Services LLC
+# Subject: CN=GTS Root R4 O=Google Trust Services LLC
+# Label: "GTS Root R4"
+# Serial: 146587176229350439916519468929765261721
+# MD5 Fingerprint: 5d:b6:6a:c4:60:17:24:6a:1a:99:a8:4b:ee:5e:b4:26
+# SHA1 Fingerprint: 2a:1d:60:27:d9:4a:b1:0a:1c:4d:91:5c:cd:33:a0:cb:3e:2d:54:cb
+# SHA256 Fingerprint: 71:cc:a5:39:1f:9e:79:4b:04:80:25:30:b3:63:e1:21:da:8a:30:43:bb:26:66:2f:ea:4d:ca:7f:c9:51:a4:bd
+-----BEGIN CERTIFICATE-----
+MIICCjCCAZGgAwIBAgIQbkepyIuUtui7OyrYorLBmTAKBggqhkjOPQQDAzBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu
+hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/l
+xKvRHYqjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNnADBkAjBqUFJ0
+CMRw3J5QdCHojXohw0+WbhXRIjVhLfoIN+4Zba3bssx9BzT1YBkstTTZbyACMANx
+sbqjYAuG7ZoIapVon+Kz4ZNkfF6Tpt95LY2F45TPI11xzPKwTdb+mciUqXWi4w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=UCA Global G2 Root O=UniTrust
+# Subject: CN=UCA Global G2 Root O=UniTrust
+# Label: "UCA Global G2 Root"
+# Serial: 124779693093741543919145257850076631279
+# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8
+# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a
+# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9
+MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH
+bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x
+CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds
+b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr
+b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9
+kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm
+VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R
+VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc
+C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj
+tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY
+D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv
+j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl
+NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6
+iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP
+O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/
+BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV
+ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj
+L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5
+1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl
+1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU
+b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV
+PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj
+y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb
+EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg
+DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI
++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy
+YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX
+UB+K+wb1whnw0A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=UCA Extended Validation Root O=UniTrust
+# Subject: CN=UCA Extended Validation Root O=UniTrust
+# Label: "UCA Extended Validation Root"
+# Serial: 106100277556486529736699587978573607008
+# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2
+# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a
+# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH
+MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF
+eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx
+MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV
+BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog
+D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS
+sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop
+O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk
+sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi
+c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj
+VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz
+KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/
+TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G
+sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs
+1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD
+fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T
+AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN
+l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR
+ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ
+VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5
+c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp
+4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s
+t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj
+2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO
+vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C
+xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx
+cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM
+fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036
+# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036
+# Label: "Certigna Root CA"
+# Serial: 269714418870597844693661054334862075617
+# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77
+# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43
+# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68
+-----BEGIN CERTIFICATE-----
+MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw
+WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw
+MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x
+MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD
+VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX
+BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
+ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO
+ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M
+CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu
+I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm
+TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh
+C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf
+ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz
+IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT
+Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k
+JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5
+hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB
+GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of
+1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov
+L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo
+dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr
+aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq
+hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L
+6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG
+HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6
+0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB
+lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi
+o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1
+gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v
+faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63
+Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh
+jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw
+3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI
+# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI
+# Label: "emSign Root CA - G1"
+# Serial: 235931866688319308814040
+# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac
+# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c
+# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67
+-----BEGIN CERTIFICATE-----
+MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD
+VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU
+ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH
+MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO
+MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv
+Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz
+f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO
+8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq
+d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM
+tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt
+Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB
+o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD
+AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x
+PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM
+wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d
+GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH
+6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby
+RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx
+iN66zB+Afko=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI
+# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI
+# Label: "emSign ECC Root CA - G3"
+# Serial: 287880440101571086945156
+# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40
+# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1
+# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b
+-----BEGIN CERTIFICATE-----
+MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG
+EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo
+bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g
+RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ
+TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s
+b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw
+djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0
+WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS
+fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB
+zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq
+hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB
+CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD
++JbNR6iC8hZVdyR+EhCVBCyj
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI
+# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI
+# Label: "emSign Root CA - C1"
+# Serial: 825510296613316004955058
+# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68
+# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01
+# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f
+-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG
+A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg
+SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw
+MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln
+biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v
+dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ
+BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ
+HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH
+3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH
+GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c
+xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1
+aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq
+TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87
+/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4
+kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG
+YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT
++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo
+WXzhriKi4gp6D/piq1JM4fHfyr6DDUI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI
+# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI
+# Label: "emSign ECC Root CA - C3"
+# Serial: 582948710642506000014504
+# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5
+# SHA1 Fingerprint: b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66
+# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3
+-----BEGIN CERTIFICATE-----
+MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG
+EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx
+IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw
+MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln
+biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND
+IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci
+MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti
+sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O
+BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c
+3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J
+0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post
+# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post
+# Label: "Hongkong Post Root CA 3"
+# Serial: 46170865288971385588281144162979347873371282084
+# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0
+# SHA1 Fingerprint: 58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02
+# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6
+-----BEGIN CERTIFICATE-----
+MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL
+BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ
+SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n
+a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5
+NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT
+CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u
+Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO
+dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI
+VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV
+9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY
+2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY
+vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt
+bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb
+x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+
+l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK
+TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj
+Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e
+i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw
+DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG
+7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk
+MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr
+gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk
+GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS
+3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm
+Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+
+l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c
+JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP
+L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa
+LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG
+mpv0
+-----END CERTIFICATE-----
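
Every entry in this bundle pairs a PEM block with the fingerprints it is expected to have. As a rough illustration of what those `# SHA256 Fingerprint:` comment lines mean (this sketch is not part of the vendored file), the fingerprint can be recomputed from a single PEM block with the standard library alone; `pem` is assumed to hold one `-----BEGIN CERTIFICATE-----` ... `-----END CERTIFICATE-----` block as shown above.

import hashlib
import ssl

def sha256_fingerprint(pem: str) -> str:
    # Strip the PEM armor down to raw DER bytes, hash the DER
    # encoding, and format the digest as the colon-separated hex
    # pairs used in the "# SHA256 Fingerprint:" comments above.
    der = ssl.PEM_cert_to_DER_cert(pem)
    digest = hashlib.sha256(der).hexdigest()
    return ":".join(digest[i:i + 2] for i in range(0, len(digest), 2))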
diff --git a/lib/python3.7/site-packages/pip/_vendor/certifi/core.py b/lib/python3.7/site-packages/pip/_vendor/certifi/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..7271acf40ee1e1f6bcba51cc620b6f07c8bfe044
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/certifi/core.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+
+"""
+certifi.py
+~~~~~~~~~~
+
+This module returns the installation location of cacert.pem.
+"""
+import os
+
+
+def where():
+    f = os.path.dirname(__file__)
+
+    return os.path.join(f, 'cacert.pem')
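
`where()` is the entire public surface of this vendored module: it resolves `cacert.pem` relative to the package directory via `__file__`, so the bundle above stays relocatable and travels with pip wherever it is installed. A minimal sketch of a hypothetical caller wiring that path into the standard `ssl` module:

import ssl

from pip._vendor.certifi.core import where

# Build a context that trusts exactly the CA bundle shipped next to
# core.py; where() returns the absolute path to that cacert.pem.
context = ssl.create_default_context(cafile=where())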
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__init__.py b/lib/python3.7/site-packages/pip/_vendor/chardet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f9f820ef6e5f21042efd0e9dc18d097388d7207
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/__init__.py
@@ -0,0 +1,39 @@
+######################## BEGIN LICENSE BLOCK ########################
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+
+from .compat import PY2, PY3
+from .universaldetector import UniversalDetector
+from .version import __version__, VERSION
+
+
+def detect(byte_str):
+    """
+    Detect the encoding of the given byte string.
+
+    :param byte_str:     The byte sequence to examine.
+    :type byte_str:      ``bytes`` or ``bytearray``
+    """
+    if not isinstance(byte_str, bytearray):
+        if not isinstance(byte_str, bytes):
+            raise TypeError('Expected object of type bytes or bytearray, got: '
+                            '{0}'.format(type(byte_str)))
+        else:
+            byte_str = bytearray(byte_str)
+    detector = UniversalDetector()
+    detector.feed(byte_str)
+    return detector.close()
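
`detect()` is a one-shot convenience wrapper over the incremental `UniversalDetector`: the whole buffer is fed at once, the detector is closed, and its result dict is returned (in this chardet version the dict carries `encoding`, `confidence`, and `language`). A minimal usage sketch, assuming the vendored import path:

from pip._vendor import chardet

raw = "Grüße aus Köln".encode("iso-8859-1")
result = chardet.detect(raw)
# result is a dict, e.g. {'encoding': 'ISO-8859-1', 'confidence': 0.73,
# 'language': ''}; encoding may be None if nothing plausible was found.

text = raw.decode(result["encoding"] or "utf-8")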
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..96bc479ae6ab38f6312c82d013b1dc3f1913bcf9
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/big5freq.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/big5freq.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..23914d67b0595629e0ed21e2df032b918cabc0b1
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/big5freq.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/big5prober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/big5prober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fadc1f1686a57259426fa94546c78be3cf60caa6
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/big5prober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/chardistribution.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/chardistribution.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..68d95dabbc8c850e87a78e438c48578baf53562e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/chardistribution.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/charsetgroupprober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/charsetgroupprober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4f6babfb6d0c0657c6dafa3d5d7ce968751dddb
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/charsetgroupprober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/charsetprober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/charsetprober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..150f26142fb72d52b0ac5c9a6647d48c838be907
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/charsetprober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/codingstatemachine.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/codingstatemachine.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9fbaf88c8c139838080b2f1c65cfe87197139623
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/codingstatemachine.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/compat.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d2dde57d624372d8b3fcd85050b51a55ed44a031
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/cp949prober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/cp949prober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..03b2cc0202ecb56ac131197980cdcbe06eec04af
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/cp949prober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/enums.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/enums.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3c039082d643606d3963f23970030eccabba0e10
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/enums.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/escprober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/escprober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90d0f729f2f84a0072644d4b31a871fcc5144635
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/escprober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/escsm.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/escsm.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d3d5bf0ddf90bd795f3e28dee16721beb24fb2a2
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/escsm.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/eucjpprober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/eucjpprober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e4b08e903d92be47e484780c27ca1dcd0a96e0c9
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/eucjpprober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/euckrfreq.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/euckrfreq.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0191efd4740d2d95eba07bfd415e9392ba651d2a
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/euckrfreq.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/euckrprober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/euckrprober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e26131c2a5f73c9f3a20b42e00c2f66e4005a93
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/euckrprober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/euctwfreq.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/euctwfreq.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11549c0c9ac87b67eb968b1e9b9d41b76502c3e8
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/euctwfreq.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/euctwprober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/euctwprober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba1262665223f2d55c52ae3f5ee25f9894a46fc3
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/euctwprober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/gb2312freq.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/gb2312freq.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36eef3321eaf3f350432b4131c0d37f730e06604
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/gb2312freq.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/gb2312prober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/gb2312prober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1505b1e7b72e63bab9cc6ba8557b6413aafd39a
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/gb2312prober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/hebrewprober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/hebrewprober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74514b03fdb386d66d2629957a4614a283e7f2b2
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/hebrewprober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/jisfreq.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/jisfreq.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7f0d4d627f5e7da1ca68c0994b334424f9aab249
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/jisfreq.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/jpcntx.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/jpcntx.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b546764c5784ae7a2d6be706a2ff249e0ee9398
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/jpcntx.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langbulgarianmodel.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langbulgarianmodel.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1c13263f03fc95e125e61e796d08a74e23299e1
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langbulgarianmodel.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langcyrillicmodel.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langcyrillicmodel.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e7f636e3fb89114e97110afb6cba1651db698747
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langcyrillicmodel.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langgreekmodel.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langgreekmodel.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26eb73e493db32b401c3f46cf2521a3df8824668
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langgreekmodel.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langhebrewmodel.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langhebrewmodel.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5a5bf9eee66707b418aa45f11e1454358e772cc
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langhebrewmodel.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langhungarianmodel.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langhungarianmodel.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59612fddde8b63d207366abb4de3d1600e3c004c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langhungarianmodel.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langthaimodel.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langthaimodel.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6113a8937c4011a323c1d3427637aa5a208c9cd
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langthaimodel.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langturkishmodel.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langturkishmodel.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..594254ddd2001ae361bb2d7fb087df8caa8acb77
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/langturkishmodel.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/latin1prober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/latin1prober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fe1f1a59f5f07c6b3436c4ea4295a9564a4c4f6f
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/latin1prober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/mbcharsetprober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/mbcharsetprober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10271741050d71971b75263468cb8b88befea26e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/mbcharsetprober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/mbcsgroupprober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/mbcsgroupprober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2538d0a4acf5a5ee0e75e32618d236267777097d
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/mbcsgroupprober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/mbcssm.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/mbcssm.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..447cc19712355ba3273da8e29491ebfae88a3b98
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/mbcssm.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/sbcharsetprober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/sbcharsetprober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..60ac5ab9b0330251c0c98f8bd7cc332c94733195
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/sbcharsetprober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/sbcsgroupprober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/sbcsgroupprober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08776559b4add1303a8d9d89efc59ff855ba87a1
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/sbcsgroupprober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/sjisprober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/sjisprober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..acf70ecc6430595fc5a0f5e30991ef5937737491
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/sjisprober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/universaldetector.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/universaldetector.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc53055495537b23f6654c995a12b74f576fa82e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/universaldetector.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/utf8prober.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/utf8prober.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c356939f162900c509f40d29f99e56c41493b143
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/utf8prober.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/version.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/version.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..133e4005ffacf39b465a61382610c1d97cd76750
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/__pycache__/version.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/big5freq.py b/lib/python3.7/site-packages/pip/_vendor/chardet/big5freq.py
new file mode 100644
index 0000000000000000000000000000000000000000..38f32517aa8f6cf5970f7ceddd1a415289184c3e
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/big5freq.py
@@ -0,0 +1,386 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# Big5 frequency table
+# by Taiwan's Mandarin Promotion Council
+# <http://www.edu.tw:81/mandr/>
+#
+# 128  --> 0.42261
+# 256  --> 0.57851
+# 512  --> 0.74851
+# 1024 --> 0.89384
+# 2048 --> 0.97583
+#
+# Ideal Distribution Ratio = 0.74851 / (1 - 0.74851) = 2.98
+# Random Distribution Ratio = 512 / (5401 - 512) = 0.105
+#
+# The typical distribution ratio is about 25% of the ideal one, but still much
+# higher than the random distribution ratio.
+
+BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
+
+# Char to FreqOrder table
+BIG5_TABLE_SIZE = 5376
+
+BIG5_CHAR_TO_FREQ_ORDER = (
+   1,1801,1506, 255,1431, 198,   9,  82,   6,5008, 177, 202,3681,1256,2821, 110, #   16
+3814,  33,3274, 261,  76,  44,2114,  16,2946,2187,1176, 659,3971,  26,3451,2653, #   32
+1198,3972,3350,4202, 410,2215, 302, 590, 361,1964,   8, 204,  58,4510,5009,1932, #   48
+  63,5010,5011, 317,1614,  75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, #   64
+3682,   3,  10,3973,1471,  29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, #   80
+4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947,  34,3556,3204,  64, 604, #   96
+5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337,  72, 406,5017,  80, #  112
+ 630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449,  69,2987, 591, #  128
+ 179,2096, 471, 115,2035,1844,  60,  50,2988, 134, 806,1869, 734,2036,3454, 180, #  144
+ 995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, #  160
+2502,  90,2716,1338, 663,  11, 906,1099,2553,  20,2441, 182, 532,1716,5019, 732, #  176
+1376,4204,1311,1420,3206,  25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, #  192
+3276, 475,1447,3683,5020, 117,  21, 656, 810,1297,2300,2334,3557,5021, 126,4205, #  208
+ 706, 456, 150, 613,4513,  71,1118,2037,4206, 145,3092,  85, 835, 486,2115,1246, #  224
+1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, #  240
+3558,3135,5023,1956,1153,4207,  83, 296,1199,3093, 192, 624,  93,5024, 822,1898, #  256
+2823,3136, 795,2065, 991,1554,1542,1592,  27,  43,2867, 859, 139,1456, 860,4514, #  272
+ 437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, #  288
+3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, #  304
+1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, #  320
+5026,5027,2176,3207,3685,2682, 593, 845,1062,3277,  88,1723,2038,3978,1951, 212, #  336
+ 266, 152, 149, 468,1899,4208,4516,  77, 187,5028,3038,  37,   5,2990,5029,3979, #  352
+5030,5031,  39,2524,4517,2908,3208,2079,  55, 148,  74,4518, 545, 483,1474,1029, #  368
+1665, 217,1870,1531,3138,1104,2655,4209,  24, 172,3562, 900,3980,3563,3564,4519, #  384
+  32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683,   4,3039,3351,1427,1789, #  400
+ 188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, #  416
+3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439,  38,5037,1063,5038, 794, #  432
+3982,1435,2301,  46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804,  35, 707, #  448
+ 324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, #  464
+2129,1363,3689,1423, 697, 100,3094,  48,  70,1231, 495,3139,2196,5043,1294,5044, #  480
+2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, #  496
+ 314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, #  512
+ 287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, #  528
+3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, #  544
+1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, #  560
+1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, #  576
+1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381,   7, #  592
+2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, #  608
+ 265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, #  624
+4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, #  640
+1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, #  656
+5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, #  672
+2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, #  688
+ 383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, #  704
+  98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, #  720
+ 523,2789,2790,2658,5061, 141,2235,1333,  68, 176, 441, 876, 907,4220, 603,2602, #  736
+ 710, 171,3464, 404, 549,  18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, #  752
+5063,2991, 368,5064, 146, 366,  99, 871,3693,1543, 748, 807,1586,1185,  22,2263, #  768
+ 379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, #  784
+1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068,  59,5069, #  800
+ 585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, #  816
+ 690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, #  832
+5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, #  848
+1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, #  864
+ 544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, #  880
+3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, #  896
+4224,  57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, #  912
+3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, #  928
+ 279,3145,  51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, #  944
+ 610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, #  960
+1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, #  976
+4227,2475,1436, 953,4228,2055,4545, 671,2400,  79,4229,2446,3285, 608, 567,2689, #  992
+3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
+3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
+2402,5097,5098,5099,4232,3045,   0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
+5101, 233,4233,3697,1819,4550,4551,5102,  96,1777,1315,2083,5103, 257,5104,1810, # 1056
+3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
+5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
+1484,5110,1712, 127,  67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
+2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
+1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
+  78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
+1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
+4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
+3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
+ 534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
+ 165, 243,4559,3703,2528, 123, 683,4239, 764,4560,  36,3998,1793, 589,2916, 816, # 1232
+ 626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
+2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
+5122, 611,1156, 854,2386,1316,2875,   2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
+1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
+2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
+1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
+1994,5135,4564,5136,5137,2198,  13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
+5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
+5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
+5149, 128,2133,  92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
+3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
+4567,2252,  94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
+4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
+2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
+5163,2337,2068,  23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
+3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
+ 598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
+5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863,  41, # 1520
+5170,5171,4575,5172,1657,2338,  19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
+1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
+2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
+3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
+4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
+5182,2692, 733,  40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
+3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
+4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
+1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
+1871,2762,3004,5187, 435,5188, 343,1108, 596,  17,1751,4579,2239,3477,3709,5189, # 1680
+4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
+1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
+ 240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
+1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
+1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
+3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
+ 619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
+5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
+2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
+1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
+1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551,  30,2268,4266, # 1856
+5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
+ 829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
+4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
+ 375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
+2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
+ 444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
+1041,3005, 293,1168,  87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
+1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
+ 730,1515, 184,2840,  66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
+4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
+4021,5231,5232,1186,  15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
+1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
+3596,1342,1681,1718, 766,3297, 286,  89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
+5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
+5240,3298, 310, 313,3482,2304, 770,4278,  54,3054, 189,4611,3105,3848,4025,5241, # 2096
+1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
+2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
+1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
+3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
+2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
+3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
+2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
+4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
+4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
+3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
+  97,  81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
+3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
+ 424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
+3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
+4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
+3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
+1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
+5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
+ 199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
+5286, 587,  14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
+1702,1226, 102,1547,  62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
+ 391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
+4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294,  86,1494,1730, # 2464
+4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
+ 397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
+2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
+2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885,  28,2695, # 2528
+3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
+1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
+4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
+2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
+1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
+1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
+2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
+3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
+1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
+5313,3493,5314,5315,5316,3310,2698,1433,3311, 131,  95,1504,4049, 723,4303,3166, # 2688
+1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
+4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654,  53,5320,3014,5321, # 2720
+1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
+ 135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
+1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
+4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
+4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
+2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
+1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
+4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
+ 660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
+5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
+2322,3316,5346,5347,4308,5348,4309,  84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
+3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
+4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
+ 790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
+5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
+5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
+1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
+4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
+4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
+2699,1516,3614,1121,1082,1329,3317,4073,1449,3873,  65,1128,2848,2927,2769,1590, # 3040
+3874,5370,5371,  12,2668,  45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
+3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
+2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
+1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
+4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
+3736,1859,  91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
+3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
+2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
+4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771,  61,4079,3738,1823,4080, # 3184
+5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
+3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
+2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
+3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
+1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
+2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
+3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
+4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063,  56,1396,3113, # 3312
+2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
+2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
+5418,1076,  49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
+1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
+2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
+1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
+3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
+4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629,  31,2851, # 3440
+2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
+3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
+3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
+2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
+4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
+2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
+3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
+4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
+5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
+3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
+ 194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
+1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412,  42,3119, 464,5455,2642, # 3632
+4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
+1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
+4701,5462,3020, 962, 588,3629, 289,3250,2644,1116,  52,5463,3067,1797,5464,5465, # 3680
+5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
+ 510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
+5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
+5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
+2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
+3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
+2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
+2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
+ 681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
+1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
+4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
+3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
+3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
+ 838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
+2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
+ 625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
+2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
+4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
+1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
+4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
+1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
+3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
+ 574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
+3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
+5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
+5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
+3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
+3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
+1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
+2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
+5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
+1561,2674,1452,4113,1375,5549,5550,  47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
+1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
+3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
+ 919,2352,2975,2353,1270,4727,4115,  73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
+1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
+4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
+5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
+2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
+3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
+ 516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
+1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
+2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
+2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
+5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
+5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
+5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
+2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
+2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
+1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
+4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
+3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
+3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
+4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
+4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
+2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
+2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
+5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
+4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
+5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
+4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
+ 502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
+ 121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
+1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
+3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
+4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
+1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
+5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
+2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
+2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
+3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
+5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
+1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
+3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
+5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
+1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
+5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
+2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
+3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
+2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
+3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
+3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
+3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
+4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
+ 803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
+2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
+4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
+3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
+5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
+1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
+5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
+ 425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
+1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
+ 479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
+4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
+1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
+4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
+1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
+ 433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
+3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
+4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
+5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
+ 938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
+3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
+ 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
+2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376
+)
+
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/big5prober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/big5prober.py
new file mode 100644
index 0000000000000000000000000000000000000000..98f9970122088c14a5830e091ca8a12fc8e4c563
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/big5prober.py
@@ -0,0 +1,47 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import Big5DistributionAnalysis
+from .mbcssm import BIG5_SM_MODEL
+
+
+class Big5Prober(MultiByteCharSetProber):
+    def __init__(self):
+        super(Big5Prober, self).__init__()
+        self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
+        self.distribution_analyzer = Big5DistributionAnalysis()
+        self.reset()
+
+    @property
+    def charset_name(self):
+        return "Big5"
+
+    @property
+    def language(self):
+        return "Chinese"
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/chardistribution.py b/lib/python3.7/site-packages/pip/_vendor/chardet/chardistribution.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0395f4a45aaa5c4ba1824a81d8ef8f69b46dc60
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/chardistribution.py
@@ -0,0 +1,233 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE,
+                        EUCTW_TYPICAL_DISTRIBUTION_RATIO)
+from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE,
+                        EUCKR_TYPICAL_DISTRIBUTION_RATIO)
+from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, GB2312_TABLE_SIZE,
+                         GB2312_TYPICAL_DISTRIBUTION_RATIO)
+from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE,
+                       BIG5_TYPICAL_DISTRIBUTION_RATIO)
+from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE,
+                      JIS_TYPICAL_DISTRIBUTION_RATIO)
+
+
+class CharDistributionAnalysis(object):
+    ENOUGH_DATA_THRESHOLD = 1024
+    SURE_YES = 0.99
+    SURE_NO = 0.01
+    MINIMUM_DATA_THRESHOLD = 3
+
+    def __init__(self):
+        # Mapping table to get frequency order from char order (as returned
+        # by get_order())
+        self._char_to_freq_order = None
+        self._table_size = None  # Size of above table
+        # This is a constant value which varies from language to language,
+        # used in calculating confidence.  See
+        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
+        # for further detail.
+        self.typical_distribution_ratio = None
+        self._done = None
+        self._total_chars = None
+        self._freq_chars = None
+        self.reset()
+
+    def reset(self):
+        """reset analyser, clear any state"""
+        # If this flag is set to True, detection is done and conclusion has
+        # been made
+        self._done = False
+        self._total_chars = 0  # Total characters encountered
+        # The number of characters whose frequency order is less than 512
+        self._freq_chars = 0
+
+    def feed(self, char, char_len):
+        """feed a character with known length"""
+        if char_len == 2:
+            # we only care about 2-byte characters in our distribution analysis
+            order = self.get_order(char)
+        else:
+            order = -1
+        if order >= 0:
+            self._total_chars += 1
+            # order is valid
+            if order < self._table_size:
+                if self._char_to_freq_order[order] < 512:
+                    self._freq_chars += 1
+
+    def get_confidence(self):
+        """return confidence based on existing data"""
+        # if we didn't receive any characters in our consideration range,
+        # return a low-confidence answer
+        if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD:
+            return self.SURE_NO
+
+        if self._total_chars != self._freq_chars:
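+            # Illustrative numbers: with 100 frequent characters out of 300
+            # total and a typical distribution ratio of 0.75, r = 100 /
+            # (200 * 0.75) ~= 0.67, which is returned as the confidence.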
+            r = (self._freq_chars / ((self._total_chars - self._freq_chars)
+                 * self.typical_distribution_ratio))
+            if r < self.SURE_YES:
+                return r
+
+        # normalize confidence (we don't want to be 100% sure)
+        return self.SURE_YES
+
+    def got_enough_data(self):
+        # It is not necessary to receive all of the data to draw a conclusion.
+        # For charset detection, a certain amount of data is enough.
+        return self._total_chars > self.ENOUGH_DATA_THRESHOLD
+
+    def get_order(self, byte_str):
+        # We do not handle characters based on the original encoding string,
+        # but convert this encoding string to a number, here called order.
+        # This allows multiple encodings of a language to share one frequency
+        # table.
+        return -1
+
+
+class EUCTWDistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self):
+        super(EUCTWDistributionAnalysis, self).__init__()
+        self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
+        self._table_size = EUCTW_TABLE_SIZE
+        self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str):
+        # for euc-TW encoding, we are interested
+        #   first  byte range: 0xc4 -- 0xfe
+        #   second byte range: 0xa1 -- 0xfe
+        # no validation needed here. State machine has done that
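+        # 94 second-byte values (0xa1 -- 0xfe) per lead byte, so e.g.
+        # (0xc4, 0xa1) -> 0 and (0xc5, 0xa1) -> 94.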
+        first_char = byte_str[0]
+        if first_char >= 0xC4:
+            return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
+        else:
+            return -1
+
+
+class EUCKRDistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self):
+        super(EUCKRDistributionAnalysis, self).__init__()
+        self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
+        self._table_size = EUCKR_TABLE_SIZE
+        self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str):
+        # for euc-KR encoding, we are interested
+        #   first  byte range: 0xb0 -- 0xfe
+        #   second byte range: 0xa1 -- 0xfe
+        # no validation needed here. State machine has done that
+        first_char = byte_str[0]
+        if first_char >= 0xB0:
+            return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
+        else:
+            return -1
+
+
+class GB2312DistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self):
+        super(GB2312DistributionAnalysis, self).__init__()
+        self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
+        self._table_size = GB2312_TABLE_SIZE
+        self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str):
+        # for GB2312 encoding, we are interested
+        #  first  byte range: 0xb0 -- 0xfe
+        #  second byte range: 0xa1 -- 0xfe
+        # no validation needed here. State machine has done that
+        first_char, second_char = byte_str[0], byte_str[1]
+        if (first_char >= 0xB0) and (second_char >= 0xA1):
+            return 94 * (first_char - 0xB0) + second_char - 0xA1
+        else:
+            return -1
+
+
+class Big5DistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self):
+        super(Big5DistributionAnalysis, self).__init__()
+        self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
+        self._table_size = BIG5_TABLE_SIZE
+        self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str):
+        # for big5 encoding, we are interested
+        #   first  byte range: 0xa4 -- 0xfe
+        #   second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
+        # no validation needed here. State machine has done that
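+        # Each lead byte spans 157 cells: 63 for second bytes 0x40 -- 0x7e
+        # plus 94 for 0xa1 -- 0xfe; e.g. (0xa4, 0x40) -> 0 and
+        # (0xa5, 0xa1) -> 157 + 63 = 220.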
+        first_char, second_char = byte_str[0], byte_str[1]
+        if first_char >= 0xA4:
+            if second_char >= 0xA1:
+                return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
+            else:
+                return 157 * (first_char - 0xA4) + second_char - 0x40
+        else:
+            return -1
+
+
+class SJISDistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self):
+        super(SJISDistributionAnalysis, self).__init__()
+        self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
+        self._table_size = JIS_TABLE_SIZE
+        self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str):
+        # for sjis encoding, we are interested
+        #   first  byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
+        #   second byte range: 0x40 -- 0x7e,  0x81 -- 0xfe
+        # no validation needed here. State machine has done that
+        first_char, second_char = byte_str[0], byte_str[1]
+        if (first_char >= 0x81) and (first_char <= 0x9F):
+            order = 188 * (first_char - 0x81)
+        elif (first_char >= 0xE0) and (first_char <= 0xEF):
+            order = 188 * (first_char - 0xE0 + 31)
+        else:
+            return -1
+        order = order + second_char - 0x40
+        if second_char > 0x7F:
+            order = -1
+        return order
+
+
+class EUCJPDistributionAnalysis(CharDistributionAnalysis):
+    def __init__(self):
+        super(EUCJPDistributionAnalysis, self).__init__()
+        self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
+        self._table_size = JIS_TABLE_SIZE
+        self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
+
+    def get_order(self, byte_str):
+        # for euc-JP encoding, we are interested
+        #   first  byte range: 0xa0 -- 0xfe
+        #   second byte range: 0xa1 -- 0xfe
+        # no validation needed here. State machine has done that
+        char = byte_str[0]
+        if char >= 0xA0:
+            return 94 * (char - 0xA1) + byte_str[1] - 0xA1
+        else:
+            return -1
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/charsetgroupprober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/charsetgroupprober.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b3738efd8ea1e42d196d381d2809beae7f4c649
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/charsetgroupprober.py
@@ -0,0 +1,106 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .enums import ProbingState
+from .charsetprober import CharSetProber
+
+
+class CharSetGroupProber(CharSetProber):
+    def __init__(self, lang_filter=None):
+        super(CharSetGroupProber, self).__init__(lang_filter=lang_filter)
+        self._active_num = 0
+        self.probers = []
+        self._best_guess_prober = None
+
+    def reset(self):
+        super(CharSetGroupProber, self).reset()
+        self._active_num = 0
+        for prober in self.probers:
+            if prober:
+                prober.reset()
+                prober.active = True
+                self._active_num += 1
+        self._best_guess_prober = None
+
+    @property
+    def charset_name(self):
+        if not self._best_guess_prober:
+            self.get_confidence()
+            if not self._best_guess_prober:
+                return None
+        return self._best_guess_prober.charset_name
+
+    @property
+    def language(self):
+        if not self._best_guess_prober:
+            self.get_confidence()
+            if not self._best_guess_prober:
+                return None
+        return self._best_guess_prober.language
+
+    def feed(self, byte_str):
+        for prober in self.probers:
+            if not prober:
+                continue
+            if not prober.active:
+                continue
+            state = prober.feed(byte_str)
+            if not state:
+                continue
+            if state == ProbingState.FOUND_IT:
+                self._best_guess_prober = prober
+                return self.state
+            elif state == ProbingState.NOT_ME:
+                prober.active = False
+                self._active_num -= 1
+                if self._active_num <= 0:
+                    self._state = ProbingState.NOT_ME
+                    return self.state
+        return self.state
+
+    def get_confidence(self):
+        state = self.state
+        if state == ProbingState.FOUND_IT:
+            return 0.99
+        elif state == ProbingState.NOT_ME:
+            return 0.01
+        best_conf = 0.0
+        self._best_guess_prober = None
+        for prober in self.probers:
+            if not prober:
+                continue
+            if not prober.active:
+                self.logger.debug('%s not active', prober.charset_name)
+                continue
+            conf = prober.get_confidence()
+            self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf)
+            if best_conf < conf:
+                best_conf = conf
+                self._best_guess_prober = prober
+        if not self._best_guess_prober:
+            return 0.0
+        return best_conf
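+
+# Illustrative sketch (not part of the upstream file): a group prober fans a
+# buffer out to its children and reports the best guess. The child list below
+# is hypothetical; concrete groups such as the multi-byte group prober
+# populate it in their own __init__:
+#
+#     from .cp949prober import CP949Prober
+#     group = CharSetGroupProber()
+#     group.probers = [CP949Prober()]
+#     group.reset()
+#     group.feed(b'\xb0\xa1\xb0\xa2')
+#     print(group.charset_name, group.get_confidence())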
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/charsetprober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/charsetprober.py
new file mode 100644
index 0000000000000000000000000000000000000000..eac4e5986578636ad414648e6015e8b7e9f10432
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/charsetprober.py
@@ -0,0 +1,145 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#   Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+import logging
+import re
+
+from .enums import ProbingState
+
+
+class CharSetProber(object):
+
+    SHORTCUT_THRESHOLD = 0.95
+
+    def __init__(self, lang_filter=None):
+        self._state = None
+        self.lang_filter = lang_filter
+        self.logger = logging.getLogger(__name__)
+
+    def reset(self):
+        self._state = ProbingState.DETECTING
+
+    @property
+    def charset_name(self):
+        return None
+
+    def feed(self, buf):
+        pass
+
+    @property
+    def state(self):
+        return self._state
+
+    def get_confidence(self):
+        return 0.0
+
+    @staticmethod
+    def filter_high_byte_only(buf):
+        buf = re.sub(b'([\x00-\x7F])+', b' ', buf)
+        return buf
+
+    @staticmethod
+    def filter_international_words(buf):
+        """
+        We define three types of bytes:
+        alphabet: english alphabets [a-zA-Z]
+        international: international characters [\x80-\xFF]
+        marker: everything else [^a-zA-Z\x80-\xFF]
+
+        The input buffer can be thought to contain a series of words delimited
+        by markers. This function works to filter all words that contain at
+        least one international character. All contiguous sequences of markers
+        are replaced by a single space ascii character.
+
+        This filter applies to all scripts which do not use English characters.
+        """
+        filtered = bytearray()
+
+        # This regex matches words that contain at least one international
+        # character. A matched word may include one trailing marker
+        # character.
+        words = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?',
+                           buf)
+
+        for word in words:
+            filtered.extend(word[:-1])
+
+            # If the last character in the word is a marker, replace it with a
+            # space as markers shouldn't affect our analysis (they are used
+            # similarly across all languages and may thus have similar
+            # frequencies).
+            last_char = word[-1:]
+            if not last_char.isalpha() and last_char < b'\x80':
+                last_char = b' '
+            filtered.extend(last_char)
+
+        return filtered
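+
+    # Illustrative example (not part of the upstream file): the word
+    # b'd\xe9f' survives because it contains a high byte, and its trailing
+    # comma marker is replaced with a space:
+    #
+    #     >>> bytes(CharSetProber.filter_international_words(b'abc d\xe9f, ghi'))
+    #     b'd\xe9f '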
+
+    @staticmethod
+    def filter_with_english_letters(buf):
+        """
+        Returns a copy of ``buf`` that retains only the sequences of English
+        alphabet and high byte characters that are not between <> characters.
+        Also retains English alphabet and high byte characters immediately
+        before occurrences of >.
+
+        This filter can be applied to all scripts which contain both English
+        characters and extended ASCII characters, but is currently only used by
+        ``Latin1Prober``.
+        """
+        filtered = bytearray()
+        in_tag = False
+        prev = 0
+
+        for curr in range(len(buf)):
+            # Slice here to get bytes instead of an int with Python 3
+            buf_char = buf[curr:curr + 1]
+            # Check if we're coming out of or entering an HTML tag
+            if buf_char == b'>':
+                in_tag = False
+            elif buf_char == b'<':
+                in_tag = True
+
+            # If current character is not extended-ASCII and not alphabetic...
+            if buf_char < b'\x80' and not buf_char.isalpha():
+                # ...and we're not in a tag
+                if curr > prev and not in_tag:
+                    # Keep everything after last non-extended-ASCII,
+                    # non-alphabetic character
+                    filtered.extend(buf[prev:curr])
+                    # Output a space to delimit stretch we kept
+                    filtered.extend(b' ')
+                prev = curr + 1
+
+        # If we're not in a tag...
+        if not in_tag:
+            # Keep everything after last non-extended-ASCII, non-alphabetic
+            # character
+            filtered.extend(buf[prev:])
+
+        return filtered
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/cli/__init__.py b/lib/python3.7/site-packages/pip/_vendor/chardet/cli/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/cli/__init__.py
@@ -0,0 +1 @@
+
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/cli/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/cli/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20325c453e418ea7c1c31c13ff989a772b9c1d98
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/cli/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/cli/__pycache__/chardetect.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/chardet/cli/__pycache__/chardetect.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3d2d1ae06c13e2041c107b17bb94eab7f1f6397
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/chardet/cli/__pycache__/chardetect.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/cli/chardetect.py b/lib/python3.7/site-packages/pip/_vendor/chardet/cli/chardetect.py
new file mode 100644
index 0000000000000000000000000000000000000000..c61136b639e57a46acd72d2b6b7142a61979d694
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/cli/chardetect.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+"""
+Script which takes one or more file paths and reports on their detected
+encodings
+
+Example::
+
+    % chardetect somefile someotherfile
+    somefile: windows-1252 with confidence 0.5
+    someotherfile: ascii with confidence 1.0
+
+If no paths are provided, it takes its input from stdin.
+
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import argparse
+import sys
+
+from pip._vendor.chardet import __version__
+from pip._vendor.chardet.compat import PY2
+from pip._vendor.chardet.universaldetector import UniversalDetector
+
+
+def description_of(lines, name='stdin'):
+    """
+    Return a string describing the probable encoding of a file or
+    list of strings.
+
+    :param lines: The lines to get the encoding of.
+    :type lines: Iterable of bytes
+    :param name: Name of file or collection of lines
+    :type name: str
+    """
+    u = UniversalDetector()
+    for line in lines:
+        line = bytearray(line)
+        u.feed(line)
+        # shortcut out of the loop to save reading further - particularly useful if we read a BOM.
+        if u.done:
+            break
+    u.close()
+    result = u.result
+    if PY2:
+        name = name.decode(sys.getfilesystemencoding(), 'ignore')
+    if result['encoding']:
+        return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
+                                                     result['confidence'])
+    else:
+        return '{0}: no result'.format(name)
+
+
+def main(argv=None):
+    """
+    Handles command line arguments and gets things started.
+
+    :param argv: List of arguments, as if specified on the command-line.
+                 If None, ``sys.argv[1:]`` is used instead.
+    :type argv: list of str
+    """
+    # Get command line arguments
+    parser = argparse.ArgumentParser(
+        description="Takes one or more file paths and reports their detected \
+                     encodings")
+    parser.add_argument('input',
+                        help='File whose encoding we would like to determine. \
+                              (default: stdin)',
+                        type=argparse.FileType('rb'), nargs='*',
+                        default=[sys.stdin if PY2 else sys.stdin.buffer])
+    parser.add_argument('--version', action='version',
+                        version='%(prog)s {0}'.format(__version__))
+    args = parser.parse_args(argv)
+
+    for f in args.input:
+        if f.isatty():
+            print("You are running chardetect interactively. Press " +
+                  "CTRL-D twice at the start of a blank line to signal the " +
+                  "end of your input. If you want help, run chardetect " +
+                  "--help\n", file=sys.stderr)
+        print(description_of(f, f.name))
+
+
+if __name__ == '__main__':
+    main()
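+
+# Example invocation of this vendored copy (a sketch; the standalone chardet
+# distribution installs a `chardetect` console script instead):
+#
+#     python -m pip._vendor.chardet.cli.chardetect somefile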
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/codingstatemachine.py b/lib/python3.7/site-packages/pip/_vendor/chardet/codingstatemachine.py
new file mode 100644
index 0000000000000000000000000000000000000000..68fba44f14366c448f13db3cf9cf1665af2e498c
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/codingstatemachine.py
@@ -0,0 +1,88 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+import logging
+
+from .enums import MachineState
+
+
+class CodingStateMachine(object):
+    """
+    A state machine to verify a byte sequence for a particular encoding. For
+    each byte the detector receives, it will feed that byte to every active
+    state machine available, one byte at a time. The state machine changes its
+    state based on its previous state and the byte it receives. There are 3
+    states in a state machine that are of interest to an auto-detector:
+
+    START state: This is the state to start with, or the state reached after
+                 a legal byte sequence (i.e. a valid code point for a
+                 character) has been identified.
+
+    ME state:  This indicates that the state machine identified a byte sequence
+               that is specific to the charset it is designed for and that
+               there is no other possible encoding which can contain this byte
+               sequence. This will lead to an immediate positive answer from
+               the detector.
+
+    ERROR state: This indicates the state machine identified an illegal byte
+                 sequence for that encoding. This will lead to an immediate
+                 negative answer for this encoding. The detector will exclude
+                 this encoding from consideration from here on.
+    """
+    def __init__(self, sm):
+        self._model = sm
+        self._curr_byte_pos = 0
+        self._curr_char_len = 0
+        self._curr_state = None
+        self.logger = logging.getLogger(__name__)
+        self.reset()
+
+    def reset(self):
+        self._curr_state = MachineState.START
+
+    def next_state(self, c):
+        # for each byte we get its class
+        # if it is first byte, we also get byte length
+        byte_class = self._model['class_table'][c]
+        if self._curr_state == MachineState.START:
+            self._curr_byte_pos = 0
+            self._curr_char_len = self._model['char_len_table'][byte_class]
+        # from byte's class and state_table, we get its next state
+        curr_state = (self._curr_state * self._model['class_factor']
+                      + byte_class)
+        self._curr_state = self._model['state_table'][curr_state]
+        self._curr_byte_pos += 1
+        return self._curr_state
+
+    def get_current_charlen(self):
+        return self._curr_char_len
+
+    def get_coding_state_machine(self):
+        return self._model['name']
+
+    @property
+    def language(self):
+        return self._model['language']
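+
+# Illustrative sketch (not part of the upstream file): a prober drives the
+# machine one byte at a time. Assuming HZ_SM_MODEL from .escsm:
+#
+#     sm = CodingStateMachine(HZ_SM_MODEL)
+#     for byte in b'~{':                 # HZ shift-in sequence
+#         state = sm.next_state(byte)    # ends in state 4 (inside GB block)
+#     assert state not in (MachineState.ERROR, MachineState.ITS_ME)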
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/compat.py b/lib/python3.7/site-packages/pip/_vendor/chardet/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddd74687c02a12ecc296ee803997a345e984bc9d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/compat.py
@@ -0,0 +1,34 @@
+######################## BEGIN LICENSE BLOCK ########################
+# Contributor(s):
+#   Dan Blanchard
+#   Ian Cordasco
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+import sys
+
+
+if sys.version_info < (3, 0):
+    PY2 = True
+    PY3 = False
+    base_str = (str, unicode)
+    text_type = unicode
+else:
+    PY2 = False
+    PY3 = True
+    base_str = (bytes, str)
+    text_type = str
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/cp949prober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/cp949prober.py
new file mode 100644
index 0000000000000000000000000000000000000000..efd793abca4bf496001a4e46a67557e5a6f16bba
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/cp949prober.py
@@ -0,0 +1,49 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .chardistribution import EUCKRDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
+from .mbcssm import CP949_SM_MODEL
+
+
+class CP949Prober(MultiByteCharSetProber):
+    def __init__(self):
+        super(CP949Prober, self).__init__()
+        self.coding_sm = CodingStateMachine(CP949_SM_MODEL)
+        # NOTE: CP949 is a superset of EUC-KR, so the distribution should
+        #       not differ.
+        self.distribution_analyzer = EUCKRDistributionAnalysis()
+        self.reset()
+
+    @property
+    def charset_name(self):
+        return "CP949"
+
+    @property
+    def language(self):
+        return "Korean"
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/enums.py b/lib/python3.7/site-packages/pip/_vendor/chardet/enums.py
new file mode 100644
index 0000000000000000000000000000000000000000..04512072251c429e63ed110cdbafaf4b3cca3412
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/enums.py
@@ -0,0 +1,76 @@
+"""
+All of the Enums that are used throughout the chardet package.
+
+:author: Dan Blanchard (dan.blanchard@gmail.com)
+"""
+
+
+class InputState(object):
+    """
+    This enum represents the different states a universal detector can be in.
+    """
+    PURE_ASCII = 0
+    ESC_ASCII = 1
+    HIGH_BYTE = 2
+
+
+class LanguageFilter(object):
+    """
+    This enum represents the different language filters we can apply to a
+    ``UniversalDetector``.
+    """
+    CHINESE_SIMPLIFIED = 0x01
+    CHINESE_TRADITIONAL = 0x02
+    JAPANESE = 0x04
+    KOREAN = 0x08
+    NON_CJK = 0x10
+    ALL = 0x1F
+    CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL
+    CJK = CHINESE | JAPANESE | KOREAN
+
+
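+# Illustrative note (not part of the upstream file): the filter values are
+# bit flags, so membership is tested with a bitwise AND, e.g.
+#
+#     LanguageFilter.CJK & LanguageFilter.JAPANESE     # 0x04 -> truthy
+#     LanguageFilter.NON_CJK & LanguageFilter.KOREAN   # 0x00 -> falsy
+
+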
+class ProbingState(object):
+    """
+    This enum represents the different states a prober can be in.
+    """
+    DETECTING = 0
+    FOUND_IT = 1
+    NOT_ME = 2
+
+
+class MachineState(object):
+    """
+    This enum represents the different states a state machine can be in.
+    """
+    START = 0
+    ERROR = 1
+    ITS_ME = 2
+
+
+class SequenceLikelihood(object):
+    """
+    This enum represents the likelihood of a character following the previous one.
+    """
+    NEGATIVE = 0
+    UNLIKELY = 1
+    LIKELY = 2
+    POSITIVE = 3
+
+    @classmethod
+    def get_num_categories(cls):
+        """:returns: The number of likelihood categories in the enum."""
+        return 4
+
+
+class CharacterCategory(object):
+    """
+    This enum represents the different categories language models for
+    ``SingleByteCharsetProber`` put characters into.
+
+    Anything less than CONTROL is considered a letter.
+    """
+    UNDEFINED = 255
+    LINE_BREAK = 254
+    SYMBOL = 253
+    DIGIT = 252
+    CONTROL = 251
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/escprober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/escprober.py
new file mode 100644
index 0000000000000000000000000000000000000000..c70493f2b131b32378612044f30173eabbfbc3f4
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/escprober.py
@@ -0,0 +1,101 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .codingstatemachine import CodingStateMachine
+from .enums import LanguageFilter, ProbingState, MachineState
+from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL,
+                    ISO2022KR_SM_MODEL)
+
+
+class EscCharSetProber(CharSetProber):
+    """
+    This CharSetProber uses a "code scheme" approach for detecting encodings,
+    whereby easily recognizable escape or shift sequences are relied on to
+    identify these encodings.
+    """
+
+    def __init__(self, lang_filter=None):
+        super(EscCharSetProber, self).__init__(lang_filter=lang_filter)
+        self.coding_sm = []
+        if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED:
+            self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL))
+            self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL))
+        if self.lang_filter & LanguageFilter.JAPANESE:
+            self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL))
+        if self.lang_filter & LanguageFilter.KOREAN:
+            self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL))
+        self.active_sm_count = None
+        self._detected_charset = None
+        self._detected_language = None
+        self._state = None
+        self.reset()
+
+    def reset(self):
+        super(EscCharSetProber, self).reset()
+        for coding_sm in self.coding_sm:
+            if not coding_sm:
+                continue
+            coding_sm.active = True
+            coding_sm.reset()
+        self.active_sm_count = len(self.coding_sm)
+        self._detected_charset = None
+        self._detected_language = None
+
+    @property
+    def charset_name(self):
+        return self._detected_charset
+
+    @property
+    def language(self):
+        return self._detected_language
+
+    def get_confidence(self):
+        if self._detected_charset:
+            return 0.99
+        else:
+            return 0.00
+
+    def feed(self, byte_str):
+        for c in byte_str:
+            for coding_sm in self.coding_sm:
+                if not coding_sm or not coding_sm.active:
+                    continue
+                coding_state = coding_sm.next_state(c)
+                if coding_state == MachineState.ERROR:
+                    coding_sm.active = False
+                    self.active_sm_count -= 1
+                    if self.active_sm_count <= 0:
+                        self._state = ProbingState.NOT_ME
+                        return self.state
+                elif coding_state == MachineState.ITS_ME:
+                    self._state = ProbingState.FOUND_IT
+                    self._detected_charset = coding_sm.get_coding_state_machine()
+                    self._detected_language = coding_sm.language
+                    return self.state
+
+        return self.state
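+
+# Illustrative sketch (not part of the upstream file): the prober is normally
+# constructed with an explicit filter (lang_filter=None would fail at the
+# bitwise checks in __init__). Tracing the tables below, the ISO-2022-KR
+# designation sequence drives its state machine straight to ITS_ME:
+#
+#     prober = EscCharSetProber(lang_filter=LanguageFilter.ALL)
+#     prober.feed(b'\x1b$)C')      # ESC $ ) C
+#     prober.charset_name          # 'ISO-2022-KR'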
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/escsm.py b/lib/python3.7/site-packages/pip/_vendor/chardet/escsm.py
new file mode 100644
index 0000000000000000000000000000000000000000..0069523a04969dd920ea4b43a497162157729174
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/escsm.py
@@ -0,0 +1,246 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .enums import MachineState
+
+HZ_CLS = (
+1,0,0,0,0,0,0,0,  # 00 - 07
+0,0,0,0,0,0,0,0,  # 08 - 0f
+0,0,0,0,0,0,0,0,  # 10 - 17
+0,0,0,1,0,0,0,0,  # 18 - 1f
+0,0,0,0,0,0,0,0,  # 20 - 27
+0,0,0,0,0,0,0,0,  # 28 - 2f
+0,0,0,0,0,0,0,0,  # 30 - 37
+0,0,0,0,0,0,0,0,  # 38 - 3f
+0,0,0,0,0,0,0,0,  # 40 - 47
+0,0,0,0,0,0,0,0,  # 48 - 4f
+0,0,0,0,0,0,0,0,  # 50 - 57
+0,0,0,0,0,0,0,0,  # 58 - 5f
+0,0,0,0,0,0,0,0,  # 60 - 67
+0,0,0,0,0,0,0,0,  # 68 - 6f
+0,0,0,0,0,0,0,0,  # 70 - 77
+0,0,0,4,0,5,2,0,  # 78 - 7f
+1,1,1,1,1,1,1,1,  # 80 - 87
+1,1,1,1,1,1,1,1,  # 88 - 8f
+1,1,1,1,1,1,1,1,  # 90 - 97
+1,1,1,1,1,1,1,1,  # 98 - 9f
+1,1,1,1,1,1,1,1,  # a0 - a7
+1,1,1,1,1,1,1,1,  # a8 - af
+1,1,1,1,1,1,1,1,  # b0 - b7
+1,1,1,1,1,1,1,1,  # b8 - bf
+1,1,1,1,1,1,1,1,  # c0 - c7
+1,1,1,1,1,1,1,1,  # c8 - cf
+1,1,1,1,1,1,1,1,  # d0 - d7
+1,1,1,1,1,1,1,1,  # d8 - df
+1,1,1,1,1,1,1,1,  # e0 - e7
+1,1,1,1,1,1,1,1,  # e8 - ef
+1,1,1,1,1,1,1,1,  # f0 - f7
+1,1,1,1,1,1,1,1,  # f8 - ff
+)
+
+HZ_ST = (
+MachineState.START,MachineState.ERROR,     3,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,     4,MachineState.ERROR,# 10-17
+     5,MachineState.ERROR,     6,MachineState.ERROR,     5,     5,     4,MachineState.ERROR,# 18-1f
+     4,MachineState.ERROR,     4,     4,     4,MachineState.ERROR,     4,MachineState.ERROR,# 20-27
+     4,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 28-2f
+)
+
+HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
+
+HZ_SM_MODEL = {'class_table': HZ_CLS,
+               'class_factor': 6,
+               'state_table': HZ_ST,
+               'char_len_table': HZ_CHAR_LEN_TABLE,
+               'name': "HZ-GB-2312",
+               'language': 'Chinese'}
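+
+# Illustrative note (not part of the upstream file): next_state() indexes the
+# flat state table as state_table[curr_state * class_factor + byte_class].
+# For HZ, feeding b'~' (class 2) from START gives HZ_ST[0*6 + 2] == 3, and a
+# following b'{' (class 4) gives HZ_ST[3*6 + 4] == 4, the two-byte GB mode.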
+
+ISO2022CN_CLS = (
+2,0,0,0,0,0,0,0,  # 00 - 07
+0,0,0,0,0,0,0,0,  # 08 - 0f
+0,0,0,0,0,0,0,0,  # 10 - 17
+0,0,0,1,0,0,0,0,  # 18 - 1f
+0,0,0,0,0,0,0,0,  # 20 - 27
+0,3,0,0,0,0,0,0,  # 28 - 2f
+0,0,0,0,0,0,0,0,  # 30 - 37
+0,0,0,0,0,0,0,0,  # 38 - 3f
+0,0,0,4,0,0,0,0,  # 40 - 47
+0,0,0,0,0,0,0,0,  # 48 - 4f
+0,0,0,0,0,0,0,0,  # 50 - 57
+0,0,0,0,0,0,0,0,  # 58 - 5f
+0,0,0,0,0,0,0,0,  # 60 - 67
+0,0,0,0,0,0,0,0,  # 68 - 6f
+0,0,0,0,0,0,0,0,  # 70 - 77
+0,0,0,0,0,0,0,0,  # 78 - 7f
+2,2,2,2,2,2,2,2,  # 80 - 87
+2,2,2,2,2,2,2,2,  # 88 - 8f
+2,2,2,2,2,2,2,2,  # 90 - 97
+2,2,2,2,2,2,2,2,  # 98 - 9f
+2,2,2,2,2,2,2,2,  # a0 - a7
+2,2,2,2,2,2,2,2,  # a8 - af
+2,2,2,2,2,2,2,2,  # b0 - b7
+2,2,2,2,2,2,2,2,  # b8 - bf
+2,2,2,2,2,2,2,2,  # c0 - c7
+2,2,2,2,2,2,2,2,  # c8 - cf
+2,2,2,2,2,2,2,2,  # d0 - d7
+2,2,2,2,2,2,2,2,  # d8 - df
+2,2,2,2,2,2,2,2,  # e0 - e7
+2,2,2,2,2,2,2,2,  # e8 - ef
+2,2,2,2,2,2,2,2,  # f0 - f7
+2,2,2,2,2,2,2,2,  # f8 - ff
+)
+
+ISO2022CN_ST = (
+MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
+MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
+MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     4,MachineState.ERROR,# 18-1f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 20-27
+     5,     6,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 28-2f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 30-37
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,# 38-3f
+)
+
+ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS,
+                      'class_factor': 9,
+                      'state_table': ISO2022CN_ST,
+                      'char_len_table': ISO2022CN_CHAR_LEN_TABLE,
+                      'name': "ISO-2022-CN",
+                      'language': 'Chinese'}
+
+ISO2022JP_CLS = (
+2,0,0,0,0,0,0,0,  # 00 - 07
+0,0,0,0,0,0,2,2,  # 08 - 0f
+0,0,0,0,0,0,0,0,  # 10 - 17
+0,0,0,1,0,0,0,0,  # 18 - 1f
+0,0,0,0,7,0,0,0,  # 20 - 27
+3,0,0,0,0,0,0,0,  # 28 - 2f
+0,0,0,0,0,0,0,0,  # 30 - 37
+0,0,0,0,0,0,0,0,  # 38 - 3f
+6,0,4,0,8,0,0,0,  # 40 - 47
+0,9,5,0,0,0,0,0,  # 48 - 4f
+0,0,0,0,0,0,0,0,  # 50 - 57
+0,0,0,0,0,0,0,0,  # 58 - 5f
+0,0,0,0,0,0,0,0,  # 60 - 67
+0,0,0,0,0,0,0,0,  # 68 - 6f
+0,0,0,0,0,0,0,0,  # 70 - 77
+0,0,0,0,0,0,0,0,  # 78 - 7f
+2,2,2,2,2,2,2,2,  # 80 - 87
+2,2,2,2,2,2,2,2,  # 88 - 8f
+2,2,2,2,2,2,2,2,  # 90 - 97
+2,2,2,2,2,2,2,2,  # 98 - 9f
+2,2,2,2,2,2,2,2,  # a0 - a7
+2,2,2,2,2,2,2,2,  # a8 - af
+2,2,2,2,2,2,2,2,  # b0 - b7
+2,2,2,2,2,2,2,2,  # b8 - bf
+2,2,2,2,2,2,2,2,  # c0 - c7
+2,2,2,2,2,2,2,2,  # c8 - cf
+2,2,2,2,2,2,2,2,  # d0 - d7
+2,2,2,2,2,2,2,2,  # d8 - df
+2,2,2,2,2,2,2,2,  # e0 - e7
+2,2,2,2,2,2,2,2,  # e8 - ef
+2,2,2,2,2,2,2,2,  # f0 - f7
+2,2,2,2,2,2,2,2,  # f8 - ff
+)
+
+ISO2022JP_ST = (
+MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
+MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,# 18-1f
+MachineState.ERROR,     5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     4,MachineState.ERROR,MachineState.ERROR,# 20-27
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     6,MachineState.ITS_ME,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,# 28-2f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,# 30-37
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 38-3f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.START,# 40-47
+)
+
+ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS,
+                      'class_factor': 10,
+                      'state_table': ISO2022JP_ST,
+                      'char_len_table': ISO2022JP_CHAR_LEN_TABLE,
+                      'name': "ISO-2022-JP",
+                      'language': 'Japanese'}
+
+ISO2022KR_CLS = (
+2,0,0,0,0,0,0,0,  # 00 - 07
+0,0,0,0,0,0,0,0,  # 08 - 0f
+0,0,0,0,0,0,0,0,  # 10 - 17
+0,0,0,1,0,0,0,0,  # 18 - 1f
+0,0,0,0,3,0,0,0,  # 20 - 27
+0,4,0,0,0,0,0,0,  # 28 - 2f
+0,0,0,0,0,0,0,0,  # 30 - 37
+0,0,0,0,0,0,0,0,  # 38 - 3f
+0,0,0,5,0,0,0,0,  # 40 - 47
+0,0,0,0,0,0,0,0,  # 48 - 4f
+0,0,0,0,0,0,0,0,  # 50 - 57
+0,0,0,0,0,0,0,0,  # 58 - 5f
+0,0,0,0,0,0,0,0,  # 60 - 67
+0,0,0,0,0,0,0,0,  # 68 - 6f
+0,0,0,0,0,0,0,0,  # 70 - 77
+0,0,0,0,0,0,0,0,  # 78 - 7f
+2,2,2,2,2,2,2,2,  # 80 - 87
+2,2,2,2,2,2,2,2,  # 88 - 8f
+2,2,2,2,2,2,2,2,  # 90 - 97
+2,2,2,2,2,2,2,2,  # 98 - 9f
+2,2,2,2,2,2,2,2,  # a0 - a7
+2,2,2,2,2,2,2,2,  # a8 - af
+2,2,2,2,2,2,2,2,  # b0 - b7
+2,2,2,2,2,2,2,2,  # b8 - bf
+2,2,2,2,2,2,2,2,  # c0 - c7
+2,2,2,2,2,2,2,2,  # c8 - cf
+2,2,2,2,2,2,2,2,  # d0 - d7
+2,2,2,2,2,2,2,2,  # d8 - df
+2,2,2,2,2,2,2,2,  # e0 - e7
+2,2,2,2,2,2,2,2,  # e8 - ef
+2,2,2,2,2,2,2,2,  # f0 - f7
+2,2,2,2,2,2,2,2,  # f8 - ff
+)
+
+ISO2022KR_ST = (
+MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     4,MachineState.ERROR,MachineState.ERROR,# 10-17
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 18-1f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 20-27
+)
+
+ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
+
+ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS,
+                      'class_factor': 6,
+                      'state_table': ISO2022KR_ST,
+                      'char_len_table': ISO2022KR_CHAR_LEN_TABLE,
+                      'name': "ISO-2022-KR",
+                      'language': 'Korean'}
+
+
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/eucjpprober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/eucjpprober.py
new file mode 100644
index 0000000000000000000000000000000000000000..20ce8f7d15bad9d48a3e0363de2286093a4a28cc
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/eucjpprober.py
@@ -0,0 +1,92 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .enums import ProbingState, MachineState
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import EUCJPDistributionAnalysis
+from .jpcntx import EUCJPContextAnalysis
+from .mbcssm import EUCJP_SM_MODEL
+
+
+class EUCJPProber(MultiByteCharSetProber):
+    def __init__(self):
+        super(EUCJPProber, self).__init__()
+        self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
+        self.distribution_analyzer = EUCJPDistributionAnalysis()
+        self.context_analyzer = EUCJPContextAnalysis()
+        self.reset()
+
+    def reset(self):
+        super(EUCJPProber, self).reset()
+        self.context_analyzer.reset()
+
+    @property
+    def charset_name(self):
+        return "EUC-JP"
+
+    @property
+    def language(self):
+        return "Japanese"
+
+    def feed(self, byte_str):
+        for i in range(len(byte_str)):
+            # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte
+            coding_state = self.coding_sm.next_state(byte_str[i])
+            if coding_state == MachineState.ERROR:
+                self.logger.debug('%s %s prober hit error at byte %s',
+                                  self.charset_name, self.language, i)
+                self._state = ProbingState.NOT_ME
+                break
+            elif coding_state == MachineState.ITS_ME:
+                self._state = ProbingState.FOUND_IT
+                break
+            elif coding_state == MachineState.START:
+                char_len = self.coding_sm.get_current_charlen()
+                if i == 0:
+                    self._last_char[1] = byte_str[0]
+                    self.context_analyzer.feed(self._last_char, char_len)
+                    self.distribution_analyzer.feed(self._last_char, char_len)
+                else:
+                    self.context_analyzer.feed(byte_str[i - 1:i + 1],
+                                               char_len)
+                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
+                                                    char_len)
+
+        self._last_char[0] = byte_str[-1]
+
+        if self.state == ProbingState.DETECTING:
+            if (self.context_analyzer.got_enough_data() and
+               (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
+                self._state = ProbingState.FOUND_IT
+
+        return self.state
+
+    def get_confidence(self):
+        context_conf = self.context_analyzer.get_confidence()
+        distrib_conf = self.distribution_analyzer.get_confidence()
+        return max(context_conf, distrib_conf)
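+
+# Illustrative sketch (not part of the upstream file): confidence is the max
+# of the context and distribution analyses. b'\xa4\xb3\xa4\xf3' is assumed to
+# be EUC-JP hiragana ("kon"):
+#
+#     prober = EUCJPProber()
+#     prober.feed(b'\xa4\xb3\xa4\xf3')
+#     prober.get_confidence()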
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/euckrfreq.py b/lib/python3.7/site-packages/pip/_vendor/chardet/euckrfreq.py
new file mode 100644
index 0000000000000000000000000000000000000000..b68078cb9680de4cca65b1145632a37c5e751c38
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/euckrfreq.py
@@ -0,0 +1,195 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# Sampled from about 20M of text material, including literature and computer technology
+
+# 128  --> 0.79
+# 256  --> 0.92
+# 512  --> 0.986
+# 1024 --> 0.99944
+# 2048 --> 0.99999
+#
+# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
+# Random Distribution Ratio = 512 / (2350-512) = 0.279
+#
+# Typical Distribution Ratio
+
+EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
+
+EUCKR_TABLE_SIZE = 2352
+
+# Char to FreqOrder table
+EUCKR_CHAR_TO_FREQ_ORDER = (
+  13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722,  87,
+1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
+1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488,  20,1733,1269,1734,
+ 945,1400,1735,  47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
+ 116, 987, 813,1401, 683,  75,1204, 145,1740,1741,1742,1743,  16, 847, 667, 622,
+ 708,1744,1745,1746, 966, 787, 304, 129,1747,  60, 820, 123, 676,1748,1749,1750,
+1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
+ 344,1763,1764,1765,1766,  89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
+ 709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
+1780, 337, 751,1058,  28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782,  19,
+1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
+1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
+1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
+1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
+ 885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
+1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
+1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
+1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
+1412,1837,1838,  39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
+ 544,1023,1081, 869,  91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
+1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
+ 119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
+ 893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
+1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
+ 282,  96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
+1421, 268,1877,1422,1878,1879,1880, 308,1881,   2, 537,1882,1883,1215,1884,1885,
+ 127, 791,1886,1273,1423,1887,  34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
+   0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
+1894,1123,  48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
+1899, 694,1900, 909, 734,1424, 572, 866,1425, 691,  85, 524,1010, 543, 394, 841,
+1901,1902,1903,1026,1904,1905,1906,1907,1908,1909,  30, 451, 651, 988, 310,1910,
+1911,1426, 810,1216,  93,1912,1913,1277,1217,1914, 858, 759,  45,  58, 181, 610,
+ 269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
+1919, 359,1920, 687,1921, 822,1922, 293,1923,1924,  40, 662, 118, 692,  29, 939,
+ 887, 640, 482, 174,1925,  69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
+ 217, 854,1163, 823,1927,1928,1929,1930, 834,1931,  78,1932, 859,1933,1063,1934,
+1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
+1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
+1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
+1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
+1283,1222,1960,1961,1962,1963,  36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
+1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
+  50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
+ 639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971,   7,
+ 103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
+1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
+ 818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
+1995, 560, 223,1287,  98,   8, 189, 650, 978,1288,1996,1437,1997,  17, 345, 250,
+ 423, 277, 234, 512, 226,  97, 289,  42, 167,1998, 201,1999,2000, 843, 836, 824,
+ 532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
+2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008,  71,1440, 745,
+ 619, 688,2009, 829,2010,2011, 147,2012,  33, 948,2013,2014,  74, 224,2015,  61,
+ 191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
+2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591,  52, 724, 246,2031,2032,
+2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
+2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
+ 719,1170, 959, 440, 437, 534,  84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
+ 819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
+2051,2052,2053,  59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
+ 499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
+1444,2064,2065,  41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
+2069,1292,2070,2071,1445,2072,1446,2073,2074,  55, 588,  66,1447, 271,1092,2075,
+1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
+2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
+2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
+1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
+ 949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
+2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
+2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
+  22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174,  73,1096, 231, 274,
+ 962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
+2141,2142,2143,2144,  11, 374, 844,2145, 154,1232,  46,1461,2146, 838, 830, 721,
+1233, 106,2147,  90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
+2150,1462, 761, 565,2151, 686,2152, 649,2153,  72, 173,2154, 460, 415,2155,1463,
+2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
+2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177,  23, 530, 285,
+2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
+2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193,  10,
+2194, 613, 424,2195, 979, 108, 449, 589,  27, 172,  81,1031,  80, 774, 281, 350,
+1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
+2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
+2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
+2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
+2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
+2243, 521, 486, 548,2244,2245,2246,1473,1300,  53, 549, 137, 875,  76, 158,2247,
+1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
+1475,2249,  82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
+2256,  18, 450, 206,2257, 290, 292,1142,2258, 511, 162,  99, 346, 164, 735,2259,
+1476,1477,   4, 554, 343, 798,1099,2260,1100,2261,  43, 171,1303, 139, 215,2262,
+2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
+1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272,  67,2273,
+ 295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
+2282,2283,2284,2285,2286,  70, 852,1071,2287,2288,2289,2290,  21,  56, 509, 117,
+ 432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
+2294,1046,1479,2295, 340,2296,  63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
+ 808, 494,2299,2300,2301, 903,2302,  37,1072,  14,   5,2303,  79, 675,2304, 312,
+2305,2306,2307,2308,2309,1480,   6,1307,2310,2311,2312,   1, 470,  35,  24, 229,
+2313, 695, 210,  86, 778,  15, 784, 592, 779,  32,  77, 855, 964,2314, 259,2315,
+ 501, 380,2316,2317,  83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
+2320,2321,2322,2323,2324,2325,1485,2326,2327, 128,  57,  68, 261,1048, 211, 170,
+1240,  31,2328,  51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
+ 425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
+1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
+2351,1490,1491,  62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
+1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
+2361,2362, 332,  12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
+ 416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
+2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
+1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
+2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
+1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
+2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
+1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
+ 593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
+2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
+2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
+ 644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
+ 915, 489,2449,1514,1184,2450,2451, 515,  64, 427, 495,2452, 583,2453, 483, 485,
+1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
+1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
+ 291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
+2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
+2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
+ 797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187,  65,2494,
+ 434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
+ 585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
+2499,2500,  49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
+  95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
+ 161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
+2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
+2533,  25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
+ 704, 504, 468, 758, 657,1528, 196,  44, 839,1246, 272, 750,2543, 765, 862,2544,
+2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
+1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
+ 249,1075,2556,2557,2558, 466, 743,2559,2560,2561,  92, 514, 426, 420, 526,2562,
+2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
+2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
+2584,1532,  54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
+   3, 458,   9,  38,2588, 107, 110, 890, 209,  26, 737, 498,2589,1534,2590, 431,
+ 202,  88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
+ 974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
+2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601,  94, 175, 197, 406,
+2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
+2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
+1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
+2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
+ 670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642,  # 512, 256
+)
+
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/euckrprober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/euckrprober.py
new file mode 100644
index 0000000000000000000000000000000000000000..345a060d0230ada58f769b0f6b55f43a428fc3bd
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/euckrprober.py
@@ -0,0 +1,47 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import EUCKRDistributionAnalysis
+from .mbcssm import EUCKR_SM_MODEL
+
+
+class EUCKRProber(MultiByteCharSetProber):
+    def __init__(self):
+        super(EUCKRProber, self).__init__()
+        self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
+        self.distribution_analyzer = EUCKRDistributionAnalysis()
+        self.reset()
+
+    @property
+    def charset_name(self):
+        return "EUC-KR"
+
+    @property
+    def language(self):
+        return "Korean"
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/euctwfreq.py b/lib/python3.7/site-packages/pip/_vendor/chardet/euctwfreq.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed7a995a3aa311583efa5a47e316d4490e5f3463
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/euctwfreq.py
@@ -0,0 +1,387 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# EUCTW frequency table
+# Converted from big5 work
+# by Taiwan's Mandarin Promotion Council
+# <http://www.edu.tw:81/mandr/>
+
+# 128  --> 0.42261
+# 256  --> 0.57851
+# 512  --> 0.74851
+# 1024 --> 0.89384
+# 2048 --> 0.97583
+#
+# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
+# Random Distribution Ratio = 512/(5401-512) = 0.105
+#
+# Typical Distribution Ratio is about 25% of the ideal one, still much higher than RDR
+
+EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
+
+# Char to FreqOrder table
+EUCTW_TABLE_SIZE = 5376
+
+EUCTW_CHAR_TO_FREQ_ORDER = (
+   1,1800,1506, 255,1431, 198,   9,  82,   6,7310, 177, 202,3615,1256,2808, 110,  # 2742
+3735,  33,3241, 261,  76,  44,2113,  16,2931,2184,1176, 659,3868,  26,3404,2643,  # 2758
+1198,3869,3313,4060, 410,2211, 302, 590, 361,1963,   8, 204,  58,4296,7311,1931,  # 2774
+  63,7312,7313, 317,1614,  75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809,  # 2790
+3616,   3,  10,3870,1471,  29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315,  # 2806
+4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932,  34,3501,3173,  64, 604,  # 2822
+7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337,  72, 406,7319,  80,  # 2838
+ 630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449,  69,2969, 591,  # 2854
+ 179,2095, 471, 115,2034,1843,  60,  50,2970, 134, 806,1868, 734,2035,3407, 180,  # 2870
+ 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359,  # 2886
+2495,  90,2707,1338, 663,  11, 906,1099,2545,  20,2436, 182, 532,1716,7321, 732,  # 2902
+1376,4062,1311,1420,3175,  25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529,  # 2918
+3243, 475,1447,3617,7322, 117,  21, 656, 810,1297,2295,2329,3502,7323, 126,4063,  # 2934
+ 706, 456, 150, 613,4299,  71,1118,2036,4064, 145,3069,  85, 835, 486,2114,1246,  # 2950
+1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221,  # 2966
+3503,3110,7325,1955,1153,4065,  83, 296,1199,3070, 192, 624,  93,7326, 822,1897,  # 2982
+2810,3111, 795,2064, 991,1554,1542,1592,  27,  43,2853, 859, 139,1456, 860,4300,  # 2998
+ 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618,  # 3014
+3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228,  # 3030
+1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077,  # 3046
+7328,7329,2173,3176,3619,2673, 593, 845,1062,3244,  88,1723,2037,3875,1950, 212,  # 3062
+ 266, 152, 149, 468,1898,4066,4302,  77, 187,7330,3018,  37,   5,2972,7331,3876,  # 3078
+7332,7333,  39,2517,4303,2894,3177,2078,  55, 148,  74,4304, 545, 483,1474,1029,  # 3094
+1665, 217,1869,1531,3113,1104,2645,4067,  24, 172,3507, 900,3877,3508,3509,4305,  # 3110
+  32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674,   4,3019,3314,1427,1788,  # 3126
+ 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520,  # 3142
+3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439,  38,7339,1063,7340, 794,  # 3158
+3879,1435,2296,  46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804,  35, 707,  # 3174
+ 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409,  # 3190
+2128,1363,3623,1423, 697, 100,3071,  48,  70,1231, 495,3114,2193,7345,1294,7346,  # 3206
+2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411,  # 3222
+ 314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412,  # 3238
+ 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933,  # 3254
+3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895,  # 3270
+1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369,  # 3286
+1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000,  # 3302
+1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381,   7,  # 3318
+2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313,  # 3334
+ 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513,  # 3350
+4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647,  # 3366
+1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357,  # 3382
+7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438,  # 3398
+2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978,  # 3414
+ 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210,  # 3430
+  98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642,  # 3446
+ 523,2776,2777,2648,7364, 141,2231,1333,  68, 176, 441, 876, 907,4077, 603,2592,  # 3462
+ 710, 171,3417, 404, 549,  18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320,  # 3478
+7366,2973, 368,7367, 146, 366,  99, 871,3627,1543, 748, 807,1586,1185,  22,2258,  # 3494
+ 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702,  # 3510
+1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371,  59,7372,  # 3526
+ 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836,  # 3542
+ 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629,  # 3558
+7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686,  # 3574
+1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496,  # 3590
+ 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560,  # 3606
+3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496,  # 3622
+4081,  57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082,  # 3638
+3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083,  # 3654
+ 279,3120,  51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264,  # 3670
+ 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411,  # 3686
+1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483,  # 3702
+4084,2468,1436, 953,4085,2054,4331, 671,2395,  79,4086,2441,3252, 608, 567,2680,  # 3718
+3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672,  # 3734
+3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681,  # 3750
+2397,7400,7401,7402,4089,3025,   0,7403,2469, 315, 231,2442, 301,3319,4335,2380,  # 3766
+7404, 233,4090,3631,1818,4336,4337,7405,  96,1776,1315,2082,7406, 257,7407,1809,  # 3782
+3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183,  # 3798
+7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934,  # 3814
+1484,7413,1712, 127,  67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351,  # 3830
+2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545,  # 3846
+1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358,  # 3862
+  78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338,  # 3878
+1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423,  # 3894
+4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859,  # 3910
+3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636,  # 3926
+ 534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344,  # 3942
+ 165, 243,4345,3637,2521, 123, 683,4096, 764,4346,  36,3895,1792, 589,2902, 816,  # 3958
+ 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891,  # 3974
+2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662,  # 3990
+7425, 611,1156, 854,2381,1316,2861,   2, 386, 515,2904,7426,7427,3253, 868,2234,  # 4006
+1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431,  # 4022
+2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676,  # 4038
+1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437,  # 4054
+1993,7438,4350,7439,7440,2195,  13,2779,3638,2980,3124,1229,1916,7441,3756,2131,  # 4070
+7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307,  # 4086
+7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519,  # 4102
+7452, 128,2132,  92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980,  # 4118
+3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401,  # 4134
+4353,2248,  94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101,  # 4150
+1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937,  # 4166
+7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466,  # 4182
+2332,2067,  23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526,  # 4198
+7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598,  # 4214
+3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471,  # 4230
+3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863,  41,7473,  # 4246
+7474,4361,7475,1657,2333,  19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323,  # 4262
+2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416,  # 4278
+7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427,  # 4294
+ 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110,  # 4310
+4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485,  # 4326
+2683, 733,  40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428,  # 4342
+7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907,  # 4358
+3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901,  # 4374
+2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870,  # 4390
+2752,2986,7490, 435,7491, 343,1108, 596,  17,1751,4365,2235,3430,3643,7492,4366,  # 4406
+ 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031,  # 4422
+2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240,  # 4438
+1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521,  # 4454
+1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673,  # 4470
+2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260,  # 4486
+1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619,  # 4502
+7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506,  # 4518
+7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382,  # 4534
+2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324,  # 4550
+4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384,  # 4566
+1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551,  30,2263,4122,  # 4582
+7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192,  # 4598
+ 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388,  # 4614
+4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129,  # 4630
+ 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523,  # 4646
+2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692,  # 4662
+ 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915,  # 4678
+1041,2987, 293,1168,  87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219,  # 4694
+1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825,  # 4710
+ 730,1515, 184,2827,  66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975,  # 4726
+3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394,  # 4742
+3918,7535,7536,1186,  15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758,  # 4758
+1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434,  # 4774
+3541,1342,1681,1718, 766,3264, 286,  89,2946,3649,7540,1713,7541,2597,3334,2990,  # 4790
+7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335,  # 4806
+7544,3265, 310, 313,3435,2299, 770,4134,  54,3034, 189,4397,3082,3769,3922,7545,  # 4822
+1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137,  # 4838
+2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471,  # 4854
+1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555,  # 4870
+3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139,  # 4886
+2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729,  # 4902
+3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482,  # 4918
+2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652,  # 4934
+4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867,  # 4950
+4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499,  # 4966
+3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250,  # 4982
+  97,  81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830,  # 4998
+3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188,  # 5014
+ 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408,  # 5030
+3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447,  # 5046
+3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527,  # 5062
+3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932,  # 5078
+1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411,  # 5094
+7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270,  # 5110
+ 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589,  # 5126
+7590, 587,  14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591,  # 5142
+1702,1226, 102,1547,  62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756,  # 5158
+ 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145,  # 5174
+4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598,  86,1494,1730,  # 5190
+3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069,  # 5206
+ 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938,  # 5222
+2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625,  # 5238
+2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885,  28,2686,  # 5254
+3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797,  # 5270
+1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958,  # 5286
+4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528,  # 5302
+2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241,  # 5318
+1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169,  # 5334
+1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540,  # 5350
+2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342,  # 5366
+3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425,  # 5382
+1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427,  # 5398
+7617,3446,7618,7619,7620,3277,2689,1433,3278, 131,  95,1504,3946, 723,4159,3141,  # 5414
+1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949,  # 5430
+4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654,  53,7624,2996,7625,  # 5446
+1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202,  # 5462
+ 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640,  # 5478
+1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936,  # 5494
+3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955,  # 5510
+3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910,  # 5526
+2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325,  # 5542
+1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024,  # 5558
+4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340,  # 5574
+ 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918,  # 5590
+7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439,  # 5606
+2317,3283,7650,7651,4164,7652,4165,  84,4166, 112, 989,7653, 547,1059,3961, 701,  # 5622
+3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494,  # 5638
+4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285,  # 5654
+ 790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077,  # 5670
+7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443,  # 5686
+7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169,  # 5702
+1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906,  # 5718
+4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968,  # 5734
+3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804,  # 5750
+2690,1516,3559,1121,1082,1329,3284,3970,1449,3794,  65,1128,2835,2913,2759,1590,  # 5766
+3795,7674,7675,  12,2658,  45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676,  # 5782
+3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680,  # 5798
+2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285,  # 5814
+1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687,  # 5830
+4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454,  # 5846
+3670,1858,  91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403,  # 5862
+3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973,  # 5878
+2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454,  # 5894
+4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761,  61,3976,3672,1822,3977,  # 5910
+7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695,  # 5926
+3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945,  # 5942
+2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460,  # 5958
+3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179,  # 5974
+1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706,  # 5990
+2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982,  # 6006
+3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183,  # 6022
+4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043,  56,1396,3090,  # 6038
+2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717,  # 6054
+2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985,  # 6070
+7722,1076,  49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184,  # 6086
+1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472,  # 6102
+2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351,  # 6118
+1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714,  # 6134
+3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404,  # 6150
+4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629,  31,2838,  # 6166
+2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620,  # 6182
+3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738,  # 6198
+3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869,  # 6214
+2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558,  # 6230
+4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107,  # 6246
+2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216,  # 6262
+3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984,  # 6278
+4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705,  # 6294
+7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687,  # 6310
+3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840,  # 6326
+ 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521,  # 6342
+1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412,  42,3096, 464,7759,2632,  # 6358
+4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295,  # 6374
+1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765,  # 6390
+4487,7766,3002, 962, 588,3574, 289,3219,2634,1116,  52,7767,3047,1796,7768,7769,  # 6406
+7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572,  # 6422
+ 510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776,  # 6438
+7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911,  # 6454
+2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693,  # 6470
+1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672,  # 6486
+1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013,  # 6502
+3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816,  # 6518
+ 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010,  # 6534
+ 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175,  # 6550
+ 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473,  # 6566
+3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298,  # 6582
+2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359,  # 6598
+ 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805,  # 6614
+7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807,  # 6630
+1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810,  # 6646
+3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812,  # 6662
+7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814,  # 6678
+1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818,  # 6694
+7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821,  # 6710
+4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877,  # 6726
+1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702,  # 6742
+2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813,  # 6758
+2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503,  # 6774
+4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484,  # 6790
+ 802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833,  # 6806
+ 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457,  # 6822
+3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704,  # 6838
+3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878,  # 6854
+1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508,  # 6870
+2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451,  # 6886
+7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509,  # 6902
+1561,2664,1452,4010,1375,7855,7856,  47,2959, 316,7857,1406,1591,2923,3156,7858,  # 6918
+1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428,  # 6934
+3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800,  # 6950
+ 919,2347,2960,2348,1270,4511,4012,  73,7862,7863, 647,7864,3228,2843,2255,1550,  # 6966
+1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347,  # 6982
+4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515,  # 6998
+7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665,  # 7014
+2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518,  # 7030
+3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833,  # 7046
+ 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961,  # 7062
+1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508,  # 7078
+2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482,  # 7094
+2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098,  # 7110
+7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483,  # 7126
+7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834,  # 7142
+7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904,  # 7158
+2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724,  # 7174
+2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910,  # 7190
+1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701,  # 7206
+4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062,  # 7222
+3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922,  # 7238
+3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925,  # 7254
+4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248,  # 7270
+4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487,  # 7286
+2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015,  # 7302
+2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935,  # 7318
+7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104,  # 7334
+4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580,  # 7350
+7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380,  # 7366
+2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951,  # 7382
+1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948,  # 7398
+3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488,  # 7414
+4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737,  # 7430
+2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017,  # 7446
+ 120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047,  # 7462
+2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967,  # 7478
+1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385,  # 7494
+2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975,  # 7510
+2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979,  # 7526
+4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982,  # 7542
+7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306,  # 7558
+1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270,  # 7574
+3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012,  # 7590
+7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236,  # 7606
+1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550,  # 7622
+8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746,  # 7638
+2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066,  # 7654
+8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977,  # 7670
+2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009,  # 7686
+2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013,  # 7702
+8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552,  # 7718
+8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023,  # 7734
+8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143,  # 7750
+ 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278,  # 7766
+8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698,  # 7782
+4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706,  # 7798
+3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859,  # 7814
+8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344,  # 7830
+1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894,  # 7846
+8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194,  # 7862
+ 425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760,  # 7878
+1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210,  # 7894
+ 479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642,  # 7910
+4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013,  # 7926
+1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889,  # 7942
+4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239,  # 7958
+1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240,  # 7974
+ 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083,  # 7990
+3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088,  # 8006
+4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094,  # 8022
+8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101,  # 8038
+ 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104,  # 8054
+3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015,  # 8070
+ 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941,  # 8086
+2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118,  # 8102
+)
+
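The header comments in euctwfreq.py above carry the arithmetic behind EUCTW_TYPICAL_DISTRIBUTION_RATIO: the 512 most frequent characters cover 0.74851 of running text, against a 512-out-of-5401 share under a uniform distribution. A worked check of those two ratios, with the numbers taken verbatim from the comments:

    # Ideal Distribution Ratio: coverage of the top 512 chars vs. everything else.
    coverage_512 = 0.74851
    print(round(coverage_512 / (1 - coverage_512), 2))   # 2.98

    # Random Distribution Ratio: the same top-512 split under a uniform distribution.
    print(round(512 / (5401 - 512), 3))                  # 0.105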
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/euctwprober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/euctwprober.py
new file mode 100644
index 0000000000000000000000000000000000000000..35669cc4dd809fa4007ee67c752f1be991135e77
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/euctwprober.py
@@ -0,0 +1,46 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import EUCTWDistributionAnalysis
+from .mbcssm import EUCTW_SM_MODEL
+
+class EUCTWProber(MultiByteCharSetProber):
+    def __init__(self):
+        super(EUCTWProber, self).__init__()
+        self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
+        self.distribution_analyzer = EUCTWDistributionAnalysis()
+        self.reset()
+
+    @property
+    def charset_name(self):
+        return "EUC-TW"
+
+    @property
+    def language(self):
+        return "Taiwan"
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/gb2312freq.py b/lib/python3.7/site-packages/pip/_vendor/chardet/gb2312freq.py
new file mode 100644
index 0000000000000000000000000000000000000000..697837bd9a87a77fc468fd1f10dd470d01701362
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/gb2312freq.py
@@ -0,0 +1,283 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# GB2312 most frequently used character table
+#
+# Char to FreqOrder table, from hz6763
+
+# 512  --> 0.79  -- 0.79
+# 1024 --> 0.92  -- 0.13
+# 2048 --> 0.98  -- 0.06
+# 6768 --> 1.00  -- 0.02
+#
+# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
+# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
+#
+# Typical Distribution Ratio is about 25% of the ideal one, still much higher than RDR
+
+GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
+
+GB2312_TABLE_SIZE = 3760
+
+GB2312_CHAR_TO_FREQ_ORDER = (
+1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
+2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
+2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
+ 249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
+1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
+1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
+ 152,1687,1539, 738,1559,  59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
+1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850,  70,3285,2729,3534,3575,
+2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
+3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
+ 544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
+1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
+ 927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
+2534,1546,2393,2760, 737,2494,  13, 447, 245,2747,  38,2765,2129,2589,1079, 606,
+ 360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
+2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
+1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
+3195,4115,5627,2489,2991,  24,2065,2697,1087,2719,  48,1634, 315,  68, 985,2052,
+ 198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
+1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
+ 253,3099,  32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
+2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
+1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563,  26,
+3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
+1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
+2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
+1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
+ 585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
+3777,3657, 643,2298,1148,1779, 190, 989,3544, 414,  11,2135,2063,2979,1471, 403,
+3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
+ 252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
+3651, 210,  33,1608,2516, 200,1520, 415, 102,   0,3389,1287, 817,  91,3299,2940,
+ 836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687,  20,1819, 121,
+1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
+3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
+2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680,  72, 842,1990, 212,1233,
+1154,1586,  75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
+ 755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
+1910, 534, 529,3309,1721,1660, 274,  39,2827, 661,2670,1578, 925,3248,3815,1094,
+4278,4901,4252,  41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
+ 887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
+3568, 194,5062,  15, 961,3870,1241,1192,2664,  66,5215,3260,2111,1295,1127,2152,
+3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426,  53,2909,
+ 509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
+1272,2363, 284,1753,3679,4064,1695,  81, 815,2677,2757,2731,1386, 859, 500,4221,
+2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
+1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
+1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
+ 389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
+3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
+3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640,  67,2360,
+4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
+ 296,3979,1739,1611,3684,  23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
+3116,  17,1074, 467,2692,2201, 387,2922,  45,1326,3055,1645,3659,2817, 958, 243,
+1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
+1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
+4046,3572,2399,1571,3281,  79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
+ 215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
+ 814,4968,3487,1548,2644,1567,1285,   2, 295,2636,  97, 946,3576, 832, 141,4257,
+3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
+1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
+ 602,1525,2608,1605,1639,3175, 694,3064,  10, 465,  76,2000,4846,4208, 444,3781,
+1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
+2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844,  89, 937,
+ 930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
+ 432, 445,2811, 206,4136,1472, 730, 349,  73, 397,2802,2547, 998,1637,1167, 789,
+ 396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
+3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
+4996, 371,1575,2436,1621,2210, 984,4033,1734,2638,  16,4529, 663,2755,3255,1451,
+3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
+ 750,2058, 165,  80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
+2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
+2357, 395,3740, 137,2075, 944,4089,2584,1267,3802,  62,1533,2285, 178, 176, 780,
+2440, 201,3707, 590, 478,1560,4354,2117,1075,  30,  74,4643,4004,1635,1441,2745,
+ 776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
+2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
+ 968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669,  43,2523,1657,
+ 163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
+ 220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
+3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
+2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
+2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024,  40,3240,1536,
+1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
+  18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
+2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
+  90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
+ 286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
+1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
+1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076,  46,4253,2873,1889,1894,
+ 915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
+ 681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
+1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
+2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
+3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
+2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
+2269,2246,1446,  36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
+2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
+3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
+1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906,  51, 369, 170,3541,
+1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
+2101,2730,2490,  82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
+1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
+3750,2289,2795, 813,3123,2610,1136,4368,   5,3391,4541,2174, 420, 429,1728, 754,
+1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
+1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
+3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
+ 795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
+2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
+1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
+4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
+1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
+1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
+3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
+1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
+  47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
+ 504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096,  99,
+1397,1769,2300,4428,1643,3455,1978,1757,3718,1440,  35,4879,3742,1296,4228,2280,
+ 160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
+1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
+1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
+ 744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
+3708, 135,2131,  87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
+4314,   9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
+3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
+2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
+2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
+1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
+3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
+2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
+1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
+1505,1911,1883,3526, 698,3629,3456,1833,1431, 746,  77,1261,2017,2296,1977,1885,
+ 125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
+2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
+2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
+3192,2910,2010, 140,2395,2859,  55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
+4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
+3399,  98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
+ 180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
+3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
+2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
+1086,1974,2034, 630, 257,3338,2788,4903,1017,  86,4790, 966,2789,1995,1696,1131,
+ 259,3095,4188,1308, 179,1463,5257, 289,4107,1248,  42,3413,1725,2288, 896,1947,
+ 774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
+3034,3310, 540,2370,1562,1288,2990, 502,4765,1147,   4,1853,2708, 207, 294,2814,
+4078,2902,2509, 684,  34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
+2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
+1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
+1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
+ 766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
+1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196,  19, 941,3624,3480,
+3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
+ 955,1089,3103,1053,  96,  88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
+ 642,4006, 903,2539,1877,2082, 596,  29,4066,1790, 722,2157, 130, 995,1569, 769,
+1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445,  50, 625, 487,2207,
+  57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
+1783, 362,   8,3433,3422, 610,2793,3277,1390,1284,1654,  21,3823, 734, 367, 623,
+ 193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
+2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
+ 158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
+2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
+2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
+1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
+1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
+2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
+ 819,1541, 142,2284,  44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
+1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
+1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
+2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
+2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434,  92,1466,4920,2616,
+3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
+1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
+4462,  64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
+ 571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
+ 845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
+3264,2855,2722,1952,1029,2839,2467,  84,4383,2215, 820,1391,2015,2448,3672, 377,
+1948,2168, 797,2545,3536,2578,2645,  94,2874,1678, 405,1259,3071, 771, 546,1315,
+ 470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928,  14,2594, 557,
+3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
+1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
+4031,2641,4067,3145,1870,  37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
+1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
+2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
+1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
+ 498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
+1178,2639,2351,  93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
+3341,1618,4126,2595,2334, 603, 651,  69, 701, 268,2662,3411,2555,1380,1606, 503,
+ 448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
+2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
+ 136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
+1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
+1281,  52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169,  27,
+1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
+3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
+2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
+3891,2868,3621,2254,  58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
+3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
+3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
+ 996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
+2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
+ 786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
+2724,1927,2333,4440, 567,  22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
+  12, 974,3783,4391, 951,1412,   1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
+1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040,  31,
+ 475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
+ 233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
+1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
+3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
+3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118,  63,2076, 314,1881,
+1348,1061, 172, 978,3515,1747, 532, 511,3970,   6, 601, 905,2699,3300,1751, 276,
+1467,3725,2668,  65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
+3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
+2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
+2754,  95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
+1985, 244,2546, 474, 495,1046,2611,1851,2061,  71,2089,1675,2590, 742,3758,2843,
+3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
+ 451,   3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
+4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
+1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
+2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078,  49,3770,
+3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
+3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
+1197,1663,4476,3127,  85,4240,2528,  25,1111,1181,3673, 407,3470,4561,2679,2713,
+ 768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
+ 391,2963, 187,  61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
+2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
+ 931, 317,2517,3027, 325, 569, 686,2107,3084,  60,1042,1333,2794, 264,3177,4014,
+1628, 258,3712,   7,4464,1176,1043,1778, 683, 114,1975,  78,1492, 383,1886, 510,
+ 386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
+1282,1289,4609, 697,1453,3044,2666,3611,1856,2412,  54, 719,1330, 568,3778,2459,
+1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
+1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
+1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421,  56,1908,1640,2387,2232,
+1917,1874,2477,4921, 148,  83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
+ 381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
+ 852,1221,1400,1486, 882,2299,4036, 351,  28,1122, 700,6479,6480,6481,6482,6483,  #last 512
+)
+
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/gb2312prober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/gb2312prober.py
new file mode 100644
index 0000000000000000000000000000000000000000..8446d2dd959721cc86d4ae5a7699197454f3aa91
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/gb2312prober.py
@@ -0,0 +1,46 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import GB2312DistributionAnalysis
+from .mbcssm import GB2312_SM_MODEL
+
+class GB2312Prober(MultiByteCharSetProber):
+    def __init__(self):
+        super(GB2312Prober, self).__init__()
+        self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
+        self.distribution_analyzer = GB2312DistributionAnalysis()
+        self.reset()
+
+    @property
+    def charset_name(self):
+        return "GB2312"
+
+    @property
+    def language(self):
+        return "Chinese"
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/hebrewprober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/hebrewprober.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0e1bf49268203d1f9d14cbe73753d95dc66c8a4
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/hebrewprober.py
@@ -0,0 +1,292 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+#          Shy Shalom
+# Portions created by the Initial Developer are Copyright (C) 2005
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState
+
+# This prober doesn't actually recognize a language or a charset.
+# It is a helper prober used by the Hebrew model probers.
+
+### General ideas of the Hebrew charset recognition ###
+#
+# Four main charsets exist for Hebrew:
+# "ISO-8859-8" - Visual Hebrew
+# "windows-1255" - Logical Hebrew
+# "ISO-8859-8-I" - Logical Hebrew
+# "x-mac-hebrew" - ?? Logical Hebrew ??
+#
+# Both "ISO" charsets use a completely identical set of code points, whereas
+# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
+# these code points. windows-1255 defines additional characters in the range
+# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
+# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
+# x-mac-hebrew defines similar additional code points but with a different
+# mapping.
+#
+# As far as an average Hebrew text with no diacritics is concerned, all four
+# charsets are identical with respect to code points: for the main Hebrew
+# alphabet, all four map the same values to all 27 Hebrew letters
+# (including final letters).
+#
+# The dominant difference between these charsets is their directionality.
+# "Visual" directionality means that the text is ordered as if the renderer is
+# not aware of a BIDI rendering algorithm. The renderer sees the text and
+# draws it from left to right. The text itself when ordered naturally is read
+# backwards. A buffer of Visual Hebrew generally looks like so:
+# "[last word of first line spelled backwards] [whole line ordered backwards
+# and spelled backwards] [first word of first line spelled backwards]
+# [end of line] [last word of second line] ... etc' "
+# Adding punctuation marks, numbers and English text to visual text is
+# naturally also "visual" and from left to right.
+#
+# "Logical" directionality means the text is ordered "naturally" according to
+# the order it is read. It is the responsibility of the renderer to display
+# the text from right to left. A BIDI algorithm is used to place general
+# punctuation marks, numbers and English text in the text.
+#
+# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
+# what little evidence I could find, it seems that the charset's general
+# directionality is Logical.
+#
+# To sum up all of the above, the Hebrew probing mechanism knows about two
+# charsets:
+# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
+#    backwards while line order is natural. For charset recognition purposes
+#    the line order is unimportant (In fact, for this implementation, even
+#    word order is unimportant).
+# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
+#
+# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
+#    specifically identified.
+# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
+#    that contains special punctuation marks or diacritics is displayed with
+#    some unconverted characters showing as question marks. This problem might
+#    be corrected using another model prober for x-mac-hebrew. Due to the fact
+#    that x-mac-hebrew texts are so rare, writing another model prober isn't
+#    worth the effort and performance hit.
+#
+#### The Prober ####
+#
+# The prober is divided between two SBCharSetProbers and a HebrewProber,
+# all of which are managed, created, fed data, inquired and deleted by the
+# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
+# fact some kind of Hebrew, Logical or Visual. The final decision about which
+# of the two it is is made by the HebrewProber by combining final-letter scores
+# with the scores of the two SBCharSetProbers to produce a final answer.
+#
+# The SBCSGroupProber is responsible for stripping the original text of HTML
+# tags, English characters, numbers, low-ASCII punctuation characters, spaces
+# and new lines. It reduces any sequence of such characters to a single space.
+# The buffer fed to each prober in the SBCS group prober is pure text in
+# high-ASCII.
+# The two SBCharSetProbers (model probers) share the same language model:
+# Win1255Model.
+# The first SBCharSetProber uses the model normally as any other
+# SBCharSetProber does, to recognize windows-1255, upon which this model was
+# built. The second SBCharSetProber is told to make the pair-of-letter
+# lookup in the language model backwards. This in practice exactly simulates
+# a visual Hebrew model using the windows-1255 logical Hebrew model.
+#
+# The HebrewProber does not use any language model. All it does is look for
+# final-letter evidence suggesting the text is either logical Hebrew or visual
+# Hebrew. In isolation from the model probers, the results of the HebrewProber
+# alone are meaningless. HebrewProber always returns 0.00 as confidence
+# since it never identifies a charset by itself. Instead, the pointer to the
+# HebrewProber is passed to the model probers as a helper "Name Prober".
+# When the Group prober receives a positive identification from any prober,
+# it asks for the name of the charset identified. If the prober queried is a
+# Hebrew model prober, the model prober forwards the call to the
+# HebrewProber to make the final decision. In the HebrewProber, the
+# decision is made according to the final-letter scores it maintains and both
+# model probers' scores. The answer is returned in the form of the name of the
+# charset identified, either "windows-1255" or "ISO-8859-8".
+
+class HebrewProber(CharSetProber):
+    # windows-1255 / ISO-8859-8 code points of interest
+    FINAL_KAF = 0xea
+    NORMAL_KAF = 0xeb
+    FINAL_MEM = 0xed
+    NORMAL_MEM = 0xee
+    FINAL_NUN = 0xef
+    NORMAL_NUN = 0xf0
+    FINAL_PE = 0xf3
+    NORMAL_PE = 0xf4
+    FINAL_TSADI = 0xf5
+    NORMAL_TSADI = 0xf6
+
+    # Minimum Visual vs Logical final letter score difference.
+    # If the difference is below this, don't rely solely on the final letter score
+    # distance.
+    MIN_FINAL_CHAR_DISTANCE = 5
+
+    # Minimum Visual vs Logical model score difference.
+    # If the difference is below this, don't rely at all on the model score
+    # distance.
+    MIN_MODEL_DISTANCE = 0.01
+
+    VISUAL_HEBREW_NAME = "ISO-8859-8"
+    LOGICAL_HEBREW_NAME = "windows-1255"
+
+    def __init__(self):
+        super(HebrewProber, self).__init__()
+        self._final_char_logical_score = None
+        self._final_char_visual_score = None
+        self._prev = None
+        self._before_prev = None
+        self._logical_prober = None
+        self._visual_prober = None
+        self.reset()
+
+    def reset(self):
+        self._final_char_logical_score = 0
+        self._final_char_visual_score = 0
+        # The last two characters seen in the previous buffer,
+        # self._prev and self._before_prev, are initialized to the space byte
+        # in order to simulate a word delimiter at the beginning of the data.
+        # (0x20 rather than ' ': feed() iterates over a bytes object, which
+        # yields integers on Python 3, so str comparisons would never match.)
+        self._prev = 0x20
+        self._before_prev = 0x20
+        # These probers are owned by the group prober.
+
+    def set_model_probers(self, logicalProber, visualProber):
+        self._logical_prober = logicalProber
+        self._visual_prober = visualProber
+
+    def is_final(self, c):
+        return c in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN,
+                     self.FINAL_PE, self.FINAL_TSADI]
+
+    def is_non_final(self, c):
+        # The normal Tsadi is not a good Non-Final letter due to words like
+        # 'lechotet' (to chat) containing an apostrophe after the tsadi. This
+        # apostrophe is converted to a space in FilterWithoutEnglishLetters,
+        # causing the Non-Final tsadi to appear at the end of a word even
+        # though this is not the case in the original text.
+        # The letters Pe and Kaf rarely display a related behavior of not
+        # being good Non-Final letters: words like 'Pop', 'Winamp' and
+        # 'Mubarak', for example, legally end with a Non-Final Pe or Kaf.
+        # However, the benefit of treating these letters as Non-Final
+        # outweighs the damage, since such words are quite rare.
+        return c in [self.NORMAL_KAF, self.NORMAL_MEM,
+                     self.NORMAL_NUN, self.NORMAL_PE]
+
+    def feed(self, byte_str):
+        # Final letter analysis for logical-visual decision.
+        # Look for evidence that the received buffer is either logical Hebrew
+        # or visual Hebrew.
+        # The following cases are checked:
+        # 1) A word longer than 1 letter, ending with a final letter. This is
+        #    an indication that the text is laid out "naturally" since the
+        #    final letter really appears at the end. +1 for logical score.
+        # 2) A word longer than 1 letter, ending with a Non-Final letter. In
+        #    normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi
+        #    should not end with the Non-Final form of that letter. Exceptions
+        #    to this rule are mentioned above in is_non_final(). This is an
+        #    indication that the text is laid out backwards. +1 for visual
+        #    score.
+        # 3) A word longer than 1 letter, starting with a final letter. Final
+        #    letters should not appear at the beginning of a word. This is an
+        #    indication that the text is laid out backwards. +1 for visual
+        #    score.
+        #
+        # The visual score and logical score are accumulated throughout the
+        # text and are finally checked against each other in GetCharSetName().
+        # No checking for final letters in the middle of words is done since
+        # that case is not an indication for either Logical or Visual text.
+        #
+        # We automatically filter out all 7-bit characters (replace them with
+        # spaces) so the word boundary detection works properly.
+
+        if self.state == ProbingState.NOT_ME:
+            # Both model probers say it's not them. No reason to continue.
+            return ProbingState.NOT_ME
+
+        byte_str = self.filter_high_byte_only(byte_str)
+
+        for cur in byte_str:
+            if cur == 0x20:  # space byte; cur is an int when iterating bytes
+                # We stand on a space - a word just ended
+                if self._before_prev != 0x20:
+                    # next-to-last char was not a space so self._prev is not a
+                    # 1 letter word
+                    if self.is_final(self._prev):
+                        # case (1) [-2:not space][-1:final letter][cur:space]
+                        self._final_char_logical_score += 1
+                    elif self.is_non_final(self._prev):
+                        # case (2) [-2:not space][-1:Non-Final letter][
+                        #  cur:space]
+                        self._final_char_visual_score += 1
+            else:
+                # Not standing on a space
+                if ((self._before_prev == 0x20) and
+                        (self.is_final(self._prev)) and (cur != 0x20)):
+                    # case (3) [-2:space][-1:final letter][cur:not space]
+                    self._final_char_visual_score += 1
+            self._before_prev = self._prev
+            self._prev = cur
+
+        # Forever detecting, till the end or until both model probers return
+        # ProbingState.NOT_ME (handled above)
+        return ProbingState.DETECTING
+
+    @property
+    def charset_name(self):
+        # Make the decision: is it Logical or Visual?
+        # If the final letter score distance is dominant enough, rely on it.
+        finalsub = self._final_char_logical_score - self._final_char_visual_score
+        if finalsub >= self.MIN_FINAL_CHAR_DISTANCE:
+            return self.LOGICAL_HEBREW_NAME
+        if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE:
+            return self.VISUAL_HEBREW_NAME
+
+        # It's not dominant enough, try to rely on the model scores instead.
+        modelsub = (self._logical_prober.get_confidence()
+                    - self._visual_prober.get_confidence())
+        if modelsub > self.MIN_MODEL_DISTANCE:
+            return self.LOGICAL_HEBREW_NAME
+        if modelsub < -self.MIN_MODEL_DISTANCE:
+            return self.VISUAL_HEBREW_NAME
+
+        # Still no good, back to final letter distance, maybe it'll save the
+        # day.
+        if finalsub < 0.0:
+            return self.VISUAL_HEBREW_NAME
+
+        # Either finalsub > 0 (Logical), or we don't know what to do;
+        # default to Logical.
+        return self.LOGICAL_HEBREW_NAME
+
+    @property
+    def language(self):
+        return 'Hebrew'
+
+    @property
+    def state(self):
+        # Remain active as long as any of the model probers are active.
+        if (self._logical_prober.state == ProbingState.NOT_ME) and \
+           (self._visual_prober.state == ProbingState.NOT_ME):
+            return ProbingState.NOT_ME
+        return ProbingState.DETECTING
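[Editor's note] The wiring described in the comments above happens in sbcsgroupprober.py: the same windows-1255 model backs two SingleByteCharSetProbers, one doing the pair-of-letter lookup forwards (Logical) and one backwards (Visual), with the HebrewProber attached to both as the name prober. A sketch of that hookup, following the vendored chardet 3.x names (treat them as assumptions if this tree differs):

```python
# Sketch of the group-prober wiring from sbcsgroupprober.py.
from pip._vendor.chardet.hebrewprober import HebrewProber
from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetProber
from pip._vendor.chardet.langhebrewmodel import Win1255HebrewModel

hebrew_prober = HebrewProber()
# Same model twice: pair lookup read forwards recognizes Logical Hebrew...
logical_prober = SingleByteCharSetProber(Win1255HebrewModel,
                                         False, hebrew_prober)
# ...and read backwards it simulates a Visual Hebrew model.
visual_prober = SingleByteCharSetProber(Win1255HebrewModel,
                                        True, hebrew_prober)
# The helper needs both model probers so charset_name can break ties.
hebrew_prober.set_model_probers(logical_prober, visual_prober)
```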
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/jisfreq.py b/lib/python3.7/site-packages/pip/_vendor/chardet/jisfreq.py
new file mode 100644
index 0000000000000000000000000000000000000000..83fc082b545106d02622de20f2083e8a7562f96c
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/jisfreq.py
@@ -0,0 +1,325 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# Sampled from about 20M of text material, including literature and computer technology.
+#
+# Japanese character frequency table, applied to both Shift_JIS and EUC-JP.
+# Entries are sorted in frequency order.
+
+# 128  --> 0.77094
+# 256  --> 0.85710
+# 512  --> 0.92635
+# 1024 --> 0.97130
+# 2048 --> 0.99431
+#
+# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
+# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
+#
+# Typical Distribution Ratio: about 25% of the Ideal Distribution Ratio (0.25 * 12.58 ~ 3.1, rounded to 3.0)
+
+JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
+
+# Char to FreqOrder table
+JIS_TABLE_SIZE = 4368
+
+JIS_CHAR_TO_FREQ_ORDER = (
+  40,   1,   6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, #   16
+3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247,  18, 179,5071, 856,1661, #   32
+1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, #   48
+2042,1061,1062,  48,  49,  44,  45, 433, 434,1040,1041, 996, 787,2997,1255,4305, #   64
+2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, #   80
+5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, #   96
+1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, #  112
+5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, #  128
+5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, #  144
+5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, #  160
+5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, #  176
+5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, #  192
+5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, #  208
+1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, #  224
+1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, #  240
+1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, #  256
+2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, #  272
+3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161,  26,3377,   2,3929,  20, #  288
+3691,  47,4100,  50,  17,  16,  35, 268,  27, 243,  42, 155,  24, 154,  29, 184, #  304
+   4,  91,  14,  92,  53, 396,  33, 289,   9,  37,  64, 620,  21,  39, 321,   5, #  320
+  12,  11,  52,  13,   3, 208, 138,   0,   7,  60, 526, 141, 151,1069, 181, 275, #  336
+1591,  83, 132,1475, 126, 331, 829,  15,  69, 160,  59,  22, 157,  55,1079, 312, #  352
+ 109,  38,  23,  25,  10,  19,  79,5195,  61, 382,1124,   8,  30,5196,5197,5198, #  368
+5199,5200,5201,5202,5203,5204,5205,5206,  89,  62,  74,  34,2416, 112, 139, 196, #  384
+ 271, 149,  84, 607, 131, 765,  46,  88, 153, 683,  76, 874, 101, 258,  57,  80, #  400
+  32, 364, 121,1508, 169,1547,  68, 235, 145,2999,  41, 360,3027,  70,  63,  31, #  416
+  43, 259, 262,1383,  99, 533, 194,  66,  93, 846, 217, 192,  56, 106,  58, 565, #  432
+ 280, 272, 311, 256, 146,  82, 308,  71, 100, 128, 214, 655, 110, 261, 104,1140, #  448
+  54,  51,  36,  87,  67,3070, 185,2618,2936,2020,  28,1066,2390,2059,5207,5208, #  464
+5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, #  480
+5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, #  496
+5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, #  512
+4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, #  528
+5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, #  544
+5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, #  560
+5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, #  576
+5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, #  592
+5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, #  608
+5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, #  624
+5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, #  640
+5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, #  656
+5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, #  672
+3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, #  688
+5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, #  704
+5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, #  720
+5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, #  736
+5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, #  752
+5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, #  768
+5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, #  784
+5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, #  800
+5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, #  816
+5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, #  832
+5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, #  848
+5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, #  864
+5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, #  880
+5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, #  896
+5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, #  912
+5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, #  928
+5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, #  944
+5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, #  960
+5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, #  976
+5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, #  992
+5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
+5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
+5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
+5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
+5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
+5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
+5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
+5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
+5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
+5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
+5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
+5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
+5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
+5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
+5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
+5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
+5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
+5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
+5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
+6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
+6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
+6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
+6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
+6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
+6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
+6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
+6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
+4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
+ 854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
+ 665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
+1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619,  65,3302,2045, # 1488
+1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
+ 896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
+3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
+3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
+ 804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
+3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
+3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
+ 586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
+2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
+ 277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
+3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
+1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
+ 380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
+1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
+ 850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
+2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
+2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
+2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
+2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
+1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
+1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
+1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
+1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
+2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
+1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
+2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
+1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
+1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
+1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
+1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
+1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
+1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
+ 606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
+ 684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
+1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
+2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
+2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
+2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
+3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
+3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
+ 884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
+3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
+1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876,  78,2287,1482,1277, # 2176
+ 861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
+2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
+1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
+ 576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
+3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
+4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
+2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
+1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
+2601,1919,1078,  75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
+1075, 292,3818,1756,2602, 317,  98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
+ 385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
+ 178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
+1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
+2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
+2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
+2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
+3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
+1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
+2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
+ 359,2291,1676,  73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
+ 837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
+ 855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
+1209,  96, 587,2166,1032, 260,1072,2153, 173,  94, 226,3244, 819,2006,4642,4114, # 2544
+2203, 231,1744, 782,  97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
+ 633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
+1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
+1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
+ 353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
+1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
+1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
+1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
+ 764,2861,1853, 688,2429,1920,1462,  77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
+2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
+ 278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
+2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
+3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
+2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
+1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
+6147, 441, 762,1771,3447,3607,3608,1904, 840,3037,  86, 939,1385, 572,1370,2445, # 2800
+1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
+2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
+1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
+ 470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
+  72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
+3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
+3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
+1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
+1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
+1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
+1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
+ 123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
+ 913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
+2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
+ 900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
+3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
+2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
+ 423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
+1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
+2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
+ 220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
+1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
+ 745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
+4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
+2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
+1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
+ 666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
+1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
+2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
+ 376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
+6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
+1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
+1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
+2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
+3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
+ 914,2550,2587,  81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
+3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
+1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
+ 674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
+1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
+ 199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
+3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
+ 370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
+2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
+ 414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
+4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
+2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
+1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
+1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
+1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
+ 166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
+1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
+3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
+1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
+3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
+ 264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
+ 543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
+ 983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
+2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
+1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
+ 867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
+1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
+ 894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
+1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
+ 530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
+ 839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
+ 480,2083,1774,3458, 923,2279,1350, 221,3086,  85,2233,2234,3835,1585,3010,2147, # 3872
+1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
+1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
+2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
+4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
+ 227,1351,1645,2453,2193,1421,2887, 812,2121, 634,  95,2435, 201,2312,4665,1646, # 3952
+1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
+ 328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
+1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
+3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
+1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
+2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
+2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
+1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
+1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
+2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
+ 455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
+2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
+1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
+1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
+1279,2136,1697,2335, 204, 721,2097,3838,  90,6186,2085,2505, 191,3967, 124,2148, # 4192
+1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
+3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
+2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
+2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
+ 575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
+3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
+3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
+1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
+2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
+1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
+2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368  #last 512
+)
+
+
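[Editor's note] For context, chardistribution.py consumes this table roughly as follows: each decoded character is mapped to its frequency order, orders below 512 count as "frequent", and confidence is the ratio of frequent to infrequent characters scaled by JIS_TYPICAL_DISTRIBUTION_RATIO. A worked sketch of that arithmetic (the formula mirrors the shape of get_confidence(); the tallies below are made up for illustration):

```python
# Worked sketch of the distribution-based confidence for Japanese text.
TYPICAL_RATIO = 3.0                 # JIS_TYPICAL_DISTRIBUTION_RATIO
freq_chars, total_chars = 450, 500  # hypothetical tallies from one buffer

# confidence ~ frequent / (infrequent * typical_ratio), capped below 1.0
confidence = min(freq_chars / ((total_chars - freq_chars) * TYPICAL_RATIO),
                 0.99)
print(round(confidence, 2))  # -> 0.99: strongly consistent with Japanese
```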
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/jpcntx.py b/lib/python3.7/site-packages/pip/_vendor/chardet/jpcntx.py
new file mode 100644
index 0000000000000000000000000000000000000000..20044e4bc8f5a9775d82be0ecf387ce752732507
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/jpcntx.py
@@ -0,0 +1,233 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+
+# This is the hiragana 2-char sequence table; the number in each cell represents the frequency category of the pair
+jp2CharContext = (
+(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
+(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
+(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
+(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
+(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
+(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
+(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
+(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
+(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
+(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
+(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
+(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
+(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
+(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
+(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
+(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
+(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
+(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
+(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
+(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
+(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
+(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
+(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
+(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
+(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
+(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
+(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
+(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
+(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
+(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
+(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
+(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
+(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
+(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
+(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
+(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
+(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
+(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
+(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
+(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
+(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
+(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
+(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
+(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
+(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
+(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
+(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
+(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
+(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
+(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
+(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
+(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
+(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
+(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
+(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
+(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
+(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
+(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
+(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
+(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
+(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
+(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
+(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
+(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
+(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
+(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
+(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
+(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
+(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
+(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
+(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
+(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
+(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
+(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
+)
+
+class JapaneseContextAnalysis(object):
+    NUM_OF_CATEGORY = 6
+    DONT_KNOW = -1
+    ENOUGH_REL_THRESHOLD = 100
+    MAX_REL_THRESHOLD = 1000
+    MINIMUM_DATA_THRESHOLD = 4
+
+    def __init__(self):
+        self._total_rel = None
+        self._rel_sample = None
+        self._need_to_skip_char_num = None
+        self._last_char_order = None
+        self._done = None
+        self.reset()
+
+    def reset(self):
+        self._total_rel = 0  # total sequences received
+        # category counters; each integer counts sequences in its category
+        self._rel_sample = [0] * self.NUM_OF_CATEGORY
+        # if the last byte in the current buffer is not the last byte of a
+        # character, we need to know how many bytes to skip in the next buffer
+        self._need_to_skip_char_num = 0
+        self._last_char_order = -1  # the order of the previous character
+        # If this flag is set to True, detection is done and conclusion has
+        # been made
+        self._done = False
+
+    def feed(self, byte_str, num_bytes):
+        if self._done:
+            return
+
+        # The buffer we receive is byte oriented, and a character may span
+        # more than one buffer. If the last character of the previous buffer
+        # was incomplete, we recorded how many bytes are needed to complete
+        # it, and we skip those bytes here. (For example, if a two-byte
+        # character's lead byte was the final byte of the last buffer,
+        # _need_to_skip_char_num is 1 and this call starts one byte in.)
+        # We could instead buffer the partial bytes and analyse the character
+        # once it is complete, but one character makes little difference, so
+        # simply skipping it keeps the logic simple and improves performance.
+        i = self._need_to_skip_char_num
+        while i < num_bytes:
+            order, char_len = self.get_order(byte_str[i:i + 2])
+            i += char_len
+            if i > num_bytes:
+                self._need_to_skip_char_num = i - num_bytes
+                self._last_char_order = -1
+            else:
+                if (order != -1) and (self._last_char_order != -1):
+                    self._total_rel += 1
+                    if self._total_rel > self.MAX_REL_THRESHOLD:
+                        self._done = True
+                        break
+                    self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1
+                self._last_char_order = order
+
+    def got_enough_data(self):
+        return self._total_rel > self.ENOUGH_REL_THRESHOLD
+
+    def get_confidence(self):
+        # This is just one way to calculate confidence. It works well for me.
+        if self._total_rel > self.MINIMUM_DATA_THRESHOLD:
+            return (self._total_rel - self._rel_sample[0]) / self._total_rel
+        else:
+            return self.DONT_KNOW
+
+    def get_order(self, byte_str):
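+        # Overridden by the encoding-specific subclasses below; the base
+        # implementation treats every byte as a one-byte character with no
+        # modelled order.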
+        return -1, 1
+
+class SJISContextAnalysis(JapaneseContextAnalysis):
+    def __init__(self):
+        super(SJISContextAnalysis, self).__init__()
+        self._charset_name = "SHIFT_JIS"
+
+    @property
+    def charset_name(self):
+        return self._charset_name
+
+    def get_order(self, byte_str):
+        if not byte_str:
+            return -1, 1
+        # find out current char's byte length
+        first_char = byte_str[0]
+        if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC):
+            char_len = 2
+            if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
+                self._charset_name = "CP932"
+        else:
+            char_len = 1
+
+        # return its order if it is hiragana
+        if len(byte_str) > 1:
+            second_char = byte_str[1]
+            if (first_char == 202) and (0x9F <= second_char <= 0xF1):
+                return second_char - 0x9F, char_len
+
+        return -1, char_len
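+
+# Worked example for the classification above: a lead byte of 0x83 falls in
+# the 0x81-0x9F range, so char_len is 2; an ASCII byte such as 0x41 yields
+# char_len 1; and a lead byte of 0xFB both yields char_len 2 and switches the
+# reported charset to CP932. Only the pair (202, 0x9F..0xF1) produces a
+# non-negative order here; every other sequence is ignored by the context
+# model.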
+
+class EUCJPContextAnalysis(JapaneseContextAnalysis):
+    def get_order(self, byte_str):
+        if not byte_str:
+            return -1, 1
+        # find out current char's byte length
+        first_char = byte_str[0]
+        if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE):
+            char_len = 2
+        elif first_char == 0x8F:
+            char_len = 3
+        else:
+            char_len = 1
+
+        # return its order if it is hiragana
+        if len(byte_str) > 1:
+            second_char = byte_str[1]
+            if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3):
+                return second_char - 0xA1, char_len
+
+        return -1, char_len
+
+
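+# A minimal, self-contained demonstration (illustrative only; the real
+# probers in this package drive these analysers incrementally). The sample
+# repeats the EUC-JP encoding of the hiragana "a" (0xA4 0xA2), which yields
+# enough adjacent character pairs to cross ENOUGH_REL_THRESHOLD: 120
+# characters give 119 counted pairs, so got_enough_data() returns True and
+# get_confidence() reports (total - category-0 count) / total.
+if __name__ == '__main__':
+    analyser = EUCJPContextAnalysis()
+    sample = b"\xa4\xa2" * 120
+    analyser.feed(sample, len(sample))
+    print(analyser.got_enough_data(), analyser.get_confidence())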
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/langbulgarianmodel.py b/lib/python3.7/site-packages/pip/_vendor/chardet/langbulgarianmodel.py
new file mode 100644
index 0000000000000000000000000000000000000000..2aa4fb2e22fc3bfa26b569273c6cb18c4c415dd9
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/langbulgarianmodel.py
@@ -0,0 +1,228 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# 255: Control characters that usually do not appear in any text
+# 254: Carriage Return / Line Feed
+# 253: symbols and punctuation that do not belong to words
+# 252: 0 - 9
+
+# Character Mapping Table:
+# This table is modified based on win1251BulgarianCharToOrderMap, so
+# only orders below 64 are guaranteed valid.
+
+Latin5_BulgarianCharToOrderMap = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82,  # 40
+110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253,  # 50
+253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71,  # 60
+116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253,  # 70
+194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,  # 80
+210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,  # 90
+ 81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238,  # a0
+ 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30,  # b0
+ 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56,  # c0
+  1, 18,  9, 20, 11,  3, 23, 15,  2, 26, 12, 10, 14,  6,  4, 13,  # d0
+  7,  8,  5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16,  # e0
+ 62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253,  # f0
+)
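+
+# For example, Latin5_BulgarianCharToOrderMap[0xD0] == 1: byte 0xD0 maps to
+# the most frequent letter in this frequency ordering, while the reserved
+# values 252 (digits), 253 (punctuation), 254 (CR/LF) and 255 (other control
+# bytes) mark the non-letter classes listed above.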
+
+win1251BulgarianCharToOrderMap = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82,  # 40
+110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253,  # 50
+253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71,  # 60
+116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253,  # 70
+206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220,  # 80
+221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229,  # 90
+ 88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240,  # a0
+ 73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250,  # b0
+ 31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30,  # c0
+ 39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56,  # d0
+  1, 18,  9, 20, 11,  3, 23, 15,  2, 26, 12, 10, 14,  6,  4, 13,  # e0
+  7,  8,  5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16,  # f0
+)
+
+# Model Table:
+# total sequences: 100%
+# first 512 sequences: 96.9392%
+# first 1024 sequences: 3.0618%
+# rest  sequences:     0.2992%
+# negative sequences:  0.0020%
+BulgarianLangModel = (
+0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
+3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
+0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
+0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
+0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
+1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
+0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
+0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
+2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
+3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
+3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
+1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
+3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
+1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
+2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
+2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
+3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
+1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
+2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
+2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
+3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
+1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
+2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
+2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
+2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
+1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
+2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
+1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
+3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
+1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
+3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
+1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
+2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
+1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
+2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
+1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
+2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
+1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
+3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
+1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
+1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
+2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
+1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
+2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
+1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
+1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
+0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
+1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
+2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
+1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
+1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
+0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
+1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
+0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
+0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
+2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
+1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
+0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
+0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
+1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
+1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
+1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+)
+
+Latin5BulgarianModel = {
+  'char_to_order_map': Latin5_BulgarianCharToOrderMap,
+  'precedence_matrix': BulgarianLangModel,
+  'typical_positive_ratio': 0.969392,
+  'keep_english_letter': False,
+  'charset_name': "ISO-8859-5",
+  'language': 'Bulgarian',
+}
+
+Win1251BulgarianModel = {
+  'char_to_order_map': win1251BulgarianCharToOrderMap,
+  'precedence_matrix': BulgarianLangModel,
+  'typical_positive_ratio': 0.969392,
+  'keep_english_letter': False,
+  'charset_name': "windows-1251",
+  'language': 'Bulgarian',
+}
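+
+# Minimal usage sketch (assumption: chardet's SingleByteCharSetProber, the
+# consumer of these model dicts elsewhere in this package; the sample text
+# is hypothetical windows-1251 input).
+if __name__ == '__main__':
+    from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetProber
+    prober = SingleByteCharSetProber(Win1251BulgarianModel)
+    prober.feed('Здравей, свят'.encode('windows-1251'))
+    print(prober.charset_name, prober.get_confidence())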
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/langcyrillicmodel.py b/lib/python3.7/site-packages/pip/_vendor/chardet/langcyrillicmodel.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5f9a1fd19cca472d785bf90c6395cd11b524766
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/langcyrillicmodel.py
@@ -0,0 +1,333 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# KOI8-R language model
+# Character Mapping Table:
+KOI8R_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
+155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
+253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
+ 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
+191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,  # 80
+207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,  # 90
+223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237,  # a0
+238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,  # b0
+ 27,  3, 21, 28, 13,  2, 39, 19, 26,  4, 23, 11,  8, 12,  5,  1,  # c0
+ 15, 16,  9,  7,  6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54,  # d0
+ 59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34,  # e0
+ 35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70,  # f0
+)
+
+win1251_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
+155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
+253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
+ 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
+191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
+207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
+223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
+239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
+ 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
+ 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
+  3, 21, 10, 19, 13,  2, 24, 20,  4, 23, 11,  8, 12,  5,  1, 15,
+  9,  7,  6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
+)
+
+latin5_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
+155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
+253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
+ 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
+191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
+207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
+223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
+ 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
+ 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
+  3, 21, 10, 19, 13,  2, 24, 20,  4, 23, 11,  8, 12,  5,  1, 15,
+  9,  7,  6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
+239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
+)
+
+macCyrillic_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
+155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
+253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
+ 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
+ 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
+ 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
+191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
+207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
+223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
+239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
+  3, 21, 10, 19, 13,  2, 24, 20,  4, 23, 11,  8, 12,  5,  1, 15,
+  9,  7,  6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
+)
+
+IBM855_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
+155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
+253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
+ 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
+191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
+206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
+  3, 37, 21, 44, 28, 58, 13, 41,  2, 48, 39, 53, 19, 46,218,219,
+220,221,222,223,224, 26, 55,  4, 42,225,226,227,228, 23, 60,229,
+230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
+  8, 49, 12, 38,  5, 31,  1, 34, 15,244,245,246,247, 35, 16,248,
+ 43,  9, 45,  7, 32,  6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
+250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
+)
+
+IBM866_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154,  # 40
+155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253,  # 50
+253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69,  # 60
+ 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253,  # 70
+ 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
+ 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
+  3, 21, 10, 19, 13,  2, 24, 20,  4, 23, 11,  8, 12,  5,  1, 15,
+191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
+207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
+223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
+  9,  7,  6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
+239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
+)
+
+# Model Table:
+# total sequences: 100%
+# first 512 sequences: 97.6601%
+# first 1024 sequences: 2.3389%
+# rest  sequences:      0.1237%
+# negative sequences:   0.0009%
+RussianLangModel = (
+0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
+3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
+0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
+0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
+3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
+0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
+1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
+2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
+1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
+2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
+1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
+3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
+1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
+2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
+1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
+1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
+1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
+2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
+1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
+3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
+1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
+2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
+1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
+2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
+0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
+1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
+1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
+1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
+3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
+2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
+3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
+1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
+1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
+0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
+2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
+1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
+1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
+0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
+1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
+2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
+2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
+1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
+1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
+2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
+1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
+0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
+2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
+1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
+1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
+0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
+0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
+0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
+1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
+0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
+0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
+1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
+0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
+2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
+0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
+)
+
+Koi8rModel = {
+  'char_to_order_map': KOI8R_char_to_order_map,
+  'precedence_matrix': RussianLangModel,
+  'typical_positive_ratio': 0.976601,
+  'keep_english_letter': False,
+  'charset_name': "KOI8-R",
+  'language': 'Russian',
+}
+
+Win1251CyrillicModel = {
+  'char_to_order_map': win1251_char_to_order_map,
+  'precedence_matrix': RussianLangModel,
+  'typical_positive_ratio': 0.976601,
+  'keep_english_letter': False,
+  'charset_name': "windows-1251",
+  'language': 'Russian',
+}
+
+Latin5CyrillicModel = {
+  'char_to_order_map': latin5_char_to_order_map,
+  'precedence_matrix': RussianLangModel,
+  'typical_positive_ratio': 0.976601,
+  'keep_english_letter': False,
+  'charset_name': "ISO-8859-5",
+  'language': 'Russian',
+}
+
+MacCyrillicModel = {
+  'char_to_order_map': macCyrillic_char_to_order_map,
+  'precedence_matrix': RussianLangModel,
+  'typical_positive_ratio': 0.976601,
+  'keep_english_letter': False,
+  'charset_name': "MacCyrillic",
+  'language': 'Russian',
+}
+
+Ibm866Model = {
+  'char_to_order_map': IBM866_char_to_order_map,
+  'precedence_matrix': RussianLangModel,
+  'typical_positive_ratio': 0.976601,
+  'keep_english_letter': False,
+  'charset_name': "IBM866",
+  'language': 'Russian',
+}
+
+Ibm855Model = {
+  'char_to_order_map': IBM855_char_to_order_map,
+  'precedence_matrix': RussianLangModel,
+  'typical_positive_ratio': 0.976601,
+  'keep_english_letter': False,
+  'charset_name': "IBM855",
+  'language': 'Russian',
+}
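+
+# Minimal comparative sketch (assumption: chardet's SingleByteCharSetProber).
+# The same RussianLangModel backs six byte-to-order maps, so racing several
+# probers over the same bytes and comparing confidences indicates the
+# likeliest encoding; the KOI8-R sample string is hypothetical input.
+if __name__ == '__main__':
+    from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetProber
+    sample = 'Привет, мир'.encode('koi8-r')
+    for model in (Koi8rModel, Win1251CyrillicModel, Ibm866Model):
+        prober = SingleByteCharSetProber(model)
+        prober.feed(sample)
+        print(prober.charset_name, prober.get_confidence())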
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/langgreekmodel.py b/lib/python3.7/site-packages/pip/_vendor/chardet/langgreekmodel.py
new file mode 100644
index 0000000000000000000000000000000000000000..533222166cca9fce442655d9f3098126f50e6140
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/langgreekmodel.py
@@ -0,0 +1,225 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# 255: Control characters that usually do not appear in any text
+# 254: Carriage Return / Line Feed
+# 253: symbols and punctuation that do not belong to words
+# 252: 0 - 9
+
+# Character Mapping Table:
+Latin7_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85,  # 40
+ 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253,  # 50
+253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55,  # 60
+ 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253,  # 70
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 80
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 90
+253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253,  # a0
+253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123,  # b0
+110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39,  # c0
+ 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15,  # d0
+124,  1, 29, 20, 21,  3, 32, 13, 25,  5, 11, 16, 10,  6, 30,  4,  # e0
+  9,  8, 14,  7,  2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253,  # f0
+)
+
+win1253_char_to_order_map = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85,  # 40
+ 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253,  # 50
+253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55,  # 60
+ 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253,  # 70
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 80
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 90
+253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253,  # a0
+253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123,  # b0
+110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39,  # c0
+ 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15,  # d0
+124,  1, 29, 20, 21,  3, 32, 13, 25,  5, 11, 16, 10,  6, 30,  4,  # e0
+  9,  8, 14,  7,  2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253,  # f0
+)
+
+# Model Table:
+# total sequences: 100%
+# first 512 sequences: 98.2851%
+# first 1024 sequences: 1.7001%
+# rest  sequences:     0.0359%
+# negative sequences:  0.0148%
+GreekLangModel = (
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
+3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
+0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
+2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
+0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
+2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
+2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
+0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
+2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
+0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
+3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
+3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
+2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
+2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
+0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
+0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
+0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
+0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
+0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
+0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
+0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
+0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
+0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
+0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
+0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
+0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
+0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
+0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
+0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
+0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
+0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
+0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
+0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
+0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
+0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
+0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
+0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
+0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
+0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
+0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
+0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
+0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
+0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
+0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
+0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
+0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
+0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
+0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
+0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+)
+
+Latin7GreekModel = {
+  'char_to_order_map': Latin7_char_to_order_map,
+  'precedence_matrix': GreekLangModel,
+  'typical_positive_ratio': 0.982851,
+  'keep_english_letter': False,
+  'charset_name': "ISO-8859-7",
+  'language': 'Greek',
+}
+
+Win1253GreekModel = {
+  'char_to_order_map': win1253_char_to_order_map,
+  'precedence_matrix': GreekLangModel,
+  'typical_positive_ratio': 0.982851,
+  'keep_english_letter': False,
+  'charset_name': "windows-1253",
+  'language': 'Greek',
+}
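+
+# Sketch of the lookup these tables support (assumption: this mirrors what
+# SingleByteCharSetProber does internally with its 64-order sampling window):
+# two adjacent byte orders index the flattened precedence matrix, laid out
+# row-major with 64 columns. The byte pair 0xE1/0xED is an arbitrary example.
+if __name__ == '__main__':
+    SAMPLE_SIZE = 64
+
+    def sequence_category(model, byte1, byte2):
+        # Map raw bytes to frequency orders; only the 64 most frequent
+        # letters participate in the sequence model.
+        order1 = model['char_to_order_map'][byte1]
+        order2 = model['char_to_order_map'][byte2]
+        if order1 < SAMPLE_SIZE and order2 < SAMPLE_SIZE:
+            return model['precedence_matrix'][order1 * SAMPLE_SIZE + order2]
+        return None  # non-letter or rare letter: not modelled
+
+    print(sequence_category(Win1253GreekModel, 0xE1, 0xED))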
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/langhebrewmodel.py b/lib/python3.7/site-packages/pip/_vendor/chardet/langhebrewmodel.py
new file mode 100644
index 0000000000000000000000000000000000000000..58f4c875ec926b85256fd3866369fc8a81a14350
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/langhebrewmodel.py
@@ -0,0 +1,200 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+#          Simon Montagu
+# Portions created by the Initial Developer are Copyright (C) 2005
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#   Shy Shalom - original C code
+#   Shoshannah Forbes - original C code (?)
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# 255: Control characters that usually do not exist in any text
+# 254: Carriage Return/Line Feed
+# 253: symbol (punctuation) that does not belong to a word
+# 252: 0 - 9
+
+# Windows-1255 language model
+# Character Mapping Table:
+WIN1255_CHAR_TO_ORDER_MAP = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85,  # 40
+ 78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253,  # 50
+253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49,  # 60
+ 66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253,  # 70
+124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
+215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
+ 34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
+106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
+ 30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
+238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
+  9,  8, 20, 16,  3,  2, 24, 14, 22,  1, 25, 15,  4, 11,  6, 23,
+ 12, 19, 13, 26, 18, 27, 21, 17,  7, 10,  5,251,252,128, 96,253,
+)
+
+# Model Table:
+# total sequences: 100%
+# first 512 sequences: 98.4004%
+# first 1024 sequences: 1.5981%
+# rest  sequences:      0.087%
+# negative sequences:   0.0015%
+HEBREW_LANG_MODEL = (
+0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
+3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
+1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
+1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
+1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
+1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
+1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
+0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
+0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
+1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
+3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
+0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
+0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
+0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
+0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
+0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
+3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
+0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
+0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
+0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
+0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
+0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
+0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
+3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
+0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
+0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
+0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
+1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
+0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
+3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
+0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
+0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
+0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
+0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
+0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
+0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
+2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
+0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
+0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
+0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
+0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
+1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
+0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
+2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
+1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
+2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
+1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
+2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
+0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
+1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
+0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
+)
+
+Win1255HebrewModel = {
+  'char_to_order_map': WIN1255_CHAR_TO_ORDER_MAP,
+  'precedence_matrix': HEBREW_LANG_MODEL,
+  'typical_positive_ratio': 0.984004,
+  'keep_english_letter': False,
+  'charset_name': "windows-1255",
+  'language': 'Hebrew',
+}
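
The 'typical_positive_ratio' field (0.984004, the "first 512 sequences" share quoted above) normalizes the observed rate of highly likely pairs when confidence is computed. Paraphrasing the single-byte prober's formula as a self-contained sketch (function name and argument list are illustrative, not the vendored API):

def sb_confidence(positive_seqs, total_seqs, freq_chars, total_chars, model):
    # Share of 'very likely' pairs, scaled by how high that share typically
    # is for this language, then damped by the fraction of bytes that were
    # frequent letters at all; capped just below certainty.
    if total_seqs == 0 or total_chars == 0:
        return 0.01
    ratio = positive_seqs / total_seqs / model['typical_positive_ratio']
    ratio *= freq_chars / total_chars
    return min(ratio, 0.99)
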
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/langhungarianmodel.py b/lib/python3.7/site-packages/pip/_vendor/chardet/langhungarianmodel.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb7c095e1ea6523bd00365384e4c662954c678a0
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/langhungarianmodel.py
@@ -0,0 +1,225 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# 255: Control characters that usually do not exist in any text
+# 254: Carriage Return/Line Feed
+# 253: symbol (punctuation) that does not belong to a word
+# 252: 0 - 9
+
+# Character Mapping Table:
+Latin2_HungarianCharToOrderMap = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
+ 46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
+253,  2, 18, 26, 17,  1, 27, 12, 20,  9, 22,  7,  6, 13,  4,  8,
+ 23, 67, 10,  5,  3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
+159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
+175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
+191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
+ 79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
+221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
+232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
+ 82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
+245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
+)
+
+win1250HungarianCharToOrderMap = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
+ 46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
+253,  2, 18, 26, 17,  1, 27, 12, 20,  9, 22,  7,  6, 13,  4,  8,
+ 23, 67, 10,  5,  3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
+161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
+177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
+191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
+ 81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
+221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
+232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
+ 84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
+245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
+)
+
+# Model Table:
+# total sequences: 100%
+# first 512 sequences: 94.7368%
+# first 1024 sequences: 5.2623%
+# rest  sequences:     0.8894%
+# negative sequences:  0.0009%
+HungarianLangModel = (
+0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
+3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
+3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
+3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
+0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
+3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
+0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
+3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
+3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
+3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
+0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
+2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
+0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
+3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
+1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
+1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
+1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
+3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
+2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
+2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
+2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
+2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
+2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
+3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
+2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
+2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
+2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
+1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
+1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
+3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
+1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
+1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
+2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
+2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
+2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
+3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
+2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
+1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
+1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
+2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
+2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
+1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
+1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
+2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
+1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
+1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
+2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
+2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
+2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
+1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
+1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
+1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
+0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
+2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
+2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
+1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
+2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
+1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
+1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
+2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
+2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
+2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
+1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
+2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
+0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
+1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
+0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
+1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
+0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
+2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
+0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
+)
+
+Latin2HungarianModel = {
+  'char_to_order_map': Latin2_HungarianCharToOrderMap,
+  'precedence_matrix': HungarianLangModel,
+  'typical_positive_ratio': 0.947368,
+  'keep_english_letter': True,
+  'charset_name': "ISO-8859-2",
+  'language': 'Hungarian',
+}
+
+Win1250HungarianModel = {
+  'char_to_order_map': win1250HungarianCharToOrderMap,
+  'precedence_matrix': HungarianLangModel,
+  'typical_positive_ratio': 0.947368,
+  'keep_english_letter': True,
+  'charset_name': "windows-1250",
+  'language': 'Hungarian',
+}
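
Both Hungarian dicts share one HungarianLangModel and differ only in their byte-to-order maps, so two probers can score the same input side by side; note also that keep_english_letter is True here, since Hungarian is Latin-script and ASCII letters carry signal rather than being filtered as presumed-English noise. A usage sketch, assuming this vendored import path:

from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetProber
from pip._vendor.chardet.langhungarianmodel import (
    Latin2HungarianModel, Win1250HungarianModel)

text = 'különböző magyar szöveg'.encode('iso-8859-2')
for model in (Latin2HungarianModel, Win1250HungarianModel):
    prober = SingleByteCharSetProber(model)
    prober.feed(text)
    print(prober.charset_name, prober.get_confidence())
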
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/langthaimodel.py b/lib/python3.7/site-packages/pip/_vendor/chardet/langthaimodel.py
new file mode 100644
index 0000000000000000000000000000000000000000..15f94c2df021c9cccc761ebeec80146edbb000c9
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/langthaimodel.py
@@ -0,0 +1,199 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# 255: Control characters that usually do not exist in any text
+# 254: Carriage Return/Line Feed
+# 253: symbol (punctuation) that does not belong to a word
+# 252: 0 - 9
+
+# The following results for Thai were collected from a limited sample (1M).
+
+# Character Mapping Table:
+TIS620CharToOrderMap = (
+255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
+253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
+252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
+253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111,  # 40
+188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253,  # 50
+253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82,  # 60
+ 96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253,  # 70
+209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
+223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
+236,  5, 30,237, 24,238, 75,  8, 26, 52, 34, 51,119, 47, 58, 57,
+ 49, 53, 55, 43, 20, 19, 44, 14, 48,  3, 17, 25, 39, 62, 31, 54,
+ 45,  9, 16,  2, 61, 15,239, 12, 42, 46, 18, 21, 76,  4, 66, 63,
+ 22, 10,  1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
+ 11, 28, 41, 29, 33,245, 50, 37,  6,  7, 67, 77, 38, 93,246,247,
+ 68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
+)
+
+# Model Table:
+# total sequences: 100%
+# first 512 sequences: 92.6386%
+# first 1024 sequences: 7.3177%
+# rest  sequences:     1.0230%
+# negative sequences:  0.0436%
+ThaiLangModel = (
+0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
+0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
+3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
+0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
+3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
+3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
+3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
+3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
+3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
+3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
+3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
+2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
+3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
+0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
+3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
+0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
+3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
+1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
+3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
+3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
+1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
+0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
+2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
+0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
+3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
+2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
+3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
+0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
+3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
+3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
+2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
+3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
+2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
+3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
+3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
+3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
+3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
+3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
+1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
+0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
+0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
+3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
+3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
+1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
+3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
+3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
+0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
+0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
+1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
+1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
+3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
+0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
+0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
+0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
+3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
+3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
+0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
+0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
+0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
+0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
+0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
+0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
+0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
+3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
+0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
+0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
+3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
+2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
+0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
+3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
+0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
+2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
+1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
+1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
+1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
+1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+)
+
+TIS620ThaiModel = {
+  'char_to_order_map': TIS620CharToOrderMap,
+  'precedence_matrix': ThaiLangModel,
+  'typical_positive_ratio': 0.926386,
+  'keep_english_letter': False,
+  'charset_name': "TIS-620",
+  'language': 'Thai',
+}
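
The sentinel values documented at the top of this file (255/254/253/252) can be bucketed straight off the map; every other entry is a frequency rank, lower meaning more common. A small illustrative helper (the name is hypothetical):

from collections import Counter

def bucket_orders(order_map):
    labels = {255: 'control', 254: 'CR/LF', 253: 'symbol', 252: 'digit'}
    return Counter(labels.get(order, 'letter') for order in order_map)

# bucket_orders(TIS620CharToOrderMap) shows how much of the TIS-620 byte
# space actually feeds letters into the Thai sequence model.
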
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/langturkishmodel.py b/lib/python3.7/site-packages/pip/_vendor/chardet/langturkishmodel.py
new file mode 100644
index 0000000000000000000000000000000000000000..a427a457398de8076cdcefb5a6c391e89500bce8
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/langturkishmodel.py
@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#   Özgür Baskın - Turkish Language Model
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+# 255: Control characters that usually do not exist in any text
+# 254: Carriage Return/Line Feed
+# 253: symbol (punctuation) that does not belong to a word
+# 252: 0 - 9
+
+# Character Mapping Table:
+Latin5_TurkishCharToOrderMap = (
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,
+255, 23, 37, 47, 39, 29, 52, 36, 45, 53, 60, 16, 49, 20, 46, 42,
+ 48, 69, 44, 35, 31, 51, 38, 62, 65, 43, 56,255,255,255,255,255,
+255,  1, 21, 28, 12,  2, 18, 27, 25,  3, 24, 10,  5, 13,  4, 15,
+ 26, 64,  7,  8,  9, 14, 32, 57, 58, 11, 22,255,255,255,255,255,
+180,179,178,177,176,175,174,173,172,171,170,169,168,167,166,165,
+164,163,162,161,160,159,101,158,157,156,155,154,153,152,151,106,
+150,149,148,147,146,145,144,100,143,142,141,140,139,138,137,136,
+ 94, 80, 93,135,105,134,133, 63,132,131,130,129,128,127,126,125,
+124,104, 73, 99, 79, 85,123, 54,122, 98, 92,121,120, 91,103,119,
+ 68,118,117, 97,116,115, 50, 90,114,113,112,111, 55, 41, 40, 86,
+ 89, 70, 59, 78, 71, 82, 88, 33, 77, 66, 84, 83,110, 75, 61, 96,
+ 30, 67,109, 74, 87,102, 34, 95, 81,108, 76, 72, 17,  6, 19,107,
+)
+
+TurkishLangModel = (
+3,2,3,3,3,1,3,3,3,3,3,3,3,3,2,1,1,3,3,1,3,3,0,3,3,3,3,3,0,3,1,3,
+3,2,1,0,0,1,1,0,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1,
+3,2,2,3,3,0,3,3,3,3,3,3,3,2,3,1,0,3,3,1,3,3,0,3,3,3,3,3,0,3,0,3,
+3,1,1,0,1,0,1,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,2,2,0,0,0,1,0,1,
+3,3,2,3,3,0,3,3,3,3,3,3,3,2,3,1,1,3,3,0,3,3,1,2,3,3,3,3,0,3,0,3,
+3,1,1,0,0,0,1,0,0,0,0,1,1,0,1,2,1,0,0,0,1,0,0,0,0,2,0,0,0,0,0,1,
+3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,1,3,3,2,0,3,2,1,2,2,1,3,3,0,0,0,2,
+2,2,0,1,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,1,0,0,1,
+3,3,3,2,3,3,1,2,3,3,3,3,3,3,3,1,3,2,1,0,3,2,0,1,2,3,3,2,1,0,0,2,
+2,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,2,0,0,0,
+1,0,1,3,3,1,3,3,3,3,3,3,3,1,2,0,0,2,3,0,2,3,0,0,2,2,2,3,0,3,0,1,
+2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,0,3,2,0,2,3,2,3,3,1,0,0,2,
+3,2,0,0,1,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,1,1,1,0,2,0,0,1,
+3,3,3,2,3,3,2,3,3,3,3,2,3,3,3,0,3,3,0,0,2,1,0,0,2,3,2,2,0,0,0,2,
+2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,0,1,0,2,0,0,1,
+3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,0,1,3,2,1,1,3,2,3,2,1,0,0,2,
+2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,
+3,3,3,2,3,3,3,3,3,3,3,2,3,3,3,0,3,2,2,0,2,3,0,0,2,2,2,2,0,0,0,2,
+3,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0,
+3,3,3,3,3,3,3,2,2,2,2,3,2,3,3,0,3,3,1,1,2,2,0,0,2,2,3,2,0,0,1,3,
+0,3,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,
+3,3,3,2,3,3,3,2,1,2,2,3,2,3,3,0,3,2,0,0,1,1,0,1,1,2,1,2,0,0,0,1,
+0,3,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,1,0,0,0,
+3,3,3,2,3,3,2,3,2,2,2,3,3,3,3,1,3,1,1,0,3,2,1,1,3,3,2,3,1,0,0,1,
+1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,2,0,0,1,
+3,2,2,3,3,0,3,3,3,3,3,3,3,2,2,1,0,3,3,1,3,3,0,1,3,3,2,3,0,3,0,3,
+2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+2,2,2,3,3,0,3,3,3,3,3,3,3,3,3,0,0,3,2,0,3,3,0,3,2,3,3,3,0,3,1,3,
+2,0,0,0,0,0,0,0,0,0,0,1,0,1,2,0,1,0,0,0,0,0,0,0,2,2,0,0,1,0,0,1,
+3,3,3,1,2,3,3,1,0,0,1,0,0,3,3,2,3,0,0,2,0,0,2,0,2,0,0,0,2,0,2,0,
+0,3,1,0,1,0,0,0,2,2,1,0,1,1,2,1,2,2,2,0,2,1,1,0,0,0,2,0,0,0,0,0,
+1,2,1,3,3,0,3,3,3,3,3,2,3,0,0,0,0,2,3,0,2,3,1,0,2,3,1,3,0,3,0,2,
+3,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,1,3,3,2,2,3,2,2,0,1,2,3,0,1,2,1,0,1,0,0,0,1,0,2,2,0,0,0,1,
+1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,
+3,3,3,1,3,3,1,1,3,3,1,1,3,3,1,0,2,1,2,0,2,1,0,0,1,1,2,1,0,0,0,2,
+2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,1,0,2,1,3,0,0,2,0,0,3,3,0,3,0,0,1,0,1,2,0,0,1,1,2,2,0,1,0,
+0,1,2,1,1,0,1,0,1,1,1,1,1,0,1,1,1,2,2,1,2,0,1,0,0,0,0,0,0,1,0,0,
+3,3,3,2,3,2,3,3,0,2,2,2,3,3,3,0,3,0,0,0,2,2,0,1,2,1,1,1,0,0,0,1,
+0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
+3,3,3,3,3,3,2,1,2,2,3,3,3,3,2,0,2,0,0,0,2,2,0,0,2,1,3,3,0,0,1,1,
+1,1,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,
+1,1,2,3,3,0,3,3,3,3,3,3,2,2,0,2,0,2,3,2,3,2,2,2,2,2,2,2,1,3,2,3,
+2,0,2,1,2,2,2,2,1,1,2,2,1,2,2,1,2,0,0,2,1,1,0,2,1,0,0,1,0,0,0,1,
+2,3,3,1,1,1,0,1,1,1,2,3,2,1,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,
+0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,2,2,2,3,2,3,2,2,1,3,3,3,0,2,1,2,0,2,1,0,0,1,1,1,1,1,0,0,1,
+2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,2,0,1,0,0,0,
+3,3,3,2,3,3,3,3,3,2,3,1,2,3,3,1,2,0,0,0,0,0,0,0,3,2,1,1,0,0,0,0,
+2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+3,3,3,2,2,3,3,2,1,1,1,1,1,3,3,0,3,1,0,0,1,1,0,0,3,1,2,1,0,0,0,0,
+0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,
+3,3,3,2,2,3,2,2,2,3,2,1,1,3,3,0,3,0,0,0,0,1,0,0,3,1,1,2,0,0,0,1,
+1,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
+1,1,1,3,3,0,3,3,3,3,3,2,2,2,1,2,0,2,1,2,2,1,1,0,1,2,2,2,2,2,2,2,
+0,0,2,1,2,1,2,1,0,1,1,3,1,2,1,1,2,0,0,2,0,1,0,1,0,1,0,0,0,1,0,1,
+3,3,3,1,3,3,3,0,1,1,0,2,2,3,1,0,3,0,0,0,1,0,0,0,1,0,0,1,0,1,0,0,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,2,0,0,2,2,1,0,0,1,0,0,3,3,1,3,0,0,1,1,0,2,0,3,0,0,0,2,0,1,1,
+0,1,2,0,1,2,2,0,2,2,2,2,1,0,2,1,1,0,2,0,2,1,2,0,0,0,0,0,0,0,0,0,
+3,3,3,1,3,2,3,2,0,2,2,2,1,3,2,0,2,1,2,0,1,2,0,0,1,0,2,2,0,0,0,2,
+1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,
+3,3,3,0,3,3,1,1,2,3,1,0,3,2,3,0,3,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,
+1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,3,3,0,3,3,2,3,3,2,2,0,0,0,0,1,2,0,1,3,0,0,0,3,1,1,0,3,0,2,
+2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,1,2,2,1,0,3,1,1,1,1,3,3,2,3,0,0,1,0,1,2,0,2,2,0,2,2,0,2,1,
+0,2,2,1,1,1,1,0,2,1,1,0,1,1,1,1,2,1,2,1,2,0,1,0,1,0,0,0,0,0,0,0,
+3,3,3,0,1,1,3,0,0,1,1,0,0,2,2,0,3,0,0,1,1,0,1,0,0,0,0,0,2,0,0,0,
+0,3,1,0,1,0,1,0,2,0,0,1,0,1,0,1,1,1,2,1,1,0,2,0,0,0,0,0,0,0,0,0,
+3,3,3,0,2,0,2,0,1,1,1,0,0,3,3,0,2,0,0,1,0,0,2,1,1,0,1,0,1,0,1,0,
+0,2,0,1,2,0,2,0,2,1,1,0,1,0,2,1,1,0,2,1,1,0,1,0,0,0,1,1,0,0,0,0,
+3,2,3,0,1,0,0,0,0,0,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,0,2,0,0,0,
+0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,2,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,0,0,2,3,0,0,1,0,1,0,2,3,2,3,0,0,1,3,0,2,1,0,0,0,0,2,0,1,0,
+0,2,1,0,0,1,1,0,2,1,0,0,1,0,0,1,1,0,1,1,2,0,1,0,0,0,0,1,0,0,0,0,
+3,2,2,0,0,1,1,0,0,0,0,0,0,3,1,1,1,0,0,0,0,0,1,0,0,0,0,0,2,0,1,0,
+0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,
+0,0,0,3,3,0,2,3,2,2,1,2,2,1,1,2,0,1,3,2,2,2,0,0,2,2,0,0,0,1,2,1,
+3,0,2,1,1,0,1,1,1,0,1,2,2,2,1,1,2,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,
+0,1,1,2,3,0,3,3,3,2,2,2,2,1,0,1,0,1,0,1,2,2,0,0,2,2,1,3,1,1,2,1,
+0,0,1,1,2,0,1,1,0,0,1,2,0,2,1,1,2,0,0,1,0,0,0,1,0,1,0,1,0,0,0,0,
+3,3,2,0,0,3,1,0,0,0,0,0,0,3,2,1,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0,
+0,2,1,1,0,0,1,0,1,2,0,0,1,1,0,0,2,1,1,1,1,0,2,0,0,0,0,0,0,0,0,0,
+3,3,2,0,0,1,0,0,0,0,1,0,0,3,3,2,2,0,0,1,0,0,2,0,1,0,0,0,2,0,1,0,
+0,0,1,1,0,0,2,0,2,1,0,0,1,1,2,1,2,0,2,1,2,1,1,1,0,0,1,1,0,0,0,0,
+3,3,2,0,0,2,2,0,0,0,1,1,0,2,2,1,3,1,0,1,0,1,2,0,0,0,0,0,1,0,1,0,
+0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,2,0,0,0,1,0,0,1,0,0,2,3,1,2,0,0,1,0,0,2,0,0,0,1,0,2,0,2,0,
+0,1,1,2,2,1,2,0,2,1,1,0,0,1,1,0,1,1,1,1,2,1,1,0,0,0,0,0,0,0,0,0,
+3,3,3,0,2,1,2,1,0,0,1,1,0,3,3,1,2,0,0,1,0,0,2,0,2,0,1,1,2,0,0,0,
+0,0,1,1,1,1,2,0,1,1,0,1,1,1,1,0,0,0,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
+3,3,3,0,2,2,3,2,0,0,1,0,0,2,3,1,0,0,0,0,0,0,2,0,2,0,0,0,2,0,0,0,
+0,1,1,0,0,0,1,0,0,1,0,1,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,
+3,2,3,0,0,0,0,0,0,0,1,0,0,2,2,2,2,0,0,1,0,0,2,0,0,0,0,0,2,0,1,0,
+0,0,2,1,1,0,1,0,2,1,1,0,0,1,1,2,1,0,2,0,2,0,1,0,0,0,2,0,0,0,0,0,
+0,0,0,2,2,0,2,1,1,1,1,2,2,0,0,1,0,1,0,0,1,3,0,0,0,0,1,0,0,2,1,0,
+0,0,1,0,1,0,0,0,0,0,2,1,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
+2,0,0,2,3,0,2,3,1,2,2,0,2,0,0,2,0,2,1,1,1,2,1,0,0,1,2,1,1,2,1,0,
+1,0,2,0,1,0,1,1,0,0,2,2,1,2,1,1,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
+3,3,3,0,2,1,2,0,0,0,1,0,0,3,2,0,1,0,0,1,0,0,2,0,0,0,1,2,1,0,1,0,
+0,0,0,0,1,0,1,0,0,1,0,0,0,0,1,0,1,0,1,1,1,0,1,0,0,0,0,0,0,0,0,0,
+0,0,0,2,2,0,2,2,1,1,0,1,1,1,1,1,0,0,1,2,1,1,1,0,1,0,0,0,1,1,1,1,
+0,0,2,1,0,1,1,1,0,1,1,2,1,2,1,1,2,0,1,1,2,1,0,2,0,0,0,0,0,0,0,0,
+3,2,2,0,0,2,0,0,0,0,0,0,0,2,2,0,2,0,0,1,0,0,2,0,0,0,0,0,2,0,0,0,
+0,2,1,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,
+0,0,0,3,2,0,2,2,0,1,1,0,1,0,0,1,0,0,0,1,0,1,0,0,0,0,0,1,0,0,0,0,
+2,0,1,0,1,0,1,1,0,0,1,2,0,1,0,1,1,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,
+2,2,2,0,1,1,0,0,0,1,0,0,0,1,2,0,1,0,0,1,0,0,1,0,0,0,0,1,2,0,1,0,
+0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,2,1,0,1,1,1,0,0,0,0,1,2,0,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
+1,1,2,0,1,0,0,0,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,
+0,0,1,2,2,0,2,1,2,1,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,0,0,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
+2,2,2,0,0,0,1,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
+0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+2,2,2,0,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,1,0,0,0,0,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+)
+
+Latin5TurkishModel = {
+  'char_to_order_map': Latin5_TurkishCharToOrderMap,
+  'precedence_matrix': TurkishLangModel,
+  'typical_positive_ratio': 0.970290,
+  'keep_english_letter': True,
+  'charset_name': "ISO-8859-9",
+  'language': 'Turkish',
+}
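
One quirk visible in the table above: unlike the other maps in this package, the Turkish map flags the whole range up to and including '@' (0x40) as 255 rather than using the 254/253/252 sentinels, so control bytes, digits, and ASCII punctuation are all skipped uniformly. A quick self-check (illustrative, relying only on the table as printed):

# Every byte up to and including '@' (0x40) is flagged 255 in this map.
assert all(order == 255 for order in Latin5_TurkishCharToOrderMap[:0x41])
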
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/latin1prober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/latin1prober.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d1e8c20fb09ddaa0254ae74cbd4425ffdc5dcdc
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/latin1prober.py
@@ -0,0 +1,145 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#   Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState
+
+FREQ_CAT_NUM = 4
+
+UDF = 0  # undefined
+OTH = 1  # other
+ASC = 2  # ascii capital letter
+ASS = 3  # ascii small letter
+ACV = 4  # accent capital vowel
+ACO = 5  # accent capital other
+ASV = 6  # accent small vowel
+ASO = 7  # accent small other
+CLASS_NUM = 8  # total classes
+
+Latin1_CharToClass = (
+    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 00 - 07
+    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 08 - 0F
+    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 10 - 17
+    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 18 - 1F
+    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 20 - 27
+    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 28 - 2F
+    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 30 - 37
+    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 38 - 3F
+    OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 40 - 47
+    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 48 - 4F
+    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,   # 50 - 57
+    ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH,   # 58 - 5F
+    OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 60 - 67
+    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 68 - 6F
+    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,   # 70 - 77
+    ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH,   # 78 - 7F
+    OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH,   # 80 - 87
+    OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF,   # 88 - 8F
+    UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # 90 - 97
+    OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO,   # 98 - 9F
+    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A0 - A7
+    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # A8 - AF
+    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B0 - B7
+    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,   # B8 - BF
+    ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO,   # C0 - C7
+    ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV,   # C8 - CF
+    ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH,   # D0 - D7
+    ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO,   # D8 - DF
+    ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO,   # E0 - E7
+    ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV,   # E8 - EF
+    ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH,   # F0 - F7
+    ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO,   # F8 - FF
+)
+
+# 0 : illegal
+# 1 : very unlikely
+# 2 : normal
+# 3 : very likely
+Latin1ClassModel = (
+# UDF OTH ASC ASS ACV ACO ASV ASO
+    0,  0,  0,  0,  0,  0,  0,  0,  # UDF
+    0,  3,  3,  3,  3,  3,  3,  3,  # OTH
+    0,  3,  3,  3,  3,  3,  3,  3,  # ASC
+    0,  3,  3,  3,  1,  1,  3,  3,  # ASS
+    0,  3,  3,  3,  1,  2,  1,  2,  # ACV
+    0,  3,  3,  3,  3,  3,  3,  3,  # ACO
+    0,  3,  1,  3,  1,  1,  1,  3,  # ASV
+    0,  3,  1,  3,  1,  1,  3,  3,  # ASO
+)
+
+
+class Latin1Prober(CharSetProber):
+    def __init__(self):
+        super(Latin1Prober, self).__init__()
+        self._last_char_class = None
+        self._freq_counter = None
+        self.reset()
+
+    def reset(self):
+        self._last_char_class = OTH
+        self._freq_counter = [0] * FREQ_CAT_NUM
+        CharSetProber.reset(self)
+
+    @property
+    def charset_name(self):
+        return "ISO-8859-1"
+
+    @property
+    def language(self):
+        return ""
+
+    def feed(self, byte_str):
+        byte_str = self.filter_with_english_letters(byte_str)
+        for c in byte_str:
+            char_class = Latin1_CharToClass[c]
+            freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM)
+                                    + char_class]
+            if freq == 0:
+                self._state = ProbingState.NOT_ME
+                break
+            self._freq_counter[freq] += 1
+            self._last_char_class = char_class
+
+        return self.state
+
+    def get_confidence(self):
+        if self.state == ProbingState.NOT_ME:
+            return 0.01
+
+        total = sum(self._freq_counter)
+        if total < 0.01:
+            confidence = 0.0
+        else:
+            confidence = ((self._freq_counter[3] - self._freq_counter[1] * 20.0)
+                          / total)
+        if confidence < 0.0:
+            confidence = 0.0
+        # Lower the confidence of Latin-1 so that other, more accurate
+        # detectors can take priority.
+        confidence = confidence * 0.73
+        return confidence
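
A minimal exercise of the prober, using only the API defined above (the import path assumes this vendored layout):

from pip._vendor.chardet.latin1prober import Latin1Prober

prober = Latin1Prober()
# 0xE9/0xE0 are accented small vowels (ASV), a 'very likely' class when
# they follow ASCII small letters, per Latin1ClassModel above.
prober.feed(b'caf\xe9 au lait, d\xe9j\xe0 vu')
print(prober.charset_name, prober.get_confidence())  # ISO-8859-1, damped by 0.73
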
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/mbcharsetprober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/mbcharsetprober.py
new file mode 100644
index 0000000000000000000000000000000000000000..6256ecfd1e2c9ac4cfa3fac359cd12dce85b759c
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/mbcharsetprober.py
@@ -0,0 +1,91 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#   Shy Shalom - original C code
+#   Proofpoint, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState, MachineState
+
+
+class MultiByteCharSetProber(CharSetProber):
+    """
+    MultiByteCharSetProber
+    """
+
+    def __init__(self, lang_filter=None):
+        super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
+        self.distribution_analyzer = None
+        self.coding_sm = None
+        self._last_char = [0, 0]
+
+    def reset(self):
+        super(MultiByteCharSetProber, self).reset()
+        if self.coding_sm:
+            self.coding_sm.reset()
+        if self.distribution_analyzer:
+            self.distribution_analyzer.reset()
+        self._last_char = [0, 0]
+
+    @property
+    def charset_name(self):
+        raise NotImplementedError
+
+    @property
+    def language(self):
+        raise NotImplementedError
+
+    def feed(self, byte_str):
+        for i in range(len(byte_str)):
+            coding_state = self.coding_sm.next_state(byte_str[i])
+            if coding_state == MachineState.ERROR:
+                self.logger.debug('%s %s prober hit error at byte %s',
+                                  self.charset_name, self.language, i)
+                self._state = ProbingState.NOT_ME
+                break
+            elif coding_state == MachineState.ITS_ME:
+                self._state = ProbingState.FOUND_IT
+                break
+            elif coding_state == MachineState.START:
+                char_len = self.coding_sm.get_current_charlen()
+                if i == 0:
+                    self._last_char[1] = byte_str[0]
+                    self.distribution_analyzer.feed(self._last_char, char_len)
+                else:
+                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
+                                                    char_len)
+
+        self._last_char[0] = byte_str[-1]
+
+        if self.state == ProbingState.DETECTING:
+            if (self.distribution_analyzer.got_enough_data() and
+                    (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
+                self._state = ProbingState.FOUND_IT
+
+        return self.state
+
+    def get_confidence(self):
+        return self.distribution_analyzer.get_confidence()
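+
+# A minimal sketch of a concrete subclass (mirroring big5prober.Big5Prober
+# elsewhere in this package): wire in a coding state machine and a
+# distribution analyzer, then call reset().
+#
+#     from .codingstatemachine import CodingStateMachine
+#     from .chardistribution import Big5DistributionAnalysis
+#     from .mbcssm import BIG5_SM_MODEL
+#
+#     class Big5Prober(MultiByteCharSetProber):
+#         def __init__(self):
+#             super(Big5Prober, self).__init__()
+#             self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
+#             self.distribution_analyzer = Big5DistributionAnalysis()
+#             self.reset()
+#
+#         @property
+#         def charset_name(self):
+#             return "Big5"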
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/mbcsgroupprober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/mbcsgroupprober.py
new file mode 100644
index 0000000000000000000000000000000000000000..530abe75e0c00cbfcb2a310d872866f320977d0a
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/mbcsgroupprober.py
@@ -0,0 +1,54 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#   Shy Shalom - original C code
+#   Proofpoint, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetgroupprober import CharSetGroupProber
+from .utf8prober import UTF8Prober
+from .sjisprober import SJISProber
+from .eucjpprober import EUCJPProber
+from .gb2312prober import GB2312Prober
+from .euckrprober import EUCKRProber
+from .cp949prober import CP949Prober
+from .big5prober import Big5Prober
+from .euctwprober import EUCTWProber
+
+
+class MBCSGroupProber(CharSetGroupProber):
+    def __init__(self, lang_filter=None):
+        super(MBCSGroupProber, self).__init__(lang_filter=lang_filter)
+        self.probers = [
+            UTF8Prober(),
+            SJISProber(),
+            EUCJPProber(),
+            GB2312Prober(),
+            EUCKRProber(),
+            CP949Prober(),
+            Big5Prober(),
+            EUCTWProber()
+        ]
+        self.reset()
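+
+# Usage sketch (illustrative only): a group prober fans the same bytes out
+# to every member prober and tracks the best candidate, so it can be
+# queried directly.
+#
+#     prober = MBCSGroupProber()
+#     prober.feed(b'\xe3\x81\x93\xe3\x82\x93\xe3\x81\xab\xe3\x81\xa1\xe3\x81\xaf')  # UTF-8 Japanese
+#     print(prober.charset_name, prober.get_confidence())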
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/mbcssm.py b/lib/python3.7/site-packages/pip/_vendor/chardet/mbcssm.py
new file mode 100644
index 0000000000000000000000000000000000000000..8360d0f284ef394f2980b5bb89548e234385cdf1
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/mbcssm.py
@@ -0,0 +1,572 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .enums import MachineState
+
+# BIG5
+
+BIG5_CLS = (
+    1,1,1,1,1,1,1,1,  # 00 - 07    #allow 0x00 as legal value
+    1,1,1,1,1,1,0,0,  # 08 - 0f
+    1,1,1,1,1,1,1,1,  # 10 - 17
+    1,1,1,0,1,1,1,1,  # 18 - 1f
+    1,1,1,1,1,1,1,1,  # 20 - 27
+    1,1,1,1,1,1,1,1,  # 28 - 2f
+    1,1,1,1,1,1,1,1,  # 30 - 37
+    1,1,1,1,1,1,1,1,  # 38 - 3f
+    2,2,2,2,2,2,2,2,  # 40 - 47
+    2,2,2,2,2,2,2,2,  # 48 - 4f
+    2,2,2,2,2,2,2,2,  # 50 - 57
+    2,2,2,2,2,2,2,2,  # 58 - 5f
+    2,2,2,2,2,2,2,2,  # 60 - 67
+    2,2,2,2,2,2,2,2,  # 68 - 6f
+    2,2,2,2,2,2,2,2,  # 70 - 77
+    2,2,2,2,2,2,2,1,  # 78 - 7f
+    4,4,4,4,4,4,4,4,  # 80 - 87
+    4,4,4,4,4,4,4,4,  # 88 - 8f
+    4,4,4,4,4,4,4,4,  # 90 - 97
+    4,4,4,4,4,4,4,4,  # 98 - 9f
+    4,3,3,3,3,3,3,3,  # a0 - a7
+    3,3,3,3,3,3,3,3,  # a8 - af
+    3,3,3,3,3,3,3,3,  # b0 - b7
+    3,3,3,3,3,3,3,3,  # b8 - bf
+    3,3,3,3,3,3,3,3,  # c0 - c7
+    3,3,3,3,3,3,3,3,  # c8 - cf
+    3,3,3,3,3,3,3,3,  # d0 - d7
+    3,3,3,3,3,3,3,3,  # d8 - df
+    3,3,3,3,3,3,3,3,  # e0 - e7
+    3,3,3,3,3,3,3,3,  # e8 - ef
+    3,3,3,3,3,3,3,3,  # f0 - f7
+    3,3,3,3,3,3,3,0  # f8 - ff
+)
+
+BIG5_ST = (
+    MachineState.ERROR,MachineState.START,MachineState.START,     3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+    MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
+    MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
+)
+
+BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)
+
+BIG5_SM_MODEL = {'class_table': BIG5_CLS,
+                 'class_factor': 5,
+                 'state_table': BIG5_ST,
+                 'char_len_table': BIG5_CHAR_LEN_TABLE,
+                 'name': 'Big5'}
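+
+# Every *_SM_MODEL in this module shares this shape: 'class_table' maps each
+# of the 256 byte values to a byte class, 'state_table' is a flattened
+# (state x class) transition matrix whose row width is 'class_factor', and
+# 'char_len_table' gives the expected total character length for the class
+# that opened the current character.  A rough sketch of the lookup performed
+# by codingstatemachine.CodingStateMachine for each incoming byte:
+#
+#     byte_class = model['class_table'][byte]
+#     curr_state = model['state_table'][curr_state * model['class_factor']
+#                                       + byte_class]
+#     # MachineState.ERROR / ITS_ME / START are the outcomes of interest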
+
+# CP949
+
+CP949_CLS = (
+    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0,  # 00 - 0f
+    1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1,  # 10 - 1f
+    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,  # 20 - 2f
+    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,  # 30 - 3f
+    1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4,  # 40 - 4f
+    4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1,  # 50 - 5f
+    1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5,  # 60 - 6f
+    5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1,  # 70 - 7f
+    0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,  # 80 - 8f
+    6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,  # 90 - 9f
+    6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8,  # a0 - af
+    7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,  # b0 - bf
+    7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2,  # c0 - cf
+    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,  # d0 - df
+    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,  # e0 - ef
+    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0,  # f0 - ff
+)
+
+CP949_ST = (
+#cls=    0      1      2      3      4      5      6      7      8      9  # previous state =
+    MachineState.ERROR,MachineState.START,     3,MachineState.ERROR,MachineState.START,MachineState.START,     4,     5,MachineState.ERROR,     6, # MachineState.START
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR
+    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
+    MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3
+    MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4
+    MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
+    MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
+)
+
+CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
+
+CP949_SM_MODEL = {'class_table': CP949_CLS,
+                  'class_factor': 10,
+                  'state_table': CP949_ST,
+                  'char_len_table': CP949_CHAR_LEN_TABLE,
+                  'name': 'CP949'}
+
+# EUC-JP
+
+EUCJP_CLS = (
+    4,4,4,4,4,4,4,4,  # 00 - 07
+    4,4,4,4,4,4,5,5,  # 08 - 0f
+    4,4,4,4,4,4,4,4,  # 10 - 17
+    4,4,4,5,4,4,4,4,  # 18 - 1f
+    4,4,4,4,4,4,4,4,  # 20 - 27
+    4,4,4,4,4,4,4,4,  # 28 - 2f
+    4,4,4,4,4,4,4,4,  # 30 - 37
+    4,4,4,4,4,4,4,4,  # 38 - 3f
+    4,4,4,4,4,4,4,4,  # 40 - 47
+    4,4,4,4,4,4,4,4,  # 48 - 4f
+    4,4,4,4,4,4,4,4,  # 50 - 57
+    4,4,4,4,4,4,4,4,  # 58 - 5f
+    4,4,4,4,4,4,4,4,  # 60 - 67
+    4,4,4,4,4,4,4,4,  # 68 - 6f
+    4,4,4,4,4,4,4,4,  # 70 - 77
+    4,4,4,4,4,4,4,4,  # 78 - 7f
+    5,5,5,5,5,5,5,5,  # 80 - 87
+    5,5,5,5,5,5,1,3,  # 88 - 8f
+    5,5,5,5,5,5,5,5,  # 90 - 97
+    5,5,5,5,5,5,5,5,  # 98 - 9f
+    5,2,2,2,2,2,2,2,  # a0 - a7
+    2,2,2,2,2,2,2,2,  # a8 - af
+    2,2,2,2,2,2,2,2,  # b0 - b7
+    2,2,2,2,2,2,2,2,  # b8 - bf
+    2,2,2,2,2,2,2,2,  # c0 - c7
+    2,2,2,2,2,2,2,2,  # c8 - cf
+    2,2,2,2,2,2,2,2,  # d0 - d7
+    2,2,2,2,2,2,2,2,  # d8 - df
+    0,0,0,0,0,0,0,0,  # e0 - e7
+    0,0,0,0,0,0,0,0,  # e8 - ef
+    0,0,0,0,0,0,0,0,  # f0 - f7
+    0,0,0,0,0,0,0,5  # f8 - ff
+)
+
+EUCJP_ST = (
+          3,     4,     3,     5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+     MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+     MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
+     MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     3,MachineState.ERROR,#18-1f
+          3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
+)
+
+EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)
+
+EUCJP_SM_MODEL = {'class_table': EUCJP_CLS,
+                  'class_factor': 6,
+                  'state_table': EUCJP_ST,
+                  'char_len_table': EUCJP_CHAR_LEN_TABLE,
+                  'name': 'EUC-JP'}
+
+# EUC-KR
+
+EUCKR_CLS = (
+    1,1,1,1,1,1,1,1,  # 00 - 07
+    1,1,1,1,1,1,0,0,  # 08 - 0f
+    1,1,1,1,1,1,1,1,  # 10 - 17
+    1,1,1,0,1,1,1,1,  # 18 - 1f
+    1,1,1,1,1,1,1,1,  # 20 - 27
+    1,1,1,1,1,1,1,1,  # 28 - 2f
+    1,1,1,1,1,1,1,1,  # 30 - 37
+    1,1,1,1,1,1,1,1,  # 38 - 3f
+    1,1,1,1,1,1,1,1,  # 40 - 47
+    1,1,1,1,1,1,1,1,  # 48 - 4f
+    1,1,1,1,1,1,1,1,  # 50 - 57
+    1,1,1,1,1,1,1,1,  # 58 - 5f
+    1,1,1,1,1,1,1,1,  # 60 - 67
+    1,1,1,1,1,1,1,1,  # 68 - 6f
+    1,1,1,1,1,1,1,1,  # 70 - 77
+    1,1,1,1,1,1,1,1,  # 78 - 7f
+    0,0,0,0,0,0,0,0,  # 80 - 87
+    0,0,0,0,0,0,0,0,  # 88 - 8f
+    0,0,0,0,0,0,0,0,  # 90 - 97
+    0,0,0,0,0,0,0,0,  # 98 - 9f
+    0,2,2,2,2,2,2,2,  # a0 - a7
+    2,2,2,2,2,3,3,3,  # a8 - af
+    2,2,2,2,2,2,2,2,  # b0 - b7
+    2,2,2,2,2,2,2,2,  # b8 - bf
+    2,2,2,2,2,2,2,2,  # c0 - c7
+    2,3,2,2,2,2,2,2,  # c8 - cf
+    2,2,2,2,2,2,2,2,  # d0 - d7
+    2,2,2,2,2,2,2,2,  # d8 - df
+    2,2,2,2,2,2,2,2,  # e0 - e7
+    2,2,2,2,2,2,2,2,  # e8 - ef
+    2,2,2,2,2,2,2,2,  # f0 - f7
+    2,2,2,2,2,2,2,0   # f8 - ff
+)
+
+EUCKR_ST = (
+    MachineState.ERROR,MachineState.START,     3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
+)
+
+EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)
+
+EUCKR_SM_MODEL = {'class_table': EUCKR_CLS,
+                  'class_factor': 4,
+                  'state_table': EUCKR_ST,
+                  'char_len_table': EUCKR_CHAR_LEN_TABLE,
+                  'name': 'EUC-KR'}
+
+# EUC-TW
+
+EUCTW_CLS = (
+    2,2,2,2,2,2,2,2,  # 00 - 07
+    2,2,2,2,2,2,0,0,  # 08 - 0f
+    2,2,2,2,2,2,2,2,  # 10 - 17
+    2,2,2,0,2,2,2,2,  # 18 - 1f
+    2,2,2,2,2,2,2,2,  # 20 - 27
+    2,2,2,2,2,2,2,2,  # 28 - 2f
+    2,2,2,2,2,2,2,2,  # 30 - 37
+    2,2,2,2,2,2,2,2,  # 38 - 3f
+    2,2,2,2,2,2,2,2,  # 40 - 47
+    2,2,2,2,2,2,2,2,  # 48 - 4f
+    2,2,2,2,2,2,2,2,  # 50 - 57
+    2,2,2,2,2,2,2,2,  # 58 - 5f
+    2,2,2,2,2,2,2,2,  # 60 - 67
+    2,2,2,2,2,2,2,2,  # 68 - 6f
+    2,2,2,2,2,2,2,2,  # 70 - 77
+    2,2,2,2,2,2,2,2,  # 78 - 7f
+    0,0,0,0,0,0,0,0,  # 80 - 87
+    0,0,0,0,0,0,6,0,  # 88 - 8f
+    0,0,0,0,0,0,0,0,  # 90 - 97
+    0,0,0,0,0,0,0,0,  # 98 - 9f
+    0,3,4,4,4,4,4,4,  # a0 - a7
+    5,5,1,1,1,1,1,1,  # a8 - af
+    1,1,1,1,1,1,1,1,  # b0 - b7
+    1,1,1,1,1,1,1,1,  # b8 - bf
+    1,1,3,1,3,3,3,3,  # c0 - c7
+    3,3,3,3,3,3,3,3,  # c8 - cf
+    3,3,3,3,3,3,3,3,  # d0 - d7
+    3,3,3,3,3,3,3,3,  # d8 - df
+    3,3,3,3,3,3,3,3,  # e0 - e7
+    3,3,3,3,3,3,3,3,  # e8 - ef
+    3,3,3,3,3,3,3,3,  # f0 - f7
+    3,3,3,3,3,3,3,0   # f8 - ff
+)
+
+EUCTW_ST = (
+    MachineState.ERROR,MachineState.ERROR,MachineState.START,     3,     3,     3,     4,MachineState.ERROR,#00-07
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17
+    MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
+         5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
+    MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
+)
+
+EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)
+
+EUCTW_SM_MODEL = {'class_table': EUCTW_CLS,
+                  'class_factor': 7,
+                  'state_table': EUCTW_ST,
+                  'char_len_table': EUCTW_CHAR_LEN_TABLE,
+                  'name': 'x-euc-tw'}
+
+# GB2312
+
+GB2312_CLS = (
+    1,1,1,1,1,1,1,1,  # 00 - 07
+    1,1,1,1,1,1,0,0,  # 08 - 0f
+    1,1,1,1,1,1,1,1,  # 10 - 17
+    1,1,1,0,1,1,1,1,  # 18 - 1f
+    1,1,1,1,1,1,1,1,  # 20 - 27
+    1,1,1,1,1,1,1,1,  # 28 - 2f
+    3,3,3,3,3,3,3,3,  # 30 - 37
+    3,3,1,1,1,1,1,1,  # 38 - 3f
+    2,2,2,2,2,2,2,2,  # 40 - 47
+    2,2,2,2,2,2,2,2,  # 48 - 4f
+    2,2,2,2,2,2,2,2,  # 50 - 57
+    2,2,2,2,2,2,2,2,  # 58 - 5f
+    2,2,2,2,2,2,2,2,  # 60 - 67
+    2,2,2,2,2,2,2,2,  # 68 - 6f
+    2,2,2,2,2,2,2,2,  # 70 - 77
+    2,2,2,2,2,2,2,4,  # 78 - 7f
+    5,6,6,6,6,6,6,6,  # 80 - 87
+    6,6,6,6,6,6,6,6,  # 88 - 8f
+    6,6,6,6,6,6,6,6,  # 90 - 97
+    6,6,6,6,6,6,6,6,  # 98 - 9f
+    6,6,6,6,6,6,6,6,  # a0 - a7
+    6,6,6,6,6,6,6,6,  # a8 - af
+    6,6,6,6,6,6,6,6,  # b0 - b7
+    6,6,6,6,6,6,6,6,  # b8 - bf
+    6,6,6,6,6,6,6,6,  # c0 - c7
+    6,6,6,6,6,6,6,6,  # c8 - cf
+    6,6,6,6,6,6,6,6,  # d0 - d7
+    6,6,6,6,6,6,6,6,  # d8 - df
+    6,6,6,6,6,6,6,6,  # e0 - e7
+    6,6,6,6,6,6,6,6,  # e8 - ef
+    6,6,6,6,6,6,6,6,  # f0 - f7
+    6,6,6,6,6,6,6,0   # f8 - ff
+)
+
+GB2312_ST = (
+    MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,     3,MachineState.ERROR,#00-07
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17
+         4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
+    MachineState.ERROR,MachineState.ERROR,     5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
+    MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
+)
+
+# To be accurate, the length of class 6 can be either 2 or 4.
+# But it is not necessary to discriminate between the two since
+# it is used for frequency analysis only, and we are validating
+# each code range there as well, so it is safe to set it to 2 here.
+GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)
+
+GB2312_SM_MODEL = {'class_table': GB2312_CLS,
+                   'class_factor': 7,
+                   'state_table': GB2312_ST,
+                   'char_len_table': GB2312_CHAR_LEN_TABLE,
+                   'name': 'GB2312'}
+
+# Shift_JIS
+
+SJIS_CLS = (
+    1,1,1,1,1,1,1,1,  # 00 - 07
+    1,1,1,1,1,1,0,0,  # 08 - 0f
+    1,1,1,1,1,1,1,1,  # 10 - 17
+    1,1,1,0,1,1,1,1,  # 18 - 1f
+    1,1,1,1,1,1,1,1,  # 20 - 27
+    1,1,1,1,1,1,1,1,  # 28 - 2f
+    1,1,1,1,1,1,1,1,  # 30 - 37
+    1,1,1,1,1,1,1,1,  # 38 - 3f
+    2,2,2,2,2,2,2,2,  # 40 - 47
+    2,2,2,2,2,2,2,2,  # 48 - 4f
+    2,2,2,2,2,2,2,2,  # 50 - 57
+    2,2,2,2,2,2,2,2,  # 58 - 5f
+    2,2,2,2,2,2,2,2,  # 60 - 67
+    2,2,2,2,2,2,2,2,  # 68 - 6f
+    2,2,2,2,2,2,2,2,  # 70 - 77
+    2,2,2,2,2,2,2,1,  # 78 - 7f
+    3,3,3,3,3,2,2,3,  # 80 - 87
+    3,3,3,3,3,3,3,3,  # 88 - 8f
+    3,3,3,3,3,3,3,3,  # 90 - 97
+    3,3,3,3,3,3,3,3,  # 98 - 9f
+    # 0xa0 is illegal in Shift_JIS, but some pages do contain
+    # such bytes, so we need to be more forgiving of errors.
+    2,2,2,2,2,2,2,2,  # a0 - a7
+    2,2,2,2,2,2,2,2,  # a8 - af
+    2,2,2,2,2,2,2,2,  # b0 - b7
+    2,2,2,2,2,2,2,2,  # b8 - bf
+    2,2,2,2,2,2,2,2,  # c0 - c7
+    2,2,2,2,2,2,2,2,  # c8 - cf
+    2,2,2,2,2,2,2,2,  # d0 - d7
+    2,2,2,2,2,2,2,2,  # d8 - df
+    3,3,3,3,3,3,3,3,  # e0 - e7
+    3,3,3,3,3,4,4,4,  # e8 - ef
+    3,3,3,3,3,3,3,3,  # f0 - f7
+    3,3,3,3,3,0,0,0  # f8 - ff
+)
+
+SJIS_ST = (
+    MachineState.ERROR,MachineState.START,MachineState.START,     3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
+)
+
+SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)
+
+SJIS_SM_MODEL = {'class_table': SJIS_CLS,
+                 'class_factor': 6,
+                 'state_table': SJIS_ST,
+                 'char_len_table': SJIS_CHAR_LEN_TABLE,
+                 'name': 'Shift_JIS'}
+
+# UCS2-BE
+
+UCS2BE_CLS = (
+    0,0,0,0,0,0,0,0,  # 00 - 07
+    0,0,1,0,0,2,0,0,  # 08 - 0f
+    0,0,0,0,0,0,0,0,  # 10 - 17
+    0,0,0,3,0,0,0,0,  # 18 - 1f
+    0,0,0,0,0,0,0,0,  # 20 - 27
+    0,3,3,3,3,3,0,0,  # 28 - 2f
+    0,0,0,0,0,0,0,0,  # 30 - 37
+    0,0,0,0,0,0,0,0,  # 38 - 3f
+    0,0,0,0,0,0,0,0,  # 40 - 47
+    0,0,0,0,0,0,0,0,  # 48 - 4f
+    0,0,0,0,0,0,0,0,  # 50 - 57
+    0,0,0,0,0,0,0,0,  # 58 - 5f
+    0,0,0,0,0,0,0,0,  # 60 - 67
+    0,0,0,0,0,0,0,0,  # 68 - 6f
+    0,0,0,0,0,0,0,0,  # 70 - 77
+    0,0,0,0,0,0,0,0,  # 78 - 7f
+    0,0,0,0,0,0,0,0,  # 80 - 87
+    0,0,0,0,0,0,0,0,  # 88 - 8f
+    0,0,0,0,0,0,0,0,  # 90 - 97
+    0,0,0,0,0,0,0,0,  # 98 - 9f
+    0,0,0,0,0,0,0,0,  # a0 - a7
+    0,0,0,0,0,0,0,0,  # a8 - af
+    0,0,0,0,0,0,0,0,  # b0 - b7
+    0,0,0,0,0,0,0,0,  # b8 - bf
+    0,0,0,0,0,0,0,0,  # c0 - c7
+    0,0,0,0,0,0,0,0,  # c8 - cf
+    0,0,0,0,0,0,0,0,  # d0 - d7
+    0,0,0,0,0,0,0,0,  # d8 - df
+    0,0,0,0,0,0,0,0,  # e0 - e7
+    0,0,0,0,0,0,0,0,  # e8 - ef
+    0,0,0,0,0,0,0,0,  # f0 - f7
+    0,0,0,0,0,0,4,5   # f8 - ff
+)
+
+UCS2BE_ST = (
+          5,     7,     7,MachineState.ERROR,     4,     3,MachineState.ERROR,MachineState.ERROR,#00-07
+     MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+     MachineState.ITS_ME,MachineState.ITS_ME,     6,     6,     6,     6,MachineState.ERROR,MachineState.ERROR,#10-17
+          6,     6,     6,     6,     6,MachineState.ITS_ME,     6,     6,#18-1f
+          6,     6,     6,     6,     5,     7,     7,MachineState.ERROR,#20-27
+          5,     8,     6,     6,MachineState.ERROR,     6,     6,     6,#28-2f
+          6,     6,     6,     6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
+)
+
+UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)
+
+UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS,
+                   'class_factor': 6,
+                   'state_table': UCS2BE_ST,
+                   'char_len_table': UCS2BE_CHAR_LEN_TABLE,
+                   'name': 'UTF-16BE'}
+
+# UCS2-LE
+
+UCS2LE_CLS = (
+    0,0,0,0,0,0,0,0,  # 00 - 07
+    0,0,1,0,0,2,0,0,  # 08 - 0f
+    0,0,0,0,0,0,0,0,  # 10 - 17
+    0,0,0,3,0,0,0,0,  # 18 - 1f
+    0,0,0,0,0,0,0,0,  # 20 - 27
+    0,3,3,3,3,3,0,0,  # 28 - 2f
+    0,0,0,0,0,0,0,0,  # 30 - 37
+    0,0,0,0,0,0,0,0,  # 38 - 3f
+    0,0,0,0,0,0,0,0,  # 40 - 47
+    0,0,0,0,0,0,0,0,  # 48 - 4f
+    0,0,0,0,0,0,0,0,  # 50 - 57
+    0,0,0,0,0,0,0,0,  # 58 - 5f
+    0,0,0,0,0,0,0,0,  # 60 - 67
+    0,0,0,0,0,0,0,0,  # 68 - 6f
+    0,0,0,0,0,0,0,0,  # 70 - 77
+    0,0,0,0,0,0,0,0,  # 78 - 7f
+    0,0,0,0,0,0,0,0,  # 80 - 87
+    0,0,0,0,0,0,0,0,  # 88 - 8f
+    0,0,0,0,0,0,0,0,  # 90 - 97
+    0,0,0,0,0,0,0,0,  # 98 - 9f
+    0,0,0,0,0,0,0,0,  # a0 - a7
+    0,0,0,0,0,0,0,0,  # a8 - af
+    0,0,0,0,0,0,0,0,  # b0 - b7
+    0,0,0,0,0,0,0,0,  # b8 - bf
+    0,0,0,0,0,0,0,0,  # c0 - c7
+    0,0,0,0,0,0,0,0,  # c8 - cf
+    0,0,0,0,0,0,0,0,  # d0 - d7
+    0,0,0,0,0,0,0,0,  # d8 - df
+    0,0,0,0,0,0,0,0,  # e0 - e7
+    0,0,0,0,0,0,0,0,  # e8 - ef
+    0,0,0,0,0,0,0,0,  # f0 - f7
+    0,0,0,0,0,0,4,5   # f8 - ff
+)
+
+UCS2LE_ST = (
+          6,     6,     7,     6,     4,     3,MachineState.ERROR,MachineState.ERROR,#00-07
+     MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+     MachineState.ITS_ME,MachineState.ITS_ME,     5,     5,     5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17
+          5,     5,     5,MachineState.ERROR,     5,MachineState.ERROR,     6,     6,#18-1f
+          7,     6,     8,     8,     5,     5,     5,MachineState.ERROR,#20-27
+          5,     5,     5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     5,     5,#28-2f
+          5,     5,     5,MachineState.ERROR,     5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
+)
+
+UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)
+
+UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS,
+                   'class_factor': 6,
+                   'state_table': UCS2LE_ST,
+                   'char_len_table': UCS2LE_CHAR_LEN_TABLE,
+                   'name': 'UTF-16LE'}
+
+# UTF-8
+
+UTF8_CLS = (
+    1,1,1,1,1,1,1,1,  # 00 - 07  #allow 0x00 as a legal value
+    1,1,1,1,1,1,0,0,  # 08 - 0f
+    1,1,1,1,1,1,1,1,  # 10 - 17
+    1,1,1,0,1,1,1,1,  # 18 - 1f
+    1,1,1,1,1,1,1,1,  # 20 - 27
+    1,1,1,1,1,1,1,1,  # 28 - 2f
+    1,1,1,1,1,1,1,1,  # 30 - 37
+    1,1,1,1,1,1,1,1,  # 38 - 3f
+    1,1,1,1,1,1,1,1,  # 40 - 47
+    1,1,1,1,1,1,1,1,  # 48 - 4f
+    1,1,1,1,1,1,1,1,  # 50 - 57
+    1,1,1,1,1,1,1,1,  # 58 - 5f
+    1,1,1,1,1,1,1,1,  # 60 - 67
+    1,1,1,1,1,1,1,1,  # 68 - 6f
+    1,1,1,1,1,1,1,1,  # 70 - 77
+    1,1,1,1,1,1,1,1,  # 78 - 7f
+    2,2,2,2,3,3,3,3,  # 80 - 87
+    4,4,4,4,4,4,4,4,  # 88 - 8f
+    4,4,4,4,4,4,4,4,  # 90 - 97
+    4,4,4,4,4,4,4,4,  # 98 - 9f
+    5,5,5,5,5,5,5,5,  # a0 - a7
+    5,5,5,5,5,5,5,5,  # a8 - af
+    5,5,5,5,5,5,5,5,  # b0 - b7
+    5,5,5,5,5,5,5,5,  # b8 - bf
+    0,0,6,6,6,6,6,6,  # c0 - c7
+    6,6,6,6,6,6,6,6,  # c8 - cf
+    6,6,6,6,6,6,6,6,  # d0 - d7
+    6,6,6,6,6,6,6,6,  # d8 - df
+    7,8,8,8,8,8,8,8,  # e0 - e7
+    8,8,8,8,8,9,8,8,  # e8 - ef
+    10,11,11,11,11,11,11,11,  # f0 - f7
+    12,13,13,13,14,15,0,0    # f8 - ff
+)
+
+UTF8_ST = (
+    MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     12,   10,#00-07
+         9,     11,     8,     7,     6,     5,     4,    3,#08-0f
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
+    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27
+    MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f
+    MachineState.ERROR,MachineState.ERROR,     5,     5,     5,     5,MachineState.ERROR,MachineState.ERROR,#30-37
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     5,     5,     5,MachineState.ERROR,MachineState.ERROR,#40-47
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f
+    MachineState.ERROR,MachineState.ERROR,     7,     7,     7,     7,MachineState.ERROR,MachineState.ERROR,#50-57
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     7,     7,MachineState.ERROR,MachineState.ERROR,#60-67
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f
+    MachineState.ERROR,MachineState.ERROR,     9,     9,     9,     9,MachineState.ERROR,MachineState.ERROR,#70-77
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,     9,MachineState.ERROR,MachineState.ERROR,#80-87
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f
+    MachineState.ERROR,MachineState.ERROR,    12,    12,    12,    12,MachineState.ERROR,MachineState.ERROR,#90-97
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,    12,MachineState.ERROR,MachineState.ERROR,#a0-a7
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af
+    MachineState.ERROR,MachineState.ERROR,    12,    12,    12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf
+    MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
+    MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
+)
+
+UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
+
+UTF8_SM_MODEL = {'class_table': UTF8_CLS,
+                 'class_factor': 16,
+                 'state_table': UTF8_ST,
+                 'char_len_table': UTF8_CHAR_LEN_TABLE,
+                 'name': 'UTF-8'}
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/sbcharsetprober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/sbcharsetprober.py
new file mode 100644
index 0000000000000000000000000000000000000000..0adb51de5a210aa36849cd149ed0f4ae424fce42
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/sbcharsetprober.py
@@ -0,0 +1,132 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#   Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import CharacterCategory, ProbingState, SequenceLikelihood
+
+
+class SingleByteCharSetProber(CharSetProber):
+    SAMPLE_SIZE = 64
+    SB_ENOUGH_REL_THRESHOLD = 1024  #  0.25 * SAMPLE_SIZE^2
+    POSITIVE_SHORTCUT_THRESHOLD = 0.95
+    NEGATIVE_SHORTCUT_THRESHOLD = 0.05
+
+    def __init__(self, model, reversed=False, name_prober=None):
+        super(SingleByteCharSetProber, self).__init__()
+        self._model = model
+        # TRUE if we need to reverse every pair in the model lookup
+        self._reversed = reversed
+        # Optional auxiliary prober for name decision
+        self._name_prober = name_prober
+        self._last_order = None
+        self._seq_counters = None
+        self._total_seqs = None
+        self._total_char = None
+        self._freq_char = None
+        self.reset()
+
+    def reset(self):
+        super(SingleByteCharSetProber, self).reset()
+        # char order of last character
+        self._last_order = 255
+        self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
+        self._total_seqs = 0
+        self._total_char = 0
+        # characters that fall in our sampling range
+        self._freq_char = 0
+
+    @property
+    def charset_name(self):
+        if self._name_prober:
+            return self._name_prober.charset_name
+        else:
+            return self._model['charset_name']
+
+    @property
+    def language(self):
+        if self._name_prober:
+            return self._name_prober.language
+        else:
+            return self._model.get('language')
+
+    def feed(self, byte_str):
+        if not self._model['keep_english_letter']:
+            byte_str = self.filter_international_words(byte_str)
+        if not byte_str:
+            return self.state
+        char_to_order_map = self._model['char_to_order_map']
+        for c in byte_str:
+            # XXX: Order is in range 1-64, so one would think we want 0-63 here,
+            #      but that leads to 27 more test failures than before.
+            order = char_to_order_map[c]
+            # XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but
+            #      CharacterCategory.SYMBOL is actually 253, so we use CONTROL
+            #      to make it closer to the original intent. The only difference
+            #      is whether or not we count digits and control characters for
+            #      _total_char purposes.
+            if order < CharacterCategory.CONTROL:
+                self._total_char += 1
+            if order < self.SAMPLE_SIZE:
+                self._freq_char += 1
+                if self._last_order < self.SAMPLE_SIZE:
+                    self._total_seqs += 1
+                    if not self._reversed:
+                        lookup = (self._last_order * self.SAMPLE_SIZE) + order
+                    else:  # reverse the order of the letters in the lookup
+                        lookup = (order * self.SAMPLE_SIZE) + self._last_order
+                    model = self._model['precedence_matrix'][lookup]
+                    self._seq_counters[model] += 1
+            self._last_order = order
+
+        charset_name = self._model['charset_name']
+        if self.state == ProbingState.DETECTING:
+            if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
+                confidence = self.get_confidence()
+                if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
+                    self.logger.debug('%s confidence = %s, we have a winner',
+                                      charset_name, confidence)
+                    self._state = ProbingState.FOUND_IT
+                elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
+                    self.logger.debug('%s confidence = %s, below negative '
+                                      'shortcut threshold %s', charset_name,
+                                      confidence,
+                                      self.NEGATIVE_SHORTCUT_THRESHOLD)
+                    self._state = ProbingState.NOT_ME
+
+        return self.state
+
+    def get_confidence(self):
+        r = 0.01
+        if self._total_seqs > 0:
+            r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) /
+                 self._total_seqs / self._model['typical_positive_ratio'])
+            r = r * self._freq_char / self._total_char
+            if r >= 1.0:
+                r = 0.99
+        return r
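+
+# Worked example with made-up counts, assuming typical_positive_ratio = 0.95:
+# 300 positive sequences out of 400 total, with 90% of characters falling
+# inside the sampling range, yields
+#     (300 / 400) / 0.95 * 0.9  ~=  0.71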
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/sbcsgroupprober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/sbcsgroupprober.py
new file mode 100644
index 0000000000000000000000000000000000000000..98e95dc1a3cbc65e97bc726ab7000955132719dd
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/sbcsgroupprober.py
@@ -0,0 +1,73 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#   Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetgroupprober import CharSetGroupProber
+from .sbcharsetprober import SingleByteCharSetProber
+from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
+                                Latin5CyrillicModel, MacCyrillicModel,
+                                Ibm866Model, Ibm855Model)
+from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
+from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
+# from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
+from .langthaimodel import TIS620ThaiModel
+from .langhebrewmodel import Win1255HebrewModel
+from .hebrewprober import HebrewProber
+from .langturkishmodel import Latin5TurkishModel
+
+
+class SBCSGroupProber(CharSetGroupProber):
+    def __init__(self):
+        super(SBCSGroupProber, self).__init__()
+        self.probers = [
+            SingleByteCharSetProber(Win1251CyrillicModel),
+            SingleByteCharSetProber(Koi8rModel),
+            SingleByteCharSetProber(Latin5CyrillicModel),
+            SingleByteCharSetProber(MacCyrillicModel),
+            SingleByteCharSetProber(Ibm866Model),
+            SingleByteCharSetProber(Ibm855Model),
+            SingleByteCharSetProber(Latin7GreekModel),
+            SingleByteCharSetProber(Win1253GreekModel),
+            SingleByteCharSetProber(Latin5BulgarianModel),
+            SingleByteCharSetProber(Win1251BulgarianModel),
+            # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250)
+            #       after we retrain model.
+            # SingleByteCharSetProber(Latin2HungarianModel),
+            # SingleByteCharSetProber(Win1250HungarianModel),
+            SingleByteCharSetProber(TIS620ThaiModel),
+            SingleByteCharSetProber(Latin5TurkishModel),
+        ]
+        hebrew_prober = HebrewProber()
+        logical_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel,
+                                                        False, hebrew_prober)
+        visual_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, True,
+                                                       hebrew_prober)
+        hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober)
+        self.probers.extend([hebrew_prober, logical_hebrew_prober,
+                             visual_hebrew_prober])
+
+        self.reset()
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/sjisprober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/sjisprober.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e29623bdc54a7c6d11bcc167d71bb44cc9be39d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/sjisprober.py
@@ -0,0 +1,92 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import SJISDistributionAnalysis
+from .jpcntx import SJISContextAnalysis
+from .mbcssm import SJIS_SM_MODEL
+from .enums import ProbingState, MachineState
+
+
+class SJISProber(MultiByteCharSetProber):
+    def __init__(self):
+        super(SJISProber, self).__init__()
+        self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
+        self.distribution_analyzer = SJISDistributionAnalysis()
+        self.context_analyzer = SJISContextAnalysis()
+        self.reset()
+
+    def reset(self):
+        super(SJISProber, self).reset()
+        self.context_analyzer.reset()
+
+    @property
+    def charset_name(self):
+        return self.context_analyzer.charset_name
+
+    @property
+    def language(self):
+        return "Japanese"
+
+    def feed(self, byte_str):
+        for i in range(len(byte_str)):
+            coding_state = self.coding_sm.next_state(byte_str[i])
+            if coding_state == MachineState.ERROR:
+                self.logger.debug('%s %s prober hit error at byte %s',
+                                  self.charset_name, self.language, i)
+                self._state = ProbingState.NOT_ME
+                break
+            elif coding_state == MachineState.ITS_ME:
+                self._state = ProbingState.FOUND_IT
+                break
+            elif coding_state == MachineState.START:
+                char_len = self.coding_sm.get_current_charlen()
+                if i == 0:
+                    self._last_char[1] = byte_str[0]
+                    self.context_analyzer.feed(self._last_char[2 - char_len:],
+                                               char_len)
+                    self.distribution_analyzer.feed(self._last_char, char_len)
+                else:
+                    self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3
+                                                        - char_len], char_len)
+                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
+                                                    char_len)
+
+        self._last_char[0] = byte_str[-1]
+
+        if self.state == ProbingState.DETECTING:
+            if (self.context_analyzer.got_enough_data() and
+               (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
+                self._state = ProbingState.FOUND_IT
+
+        return self.state
+
+    def get_confidence(self):
+        context_conf = self.context_analyzer.get_confidence()
+        distrib_conf = self.distribution_analyzer.get_confidence()
+        return max(context_conf, distrib_conf)
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/universaldetector.py b/lib/python3.7/site-packages/pip/_vendor/chardet/universaldetector.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b4e92d6158527736e3d14d6f725edada8f94b00
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/universaldetector.py
@@ -0,0 +1,286 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#   Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+"""
+Module containing the UniversalDetector detector class, which is the primary
+class a user of ``chardet`` should use.
+
+:author: Mark Pilgrim (initial port to Python)
+:author: Shy Shalom (original C code)
+:author: Dan Blanchard (major refactoring for 3.0)
+:author: Ian Cordasco
+"""
+
+
+import codecs
+import logging
+import re
+
+from .charsetgroupprober import CharSetGroupProber
+from .enums import InputState, LanguageFilter, ProbingState
+from .escprober import EscCharSetProber
+from .latin1prober import Latin1Prober
+from .mbcsgroupprober import MBCSGroupProber
+from .sbcsgroupprober import SBCSGroupProber
+
+
+class UniversalDetector(object):
+    """
+    The ``UniversalDetector`` class underlies the ``chardet.detect`` function
+    and coordinates all of the different charset probers.
+
+    To get a ``dict`` containing an encoding and its confidence, you can simply
+    run:
+
+    .. code::
+
+            u = UniversalDetector()
+            u.feed(some_bytes)
+            u.close()
+            detected = u.result
+
+    """
+
+    MINIMUM_THRESHOLD = 0.20
+    HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]')
+    ESC_DETECTOR = re.compile(b'(\033|~{)')
+    WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]')
+    ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252',
+                   'iso-8859-2': 'Windows-1250',
+                   'iso-8859-5': 'Windows-1251',
+                   'iso-8859-6': 'Windows-1256',
+                   'iso-8859-7': 'Windows-1253',
+                   'iso-8859-8': 'Windows-1255',
+                   'iso-8859-9': 'Windows-1254',
+                   'iso-8859-13': 'Windows-1257'}
+
+    def __init__(self, lang_filter=LanguageFilter.ALL):
+        self._esc_charset_prober = None
+        self._charset_probers = []
+        self.result = None
+        self.done = None
+        self._got_data = None
+        self._input_state = None
+        self._last_char = None
+        self.lang_filter = lang_filter
+        self.logger = logging.getLogger(__name__)
+        self._has_win_bytes = None
+        self.reset()
+
+    def reset(self):
+        """
+        Reset the UniversalDetector and all of its probers back to their
+        initial states.  This is called by ``__init__``, so you only need to
+        call this directly in between analyses of different documents.
+        """
+        self.result = {'encoding': None, 'confidence': 0.0, 'language': None}
+        self.done = False
+        self._got_data = False
+        self._has_win_bytes = False
+        self._input_state = InputState.PURE_ASCII
+        self._last_char = b''
+        if self._esc_charset_prober:
+            self._esc_charset_prober.reset()
+        for prober in self._charset_probers:
+            prober.reset()
+
+    def feed(self, byte_str):
+        """
+        Takes a chunk of a document and feeds it through all of the relevant
+        charset probers.
+
+        After calling ``feed``, you can check the value of the ``done``
+        attribute to see if you need to continue feeding the
+        ``UniversalDetector`` more data, or if it has made a prediction
+        (in the ``result`` attribute).
+
+        .. note::
+           You should always call ``close`` when you're done feeding in your
+           document if ``done`` is not already ``True``.
+        """
+        if self.done:
+            return
+
+        if not byte_str:
+            return
+
+        if not isinstance(byte_str, bytearray):
+            byte_str = bytearray(byte_str)
+
+        # First check for known BOMs, since these are guaranteed to be correct
+        if not self._got_data:
+            # If the data starts with BOM, we know it is UTF
+            if byte_str.startswith(codecs.BOM_UTF8):
+                # EF BB BF  UTF-8 with BOM
+                self.result = {'encoding': "UTF-8-SIG",
+                               'confidence': 1.0,
+                               'language': ''}
+            elif byte_str.startswith((codecs.BOM_UTF32_LE,
+                                      codecs.BOM_UTF32_BE)):
+                # FF FE 00 00  UTF-32, little-endian BOM
+                # 00 00 FE FF  UTF-32, big-endian BOM
+                self.result = {'encoding': "UTF-32",
+                               'confidence': 1.0,
+                               'language': ''}
+            elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
+                # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
+                self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
+                               'confidence': 1.0,
+                               'language': ''}
+            elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
+                # 00 00 FF FE  UCS-4, unusual octet order BOM (2143)
+                self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
+                               'confidence': 1.0,
+                               'language': ''}
+            elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
+                # FF FE  UTF-16, little endian BOM
+                # FE FF  UTF-16, big endian BOM
+                self.result = {'encoding': "UTF-16",
+                               'confidence': 1.0,
+                               'language': ''}
+
+            self._got_data = True
+            if self.result['encoding'] is not None:
+                self.done = True
+                return
+
+        # If none of those matched and we've only seen ASCII so far, check
+        # for high bytes and escape sequences
+        if self._input_state == InputState.PURE_ASCII:
+            if self.HIGH_BYTE_DETECTOR.search(byte_str):
+                self._input_state = InputState.HIGH_BYTE
+            elif self._input_state == InputState.PURE_ASCII and \
+                    self.ESC_DETECTOR.search(self._last_char + byte_str):
+                self._input_state = InputState.ESC_ASCII
+
+        self._last_char = byte_str[-1:]
+
+        # If we've seen escape sequences, use the EscCharSetProber, which
+        # uses a simple state machine to check for known escape sequences in
+        # HZ and ISO-2022 encodings, since those are the only encodings that
+        # use such sequences.
+        if self._input_state == InputState.ESC_ASCII:
+            if not self._esc_charset_prober:
+                self._esc_charset_prober = EscCharSetProber(self.lang_filter)
+            if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
+                self.result = {'encoding':
+                               self._esc_charset_prober.charset_name,
+                               'confidence':
+                               self._esc_charset_prober.get_confidence(),
+                               'language':
+                               self._esc_charset_prober.language}
+                self.done = True
+        # If we've seen high bytes (i.e., those with values greater than 127),
+        # we need to do more complicated checks using all our multi-byte and
+        # single-byte probers that are left.  The single-byte probers
+        # use character bigram distributions to determine the encoding, whereas
+        # the multi-byte probers use a combination of character unigram and
+        # bigram distributions.
+        elif self._input_state == InputState.HIGH_BYTE:
+            if not self._charset_probers:
+                self._charset_probers = [MBCSGroupProber(self.lang_filter)]
+                # If we're checking non-CJK encodings, use single-byte prober
+                if self.lang_filter & LanguageFilter.NON_CJK:
+                    self._charset_probers.append(SBCSGroupProber())
+                self._charset_probers.append(Latin1Prober())
+            for prober in self._charset_probers:
+                if prober.feed(byte_str) == ProbingState.FOUND_IT:
+                    self.result = {'encoding': prober.charset_name,
+                                   'confidence': prober.get_confidence(),
+                                   'language': prober.language}
+                    self.done = True
+                    break
+            if self.WIN_BYTE_DETECTOR.search(byte_str):
+                self._has_win_bytes = True
+
+    def close(self):
+        """
+        Stop analyzing the current document and come up with a final
+        prediction.
+
+        :returns:  The ``result`` attribute, a ``dict`` with the keys
+                   `encoding`, `confidence`, and `language`.
+        """
+        # Don't bother with checks if we're already done
+        if self.done:
+            return self.result
+        self.done = True
+
+        if not self._got_data:
+            self.logger.debug('no data received!')
+
+        # Default to ASCII if it is all we've seen so far
+        elif self._input_state == InputState.PURE_ASCII:
+            self.result = {'encoding': 'ascii',
+                           'confidence': 1.0,
+                           'language': ''}
+
+        # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
+        elif self._input_state == InputState.HIGH_BYTE:
+            prober_confidence = None
+            max_prober_confidence = 0.0
+            max_prober = None
+            for prober in self._charset_probers:
+                if not prober:
+                    continue
+                prober_confidence = prober.get_confidence()
+                if prober_confidence > max_prober_confidence:
+                    max_prober_confidence = prober_confidence
+                    max_prober = prober
+            if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
+                charset_name = max_prober.charset_name
+                lower_charset_name = max_prober.charset_name.lower()
+                confidence = max_prober.get_confidence()
+                # Use Windows encoding name instead of ISO-8859 if we saw any
+                # extra Windows-specific bytes
+                if lower_charset_name.startswith('iso-8859'):
+                    if self._has_win_bytes:
+                        charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
+                                                            charset_name)
+                self.result = {'encoding': charset_name,
+                               'confidence': confidence,
+                               'language': max_prober.language}
+
+        # Log all prober confidences if none met MINIMUM_THRESHOLD
+        if self.logger.getEffectiveLevel() == logging.DEBUG:
+            if self.result['encoding'] is None:
+                self.logger.debug('no probers hit minimum threshold')
+                for group_prober in self._charset_probers:
+                    if not group_prober:
+                        continue
+                    if isinstance(group_prober, CharSetGroupProber):
+                        for prober in group_prober.probers:
+                            self.logger.debug('%s %s confidence = %s',
+                                              prober.charset_name,
+                                              prober.language,
+                                              prober.get_confidence())
+                    else:
+                        self.logger.debug('%s %s confidence = %s',
+                                          group_prober.charset_name,
+                                          group_prober.language,
+                                          group_prober.get_confidence())
+        return self.result
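+
+# Incremental usage sketch (this mirrors the standard pattern from the
+# chardet documentation): feed the document chunk by chunk and stop as soon
+# as ``done`` flips to True.
+#
+#     detector = UniversalDetector()
+#     with open('some_file.txt', 'rb') as fp:  # hypothetical input file
+#         for line in fp:
+#             detector.feed(line)
+#             if detector.done:
+#                 break
+#     detector.close()
+#     print(detector.result)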
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/utf8prober.py b/lib/python3.7/site-packages/pip/_vendor/chardet/utf8prober.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c3196cc2d7e46e6756580267f5643c6f7b448dd
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/utf8prober.py
@@ -0,0 +1,82 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#   Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301  USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState, MachineState
+from .codingstatemachine import CodingStateMachine
+from .mbcssm import UTF8_SM_MODEL
+
+
+
+class UTF8Prober(CharSetProber):
+    ONE_CHAR_PROB = 0.5
+
+    def __init__(self):
+        super(UTF8Prober, self).__init__()
+        self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
+        self._num_mb_chars = None
+        self.reset()
+
+    def reset(self):
+        super(UTF8Prober, self).reset()
+        self.coding_sm.reset()
+        self._num_mb_chars = 0
+
+    @property
+    def charset_name(self):
+        return "utf-8"
+
+    @property
+    def language(self):
+        return ""
+
+    def feed(self, byte_str):
+        for c in byte_str:
+            coding_state = self.coding_sm.next_state(c)
+            if coding_state == MachineState.ERROR:
+                self._state = ProbingState.NOT_ME
+                break
+            elif coding_state == MachineState.ITS_ME:
+                self._state = ProbingState.FOUND_IT
+                break
+            elif coding_state == MachineState.START:
+                if self.coding_sm.get_current_charlen() >= 2:
+                    self._num_mb_chars += 1
+
+        if self.state == ProbingState.DETECTING:
+            if self.get_confidence() > self.SHORTCUT_THRESHOLD:
+                self._state = ProbingState.FOUND_IT
+
+        return self.state
+
+    def get_confidence(self):
+        unlike = 0.99
+        if self._num_mb_chars < 6:
+            unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars
+            return 1.0 - unlike
+        else:
+            return unlike
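
The confidence formula above rewards multi-byte sequences: each one seen halves
the remaining "unlikely" mass of 0.99 until six have been observed, after which
confidence saturates. A worked sketch (not part of the vendored file):

    # confidence = 1.0 - 0.99 * 0.5**n for n multi-byte chars (n < 6), else 0.99
    for n in range(8):
        conf = (1.0 - 0.99 * 0.5 ** n) if n < 6 else 0.99
        print(n, conf)
    # 0 -> 0.01, 1 -> 0.505, 2 -> 0.7525, ... 6 and up -> 0.99
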
diff --git a/lib/python3.7/site-packages/pip/_vendor/chardet/version.py b/lib/python3.7/site-packages/pip/_vendor/chardet/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb2a34a70ea760ad1f58979acfc8fa466e30c511
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/chardet/version.py
@@ -0,0 +1,9 @@
+"""
+This module exists only to simplify retrieving the version number of chardet
+from within setup.py and from chardet subpackages.
+
+:author: Dan Blanchard (dan.blanchard@gmail.com)
+"""
+
+__version__ = "3.0.4"
+VERSION = __version__.split('.')
diff --git a/lib/python3.7/site-packages/pip/_vendor/colorama/__init__.py b/lib/python3.7/site-packages/pip/_vendor/colorama/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a3bf471423bbf789f13755d3dcb03826f7aff36
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/colorama/__init__.py
@@ -0,0 +1,6 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+from .initialise import init, deinit, reinit, colorama_text
+from .ansi import Fore, Back, Style, Cursor
+from .ansitowin32 import AnsiToWin32
+
+__version__ = '0.4.1'
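
A sketch of the public API re-exported above, in standard colorama usage
(nothing here is specific to pip's vendored copy):

    from colorama import init, deinit, Fore, Style

    init()  # on Windows, wraps stdout/stderr so ANSI sequences take effect
    print(Fore.RED + 'error text' + Style.RESET_ALL)
    deinit()  # restore the original streams
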
diff --git a/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3166920986449e119f7538e88cb226c1726004d0
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/ansi.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/ansi.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0d8302c9f92df9f1a2e28692eedb7aa10a79436b
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/ansi.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/ansitowin32.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/ansitowin32.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ee5b510ab2d0e2c4c2386cffda4eaa38244e38c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/ansitowin32.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/initialise.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/initialise.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2a80043dde36d62b4a012a88791e36969d42dde
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/initialise.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/win32.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/win32.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e8b03751f16fc1e5b850a311f91c856700d9cdb
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/win32.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/winterm.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/winterm.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8cdb863c8de046ba498c0e37fca69365f9e4db9f
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/colorama/__pycache__/winterm.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/colorama/ansi.py b/lib/python3.7/site-packages/pip/_vendor/colorama/ansi.py
new file mode 100644
index 0000000000000000000000000000000000000000..78776588db9410924d8e4af0922fbc3960a37624
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/colorama/ansi.py
@@ -0,0 +1,102 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+'''
+This module generates ANSI character codes for printing colors to terminals.
+See: http://en.wikipedia.org/wiki/ANSI_escape_code
+'''
+
+CSI = '\033['
+OSC = '\033]'
+BEL = '\007'
+
+
+def code_to_chars(code):
+    return CSI + str(code) + 'm'
+
+def set_title(title):
+    return OSC + '2;' + title + BEL
+
+def clear_screen(mode=2):
+    return CSI + str(mode) + 'J'
+
+def clear_line(mode=2):
+    return CSI + str(mode) + 'K'
+
+
+class AnsiCodes(object):
+    def __init__(self):
+        # the subclasses declare class attributes which are numbers.
+        # Upon instantiation we define instance attributes, which are the same
+        # as the class attributes but wrapped with the ANSI escape sequence
+        for name in dir(self):
+            if not name.startswith('_'):
+                value = getattr(self, name)
+                setattr(self, name, code_to_chars(value))
+
+
+class AnsiCursor(object):
+    def UP(self, n=1):
+        return CSI + str(n) + 'A'
+    def DOWN(self, n=1):
+        return CSI + str(n) + 'B'
+    def FORWARD(self, n=1):
+        return CSI + str(n) + 'C'
+    def BACK(self, n=1):
+        return CSI + str(n) + 'D'
+    def POS(self, x=1, y=1):
+        return CSI + str(y) + ';' + str(x) + 'H'
+
+
+class AnsiFore(AnsiCodes):
+    BLACK           = 30
+    RED             = 31
+    GREEN           = 32
+    YELLOW          = 33
+    BLUE            = 34
+    MAGENTA         = 35
+    CYAN            = 36
+    WHITE           = 37
+    RESET           = 39
+
+    # These are fairly well supported, but not part of the standard.
+    LIGHTBLACK_EX   = 90
+    LIGHTRED_EX     = 91
+    LIGHTGREEN_EX   = 92
+    LIGHTYELLOW_EX  = 93
+    LIGHTBLUE_EX    = 94
+    LIGHTMAGENTA_EX = 95
+    LIGHTCYAN_EX    = 96
+    LIGHTWHITE_EX   = 97
+
+
+class AnsiBack(AnsiCodes):
+    BLACK           = 40
+    RED             = 41
+    GREEN           = 42
+    YELLOW          = 43
+    BLUE            = 44
+    MAGENTA         = 45
+    CYAN            = 46
+    WHITE           = 47
+    RESET           = 49
+
+    # These are fairly well supported, but not part of the standard.
+    LIGHTBLACK_EX   = 100
+    LIGHTRED_EX     = 101
+    LIGHTGREEN_EX   = 102
+    LIGHTYELLOW_EX  = 103
+    LIGHTBLUE_EX    = 104
+    LIGHTMAGENTA_EX = 105
+    LIGHTCYAN_EX    = 106
+    LIGHTWHITE_EX   = 107
+
+
+class AnsiStyle(AnsiCodes):
+    BRIGHT    = 1
+    DIM       = 2
+    NORMAL    = 22
+    RESET_ALL = 0
+
+Fore   = AnsiFore()
+Back   = AnsiBack()
+Style  = AnsiStyle()
+Cursor = AnsiCursor()
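
Instantiating these classes runs AnsiCodes.__init__, which rewrites every
numeric class attribute into its escape string, while AnsiCursor builds
sequences on demand. The resulting values follow directly from the code above:

    from colorama.ansi import Fore, Back, Style, Cursor, set_title

    assert Fore.RED == '\033[31m'        # code_to_chars(31)
    assert Back.GREEN == '\033[42m'      # code_to_chars(42)
    assert Style.RESET_ALL == '\033[0m'
    assert Cursor.UP(2) == '\033[2A'
    print(repr(set_title('demo')))       # OSC + '2;demo' + BEL
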
diff --git a/lib/python3.7/site-packages/pip/_vendor/colorama/ansitowin32.py b/lib/python3.7/site-packages/pip/_vendor/colorama/ansitowin32.py
new file mode 100644
index 0000000000000000000000000000000000000000..359c92be50ee3c97aafbc8d31c16e5fbf2e6d022
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/colorama/ansitowin32.py
@@ -0,0 +1,257 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+import re
+import sys
+import os
+
+from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
+from .winterm import WinTerm, WinColor, WinStyle
+from .win32 import windll, winapi_test
+
+
+winterm = None
+if windll is not None:
+    winterm = WinTerm()
+
+
+class StreamWrapper(object):
+    '''
+    Wraps a stream (such as stdout), acting as a transparent proxy for all
+    attribute access apart from method 'write()', which is delegated to our
+    Converter instance.
+    '''
+    def __init__(self, wrapped, converter):
+        # double-underscore everything to prevent clashes with names of
+        # attributes on the wrapped stream object.
+        self.__wrapped = wrapped
+        self.__convertor = converter
+
+    def __getattr__(self, name):
+        return getattr(self.__wrapped, name)
+
+    def __enter__(self, *args, **kwargs):
+        # special method lookup bypasses __getattr__/__getattribute__, see
+        # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit
+        # thus, contextlib magic methods are not proxied via __getattr__
+        return self.__wrapped.__enter__(*args, **kwargs)
+
+    def __exit__(self, *args, **kwargs):
+        return self.__wrapped.__exit__(*args, **kwargs)
+
+    def write(self, text):
+        self.__convertor.write(text)
+
+    def isatty(self):
+        stream = self.__wrapped
+        if 'PYCHARM_HOSTED' in os.environ:
+            if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):
+                return True
+        try:
+            stream_isatty = stream.isatty
+        except AttributeError:
+            return False
+        else:
+            return stream_isatty()
+
+    @property
+    def closed(self):
+        stream = self.__wrapped
+        try:
+            return stream.closed
+        except AttributeError:
+            return True
+
+
+class AnsiToWin32(object):
+    '''
+    Implements a 'write()' method which, on Windows, will strip ANSI character
+    sequences from the text, and if outputting to a tty, will convert them into
+    win32 function calls.
+    '''
+    ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?')   # Control Sequence Introducer
+    ANSI_OSC_RE = re.compile('\001?\033\\]((?:.|;)*?)(\x07)\002?')        # Operating System Command
+
+    def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
+        # The wrapped stream (normally sys.stdout or sys.stderr)
+        self.wrapped = wrapped
+
+        # should we reset colors to defaults after every .write()
+        self.autoreset = autoreset
+
+        # create the proxy wrapping our output stream
+        self.stream = StreamWrapper(wrapped, self)
+
+        on_windows = os.name == 'nt'
+        # We test if the WinAPI works, because even if we are on Windows
+        # we may be using a terminal that doesn't support the WinAPI
+        # (e.g. Cygwin Terminal). In this case it's up to the terminal
+        # to support the ANSI codes.
+        conversion_supported = on_windows and winapi_test()
+
+        # should we strip ANSI sequences from our output?
+        if strip is None:
+            strip = conversion_supported or (not self.stream.closed and not self.stream.isatty())
+        self.strip = strip
+
+        # should we convert ANSI sequences into win32 calls?
+        if convert is None:
+            convert = conversion_supported and not self.stream.closed and self.stream.isatty()
+        self.convert = convert
+
+        # dict of ansi codes to win32 functions and parameters
+        self.win32_calls = self.get_win32_calls()
+
+        # are we wrapping stderr?
+        self.on_stderr = self.wrapped is sys.stderr
+
+    def should_wrap(self):
+        '''
+        True if this class is actually needed. If false, then the output
+        stream will not be affected, nor will win32 calls be issued, so
+        wrapping stdout is not actually required. This will generally be
+        False on non-Windows platforms, unless optional functionality like
+        autoreset has been requested using kwargs to init()
+        '''
+        return self.convert or self.strip or self.autoreset
+
+    def get_win32_calls(self):
+        if self.convert and winterm:
+            return {
+                AnsiStyle.RESET_ALL: (winterm.reset_all, ),
+                AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
+                AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
+                AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
+                AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
+                AnsiFore.RED: (winterm.fore, WinColor.RED),
+                AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
+                AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
+                AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
+                AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
+                AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
+                AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
+                AnsiFore.RESET: (winterm.fore, ),
+                AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
+                AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
+                AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
+                AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
+                AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
+                AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
+                AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
+                AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
+                AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
+                AnsiBack.RED: (winterm.back, WinColor.RED),
+                AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
+                AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
+                AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
+                AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
+                AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
+                AnsiBack.WHITE: (winterm.back, WinColor.GREY),
+                AnsiBack.RESET: (winterm.back, ),
+                AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
+                AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
+                AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
+                AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
+                AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
+                AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
+                AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
+                AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
+            }
+        return dict()
+
+    def write(self, text):
+        if self.strip or self.convert:
+            self.write_and_convert(text)
+        else:
+            self.wrapped.write(text)
+            self.wrapped.flush()
+        if self.autoreset:
+            self.reset_all()
+
+
+    def reset_all(self):
+        if self.convert:
+            self.call_win32('m', (0,))
+        elif not self.strip and not self.stream.closed:
+            self.wrapped.write(Style.RESET_ALL)
+
+
+    def write_and_convert(self, text):
+        '''
+        Write the given text to our wrapped stream, stripping any ANSI
+        sequences from the text, and optionally converting them into win32
+        calls.
+        '''
+        cursor = 0
+        text = self.convert_osc(text)
+        for match in self.ANSI_CSI_RE.finditer(text):
+            start, end = match.span()
+            self.write_plain_text(text, cursor, start)
+            self.convert_ansi(*match.groups())
+            cursor = end
+        self.write_plain_text(text, cursor, len(text))
+
+
+    def write_plain_text(self, text, start, end):
+        if start < end:
+            self.wrapped.write(text[start:end])
+            self.wrapped.flush()
+
+
+    def convert_ansi(self, paramstring, command):
+        if self.convert:
+            params = self.extract_params(command, paramstring)
+            self.call_win32(command, params)
+
+
+    def extract_params(self, command, paramstring):
+        if command in 'Hf':
+            params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
+            while len(params) < 2:
+                # defaults:
+                params = params + (1,)
+        else:
+            params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
+            if len(params) == 0:
+                # defaults:
+                if command in 'JKm':
+                    params = (0,)
+                elif command in 'ABCD':
+                    params = (1,)
+
+        return params
+
+
+    def call_win32(self, command, params):
+        if command == 'm':
+            for param in params:
+                if param in self.win32_calls:
+                    func_args = self.win32_calls[param]
+                    func = func_args[0]
+                    args = func_args[1:]
+                    kwargs = dict(on_stderr=self.on_stderr)
+                    func(*args, **kwargs)
+        elif command in 'J':
+            winterm.erase_screen(params[0], on_stderr=self.on_stderr)
+        elif command in 'K':
+            winterm.erase_line(params[0], on_stderr=self.on_stderr)
+        elif command in 'Hf':     # cursor position - absolute
+            winterm.set_cursor_position(params, on_stderr=self.on_stderr)
+        elif command in 'ABCD':   # cursor position - relative
+            n = params[0]
+            # A - up, B - down, C - forward, D - back
+            x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
+            winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
+
+
+    def convert_osc(self, text):
+        for match in self.ANSI_OSC_RE.finditer(text):
+            start, end = match.span()
+            text = text[:start] + text[end:]
+            paramstring, command = match.groups()
+            if command in '\x07':       # \x07 = BEL
+                params = paramstring.split(";")
+                # 0 - change title and icon (we will only change title)
+                # 1 - change icon (we don't support this)
+                # 2 - change title
+                if params[0] in '02':
+                    winterm.set_title(params[1])
+        return text
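
A minimal sketch of using the converter directly; init() in initialise.py
below does essentially this for sys.stdout and sys.stderr:

    import sys
    from colorama.ansitowin32 import AnsiToWin32

    wrapper = AnsiToWin32(sys.stdout)
    stream = wrapper.stream if wrapper.should_wrap() else sys.stdout
    # converted to win32 calls, stripped, or passed through unchanged,
    # depending on the platform and whether the stream is a tty
    stream.write('\033[31mred\033[0m\n')
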
diff --git a/lib/python3.7/site-packages/pip/_vendor/colorama/initialise.py b/lib/python3.7/site-packages/pip/_vendor/colorama/initialise.py
new file mode 100644
index 0000000000000000000000000000000000000000..430d0668727cd0521270a52c962867907d743b34
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/colorama/initialise.py
@@ -0,0 +1,80 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+import atexit
+import contextlib
+import sys
+
+from .ansitowin32 import AnsiToWin32
+
+
+orig_stdout = None
+orig_stderr = None
+
+wrapped_stdout = None
+wrapped_stderr = None
+
+atexit_done = False
+
+
+def reset_all():
+    if AnsiToWin32 is not None:    # Issue #74: objects might become None at exit
+        AnsiToWin32(orig_stdout).reset_all()
+
+
+def init(autoreset=False, convert=None, strip=None, wrap=True):
+
+    if not wrap and any([autoreset, convert, strip]):
+        raise ValueError('wrap=False conflicts with any other arg=True')
+
+    global wrapped_stdout, wrapped_stderr
+    global orig_stdout, orig_stderr
+
+    orig_stdout = sys.stdout
+    orig_stderr = sys.stderr
+
+    if sys.stdout is None:
+        wrapped_stdout = None
+    else:
+        sys.stdout = wrapped_stdout = \
+            wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
+    if sys.stderr is None:
+        wrapped_stderr = None
+    else:
+        sys.stderr = wrapped_stderr = \
+            wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
+
+    global atexit_done
+    if not atexit_done:
+        atexit.register(reset_all)
+        atexit_done = True
+
+
+def deinit():
+    if orig_stdout is not None:
+        sys.stdout = orig_stdout
+    if orig_stderr is not None:
+        sys.stderr = orig_stderr
+
+
+@contextlib.contextmanager
+def colorama_text(*args, **kwargs):
+    init(*args, **kwargs)
+    try:
+        yield
+    finally:
+        deinit()
+
+
+def reinit():
+    if wrapped_stdout is not None:
+        sys.stdout = wrapped_stdout
+    if wrapped_stderr is not None:
+        sys.stderr = wrapped_stderr
+
+
+def wrap_stream(stream, convert, strip, autoreset, wrap):
+    if wrap:
+        wrapper = AnsiToWin32(stream,
+            convert=convert, strip=strip, autoreset=autoreset)
+        if wrapper.should_wrap():
+            stream = wrapper.stream
+    return stream
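
colorama_text above is the scoped variant of init()/deinit(); a brief usage
sketch:

    from colorama import colorama_text, Fore, Style

    with colorama_text():
        print(Fore.CYAN + 'streams wrapped only inside this block' + Style.RESET_ALL)
    print('original streams restored here')
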
diff --git a/lib/python3.7/site-packages/pip/_vendor/colorama/win32.py b/lib/python3.7/site-packages/pip/_vendor/colorama/win32.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2d836033673993e00a02d5a3802b61cd051cf08
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/colorama/win32.py
@@ -0,0 +1,152 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+
+# from winbase.h
+STDOUT = -11
+STDERR = -12
+
+try:
+    import ctypes
+    from ctypes import LibraryLoader
+    windll = LibraryLoader(ctypes.WinDLL)
+    from ctypes import wintypes
+except (AttributeError, ImportError):
+    windll = None
+    SetConsoleTextAttribute = lambda *_: None
+    winapi_test = lambda *_: None
+else:
+    from ctypes import byref, Structure, c_char, POINTER
+
+    COORD = wintypes._COORD
+
+    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
+        """struct in wincon.h."""
+        _fields_ = [
+            ("dwSize", COORD),
+            ("dwCursorPosition", COORD),
+            ("wAttributes", wintypes.WORD),
+            ("srWindow", wintypes.SMALL_RECT),
+            ("dwMaximumWindowSize", COORD),
+        ]
+        def __str__(self):
+            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
+                self.dwSize.Y, self.dwSize.X,
+                self.dwCursorPosition.Y, self.dwCursorPosition.X,
+                self.wAttributes,
+                self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right,
+                self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X,
+            )
+
+    _GetStdHandle = windll.kernel32.GetStdHandle
+    _GetStdHandle.argtypes = [
+        wintypes.DWORD,
+    ]
+    _GetStdHandle.restype = wintypes.HANDLE
+
+    _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
+    _GetConsoleScreenBufferInfo.argtypes = [
+        wintypes.HANDLE,
+        POINTER(CONSOLE_SCREEN_BUFFER_INFO),
+    ]
+    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
+
+    _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
+    _SetConsoleTextAttribute.argtypes = [
+        wintypes.HANDLE,
+        wintypes.WORD,
+    ]
+    _SetConsoleTextAttribute.restype = wintypes.BOOL
+
+    _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
+    _SetConsoleCursorPosition.argtypes = [
+        wintypes.HANDLE,
+        COORD,
+    ]
+    _SetConsoleCursorPosition.restype = wintypes.BOOL
+
+    _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
+    _FillConsoleOutputCharacterA.argtypes = [
+        wintypes.HANDLE,
+        c_char,
+        wintypes.DWORD,
+        COORD,
+        POINTER(wintypes.DWORD),
+    ]
+    _FillConsoleOutputCharacterA.restype = wintypes.BOOL
+
+    _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
+    _FillConsoleOutputAttribute.argtypes = [
+        wintypes.HANDLE,
+        wintypes.WORD,
+        wintypes.DWORD,
+        COORD,
+        POINTER(wintypes.DWORD),
+    ]
+    _FillConsoleOutputAttribute.restype = wintypes.BOOL
+
+    _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
+    _SetConsoleTitleW.argtypes = [
+        wintypes.LPCWSTR
+    ]
+    _SetConsoleTitleW.restype = wintypes.BOOL
+
+    def _winapi_test(handle):
+        csbi = CONSOLE_SCREEN_BUFFER_INFO()
+        success = _GetConsoleScreenBufferInfo(
+            handle, byref(csbi))
+        return bool(success)
+
+    def winapi_test():
+        return any(_winapi_test(h) for h in
+                   (_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))
+
+    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
+        handle = _GetStdHandle(stream_id)
+        csbi = CONSOLE_SCREEN_BUFFER_INFO()
+        success = _GetConsoleScreenBufferInfo(
+            handle, byref(csbi))
+        return csbi
+
+    def SetConsoleTextAttribute(stream_id, attrs):
+        handle = _GetStdHandle(stream_id)
+        return _SetConsoleTextAttribute(handle, attrs)
+
+    def SetConsoleCursorPosition(stream_id, position, adjust=True):
+        position = COORD(*position)
+        # If the position is out of range, do nothing.
+        if position.Y <= 0 or position.X <= 0:
+            return
+        # Adjust for Windows' SetConsoleCursorPosition:
+        #    1. being 0-based, while ANSI is 1-based.
+        #    2. expecting (x,y), while ANSI uses (y,x).
+        adjusted_position = COORD(position.Y - 1, position.X - 1)
+        if adjust:
+            # Adjust for viewport's scroll position
+            sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
+            adjusted_position.Y += sr.Top
+            adjusted_position.X += sr.Left
+        # Resume normal processing
+        handle = _GetStdHandle(stream_id)
+        return _SetConsoleCursorPosition(handle, adjusted_position)
+
+    def FillConsoleOutputCharacter(stream_id, char, length, start):
+        handle = _GetStdHandle(stream_id)
+        char = c_char(char.encode())
+        length = wintypes.DWORD(length)
+        num_written = wintypes.DWORD(0)
+        # Note that this is hard-coded for ANSI (vs wide) bytes.
+        success = _FillConsoleOutputCharacterA(
+            handle, char, length, start, byref(num_written))
+        return num_written.value
+
+    def FillConsoleOutputAttribute(stream_id, attr, length, start):
+        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
+        handle = _GetStdHandle(stream_id)
+        attribute = wintypes.WORD(attr)
+        length = wintypes.DWORD(length)
+        num_written = wintypes.DWORD(0)
+        # Note that this is hard-coded for ANSI (vs wide) bytes.
+        return _FillConsoleOutputAttribute(
+            handle, attribute, length, start, byref(num_written))
+
+    def SetConsoleTitle(title):
+        return _SetConsoleTitleW(title)
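
The module above binds each Win32 call with the same ctypes pattern: look the
function up on windll.kernel32, pin down argtypes and restype, then expose a
thin wrapper. The same pattern applied to a trivial API, as a sketch (Windows
only, since ctypes.WinDLL does not exist elsewhere):

    import os

    if os.name == 'nt':
        import ctypes
        from ctypes import wintypes

        kernel32 = ctypes.WinDLL('kernel32')
        kernel32.GetTickCount.argtypes = []
        kernel32.GetTickCount.restype = wintypes.DWORD
        print('ms since boot:', kernel32.GetTickCount())
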
diff --git a/lib/python3.7/site-packages/pip/_vendor/colorama/winterm.py b/lib/python3.7/site-packages/pip/_vendor/colorama/winterm.py
new file mode 100644
index 0000000000000000000000000000000000000000..0fdb4ec4e91090876dc3fbf207049b521fa0dd73
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/colorama/winterm.py
@@ -0,0 +1,169 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+from . import win32
+
+
+# from wincon.h
+class WinColor(object):
+    BLACK   = 0
+    BLUE    = 1
+    GREEN   = 2
+    CYAN    = 3
+    RED     = 4
+    MAGENTA = 5
+    YELLOW  = 6
+    GREY    = 7
+
+# from wincon.h
+class WinStyle(object):
+    NORMAL              = 0x00 # dim text, dim background
+    BRIGHT              = 0x08 # bright text, dim background
+    BRIGHT_BACKGROUND   = 0x80 # dim text, bright background
+
+class WinTerm(object):
+
+    def __init__(self):
+        self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
+        self.set_attrs(self._default)
+        self._default_fore = self._fore
+        self._default_back = self._back
+        self._default_style = self._style
+        # In order to emulate LIGHT_EX on Windows, we borrow the BRIGHT style.
+        # So that LIGHT_EX colors and BRIGHT style do not clobber each other,
+        # we track them separately, since LIGHT_EX is overwritten by Fore/Back
+        # and BRIGHT is overwritten by Style codes.
+        self._light = 0
+
+    def get_attrs(self):
+        return self._fore + self._back * 16 + (self._style | self._light)
+
+    def set_attrs(self, value):
+        self._fore = value & 7
+        self._back = (value >> 4) & 7
+        self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
+
+    def reset_all(self, on_stderr=None):
+        self.set_attrs(self._default)
+        self.set_console(attrs=self._default)
+        self._light = 0
+
+    def fore(self, fore=None, light=False, on_stderr=False):
+        if fore is None:
+            fore = self._default_fore
+        self._fore = fore
+        # Emulate LIGHT_EX with BRIGHT Style
+        if light:
+            self._light |= WinStyle.BRIGHT
+        else:
+            self._light &= ~WinStyle.BRIGHT
+        self.set_console(on_stderr=on_stderr)
+
+    def back(self, back=None, light=False, on_stderr=False):
+        if back is None:
+            back = self._default_back
+        self._back = back
+        # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
+        if light:
+            self._light |= WinStyle.BRIGHT_BACKGROUND
+        else:
+            self._light &= ~WinStyle.BRIGHT_BACKGROUND
+        self.set_console(on_stderr=on_stderr)
+
+    def style(self, style=None, on_stderr=False):
+        if style is None:
+            style = self._default_style
+        self._style = style
+        self.set_console(on_stderr=on_stderr)
+
+    def set_console(self, attrs=None, on_stderr=False):
+        if attrs is None:
+            attrs = self.get_attrs()
+        handle = win32.STDOUT
+        if on_stderr:
+            handle = win32.STDERR
+        win32.SetConsoleTextAttribute(handle, attrs)
+
+    def get_position(self, handle):
+        position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
+        # Because Windows coordinates are 0-based,
+        # and win32.SetConsoleCursorPosition expects 1-based.
+        position.X += 1
+        position.Y += 1
+        return position
+
+    def set_cursor_position(self, position=None, on_stderr=False):
+        if position is None:
+            # I'm not currently tracking the position, so there is no default.
+            # position = self.get_position()
+            return
+        handle = win32.STDOUT
+        if on_stderr:
+            handle = win32.STDERR
+        win32.SetConsoleCursorPosition(handle, position)
+
+    def cursor_adjust(self, x, y, on_stderr=False):
+        handle = win32.STDOUT
+        if on_stderr:
+            handle = win32.STDERR
+        position = self.get_position(handle)
+        adjusted_position = (position.Y + y, position.X + x)
+        win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
+
+    def erase_screen(self, mode=0, on_stderr=False):
+        # 0 should clear from the cursor to the end of the screen.
+        # 1 should clear from the cursor to the beginning of the screen.
+        # 2 should clear the entire screen, and move cursor to (1,1)
+        handle = win32.STDOUT
+        if on_stderr:
+            handle = win32.STDERR
+        csbi = win32.GetConsoleScreenBufferInfo(handle)
+        # get the number of character cells in the current buffer
+        cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
+        # get number of character cells before current cursor position
+        cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
+        if mode == 0:
+            from_coord = csbi.dwCursorPosition
+            cells_to_erase = cells_in_screen - cells_before_cursor
+        elif mode == 1:
+            from_coord = win32.COORD(0, 0)
+            cells_to_erase = cells_before_cursor
+        elif mode == 2:
+            from_coord = win32.COORD(0, 0)
+            cells_to_erase = cells_in_screen
+        else:
+            # invalid mode
+            return
+        # fill the selected portion of the screen with blanks
+        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
+        # now set the buffer's attributes accordingly
+        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
+        if mode == 2:
+            # put the cursor where needed
+            win32.SetConsoleCursorPosition(handle, (1, 1))
+
+    def erase_line(self, mode=0, on_stderr=False):
+        # 0 should clear from the cursor to the end of the line.
+        # 1 should clear from the cursor to the beginning of the line.
+        # 2 should clear the entire line.
+        handle = win32.STDOUT
+        if on_stderr:
+            handle = win32.STDERR
+        csbi = win32.GetConsoleScreenBufferInfo(handle)
+        if mode == 0:
+            from_coord = csbi.dwCursorPosition
+            cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
+        elif mode == 1:
+            from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
+            cells_to_erase = csbi.dwCursorPosition.X
+        elif mode == 2:
+            from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
+            cells_to_erase = csbi.dwSize.X
+        else:
+            # invalid mode
+            return
+        # fill the selected portion of the line with blanks
+        win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
+        # now set the buffer's attributes accordingly
+        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
+
+    def set_title(self, title):
+        win32.SetConsoleTitle(title)
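
get_attrs()/set_attrs() above pack the console attribute word with the
foreground color in bits 0-2, the background color in bits 4-6, and the
BRIGHT/BRIGHT_BACKGROUND style bits on top. A worked example using the
constants defined in this file:

    # red text (WinColor.RED == 4) on blue (WinColor.BLUE == 1), bright fore:
    attrs = 4 + 1 * 16 + 0x08      # == 0x1C
    assert attrs & 7 == 4          # fore round-trips
    assert (attrs >> 4) & 7 == 1   # back round-trips
    assert attrs & 0x08            # WinStyle.BRIGHT survives
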
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__init__.py b/lib/python3.7/site-packages/pip/_vendor/distlib/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a786b4d3b70e70df89ef6f8ad6f3c8c8400a6563
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/__init__.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2017 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+import logging
+
+__version__ = '0.2.8'
+
+class DistlibException(Exception):
+    pass
+
+try:
+    from logging import NullHandler
+except ImportError: # pragma: no cover
+    class NullHandler(logging.Handler):
+        def handle(self, record): pass
+        def emit(self, record): pass
+        def createLock(self): self.lock = None
+
+logger = logging.getLogger(__name__)
+logger.addHandler(NullHandler())
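
The NullHandler attached above is the standard library-logging idiom: the
package stays silent unless the embedding application opts in. A sketch of the
opt-in (the logger name follows from __name__ at this vendored location):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger('pip._vendor.distlib').debug('distlib chatter now visible')
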
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ad5aa298d06ef0c0f2352f6ecee89b669d4d6eb
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0de7d5efe3045d43f1294a41abe16988a0f2ba08
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/database.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/database.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b3d228167bd25f6f81a46ac7ac0a733a5c09573
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/database.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/index.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/index.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..95e935c52a3dd5d6c915aac986ad2f0086f676bf
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/index.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/locators.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/locators.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7b9b1f0da06fe7eeaab0a3cf2594c48554751d9
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/locators.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/manifest.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/manifest.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7dfd489746c1d4b32e91d962370952dbda1f5d08
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/manifest.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/markers.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/markers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..05efa3a86d95eac907e4a5d75e29f8b46584e3d3
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/markers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/metadata.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/metadata.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25448044f47adeb564403acc03036334b08e3a65
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/metadata.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f667ba919ddb5c769c280215dfa87125c4fb0eff
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/resources.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..262232a8b15071aac07bebde76fba3c064a7a648
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/scripts.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f33b99ea070ebfa5254636c04d0e4f20b5d75052
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/util.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/version.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/version.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..440e94cffa727e196a03e1f9cb508abb33b02322
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/version.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/wheel.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/wheel.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47444c6961c0e89011fb911f01835eb46f4fa9f6
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/__pycache__/wheel.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__init__.py b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7dbf4c9aa8314816f9bcbe5357146369ee71391
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__init__.py
@@ -0,0 +1,6 @@
+"""Modules copied from Python 3 standard libraries, for internal use only.
+
+Individual classes and functions are found in distlib._backport.misc.
+Intended usage is to always import things missing from Python 3.1 from that
+module: the built-in/stdlib objects will be used if found.
+"""
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eedf086877700fc40d1f3b77d5c11c8d09111bc6
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/misc.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/misc.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1882ce0024b5436932ec5ef07292b49e4f336c97
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/misc.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/shutil.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/shutil.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8383f4a49192d6869a4232c413dc2d7808d9ddc4
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/shutil.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/sysconfig.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/sysconfig.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5d1725ed83afe68f3db82b507d2dc2df3fa13be1
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/sysconfig.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/tarfile.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/tarfile.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e032f1e1d6a6bab204f16fadda35976a57f33963
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__pycache__/tarfile.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/misc.py b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfb318d34f761cde906d8b02dde3f9329688b8c6
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/misc.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""Backports for individual classes and functions."""
+
+import os
+import sys
+
+__all__ = ['cache_from_source', 'callable', 'fsencode']
+
+
+try:
+    from imp import cache_from_source
+except ImportError:
+    def cache_from_source(py_file, debug=__debug__):
+        ext = debug and 'c' or 'o'
+        return py_file + ext
+
+
+try:
+    callable = callable
+except NameError:
+    from collections import Callable
+
+    def callable(obj):
+        return isinstance(obj, Callable)
+
+
+try:
+    fsencode = os.fsencode
+except AttributeError:
+    def fsencode(filename):
+        if isinstance(filename, bytes):
+            return filename
+        elif isinstance(filename, str):
+            return filename.encode(sys.getfilesystemencoding())
+        else:
+            raise TypeError("expect bytes or str, not %s" %
+                            type(filename).__name__)
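
A round-trip sketch of the fsencode fallback above (on Python 3 the stdlib
os.fsencode is picked up instead; the import path assumes pip's vendored
layout):

    from pip._vendor.distlib._backport.misc import fsencode

    assert fsencode(b'data.txt') == b'data.txt'  # bytes pass through
    assert fsencode('data.txt') == b'data.txt'   # str is encoded
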
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/shutil.py b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/shutil.py
new file mode 100644
index 0000000000000000000000000000000000000000..159e49ee8c2aa698aea9a191db91a14716187324
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/shutil.py
@@ -0,0 +1,761 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""Utility functions for copying and archiving files and directory trees.
+
+XXX The functions here don't copy the resource fork or other metadata on Mac.
+
+"""
+
+import os
+import sys
+import stat
+from os.path import abspath
+import fnmatch
+import collections
+import errno
+from . import tarfile
+
+try:
+    import bz2
+    _BZ2_SUPPORTED = True
+except ImportError:
+    _BZ2_SUPPORTED = False
+
+try:
+    from pwd import getpwnam
+except ImportError:
+    getpwnam = None
+
+try:
+    from grp import getgrnam
+except ImportError:
+    getgrnam = None
+
+__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
+           "copytree", "move", "rmtree", "Error", "SpecialFileError",
+           "ExecError", "make_archive", "get_archive_formats",
+           "register_archive_format", "unregister_archive_format",
+           "get_unpack_formats", "register_unpack_format",
+           "unregister_unpack_format", "unpack_archive", "ignore_patterns"]
+
+class Error(EnvironmentError):
+    pass
+
+class SpecialFileError(EnvironmentError):
+    """Raised when trying to do a kind of operation (e.g. copying) which is
+    not supported on a special file (e.g. a named pipe)"""
+
+class ExecError(EnvironmentError):
+    """Raised when a command could not be executed"""
+
+class ReadError(EnvironmentError):
+    """Raised when an archive cannot be read"""
+
+class RegistryError(Exception):
+    """Raised when a registry operation with the archiving
+    and unpacking registries fails"""
+
+
+try:
+    WindowsError
+except NameError:
+    WindowsError = None
+
+def copyfileobj(fsrc, fdst, length=16*1024):
+    """copy data from file-like object fsrc to file-like object fdst"""
+    while 1:
+        buf = fsrc.read(length)
+        if not buf:
+            break
+        fdst.write(buf)
+
+def _samefile(src, dst):
+    # Macintosh, Unix.
+    if hasattr(os.path, 'samefile'):
+        try:
+            return os.path.samefile(src, dst)
+        except OSError:
+            return False
+
+    # All other platforms: check for same pathname.
+    return (os.path.normcase(os.path.abspath(src)) ==
+            os.path.normcase(os.path.abspath(dst)))
+
+def copyfile(src, dst):
+    """Copy data from src to dst"""
+    if _samefile(src, dst):
+        raise Error("`%s` and `%s` are the same file" % (src, dst))
+
+    for fn in [src, dst]:
+        try:
+            st = os.stat(fn)
+        except OSError:
+            # File most likely does not exist
+            pass
+        else:
+            # XXX What about other special files? (sockets, devices...)
+            if stat.S_ISFIFO(st.st_mode):
+                raise SpecialFileError("`%s` is a named pipe" % fn)
+
+    with open(src, 'rb') as fsrc:
+        with open(dst, 'wb') as fdst:
+            copyfileobj(fsrc, fdst)
+
+def copymode(src, dst):
+    """Copy mode bits from src to dst"""
+    if hasattr(os, 'chmod'):
+        st = os.stat(src)
+        mode = stat.S_IMODE(st.st_mode)
+        os.chmod(dst, mode)
+
+def copystat(src, dst):
+    """Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
+    st = os.stat(src)
+    mode = stat.S_IMODE(st.st_mode)
+    if hasattr(os, 'utime'):
+        os.utime(dst, (st.st_atime, st.st_mtime))
+    if hasattr(os, 'chmod'):
+        os.chmod(dst, mode)
+    if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
+        try:
+            os.chflags(dst, st.st_flags)
+        except OSError as why:
+            if (not hasattr(errno, 'EOPNOTSUPP') or
+                why.errno != errno.EOPNOTSUPP):
+                raise
+
+def copy(src, dst):
+    """Copy data and mode bits ("cp src dst").
+
+    The destination may be a directory.
+
+    """
+    if os.path.isdir(dst):
+        dst = os.path.join(dst, os.path.basename(src))
+    copyfile(src, dst)
+    copymode(src, dst)
+
+def copy2(src, dst):
+    """Copy data and all stat info ("cp -p src dst").
+
+    The destination may be a directory.
+
+    """
+    if os.path.isdir(dst):
+        dst = os.path.join(dst, os.path.basename(src))
+    copyfile(src, dst)
+    copystat(src, dst)
+
+def ignore_patterns(*patterns):
+    """Function that can be used as copytree() ignore parameter.
+
+    Patterns is a sequence of glob-style patterns
+    that are used to exclude files"""
+    def _ignore_patterns(path, names):
+        ignored_names = []
+        for pattern in patterns:
+            ignored_names.extend(fnmatch.filter(names, pattern))
+        return set(ignored_names)
+    return _ignore_patterns
+
+def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
+             ignore_dangling_symlinks=False):
+    """Recursively copy a directory tree.
+
+    The destination directory must not already exist.
+    If exception(s) occur, an Error is raised with a list of reasons.
+
+    If the optional symlinks flag is true, symbolic links in the
+    source tree result in symbolic links in the destination tree; if
+    it is false, the contents of the files pointed to by symbolic
+    links are copied. If the file pointed to by the symlink doesn't
+    exist, an exception will be added to the list of errors raised in
+    an Error exception at the end of the copy process.
+
+    You can set the optional ignore_dangling_symlinks flag to true if you
+    want to silence this exception. Notice that this has no effect on
+    platforms that don't support os.symlink.
+
+    The optional ignore argument is a callable. If given, it
+    is called with the `src` parameter, which is the directory
+    being visited by copytree(), and `names` which is the list of
+    `src` contents, as returned by os.listdir():
+
+        callable(src, names) -> ignored_names
+
+    Since copytree() is called recursively, the callable will be
+    called once for each directory that is copied. It returns a
+    list of names relative to the `src` directory that should
+    not be copied.
+
+    The optional copy_function argument is a callable that will be used
+    to copy each file. It will be called with the source path and the
+    destination path as arguments. By default, copy2() is used, but any
+    function that supports the same signature (like copy()) can be used.
+
+    """
+    names = os.listdir(src)
+    if ignore is not None:
+        ignored_names = ignore(src, names)
+    else:
+        ignored_names = set()
+
+    os.makedirs(dst)
+    errors = []
+    for name in names:
+        if name in ignored_names:
+            continue
+        srcname = os.path.join(src, name)
+        dstname = os.path.join(dst, name)
+        try:
+            if os.path.islink(srcname):
+                linkto = os.readlink(srcname)
+                if symlinks:
+                    os.symlink(linkto, dstname)
+                else:
+                    # ignore dangling symlink if the flag is on
+                    if not os.path.exists(linkto) and ignore_dangling_symlinks:
+                        continue
+                    # otherwise let the copy occur; copy2 will raise an error
+                    copy_function(srcname, dstname)
+            elif os.path.isdir(srcname):
+                copytree(srcname, dstname, symlinks, ignore, copy_function)
+            else:
+                # Will raise a SpecialFileError for unsupported file types
+                copy_function(srcname, dstname)
+        # catch the Error from the recursive copytree so that we can
+        # continue with other files
+        except Error as err:
+            errors.extend(err.args[0])
+        except EnvironmentError as why:
+            errors.append((srcname, dstname, str(why)))
+    try:
+        copystat(src, dst)
+    except OSError as why:
+        if WindowsError is not None and isinstance(why, WindowsError):
+            # Copying file access times may fail on Windows
+            pass
+        else:
+            errors.extend((src, dst, str(why)))
+    if errors:
+        raise Error(errors)
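+
+# Illustrative sketch (comment only, nothing executed): copytree() combines
+# with ignore_patterns() above to skip unwanted entries, e.g.
+#
+#     copytree('proj', 'proj_copy',
+#              ignore=ignore_patterns('*.pyc', '__pycache__'))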
+
+def rmtree(path, ignore_errors=False, onerror=None):
+    """Recursively delete a directory tree.
+
+    If ignore_errors is set, errors are ignored; otherwise, if onerror
+    is set, it is called to handle the error with arguments (func,
+    path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
+    path is the argument to that function that caused it to fail; and
+    exc_info is a tuple returned by sys.exc_info().  If ignore_errors
+    is false and onerror is None, an exception is raised.
+
+    """
+    if ignore_errors:
+        def onerror(*args):
+            pass
+    elif onerror is None:
+        def onerror(*args):
+            raise
+    try:
+        if os.path.islink(path):
+            # symlinks to directories are forbidden, see bug #1669
+            raise OSError("Cannot call rmtree on a symbolic link")
+    except OSError:
+        onerror(os.path.islink, path, sys.exc_info())
+        # can't continue even if onerror hook returns
+        return
+    names = []
+    try:
+        names = os.listdir(path)
+    except os.error:
+        onerror(os.listdir, path, sys.exc_info())
+    for name in names:
+        fullname = os.path.join(path, name)
+        try:
+            mode = os.lstat(fullname).st_mode
+        except os.error:
+            mode = 0
+        if stat.S_ISDIR(mode):
+            rmtree(fullname, ignore_errors, onerror)
+        else:
+            try:
+                os.remove(fullname)
+            except os.error:
+                onerror(os.remove, fullname, sys.exc_info())
+    try:
+        os.rmdir(path)
+    except os.error:
+        onerror(os.rmdir, path, sys.exc_info())
+
+
+def _basename(path):
+    # A basename() variant which first strips the trailing slash, if present.
+    # Thus we always get the last component of the path, even for directories.
+    return os.path.basename(path.rstrip(os.path.sep))
+
+def move(src, dst):
+    """Recursively move a file or directory to another location. This is
+    similar to the Unix "mv" command.
+
+    If the destination is a directory or a symlink to a directory, the source
+    is moved inside the directory. The destination path must not already
+    exist.
+
+    If the destination already exists but is not a directory, it may be
+    overwritten depending on os.rename() semantics.
+
+    If the destination is on our current filesystem, then rename() is used.
+    Otherwise, src is copied to the destination and then removed.
+    A lot more could be done here...  A look at mv.c shows a lot of
+    the issues this implementation glosses over.
+
+    """
+    real_dst = dst
+    if os.path.isdir(dst):
+        if _samefile(src, dst):
+            # We might be on a case insensitive filesystem,
+            # perform the rename anyway.
+            os.rename(src, dst)
+            return
+
+        real_dst = os.path.join(dst, _basename(src))
+        if os.path.exists(real_dst):
+            raise Error("Destination path '%s' already exists" % real_dst)
+    try:
+        os.rename(src, real_dst)
+    except OSError:
+        if os.path.isdir(src):
+            if _destinsrc(src, dst):
+                raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
+            copytree(src, real_dst, symlinks=True)
+            rmtree(src)
+        else:
+            copy2(src, real_dst)
+            os.unlink(src)
+
+def _destinsrc(src, dst):
+    src = abspath(src)
+    dst = abspath(dst)
+    if not src.endswith(os.path.sep):
+        src += os.path.sep
+    if not dst.endswith(os.path.sep):
+        dst += os.path.sep
+    return dst.startswith(src)
+
+def _get_gid(name):
+    """Returns a gid, given a group name."""
+    if getgrnam is None or name is None:
+        return None
+    try:
+        result = getgrnam(name)
+    except KeyError:
+        result = None
+    if result is not None:
+        return result[2]
+    return None
+
+def _get_uid(name):
+    """Returns an uid, given a user name."""
+    if getpwnam is None or name is None:
+        return None
+    try:
+        result = getpwnam(name)
+    except KeyError:
+        result = None
+    if result is not None:
+        return result[2]
+    return None
+
+def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
+                  owner=None, group=None, logger=None):
+    """Create a (possibly compressed) tar file from all the files under
+    'base_dir'.
+
+    'compress' must be "gzip" (the default), "bzip2", or None.
+
+    'owner' and 'group' can be used to define an owner and a group for the
+    archive that is being built. If not provided, the current owner and group
+    will be used.
+
+    The output tar file will be named 'base_name' +  ".tar", possibly plus
+    the appropriate compression extension (".gz", or ".bz2").
+
+    Returns the output filename.
+    """
+    tar_compression = {'gzip': 'gz', None: ''}
+    compress_ext = {'gzip': '.gz'}
+
+    if _BZ2_SUPPORTED:
+        tar_compression['bzip2'] = 'bz2'
+        compress_ext['bzip2'] = '.bz2'
+
+    # reject an unknown or unavailable compression method up front
+    if compress is not None and compress not in compress_ext:
+        raise ValueError("bad value for 'compress', or compression format not "
+                         "supported: {0}".format(compress))
+
+    archive_name = base_name + '.tar' + compress_ext.get(compress, '')
+    archive_dir = os.path.dirname(archive_name)
+
+    if not os.path.exists(archive_dir):
+        if logger is not None:
+            logger.info("creating %s", archive_dir)
+        if not dry_run:
+            os.makedirs(archive_dir)
+
+    # creating the tarball
+    if logger is not None:
+        logger.info('Creating tar archive')
+
+    uid = _get_uid(owner)
+    gid = _get_gid(group)
+
+    def _set_uid_gid(tarinfo):
+        if gid is not None:
+            tarinfo.gid = gid
+            tarinfo.gname = group
+        if uid is not None:
+            tarinfo.uid = uid
+            tarinfo.uname = owner
+        return tarinfo
+
+    if not dry_run:
+        tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
+        try:
+            tar.add(base_dir, filter=_set_uid_gid)
+        finally:
+            tar.close()
+
+    return archive_name
+
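+# Hedged usage sketch (paths and names are hypothetical, not from the
+# original file): _make_tarball is normally reached via make_archive()
+# below, but calling it directly shows how owner/group are rewritten on
+# every member through the tarfile filter defined above.
+def _example_make_tarball():
+    # produces /tmp/backup.tar.gz from everything under 'data'
+    return _make_tarball('/tmp/backup', 'data', compress='gzip',
+                         owner='root', group='root')
+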
+def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
+    # XXX see if we want to keep an external call here
+    if verbose:
+        zipoptions = "-r"
+    else:
+        zipoptions = "-rq"
+    from distutils.errors import DistutilsExecError
+    from distutils.spawn import spawn
+    try:
+        spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
+    except DistutilsExecError:
+        # XXX really should distinguish between "couldn't find
+        # external 'zip' command" and "zip failed".
+        raise ExecError("unable to create zip file '%s': "
+            "could neither import the 'zipfile' module nor "
+            "find a standalone zip utility") % zip_filename
+
+def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
+    """Create a zip file from all the files under 'base_dir'.
+
+    The output zip file will be named 'base_name' + ".zip".  Uses either the
+    "zipfile" Python module (if available) or the InfoZIP "zip" utility
+    (if installed and found on the default search path).  If neither tool is
+    available, raises ExecError.  Returns the name of the output zip
+    file.
+    """
+    zip_filename = base_name + ".zip"
+    archive_dir = os.path.dirname(base_name)
+
+    if not os.path.exists(archive_dir):
+        if logger is not None:
+            logger.info("creating %s", archive_dir)
+        if not dry_run:
+            os.makedirs(archive_dir)
+
+    # If zipfile module is not available, try spawning an external 'zip'
+    # command.
+    try:
+        import zipfile
+    except ImportError:
+        zipfile = None
+
+    if zipfile is None:
+        _call_external_zip(base_dir, zip_filename, verbose, dry_run)
+    else:
+        if logger is not None:
+            logger.info("creating '%s' and adding '%s' to it",
+                        zip_filename, base_dir)
+
+        if not dry_run:
+            zip = zipfile.ZipFile(zip_filename, "w",
+                                  compression=zipfile.ZIP_DEFLATED)
+
+            for dirpath, dirnames, filenames in os.walk(base_dir):
+                for name in filenames:
+                    path = os.path.normpath(os.path.join(dirpath, name))
+                    if os.path.isfile(path):
+                        zip.write(path, path)
+                        if logger is not None:
+                            logger.info("adding '%s'", path)
+            zip.close()
+
+    return zip_filename
+
+# 'bztar' is only registered below, once bz2 support has been confirmed.
+_ARCHIVE_FORMATS = {
+    'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
+    'tar':   (_make_tarball, [('compress', None)], "uncompressed tar file"),
+    'zip':   (_make_zipfile, [], "ZIP file"),
+    }
+
+if _BZ2_SUPPORTED:
+    _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
+                                "bzip2'ed tar-file")
+
+def get_archive_formats():
+    """Returns a list of supported formats for archiving and unarchiving.
+
+    Each element of the returned sequence is a tuple (name, description)
+    """
+    formats = [(name, registry[2]) for name, registry in
+               _ARCHIVE_FORMATS.items()]
+    formats.sort()
+    return formats
+
+def register_archive_format(name, function, extra_args=None, description=''):
+    """Registers an archive format.
+
+    name is the name of the format. function is the callable that will be
+    used to create archives. If provided, extra_args is a sequence of
+    (name, value) tuples that will be passed as arguments to the callable.
+    description can be provided to describe the format, and will be returned
+    by the get_archive_formats() function.
+    """
+    if extra_args is None:
+        extra_args = []
+    if not callable(function):
+        raise TypeError('The %s object is not callable' % function)
+    if not isinstance(extra_args, (tuple, list)):
+        raise TypeError('extra_args needs to be a sequence')
+    for element in extra_args:
+        if not isinstance(element, (tuple, list)) or len(element) != 2:
+            raise TypeError('extra_args elements must be (arg_name, value) tuples')
+
+    _ARCHIVE_FORMATS[name] = (function, extra_args, description)
+
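+# A sketch of registering a custom format; the 'xztar' name and helper
+# are hypothetical, and the 'w:xz' mode requires a Python with lzma
+# support. The callable only needs to accept the arguments that
+# make_archive() forwards to it.
+def _example_register_xztar():
+    def _make_xz_tarball(base_name, base_dir, dry_run=0, logger=None,
+                         owner=None, group=None):
+        archive_name = base_name + '.tar.xz'
+        if not dry_run:
+            tar = tarfile.open(archive_name, 'w:xz')
+            try:
+                tar.add(base_dir)
+            finally:
+                tar.close()
+        return archive_name
+    register_archive_format('xztar', _make_xz_tarball,
+                            description="xz'ed tar-file")
+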
+def unregister_archive_format(name):
+    del _ARCHIVE_FORMATS[name]
+
+def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
+                 dry_run=0, owner=None, group=None, logger=None):
+    """Create an archive file (eg. zip or tar).
+
+    'base_name' is the name of the file to create, minus any format-specific
+    extension; 'format' is the archive format: one of "zip", "tar", "bztar"
+    or "gztar".
+
+    'root_dir' is a directory that will be the root directory of the
+    archive; ie. we typically chdir into 'root_dir' before creating the
+    archive.  'base_dir' is the directory where we start archiving from;
+    ie. 'base_dir' will be the common prefix of all files and
+    directories in the archive.  'root_dir' and 'base_dir' both default
+    to the current directory.  Returns the name of the archive file.
+
+    'owner' and 'group' are used when creating a tar archive. By default,
+    uses the current owner and group.
+    """
+    save_cwd = os.getcwd()
+    if root_dir is not None:
+        if logger is not None:
+            logger.debug("changing into '%s'", root_dir)
+        base_name = os.path.abspath(base_name)
+        if not dry_run:
+            os.chdir(root_dir)
+
+    if base_dir is None:
+        base_dir = os.curdir
+
+    kwargs = {'dry_run': dry_run, 'logger': logger}
+
+    try:
+        format_info = _ARCHIVE_FORMATS[format]
+    except KeyError:
+        raise ValueError("unknown archive format '%s'" % format)
+
+    func = format_info[0]
+    for arg, val in format_info[1]:
+        kwargs[arg] = val
+
+    if format != 'zip':
+        kwargs['owner'] = owner
+        kwargs['group'] = group
+
+    try:
+        filename = func(base_name, base_dir, **kwargs)
+    finally:
+        if root_dir is not None:
+            if logger is not None:
+                logger.debug("changing back to '%s'", save_cwd)
+            os.chdir(save_cwd)
+
+    return filename
+
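+# Minimal usage sketch (hypothetical paths): archive everything under
+# /srv/app/src and return '/srv/app/release.tar.gz'.
+def _example_make_archive():
+    return make_archive('/srv/app/release', 'gztar',
+                        root_dir='/srv/app', base_dir='src')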
+
+def get_unpack_formats():
+    """Returns a list of supported formats for unpacking.
+
+    Each element of the returned sequence is a tuple
+    (name, extensions, description)
+    """
+    formats = [(name, info[0], info[3]) for name, info in
+               _UNPACK_FORMATS.items()]
+    formats.sort()
+    return formats
+
+def _check_unpack_options(extensions, function, extra_args):
+    """Checks what gets registered as an unpacker."""
+    # first make sure no other unpacker is registered for this extension
+    existing_extensions = {}
+    for name, info in _UNPACK_FORMATS.items():
+        for ext in info[0]:
+            existing_extensions[ext] = name
+
+    for extension in extensions:
+        if extension in existing_extensions:
+            msg = '%s is already registered for "%s"'
+            raise RegistryError(msg % (extension,
+                                       existing_extensions[extension]))
+
+    if not callable(function):
+        raise TypeError('The registered function must be a callable')
+
+
+def register_unpack_format(name, extensions, function, extra_args=None,
+                           description=''):
+    """Registers an unpack format.
+
+    `name` is the name of the format. `extensions` is a list of extensions
+    corresponding to the format.
+
+    `function` is the callable that will be
+    used to unpack archives. The callable will receive archives to unpack.
+    If it's unable to handle an archive, it needs to raise a ReadError
+    exception.
+
+    If provided, `extra_args` is a sequence of
+    (name, value) tuples that will be passed as arguments to the callable.
+    description can be provided to describe the format, and will be returned
+    by the get_unpack_formats() function.
+    """
+    if extra_args is None:
+        extra_args = []
+    _check_unpack_options(extensions, function, extra_args)
+    _UNPACK_FORMATS[name] = extensions, function, extra_args, description
+
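+# Sketch of a custom unpacker (names hypothetical): the registered
+# callable receives the archive filename and the target directory, plus
+# any extra_args pairs, and must raise ReadError for files it cannot
+# handle.
+def _example_register_xztar_unpacker():
+    def _unpack_xz_tarfile(filename, extract_dir):
+        if not filename.endswith('.tar.xz'):
+            raise ReadError('%s is not a tar.xz file' % filename)
+        _unpack_tarfile(filename, extract_dir)
+    register_unpack_format('xztar', ['.tar.xz'], _unpack_xz_tarfile)
+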
+def unregister_unpack_format(name):
+    """Removes the pack format from the registry."""
+    del _UNPACK_FORMATS[name]
+
+def _ensure_directory(path):
+    """Ensure that the parent directory of `path` exists"""
+    dirname = os.path.dirname(path)
+    if not os.path.isdir(dirname):
+        os.makedirs(dirname)
+
+def _unpack_zipfile(filename, extract_dir):
+    """Unpack zip `filename` to `extract_dir`
+    """
+    try:
+        import zipfile
+    except ImportError:
+        raise ReadError('zipfile (or its zlib dependency) is not available, '
+                        'cannot unpack this archive.')
+
+    if not zipfile.is_zipfile(filename):
+        raise ReadError("%s is not a zip file" % filename)
+
+    zip = zipfile.ZipFile(filename)
+    try:
+        for info in zip.infolist():
+            name = info.filename
+
+            # don't extract absolute paths or ones with .. in them
+            if name.startswith('/') or '..' in name:
+                continue
+
+            target = os.path.join(extract_dir, *name.split('/'))
+            if not target:
+                continue
+
+            _ensure_directory(target)
+            if not name.endswith('/'):
+                # file
+                data = zip.read(info.filename)
+                f = open(target, 'wb')
+                try:
+                    f.write(data)
+                finally:
+                    f.close()
+                    del data
+    finally:
+        zip.close()
+
+def _unpack_tarfile(filename, extract_dir):
+    """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
+    """
+    try:
+        tarobj = tarfile.open(filename)
+    except tarfile.TarError:
+        raise ReadError(
+            "%s is not a compressed or uncompressed tar file" % filename)
+    try:
+        tarobj.extractall(extract_dir)
+    finally:
+        tarobj.close()
+
+_UNPACK_FORMATS = {
+    'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
+    'tar':   (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
+    'zip':   (['.zip'], _unpack_zipfile, [], "ZIP file")
+    }
+
+if _BZ2_SUPPORTED:
+    _UNPACK_FORMATS['bztar'] = (['.tar.bz2', '.tbz2'], _unpack_tarfile, [],
+                                "bzip2'ed tar-file")
+
+def _find_unpack_format(filename):
+    for name, info in _UNPACK_FORMATS.items():
+        for extension in info[0]:
+            if filename.endswith(extension):
+                return name
+    return None
+
+def unpack_archive(filename, extract_dir=None, format=None):
+    """Unpack an archive.
+
+    `filename` is the name of the archive.
+
+    `extract_dir` is the name of the target directory, where the archive
+    is unpacked. If not provided, the current working directory is used.
+
+    `format` is the archive format: one of "zip", "tar", "gztar", or any
+    other registered format. If not provided, unpack_archive will use the
+    filename extension and see if an unpacker was registered for that
+    extension.
+
+    An unknown explicit format raises ValueError; if no unpacker is
+    registered for the file's extension, ReadError is raised.
+    """
+    if extract_dir is None:
+        extract_dir = os.getcwd()
+
+    if format is not None:
+        try:
+            format_info = _UNPACK_FORMATS[format]
+        except KeyError:
+            raise ValueError("Unknown unpack format '{0}'".format(format))
+
+        func = format_info[1]
+        func(filename, extract_dir, **dict(format_info[2]))
+    else:
+        # we need to look at the registered unpackers supported extensions
+        format = _find_unpack_format(filename)
+        if format is None:
+            raise ReadError("Unknown archive format '{0}'".format(filename))
+
+        func = _UNPACK_FORMATS[format][1]
+        kwargs = dict(_UNPACK_FORMATS[format][2])
+        func(filename, extract_dir, **kwargs)
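+
+# End-to-end sketch (hypothetical paths): when 'format' is omitted,
+# unpack_archive falls back to the extension registry populated above.
+def _example_roundtrip():
+    name = make_archive('/tmp/demo', 'zip', root_dir='/tmp/src')
+    unpack_archive(name, '/tmp/out')          # format inferred from '.zip'
+    unpack_archive(name, '/tmp/out2', 'zip')  # format given explicitly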
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..1746bd01c1ad915b728ab58b4668c82b2bd578e1
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.cfg
@@ -0,0 +1,84 @@
+[posix_prefix]
+# Configuration directories.  Some of these come straight out of the
+# configure script.  They are for implementing the other variables, not to
+# be used directly in [resource_locations].
+confdir = /etc
+datadir = /usr/share
+libdir = /usr/lib
+statedir = /var
+# User resource directory
+local = ~/.local/{distribution.name}
+
+stdlib = {base}/lib/python{py_version_short}
+platstdlib = {platbase}/lib/python{py_version_short}
+purelib = {base}/lib/python{py_version_short}/site-packages
+platlib = {platbase}/lib/python{py_version_short}/site-packages
+include = {base}/include/python{py_version_short}{abiflags}
+platinclude = {platbase}/include/python{py_version_short}{abiflags}
+data = {base}
+
+[posix_home]
+stdlib = {base}/lib/python
+platstdlib = {base}/lib/python
+purelib = {base}/lib/python
+platlib = {base}/lib/python
+include = {base}/include/python
+platinclude = {base}/include/python
+scripts = {base}/bin
+data = {base}
+
+[nt]
+stdlib = {base}/Lib
+platstdlib = {base}/Lib
+purelib = {base}/Lib/site-packages
+platlib = {base}/Lib/site-packages
+include = {base}/Include
+platinclude = {base}/Include
+scripts = {base}/Scripts
+data = {base}
+
+[os2]
+stdlib = {base}/Lib
+platstdlib = {base}/Lib
+purelib = {base}/Lib/site-packages
+platlib = {base}/Lib/site-packages
+include = {base}/Include
+platinclude = {base}/Include
+scripts = {base}/Scripts
+data = {base}
+
+[os2_home]
+stdlib = {userbase}/lib/python{py_version_short}
+platstdlib = {userbase}/lib/python{py_version_short}
+purelib = {userbase}/lib/python{py_version_short}/site-packages
+platlib = {userbase}/lib/python{py_version_short}/site-packages
+include = {userbase}/include/python{py_version_short}
+scripts = {userbase}/bin
+data = {userbase}
+
+[nt_user]
+stdlib = {userbase}/Python{py_version_nodot}
+platstdlib = {userbase}/Python{py_version_nodot}
+purelib = {userbase}/Python{py_version_nodot}/site-packages
+platlib = {userbase}/Python{py_version_nodot}/site-packages
+include = {userbase}/Python{py_version_nodot}/Include
+scripts = {userbase}/Scripts
+data = {userbase}
+
+[posix_user]
+stdlib = {userbase}/lib/python{py_version_short}
+platstdlib = {userbase}/lib/python{py_version_short}
+purelib = {userbase}/lib/python{py_version_short}/site-packages
+platlib = {userbase}/lib/python{py_version_short}/site-packages
+include = {userbase}/include/python{py_version_short}
+scripts = {userbase}/bin
+data = {userbase}
+
+[osx_framework_user]
+stdlib = {userbase}/lib/python
+platstdlib = {userbase}/lib/python
+purelib = {userbase}/lib/python/site-packages
+platlib = {userbase}/lib/python/site-packages
+include = {userbase}/include
+scripts = {userbase}/bin
+data = {userbase}
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py
new file mode 100644
index 0000000000000000000000000000000000000000..1df3aba144c874384f6768427cd38d26028e27dc
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py
@@ -0,0 +1,788 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""Access to Python's configuration information."""
+
+import codecs
+import os
+import re
+import sys
+from os.path import pardir, realpath
+try:
+    import configparser
+except ImportError:
+    import ConfigParser as configparser
+
+
+__all__ = [
+    'get_config_h_filename',
+    'get_config_var',
+    'get_config_vars',
+    'get_makefile_filename',
+    'get_path',
+    'get_path_names',
+    'get_paths',
+    'get_platform',
+    'get_python_version',
+    'get_scheme_names',
+    'parse_config_h',
+]
+
+
+def _safe_realpath(path):
+    try:
+        return realpath(path)
+    except OSError:
+        return path
+
+
+if sys.executable:
+    _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
+else:
+    # sys.executable can be empty if argv[0] has been changed and Python is
+    # unable to retrieve the real program name
+    _PROJECT_BASE = _safe_realpath(os.getcwd())
+
+if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
+    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
+# PC/VS7.1
+if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
+    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
+# PC/AMD64
+if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
+    _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
+
+
+def is_python_build():
+    for fn in ("Setup.dist", "Setup.local"):
+        if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
+            return True
+    return False
+
+_PYTHON_BUILD = is_python_build()
+
+_cfg_read = False
+
+def _ensure_cfg_read():
+    global _cfg_read
+    if not _cfg_read:
+        from ..resources import finder
+        backport_package = __name__.rsplit('.', 1)[0]
+        _finder = finder(backport_package)
+        _cfgfile = _finder.find('sysconfig.cfg')
+        assert _cfgfile, 'sysconfig.cfg not found'
+        with _cfgfile.as_stream() as s:
+            _SCHEMES.readfp(s)
+        if _PYTHON_BUILD:
+            for scheme in ('posix_prefix', 'posix_home'):
+                _SCHEMES.set(scheme, 'include', '{srcdir}/Include')
+                _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
+
+        _cfg_read = True
+
+
+_SCHEMES = configparser.RawConfigParser()
+_VAR_REPL = re.compile(r'\{([^{]*?)\}')
+
+def _expand_globals(config):
+    _ensure_cfg_read()
+    if config.has_section('globals'):
+        globals = config.items('globals')
+    else:
+        globals = tuple()
+
+    sections = config.sections()
+    for section in sections:
+        if section == 'globals':
+            continue
+        for option, value in globals:
+            if config.has_option(section, option):
+                continue
+            config.set(section, option, value)
+    config.remove_section('globals')
+
+    # now expanding local variables defined in the cfg file
+    #
+    for section in config.sections():
+        variables = dict(config.items(section))
+
+        def _replacer(matchobj):
+            name = matchobj.group(1)
+            if name in variables:
+                return variables[name]
+            return matchobj.group(0)
+
+        for option, value in config.items(section):
+            config.set(section, option, _VAR_REPL.sub(_replacer, value))
+
+#_expand_globals(_SCHEMES)
+
+# sys.version's format is an implementation detail of CPython, so the
+# short forms are derived from sys.version_info rather than by slicing
+# the string (which breaks for two-digit minor versions such as 3.10).
+_PY_VERSION = sys.version.split()[0]
+_PY_VERSION_SHORT = '%d.%d' % sys.version_info[:2]
+_PY_VERSION_SHORT_NO_DOT = '%d%d' % sys.version_info[:2]
+_PREFIX = os.path.normpath(sys.prefix)
+_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
+_CONFIG_VARS = None
+_USER_BASE = None
+
+
+def _subst_vars(path, local_vars):
+    """In the string `path`, replace tokens like {some.thing} with the
+    corresponding value from the map `local_vars`.
+
+    If there is no corresponding value, leave the token unchanged.
+    """
+    def _replacer(matchobj):
+        name = matchobj.group(1)
+        if name in local_vars:
+            return local_vars[name]
+        elif name in os.environ:
+            return os.environ[name]
+        return matchobj.group(0)
+    return _VAR_REPL.sub(_replacer, path)
+
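+# Quick illustration (values hypothetical) of the {token} replacement
+# done by _subst_vars: known names are substituted, unknown ones are
+# left in place.
+def _example_subst_vars():
+    template = '{userbase}/lib/python{py_version_short}/{unknown}'
+    values = {'userbase': '/home/user/.local', 'py_version_short': '2.7'}
+    # -> '/home/user/.local/lib/python2.7/{unknown}'
+    return _subst_vars(template, values)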
+
+def _extend_dict(target_dict, other_dict):
+    target_keys = target_dict.keys()
+    for key, value in other_dict.items():
+        if key in target_keys:
+            continue
+        target_dict[key] = value
+
+
+def _expand_vars(scheme, vars):
+    res = {}
+    if vars is None:
+        vars = {}
+    _extend_dict(vars, get_config_vars())
+
+    for key, value in _SCHEMES.items(scheme):
+        if os.name in ('posix', 'nt'):
+            value = os.path.expanduser(value)
+        res[key] = os.path.normpath(_subst_vars(value, vars))
+    return res
+
+
+def format_value(value, vars):
+    def _replacer(matchobj):
+        name = matchobj.group(1)
+        if name in vars:
+            return vars[name]
+        return matchobj.group(0)
+    return _VAR_REPL.sub(_replacer, value)
+
+
+def _get_default_scheme():
+    if os.name == 'posix':
+        # the default scheme for posix is posix_prefix
+        return 'posix_prefix'
+    return os.name
+
+
+def _getuserbase():
+    env_base = os.environ.get("PYTHONUSERBASE", None)
+
+    def joinuser(*args):
+        return os.path.expanduser(os.path.join(*args))
+
+    # what about 'os2emx', 'riscos' ?
+    if os.name == "nt":
+        base = os.environ.get("APPDATA") or "~"
+        if env_base:
+            return env_base
+        else:
+            return joinuser(base, "Python")
+
+    if sys.platform == "darwin":
+        framework = get_config_var("PYTHONFRAMEWORK")
+        if framework:
+            if env_base:
+                return env_base
+            else:
+                return joinuser("~", "Library", framework, "%d.%d" %
+                                sys.version_info[:2])
+
+    if env_base:
+        return env_base
+    else:
+        return joinuser("~", ".local")
+
+
+def _parse_makefile(filename, vars=None):
+    """Parse a Makefile-style file.
+
+    A dictionary containing name/value pairs is returned.  If an
+    optional dictionary is passed in as the second argument, it is
+    used instead of a new dictionary.
+    """
+    # Regexes needed for parsing Makefile (and similar syntaxes,
+    # like old-style Setup files).
+    _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
+    _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
+    _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
+
+    if vars is None:
+        vars = {}
+    done = {}
+    notdone = {}
+
+    with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
+        lines = f.readlines()
+
+    for line in lines:
+        if line.startswith('#') or line.strip() == '':
+            continue
+        m = _variable_rx.match(line)
+        if m:
+            n, v = m.group(1, 2)
+            v = v.strip()
+            # `$$' is a literal `$' in make
+            tmpv = v.replace('$$', '')
+
+            if "$" in tmpv:
+                notdone[n] = v
+            else:
+                try:
+                    v = int(v)
+                except ValueError:
+                    # insert literal `$'
+                    done[n] = v.replace('$$', '$')
+                else:
+                    done[n] = v
+
+    # do variable interpolation here
+    variables = list(notdone.keys())
+
+    # Variables with a 'PY_' prefix in the makefile. These need to
+    # be made available without that prefix through sysconfig.
+    # Special care is needed to ensure that variable expansion works, even
+    # if the expansion uses the name without a prefix.
+    renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
+
+    while len(variables) > 0:
+        for name in tuple(variables):
+            value = notdone[name]
+            m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
+            if m is not None:
+                n = m.group(1)
+                found = True
+                if n in done:
+                    item = str(done[n])
+                elif n in notdone:
+                    # get it on a subsequent round
+                    found = False
+                elif n in os.environ:
+                    # do it like make: fall back to environment
+                    item = os.environ[n]
+
+                elif n in renamed_variables:
+                    if (name.startswith('PY_') and
+                        name[3:] in renamed_variables):
+                        item = ""
+
+                    elif 'PY_' + n in notdone:
+                        found = False
+
+                    else:
+                        item = str(done['PY_' + n])
+
+                else:
+                    done[n] = item = ""
+
+                if found:
+                    after = value[m.end():]
+                    value = value[:m.start()] + item + after
+                    if "$" in after:
+                        notdone[name] = value
+                    else:
+                        try:
+                            value = int(value)
+                        except ValueError:
+                            done[name] = value.strip()
+                        else:
+                            done[name] = value
+                        variables.remove(name)
+
+                        if (name.startswith('PY_') and
+                            name[3:] in renamed_variables):
+
+                            name = name[3:]
+                            if name not in done:
+                                done[name] = value
+
+            else:
+                # bogus variable reference (e.g. "prefix=$/opt/python");
+                # just drop it since we can't deal
+                done[name] = value
+                variables.remove(name)
+
+    # strip spurious spaces
+    for k, v in done.items():
+        if isinstance(v, str):
+            done[k] = v.strip()
+
+    # save the results in the global dictionary
+    vars.update(done)
+    return vars
+
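+# Small sketch (file contents hypothetical) of the interpolation done by
+# _parse_makefile: a '$(prefix)' reference in one assignment is resolved
+# from another assignment in the same file.
+def _example_parse_makefile(dirname):
+    fn = os.path.join(dirname, 'Makefile')
+    with open(fn, 'w') as f:
+        f.write('prefix=/usr\nbindir=$(prefix)/bin\n')
+    # -> {'prefix': '/usr', 'bindir': '/usr/bin'}
+    return _parse_makefile(fn)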
+
+def get_makefile_filename():
+    """Return the path of the Makefile."""
+    if _PYTHON_BUILD:
+        return os.path.join(_PROJECT_BASE, "Makefile")
+    if hasattr(sys, 'abiflags'):
+        config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
+    else:
+        config_dir_name = 'config'
+    return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
+
+
+def _init_posix(vars):
+    """Initialize the module as appropriate for POSIX systems."""
+    # load the installed Makefile:
+    makefile = get_makefile_filename()
+    try:
+        _parse_makefile(makefile, vars)
+    except IOError as e:
+        msg = "invalid Python installation: unable to open %s" % makefile
+        if hasattr(e, "strerror"):
+            msg = msg + " (%s)" % e.strerror
+        raise IOError(msg)
+    # load the installed pyconfig.h:
+    config_h = get_config_h_filename()
+    try:
+        with open(config_h) as f:
+            parse_config_h(f, vars)
+    except IOError as e:
+        msg = "invalid Python installation: unable to open %s" % config_h
+        if hasattr(e, "strerror"):
+            msg = msg + " (%s)" % e.strerror
+        raise IOError(msg)
+    # On AIX, there are wrong paths to the linker scripts in the Makefile
+    # -- these paths are relative to the Python source, but when installed
+    # the scripts are in another directory.
+    if _PYTHON_BUILD:
+        vars['LDSHARED'] = vars['BLDSHARED']
+
+
+def _init_non_posix(vars):
+    """Initialize the module as appropriate for NT"""
+    # set basic install directories
+    vars['LIBDEST'] = get_path('stdlib')
+    vars['BINLIBDEST'] = get_path('platstdlib')
+    vars['INCLUDEPY'] = get_path('include')
+    vars['SO'] = '.pyd'
+    vars['EXE'] = '.exe'
+    vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
+    vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
+
+#
+# public APIs
+#
+
+
+def parse_config_h(fp, vars=None):
+    """Parse a config.h-style file.
+
+    A dictionary containing name/value pairs is returned.  If an
+    optional dictionary is passed in as the second argument, it is
+    used instead of a new dictionary.
+    """
+    if vars is None:
+        vars = {}
+    define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
+    undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
+
+    while True:
+        line = fp.readline()
+        if not line:
+            break
+        m = define_rx.match(line)
+        if m:
+            n, v = m.group(1, 2)
+            try:
+                v = int(v)
+            except ValueError:
+                pass
+            vars[n] = v
+        else:
+            m = undef_rx.match(line)
+            if m:
+                vars[m.group(1)] = 0
+    return vars
+
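+# Minimal sketch (header text hypothetical): '#define' lines become dict
+# entries and commented-out '#undef' lines become 0.
+def _example_parse_config_h():
+    import io
+    src = u'#define HAVE_FORK 1\n/* #undef HAVE_SPAWN */\n'
+    # -> {'HAVE_FORK': 1, 'HAVE_SPAWN': 0}
+    return parse_config_h(io.StringIO(src))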
+
+def get_config_h_filename():
+    """Return the path of pyconfig.h."""
+    if _PYTHON_BUILD:
+        if os.name == "nt":
+            inc_dir = os.path.join(_PROJECT_BASE, "PC")
+        else:
+            inc_dir = _PROJECT_BASE
+    else:
+        inc_dir = get_path('platinclude')
+    return os.path.join(inc_dir, 'pyconfig.h')
+
+
+def get_scheme_names():
+    """Return a tuple containing the schemes names."""
+    return tuple(sorted(_SCHEMES.sections()))
+
+
+def get_path_names():
+    """Return a tuple containing the paths names."""
+    # xxx see if we want a static list
+    return _SCHEMES.options('posix_prefix')
+
+
+def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
+    """Return a mapping containing an install scheme.
+
+    ``scheme`` is the install scheme name. If not provided, it will
+    return the default scheme for the current platform.
+    """
+    _ensure_cfg_read()
+    if expand:
+        return _expand_vars(scheme, vars)
+    else:
+        return dict(_SCHEMES.items(scheme))
+
+
+def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
+    """Return a path corresponding to the scheme.
+
+    ``scheme`` is the install scheme name.
+    """
+    return get_paths(scheme, vars, expand)[name]
+
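+# Usage sketch: fetch the whole mapping for the default scheme, or a
+# single location from an explicit scheme (values vary per platform).
+def _example_paths():
+    paths = get_paths()
+    return paths['purelib'], get_path('purelib', 'posix_prefix')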
+
+def get_config_vars(*args):
+    """With no arguments, return a dictionary of all configuration
+    variables relevant for the current platform.
+
+    On Unix, this means every variable defined in Python's installed Makefile;
+    On Windows and Mac OS it's a much smaller set.
+
+    With arguments, return a list of values that result from looking up
+    each argument in the configuration variable dictionary.
+    """
+    global _CONFIG_VARS
+    if _CONFIG_VARS is None:
+        _CONFIG_VARS = {}
+        # Normalized versions of prefix and exec_prefix are handy to have;
+        # in fact, these are the standard versions used most places in the
+        # distutils2 module.
+        _CONFIG_VARS['prefix'] = _PREFIX
+        _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
+        _CONFIG_VARS['py_version'] = _PY_VERSION
+        _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
+        _CONFIG_VARS['py_version_nodot'] = _PY_VERSION_SHORT_NO_DOT
+        _CONFIG_VARS['base'] = _PREFIX
+        _CONFIG_VARS['platbase'] = _EXEC_PREFIX
+        _CONFIG_VARS['projectbase'] = _PROJECT_BASE
+        try:
+            _CONFIG_VARS['abiflags'] = sys.abiflags
+        except AttributeError:
+            # sys.abiflags may not be defined on all platforms.
+            _CONFIG_VARS['abiflags'] = ''
+
+        if os.name in ('nt', 'os2'):
+            _init_non_posix(_CONFIG_VARS)
+        if os.name == 'posix':
+            _init_posix(_CONFIG_VARS)
+        # Setting 'userbase' is done below the call to the
+        # init function to enable using 'get_config_var' in
+        # the init-function.
+        if sys.version_info >= (2, 6):
+            _CONFIG_VARS['userbase'] = _getuserbase()
+
+        if 'srcdir' not in _CONFIG_VARS:
+            _CONFIG_VARS['srcdir'] = _PROJECT_BASE
+        else:
+            _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])
+
+        # Convert srcdir into an absolute path if it appears necessary.
+        # Normally it is relative to the build directory.  However, during
+        # testing, for example, we might be running a non-installed python
+        # from a different directory.
+        if _PYTHON_BUILD and os.name == "posix":
+            base = _PROJECT_BASE
+            try:
+                cwd = os.getcwd()
+            except OSError:
+                cwd = None
+            if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
+                base != cwd):
+                # srcdir is relative and we are not in the same directory
+                # as the executable. Assume executable is in the build
+                # directory and make srcdir absolute.
+                srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
+                _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
+
+        if sys.platform == 'darwin':
+            kernel_version = os.uname()[2]  # Kernel version (8.4.3)
+            major_version = int(kernel_version.split('.')[0])
+
+            if major_version < 8:
+                # On Mac OS X before 10.4, check if -arch and -isysroot
+                # are in CFLAGS or LDFLAGS and remove them if they are.
+                # This is needed when building extensions on a 10.3 system
+                # using a universal build of python.
+                for key in ('LDFLAGS', 'BASECFLAGS',
+                        # a number of derived variables. These need to be
+                        # patched up as well.
+                        'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
+                    flags = _CONFIG_VARS[key]
+                    flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
+                    flags = re.sub('-isysroot [^ \t]*', ' ', flags)
+                    _CONFIG_VARS[key] = flags
+            else:
+                # Allow the user to override the architecture flags using
+                # an environment variable.
+                # NOTE: This name was introduced by Apple in OSX 10.5 and
+                # is used by several scripting languages distributed with
+                # that OS release.
+                if 'ARCHFLAGS' in os.environ:
+                    arch = os.environ['ARCHFLAGS']
+                    for key in ('LDFLAGS', 'BASECFLAGS',
+                        # a number of derived variables. These need to be
+                        # patched up as well.
+                        'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
+
+                        flags = _CONFIG_VARS[key]
+                        flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
+                        flags = flags + ' ' + arch
+                        _CONFIG_VARS[key] = flags
+
+                # If we're on OSX 10.5 or later and the user tries to
+                # compile an extension using an SDK that is not present
+                # on the current machine it is better to not use an SDK
+                # than to fail.
+                #
+                # The major use case for this is users using a Python.org
+                # binary installer on OSX 10.6: that installer uses
+                # the 10.4u SDK, but that SDK is not installed by default
+                # when you install Xcode.
+                #
+                CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
+                m = re.search(r'-isysroot\s+(\S+)', CFLAGS)
+                if m is not None:
+                    sdk = m.group(1)
+                    if not os.path.exists(sdk):
+                        for key in ('LDFLAGS', 'BASECFLAGS',
+                             # a number of derived variables. These need to be
+                             # patched up as well.
+                            'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
+
+                            flags = _CONFIG_VARS[key]
+                            flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags)
+                            _CONFIG_VARS[key] = flags
+
+    if args:
+        vals = []
+        for name in args:
+            vals.append(_CONFIG_VARS.get(name))
+        return vals
+    else:
+        return _CONFIG_VARS
+
+
+def get_config_var(name):
+    """Return the value of a single variable using the dictionary returned by
+    'get_config_vars()'.
+
+    Equivalent to get_config_vars().get(name)
+    """
+    return get_config_vars().get(name)
+
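+# Usage sketch: variables can be fetched one at a time or several at
+# once; unknown names come back as None rather than raising.
+def _example_config_vars():
+    prefix = get_config_var('prefix')
+    py_ver, abiflags = get_config_vars('py_version_short', 'abiflags')
+    return prefix, py_ver, abiflags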
+
+def get_platform():
+    """Return a string that identifies the current platform.
+
+    This is used mainly to distinguish platform-specific build directories and
+    platform-specific built distributions.  Typically includes the OS name
+    and version and the architecture (as supplied by 'os.uname()'),
+    although the exact information included depends on the OS; eg. for IRIX
+    the architecture isn't particularly important (IRIX only runs on SGI
+    hardware), but for Linux the kernel version isn't particularly
+    important.
+
+    Examples of returned values:
+       linux-i586
+       linux-alpha (?)
+       solaris-2.6-sun4u
+       irix-5.3
+       irix64-6.2
+
+    Windows will return one of:
+       win-amd64 (64bit Windows on AMD64, aka x86_64, Intel64, EM64T, etc.)
+       win-ia64 (64bit Windows on Itanium)
+       win32 (all others - specifically, sys.platform is returned)
+
+    For other non-POSIX platforms, currently just returns 'sys.platform'.
+    """
+    if os.name == 'nt':
+        # sniff sys.version for architecture.
+        prefix = " bit ("
+        i = sys.version.find(prefix)
+        if i == -1:
+            return sys.platform
+        j = sys.version.find(")", i)
+        look = sys.version[i+len(prefix):j].lower()
+        if look == 'amd64':
+            return 'win-amd64'
+        if look == 'itanium':
+            return 'win-ia64'
+        return sys.platform
+
+    if os.name != "posix" or not hasattr(os, 'uname'):
+        # XXX what about the architecture? NT is Intel or Alpha,
+        # Mac OS is M68k or PPC, etc.
+        return sys.platform
+
+    # Try to distinguish various flavours of Unix
+    osname, host, release, version, machine = os.uname()
+
+    # Convert the OS name to lowercase, remove '/' characters
+    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
+    osname = osname.lower().replace('/', '')
+    machine = machine.replace(' ', '_')
+    machine = machine.replace('/', '-')
+
+    if osname[:5] == "linux":
+        # At least on Linux/Intel, 'machine' is the processor --
+        # i386, etc.
+        # XXX what about Alpha, SPARC, etc?
+        return  "%s-%s" % (osname, machine)
+    elif osname[:5] == "sunos":
+        if release[0] >= "5":           # SunOS 5 == Solaris 2
+            osname = "solaris"
+            release = "%d.%s" % (int(release[0]) - 3, release[2:])
+        # fall through to standard osname-release-machine representation
+    elif osname[:4] == "irix":              # could be "irix64"!
+        return "%s-%s" % (osname, release)
+    elif osname[:3] == "aix":
+        return "%s-%s.%s" % (osname, version, release)
+    elif osname[:6] == "cygwin":
+        osname = "cygwin"
+        rel_re = re.compile(r'[\d.]+')
+        m = rel_re.match(release)
+        if m:
+            release = m.group()
+    elif osname[:6] == "darwin":
+        #
+        # For our purposes, we'll assume that the system version from
+        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
+        # to. This makes the compatibility story a bit more sane because the
+        # machine is going to compile and link as if it were
+        # MACOSX_DEPLOYMENT_TARGET.
+        cfgvars = get_config_vars()
+        macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
+
+        if True:
+            # Always calculate the release of the running machine,
+            # needed to determine if we can build fat binaries or not.
+
+            macrelease = macver
+            # Get the system version. Reading this plist is a documented
+            # way to get the system version (see the documentation for
+            # the Gestalt Manager)
+            try:
+                f = open('/System/Library/CoreServices/SystemVersion.plist')
+            except IOError:
+                # We're on a plain darwin box, fall back to the default
+                # behaviour.
+                pass
+            else:
+                try:
+                    m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
+                                  r'<string>(.*?)</string>', f.read())
+                finally:
+                    f.close()
+                if m is not None:
+                    macrelease = '.'.join(m.group(1).split('.')[:2])
+                # else: fall back to the default behaviour
+
+        if not macver:
+            macver = macrelease
+
+        if macver:
+            release = macver
+            osname = "macosx"
+
+            if ((macrelease + '.') >= '10.4.' and
+                '-arch' in get_config_vars().get('CFLAGS', '').strip()):
+                # The universal build will build fat binaries, but not on
+                # systems before 10.4
+                #
+                # Try to detect 4-way universal builds, those have machine-type
+                # 'universal' instead of 'fat'.
+
+                machine = 'fat'
+                cflags = get_config_vars().get('CFLAGS')
+
+                archs = re.findall(r'-arch\s+(\S+)', cflags)
+                archs = tuple(sorted(set(archs)))
+
+                if len(archs) == 1:
+                    machine = archs[0]
+                elif archs == ('i386', 'ppc'):
+                    machine = 'fat'
+                elif archs == ('i386', 'x86_64'):
+                    machine = 'intel'
+                elif archs == ('i386', 'ppc', 'x86_64'):
+                    machine = 'fat3'
+                elif archs == ('ppc64', 'x86_64'):
+                    machine = 'fat64'
+                elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
+                    machine = 'universal'
+                else:
+                    raise ValueError(
+                       "Don't know machine value for archs=%r" % (archs,))
+
+            elif machine == 'i386':
+                # On OSX the machine type returned by uname is always the
+                # 32-bit variant, even if the executable architecture is
+                # the 64-bit variant
+                if sys.maxsize >= 2**32:
+                    machine = 'x86_64'
+
+            elif machine in ('PowerPC', 'Power_Macintosh'):
+                # Pick a sane name for the PPC architecture.
+                # See 'i386' case
+                if sys.maxsize >= 2**32:
+                    machine = 'ppc64'
+                else:
+                    machine = 'ppc'
+
+    return "%s-%s-%s" % (osname, release, machine)
+
+
+def get_python_version():
+    return _PY_VERSION_SHORT
+
+
+def _print_dict(title, data):
+    for index, (key, value) in enumerate(sorted(data.items())):
+        if index == 0:
+            print('%s: ' % (title))
+        print('\t%s = "%s"' % (key, value))
+
+
+def _main():
+    """Display all information sysconfig detains."""
+    print('Platform: "%s"' % get_platform())
+    print('Python version: "%s"' % get_python_version())
+    print('Current installation scheme: "%s"' % _get_default_scheme())
+    print()
+    _print_dict('Paths', get_paths())
+    print()
+    _print_dict('Variables', get_config_vars())
+
+
+if __name__ == '__main__':
+    _main()
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..d66d8566374be661085213468bce338c6a747702
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/tarfile.py
@@ -0,0 +1,2607 @@
+#-------------------------------------------------------------------
+# tarfile.py
+#-------------------------------------------------------------------
+# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>
+# All rights reserved.
+#
+# Permission  is  hereby granted,  free  of charge,  to  any person
+# obtaining a  copy of  this software  and associated documentation
+# files  (the  "Software"),  to   deal  in  the  Software   without
+# restriction,  including  without limitation  the  rights to  use,
+# copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies  of  the  Software,  and to  permit  persons  to  whom the
+# Software  is  furnished  to  do  so,  subject  to  the  following
+# conditions:
+#
+# The above copyright  notice and this  permission notice shall  be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS  IS", WITHOUT WARRANTY OF ANY  KIND,
+# EXPRESS OR IMPLIED, INCLUDING  BUT NOT LIMITED TO  THE WARRANTIES
+# OF  MERCHANTABILITY,  FITNESS   FOR  A  PARTICULAR   PURPOSE  AND
+# NONINFRINGEMENT.  IN  NO  EVENT SHALL  THE  AUTHORS  OR COPYRIGHT
+# HOLDERS  BE LIABLE  FOR ANY  CLAIM, DAMAGES  OR OTHER  LIABILITY,
+# WHETHER  IN AN  ACTION OF  CONTRACT, TORT  OR OTHERWISE,  ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+from __future__ import print_function
+
+"""Read from and write to tar format archives.
+"""
+
+__version__ = "$Revision$"
+
+version     = "0.9.0"
+__author__  = "Lars Gust\u00e4bel (lars@gustaebel.de)"
+__date__    = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
+__cvsid__   = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
+__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
+
+#---------
+# Imports
+#---------
+import sys
+import os
+import stat
+import errno
+import time
+import struct
+import copy
+import re
+
+try:
+    import grp, pwd
+except ImportError:
+    grp = pwd = None
+
+# os.symlink on Windows prior to 6.0 raises NotImplementedError
+symlink_exception = (AttributeError, NotImplementedError)
+try:
+    # WindowsError (1314) will be raised if the caller does not hold the
+    # SeCreateSymbolicLinkPrivilege privilege
+    symlink_exception += (WindowsError,)
+except NameError:
+    pass
+
+# from tarfile import *
+__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
+
+if sys.version_info[0] < 3:
+    import __builtin__ as builtins
+else:
+    import builtins
+
+_open = builtins.open   # Since 'open' is TarFile.open
+
+#---------------------------------------------------------
+# tar constants
+#---------------------------------------------------------
+NUL = b"\0"                     # the null character
+BLOCKSIZE = 512                 # length of processing blocks
+RECORDSIZE = BLOCKSIZE * 20     # length of records
+GNU_MAGIC = b"ustar  \0"        # magic gnu tar string
+POSIX_MAGIC = b"ustar\x0000"    # magic posix tar string
+
+LENGTH_NAME = 100               # maximum length of a filename
+LENGTH_LINK = 100               # maximum length of a linkname
+LENGTH_PREFIX = 155             # maximum length of the prefix field
+
+REGTYPE = b"0"                  # regular file
+AREGTYPE = b"\0"                # regular file
+LNKTYPE = b"1"                  # link (inside tarfile)
+SYMTYPE = b"2"                  # symbolic link
+CHRTYPE = b"3"                  # character special device
+BLKTYPE = b"4"                  # block special device
+DIRTYPE = b"5"                  # directory
+FIFOTYPE = b"6"                 # fifo special device
+CONTTYPE = b"7"                 # contiguous file
+
+GNUTYPE_LONGNAME = b"L"         # GNU tar longname
+GNUTYPE_LONGLINK = b"K"         # GNU tar longlink
+GNUTYPE_SPARSE = b"S"           # GNU tar sparse file
+
+XHDTYPE = b"x"                  # POSIX.1-2001 extended header
+XGLTYPE = b"g"                  # POSIX.1-2001 global header
+SOLARIS_XHDTYPE = b"X"          # Solaris extended header
+
+USTAR_FORMAT = 0                # POSIX.1-1988 (ustar) format
+GNU_FORMAT = 1                  # GNU tar format
+PAX_FORMAT = 2                  # POSIX.1-2001 (pax) format
+DEFAULT_FORMAT = GNU_FORMAT
+
+#---------------------------------------------------------
+# tarfile constants
+#---------------------------------------------------------
+# File types that tarfile supports:
+SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
+                   SYMTYPE, DIRTYPE, FIFOTYPE,
+                   CONTTYPE, CHRTYPE, BLKTYPE,
+                   GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
+                   GNUTYPE_SPARSE)
+
+# File types that will be treated as a regular file.
+REGULAR_TYPES = (REGTYPE, AREGTYPE,
+                 CONTTYPE, GNUTYPE_SPARSE)
+
+# File types that are part of the GNU tar format.
+GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
+             GNUTYPE_SPARSE)
+
+# Fields from a pax header that override a TarInfo attribute.
+PAX_FIELDS = ("path", "linkpath", "size", "mtime",
+              "uid", "gid", "uname", "gname")
+
+# Fields from a pax header that are affected by hdrcharset.
+PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname"))
+
+# Fields in a pax header that are numbers, all other fields
+# are treated as strings.
+PAX_NUMBER_FIELDS = {
+    "atime": float,
+    "ctime": float,
+    "mtime": float,
+    "uid": int,
+    "gid": int,
+    "size": int
+}
+
+#---------------------------------------------------------
+# Bits used in the mode field, values in octal.
+#---------------------------------------------------------
+S_IFLNK = 0o120000        # symbolic link
+S_IFREG = 0o100000        # regular file
+S_IFBLK = 0o060000        # block device
+S_IFDIR = 0o040000        # directory
+S_IFCHR = 0o020000        # character device
+S_IFIFO = 0o010000        # fifo
+
+TSUID   = 0o4000          # set UID on execution
+TSGID   = 0o2000          # set GID on execution
+TSVTX   = 0o1000          # reserved
+
+TUREAD  = 0o400           # read by owner
+TUWRITE = 0o200           # write by owner
+TUEXEC  = 0o100           # execute/search by owner
+TGREAD  = 0o040           # read by group
+TGWRITE = 0o020           # write by group
+TGEXEC  = 0o010           # execute/search by group
+TOREAD  = 0o004           # read by other
+TOWRITE = 0o002           # write by other
+TOEXEC  = 0o001           # execute/search by other
+
+#---------------------------------------------------------
+# initialization
+#---------------------------------------------------------
+if os.name in ("nt", "ce"):
+    ENCODING = "utf-8"
+else:
+    ENCODING = sys.getfilesystemencoding()
+
+#---------------------------------------------------------
+# Some useful functions
+#---------------------------------------------------------
+
+def stn(s, length, encoding, errors):
+    """Convert a string to a null-terminated bytes object.
+    """
+    s = s.encode(encoding, errors)
+    return s[:length] + (length - len(s)) * NUL
+
+def nts(s, encoding, errors):
+    """Convert a null-terminated bytes object to a string.
+    """
+    p = s.find(b"\0")
+    if p != -1:
+        s = s[:p]
+    return s.decode(encoding, errors)
+
+def nti(s):
+    """Convert a number field to a python number.
+    """
+    # There are two possible encodings for a number field, see
+    # itn() below.
+    # Compare via a one-byte slice and iterate over a bytearray so the
+    # code behaves the same for Python 2 str and Python 3 bytes input.
+    if s[0:1] != b"\x80":
+        try:
+            n = int(nts(s, "ascii", "strict") or "0", 8)
+        except ValueError:
+            raise InvalidHeaderError("invalid header")
+    else:
+        n = 0
+        for ch in bytearray(s[1:]):
+            n <<= 8
+            n += ch
+    return n
+
+def itn(n, digits=8, format=DEFAULT_FORMAT):
+    """Convert a python number to a number field.
+    """
+    # POSIX 1003.1-1988 requires numbers to be encoded as a string of
+    # octal digits followed by a null-byte, this allows values up to
+    # (8**(digits-1))-1. GNU tar allows storing numbers greater than
+    # that if necessary. A leading 0o200 byte indicates this particular
+    # encoding, the following digits-1 bytes are a big-endian
+    # representation. This allows values up to (256**(digits-1))-1.
+    if 0 <= n < 8 ** (digits - 1):
+        s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
+    else:
+        if format != GNU_FORMAT or n >= 256 ** (digits - 1):
+            raise ValueError("overflow in number field")
+
+        if n < 0:
+            # XXX We mimic GNU tar's behaviour with negative numbers,
+            # this could raise OverflowError.
+            n = struct.unpack("L", struct.pack("l", n))[0]
+
+        s = bytearray()
+        for i in range(digits - 1):
+            s.insert(0, n & 0o377)
+            n >>= 8
+        s.insert(0, 0o200)
+    return s
+
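+# Worked example of the two encodings described above: 511 fits in a
+# 7-digit octal field ('0000777' + NUL), and itn()/nti() invert each
+# other for such values. The helper is illustrative only.
+def _example_number_fields():
+    assert itn(511, digits=8) == b"0000777" + NUL
+    assert nti(itn(511)) == 511
+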
+def calc_chksums(buf):
+    """Calculate the checksum for a member's header by summing up all
+       characters except for the chksum field which is treated as if
+       it was filled with spaces. According to the GNU tar sources,
+       some tars (Sun and NeXT) calculate chksum with signed char,
+       which will be different if there are chars in the buffer with
+       the high bit set. So we calculate two checksums, unsigned and
+       signed.
+    """
+    unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
+    signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
+    return unsigned_chksum, signed_chksum
+
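+# Editor's note (editorial addition): frombuf() further below accepts either
+# checksum variant, mirroring this function:
+#   unsigned, signed = calc_chksums(buf)
+#   nti(buf[148:156]) in (unsigned, signed)   # True for a valid header
+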
+def copyfileobj(src, dst, length=None):
+    """Copy length bytes from fileobj src to fileobj dst.
+       If length is None, copy the entire content.
+    """
+    if length == 0:
+        return
+    if length is None:
+        while True:
+            buf = src.read(16*1024)
+            if not buf:
+                break
+            dst.write(buf)
+        return
+
+    BUFSIZE = 16 * 1024
+    blocks, remainder = divmod(length, BUFSIZE)
+    for b in range(blocks):
+        buf = src.read(BUFSIZE)
+        if len(buf) < BUFSIZE:
+            raise IOError("end of file reached")
+        dst.write(buf)
+
+    if remainder != 0:
+        buf = src.read(remainder)
+        if len(buf) < remainder:
+            raise IOError("end of file reached")
+        dst.write(buf)
+    return
+
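+# Editor's note -- illustrative usage (editorial addition; `src' and `dst'
+# are hypothetical file objects):
+#   with open("member.bin", "wb") as dst:
+#       copyfileobj(src, dst, 1024)   # raises IOError if src ends early
+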
+filemode_table = (
+    ((S_IFLNK,      "l"),
+     (S_IFREG,      "-"),
+     (S_IFBLK,      "b"),
+     (S_IFDIR,      "d"),
+     (S_IFCHR,      "c"),
+     (S_IFIFO,      "p")),
+
+    ((TUREAD,       "r"),),
+    ((TUWRITE,      "w"),),
+    ((TUEXEC|TSUID, "s"),
+     (TSUID,        "S"),
+     (TUEXEC,       "x")),
+
+    ((TGREAD,       "r"),),
+    ((TGWRITE,      "w"),),
+    ((TGEXEC|TSGID, "s"),
+     (TSGID,        "S"),
+     (TGEXEC,       "x")),
+
+    ((TOREAD,       "r"),),
+    ((TOWRITE,      "w"),),
+    ((TOEXEC|TSVTX, "t"),
+     (TSVTX,        "T"),
+     (TOEXEC,       "x"))
+)
+
+def filemode(mode):
+    """Convert a file's mode to a string of the form
+       -rwxrwxrwx.
+       Used by TarFile.list()
+    """
+    perm = []
+    for table in filemode_table:
+        for bit, char in table:
+            if mode & bit == bit:
+                perm.append(char)
+                break
+        else:
+            perm.append("-")
+    return "".join(perm)
+
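+# Editor's note -- illustrative values (editorial addition):
+#   filemode(0o40755)  == "drwxr-xr-x"   # directory, rwxr-xr-x
+#   filemode(0o104755) == "-rwsr-xr-x"   # setuid regular file
+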
+class TarError(Exception):
+    """Base exception."""
+    pass
+class ExtractError(TarError):
+    """General exception for extract errors."""
+    pass
+class ReadError(TarError):
+    """Exception for unreadable tar archives."""
+    pass
+class CompressionError(TarError):
+    """Exception for unavailable compression methods."""
+    pass
+class StreamError(TarError):
+    """Exception for unsupported operations on stream-like TarFiles."""
+    pass
+class HeaderError(TarError):
+    """Base exception for header errors."""
+    pass
+class EmptyHeaderError(HeaderError):
+    """Exception for empty headers."""
+    pass
+class TruncatedHeaderError(HeaderError):
+    """Exception for truncated headers."""
+    pass
+class EOFHeaderError(HeaderError):
+    """Exception for end of file headers."""
+    pass
+class InvalidHeaderError(HeaderError):
+    """Exception for invalid headers."""
+    pass
+class SubsequentHeaderError(HeaderError):
+    """Exception for missing and invalid extended headers."""
+    pass
+
+#---------------------------
+# internal stream interface
+#---------------------------
+class _LowLevelFile(object):
+    """Low-level file object. Supports reading and writing.
+       It is used instead of a regular file object for streaming
+       access.
+    """
+
+    def __init__(self, name, mode):
+        mode = {
+            "r": os.O_RDONLY,
+            "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
+        }[mode]
+        if hasattr(os, "O_BINARY"):
+            mode |= os.O_BINARY
+        self.fd = os.open(name, mode, 0o666)
+
+    def close(self):
+        os.close(self.fd)
+
+    def read(self, size):
+        return os.read(self.fd, size)
+
+    def write(self, s):
+        os.write(self.fd, s)
+
+class _Stream(object):
+    """Class that serves as an adapter between TarFile and
+       a stream-like object.  The stream-like object only
+       needs to have a read() or write() method and is accessed
+       blockwise.  Use of gzip or bzip2 compression is possible.
+       A stream-like object could be, for example, sys.stdin,
+       sys.stdout, a socket, or a tape device.
+
+       _Stream is intended to be used only internally.
+    """
+
+    def __init__(self, name, mode, comptype, fileobj, bufsize):
+        """Construct a _Stream object.
+        """
+        self._extfileobj = True
+        if fileobj is None:
+            fileobj = _LowLevelFile(name, mode)
+            self._extfileobj = False
+
+        if comptype == '*':
+            # Enable transparent compression detection for the
+            # stream interface
+            fileobj = _StreamProxy(fileobj)
+            comptype = fileobj.getcomptype()
+
+        self.name     = name or ""
+        self.mode     = mode
+        self.comptype = comptype
+        self.fileobj  = fileobj
+        self.bufsize  = bufsize
+        self.buf      = b""
+        self.pos      = 0
+        self.closed   = False
+
+        try:
+            if comptype == "gz":
+                try:
+                    import zlib
+                except ImportError:
+                    raise CompressionError("zlib module is not available")
+                self.zlib = zlib
+                self.crc = zlib.crc32(b"")
+                if mode == "r":
+                    self._init_read_gz()
+                else:
+                    self._init_write_gz()
+
+            if comptype == "bz2":
+                try:
+                    import bz2
+                except ImportError:
+                    raise CompressionError("bz2 module is not available")
+                if mode == "r":
+                    self.dbuf = b""
+                    self.cmp = bz2.BZ2Decompressor()
+                else:
+                    self.cmp = bz2.BZ2Compressor()
+        except:
+            if not self._extfileobj:
+                self.fileobj.close()
+            self.closed = True
+            raise
+
+    def __del__(self):
+        if hasattr(self, "closed") and not self.closed:
+            self.close()
+
+    def _init_write_gz(self):
+        """Initialize for writing with gzip compression.
+        """
+        self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
+                                            -self.zlib.MAX_WBITS,
+                                            self.zlib.DEF_MEM_LEVEL,
+                                            0)
+        timestamp = struct.pack("<L", int(time.time()))
+        self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
+        if self.name.endswith(".gz"):
+            self.name = self.name[:-3]
+        # RFC1952 says we must use ISO-8859-1 for the FNAME field.
+        self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
+
+    def write(self, s):
+        """Write string s to the stream.
+        """
+        if self.comptype == "gz":
+            self.crc = self.zlib.crc32(s, self.crc)
+        self.pos += len(s)
+        if self.comptype != "tar":
+            s = self.cmp.compress(s)
+        self.__write(s)
+
+    def __write(self, s):
+        """Write string s to the stream if a whole new block
+           is ready to be written.
+        """
+        self.buf += s
+        while len(self.buf) > self.bufsize:
+            self.fileobj.write(self.buf[:self.bufsize])
+            self.buf = self.buf[self.bufsize:]
+
+    def close(self):
+        """Close the _Stream object. No operation should be
+           done on it afterwards.
+        """
+        if self.closed:
+            return
+
+        if self.mode == "w" and self.comptype != "tar":
+            self.buf += self.cmp.flush()
+
+        if self.mode == "w" and self.buf:
+            self.fileobj.write(self.buf)
+            self.buf = b""
+            if self.comptype == "gz":
+                # The native zlib crc is an unsigned 32-bit integer, but
+                # the Python wrapper implicitly casts that to a signed C
+                # long.  So, on a 32-bit box self.crc may "look negative",
+                # while the same crc on a 64-bit box may "look positive".
+                # To avoid irksome warnings from the `struct` module, force
+                # it to look positive on all boxes.
+                self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
+                self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
+
+        if not self._extfileobj:
+            self.fileobj.close()
+
+        self.closed = True
+
+    def _init_read_gz(self):
+        """Initialize for reading a gzip compressed fileobj.
+        """
+        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
+        self.dbuf = b""
+
+        # taken from gzip.GzipFile with some alterations
+        if self.__read(2) != b"\037\213":
+            raise ReadError("not a gzip file")
+        if self.__read(1) != b"\010":
+            raise CompressionError("unsupported compression method")
+
+        flag = ord(self.__read(1))
+        self.__read(6)
+
+        if flag & 4:
+            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
+            self.read(xlen)
+        if flag & 8:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 16:
+            while True:
+                s = self.__read(1)
+                if not s or s == NUL:
+                    break
+        if flag & 2:
+            self.__read(2)
+
+    def tell(self):
+        """Return the stream's file pointer position.
+        """
+        return self.pos
+
+    def seek(self, pos=0):
+        """Set the stream's file pointer to pos. Negative seeking
+           is forbidden.
+        """
+        if pos - self.pos >= 0:
+            blocks, remainder = divmod(pos - self.pos, self.bufsize)
+            for i in range(blocks):
+                self.read(self.bufsize)
+            self.read(remainder)
+        else:
+            raise StreamError("seeking backwards is not allowed")
+        return self.pos
+
+    def read(self, size=None):
+        """Return the next size number of bytes from the stream.
+           If size is not defined, return all bytes of the stream
+           up to EOF.
+        """
+        if size is None:
+            t = []
+            while True:
+                buf = self._read(self.bufsize)
+                if not buf:
+                    break
+                t.append(buf)
+            buf = "".join(t)
+        else:
+            buf = self._read(size)
+        self.pos += len(buf)
+        return buf
+
+    def _read(self, size):
+        """Return size bytes from the stream.
+        """
+        if self.comptype == "tar":
+            return self.__read(size)
+
+        c = len(self.dbuf)
+        while c < size:
+            buf = self.__read(self.bufsize)
+            if not buf:
+                break
+            try:
+                buf = self.cmp.decompress(buf)
+            except IOError:
+                raise ReadError("invalid compressed data")
+            self.dbuf += buf
+            c += len(buf)
+        buf = self.dbuf[:size]
+        self.dbuf = self.dbuf[size:]
+        return buf
+
+    def __read(self, size):
+        """Return size bytes from stream. If internal buffer is empty,
+           read another block from the stream.
+        """
+        c = len(self.buf)
+        while c < size:
+            buf = self.fileobj.read(self.bufsize)
+            if not buf:
+                break
+            self.buf += buf
+            c += len(buf)
+        buf = self.buf[:size]
+        self.buf = self.buf[size:]
+        return buf
+# class _Stream
+
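+# Editor's note -- illustrative usage (editorial addition): _Stream backs the
+# "mode|comptype" stream variants of TarFile.open(), e.g. reading a gzipped
+# archive from a pipe, where seeking backwards is impossible:
+#   tar = TarFile.open(fileobj=sys.stdin.buffer, mode="r|gz")
+#   ...   # process members strictly in archive order
+#   tar.close()
+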
+class _StreamProxy(object):
+    """Small proxy class that enables transparent compression
+       detection for the Stream interface (mode 'r|*').
+    """
+
+    def __init__(self, fileobj):
+        self.fileobj = fileobj
+        self.buf = self.fileobj.read(BLOCKSIZE)
+
+    def read(self, size):
+        self.read = self.fileobj.read
+        return self.buf
+
+    def getcomptype(self):
+        if self.buf.startswith(b"\037\213\010"):
+            return "gz"
+        # bzip2 streams begin with "BZh", a compression-level digit and the
+        # block magic "1AY&SY"; the previous "BZh91" test matched level 9 only.
+        if self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY":
+            return "bz2"
+        return "tar"
+
+    def close(self):
+        self.fileobj.close()
+# class StreamProxy
+
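+# Editor's note (editorial addition): the detection above keys on magic
+# bytes -- gzip streams start with b"\x1f\x8b\x08", bzip2 streams with b"BZh"
+# plus a level digit; everything else is assumed to be uncompressed tar.
+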
+class _BZ2Proxy(object):
+    """Small proxy class that enables external file object
+       support for "r:bz2" and "w:bz2" modes. This is actually
+       a workaround for a limitation in bz2 module's BZ2File
+       class which (unlike gzip.GzipFile) has no support for
+       a file object argument.
+    """
+
+    blocksize = 16 * 1024
+
+    def __init__(self, fileobj, mode):
+        self.fileobj = fileobj
+        self.mode = mode
+        self.name = getattr(self.fileobj, "name", None)
+        self.init()
+
+    def init(self):
+        import bz2
+        self.pos = 0
+        if self.mode == "r":
+            self.bz2obj = bz2.BZ2Decompressor()
+            self.fileobj.seek(0)
+            self.buf = b""
+        else:
+            self.bz2obj = bz2.BZ2Compressor()
+
+    def read(self, size):
+        x = len(self.buf)
+        while x < size:
+            raw = self.fileobj.read(self.blocksize)
+            if not raw:
+                break
+            data = self.bz2obj.decompress(raw)
+            self.buf += data
+            x += len(data)
+
+        buf = self.buf[:size]
+        self.buf = self.buf[size:]
+        self.pos += len(buf)
+        return buf
+
+    def seek(self, pos):
+        if pos < self.pos:
+            self.init()
+        self.read(pos - self.pos)
+
+    def tell(self):
+        return self.pos
+
+    def write(self, data):
+        self.pos += len(data)
+        raw = self.bz2obj.compress(data)
+        self.fileobj.write(raw)
+
+    def close(self):
+        if self.mode == "w":
+            raw = self.bz2obj.flush()
+            self.fileobj.write(raw)
+# class _BZ2Proxy
+
+#------------------------
+# Extraction file object
+#------------------------
+class _FileInFile(object):
+    """A thin wrapper around an existing file object that
+       provides a part of its data as an individual file
+       object.
+    """
+
+    def __init__(self, fileobj, offset, size, blockinfo=None):
+        self.fileobj = fileobj
+        self.offset = offset
+        self.size = size
+        self.position = 0
+
+        if blockinfo is None:
+            blockinfo = [(0, size)]
+
+        # Construct a map with data and zero blocks.
+        self.map_index = 0
+        self.map = []
+        lastpos = 0
+        realpos = self.offset
+        for offset, size in blockinfo:
+            if offset > lastpos:
+                self.map.append((False, lastpos, offset, None))
+            self.map.append((True, offset, offset + size, realpos))
+            realpos += size
+            lastpos = offset + size
+        if lastpos < self.size:
+            self.map.append((False, lastpos, self.size, None))
+
+    def seekable(self):
+        if not hasattr(self.fileobj, "seekable"):
+            # XXX gzip.GzipFile and bz2.BZ2File
+            return True
+        return self.fileobj.seekable()
+
+    def tell(self):
+        """Return the current file position.
+        """
+        return self.position
+
+    def seek(self, position):
+        """Seek to a position in the file.
+        """
+        self.position = position
+
+    def read(self, size=None):
+        """Read data from the file.
+        """
+        if size is None:
+            size = self.size - self.position
+        else:
+            size = min(size, self.size - self.position)
+
+        buf = b""
+        while size > 0:
+            while True:
+                data, start, stop, offset = self.map[self.map_index]
+                if start <= self.position < stop:
+                    break
+                else:
+                    self.map_index += 1
+                    if self.map_index == len(self.map):
+                        self.map_index = 0
+            length = min(size, stop - self.position)
+            if data:
+                self.fileobj.seek(offset + (self.position - start))
+                buf += self.fileobj.read(length)
+            else:
+                buf += NUL * length
+            size -= length
+            self.position += length
+        return buf
+#class _FileInFile
+
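+# Editor's note -- worked example for the block map above (editorial
+# addition): a 10-byte member whose blockinfo is [(0, 4), (8, 2)] and whose
+# data starts at archive offset 1024 yields
+#   [(True, 0, 4, 1024), (False, 4, 8, None), (True, 8, 10, 1028)]
+# so reads that fall inside (4, 8) are synthesized as NUL bytes.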
+
+class ExFileObject(object):
+    """File-like object for reading an archive member.
+       Is returned by TarFile.extractfile().
+    """
+    blocksize = 1024
+
+    def __init__(self, tarfile, tarinfo):
+        self.fileobj = _FileInFile(tarfile.fileobj,
+                                   tarinfo.offset_data,
+                                   tarinfo.size,
+                                   tarinfo.sparse)
+        self.name = tarinfo.name
+        self.mode = "r"
+        self.closed = False
+        self.size = tarinfo.size
+
+        self.position = 0
+        self.buffer = b""
+
+    def readable(self):
+        return True
+
+    def writable(self):
+        return False
+
+    def seekable(self):
+        return self.fileobj.seekable()
+
+    def read(self, size=None):
+        """Read at most size bytes from the file. If size is not
+           present or None, read all data until EOF is reached.
+        """
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+
+        buf = b""
+        if self.buffer:
+            if size is None:
+                buf = self.buffer
+                self.buffer = b""
+            else:
+                buf = self.buffer[:size]
+                self.buffer = self.buffer[size:]
+
+        if size is None:
+            buf += self.fileobj.read()
+        else:
+            buf += self.fileobj.read(size - len(buf))
+
+        self.position += len(buf)
+        return buf
+
+    # XXX TextIOWrapper uses the read1() method.
+    read1 = read
+
+    def readline(self, size=-1):
+        """Read one entire line from the file. If size is present
+           and non-negative, return a string with at most that
+           size, which may be an incomplete line.
+        """
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+
+        pos = self.buffer.find(b"\n") + 1
+        if pos == 0:
+            # no newline found.
+            while True:
+                buf = self.fileobj.read(self.blocksize)
+                self.buffer += buf
+                if not buf or b"\n" in buf:
+                    pos = self.buffer.find(b"\n") + 1
+                    if pos == 0:
+                        # no newline found.
+                        pos = len(self.buffer)
+                    break
+
+        if size != -1:
+            pos = min(size, pos)
+
+        buf = self.buffer[:pos]
+        self.buffer = self.buffer[pos:]
+        self.position += len(buf)
+        return buf
+
+    def readlines(self):
+        """Return a list with all remaining lines.
+        """
+        result = []
+        while True:
+            line = self.readline()
+            if not line: break
+            result.append(line)
+        return result
+
+    def tell(self):
+        """Return the current file position.
+        """
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+
+        return self.position
+
+    def seek(self, pos, whence=os.SEEK_SET):
+        """Seek to a position in the file.
+        """
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+
+        if whence == os.SEEK_SET:
+            self.position = min(max(pos, 0), self.size)
+        elif whence == os.SEEK_CUR:
+            if pos < 0:
+                self.position = max(self.position + pos, 0)
+            else:
+                self.position = min(self.position + pos, self.size)
+        elif whence == os.SEEK_END:
+            self.position = max(min(self.size + pos, self.size), 0)
+        else:
+            raise ValueError("Invalid argument")
+
+        self.buffer = b""
+        self.fileobj.seek(self.position)
+
+    def close(self):
+        """Close the file object.
+        """
+        self.closed = True
+
+    def __iter__(self):
+        """Get an iterator over the file's lines.
+        """
+        while True:
+            line = self.readline()
+            if not line:
+                break
+            yield line
+#class ExFileObject
+
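+# Editor's note -- illustrative usage (editorial addition; the member name is
+# hypothetical and extractfile() is defined further below in this module):
+#   f = tar.extractfile("etc/hosts")
+#   if f is not None:             # None for members without data
+#       first_line = f.readline()
+#       rest = f.read()
+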
+#------------------
+# Exported Classes
+#------------------
+class TarInfo(object):
+    """Informational class which holds the details about an
+       archive member given by a tar header block.
+       TarInfo objects are returned by TarFile.getmember(),
+       TarFile.getmembers() and TarFile.gettarinfo() and are
+       usually created internally.
+    """
+
+    __slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
+                 "chksum", "type", "linkname", "uname", "gname",
+                 "devmajor", "devminor",
+                 "offset", "offset_data", "pax_headers", "sparse",
+                 "tarfile", "_sparse_structs", "_link_target")
+
+    def __init__(self, name=""):
+        """Construct a TarInfo object. name is the optional name
+           of the member.
+        """
+        self.name = name        # member name
+        self.mode = 0o644       # file permissions
+        self.uid = 0            # user id
+        self.gid = 0            # group id
+        self.size = 0           # file size
+        self.mtime = 0          # modification time
+        self.chksum = 0         # header checksum
+        self.type = REGTYPE     # member type
+        self.linkname = ""      # link name
+        self.uname = ""         # user name
+        self.gname = ""         # group name
+        self.devmajor = 0       # device major number
+        self.devminor = 0       # device minor number
+
+        self.offset = 0         # the tar header starts here
+        self.offset_data = 0    # the file's data starts here
+
+        self.sparse = None      # sparse member information
+        self.pax_headers = {}   # pax header information
+
+    # In pax headers the "name" and "linkname" field are called
+    # "path" and "linkpath".
+    def _getpath(self):
+        return self.name
+    def _setpath(self, name):
+        self.name = name
+    path = property(_getpath, _setpath)
+
+    def _getlinkpath(self):
+        return self.linkname
+    def _setlinkpath(self, linkname):
+        self.linkname = linkname
+    linkpath = property(_getlinkpath, _setlinkpath)
+
+    def __repr__(self):
+        return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
+
+    def get_info(self):
+        """Return the TarInfo's attributes as a dictionary.
+        """
+        info = {
+            "name":     self.name,
+            "mode":     self.mode & 0o7777,
+            "uid":      self.uid,
+            "gid":      self.gid,
+            "size":     self.size,
+            "mtime":    self.mtime,
+            "chksum":   self.chksum,
+            "type":     self.type,
+            "linkname": self.linkname,
+            "uname":    self.uname,
+            "gname":    self.gname,
+            "devmajor": self.devmajor,
+            "devminor": self.devminor
+        }
+
+        if info["type"] == DIRTYPE and not info["name"].endswith("/"):
+            info["name"] += "/"
+
+        return info
+
+    def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
+        """Return a tar header as a string of 512 byte blocks.
+        """
+        info = self.get_info()
+
+        if format == USTAR_FORMAT:
+            return self.create_ustar_header(info, encoding, errors)
+        elif format == GNU_FORMAT:
+            return self.create_gnu_header(info, encoding, errors)
+        elif format == PAX_FORMAT:
+            return self.create_pax_header(info, encoding)
+        else:
+            raise ValueError("invalid format")
+
+    def create_ustar_header(self, info, encoding, errors):
+        """Return the object as a ustar header block.
+        """
+        info["magic"] = POSIX_MAGIC
+
+        if len(info["linkname"]) > LENGTH_LINK:
+            raise ValueError("linkname is too long")
+
+        if len(info["name"]) > LENGTH_NAME:
+            info["prefix"], info["name"] = self._posix_split_name(info["name"])
+
+        return self._create_header(info, USTAR_FORMAT, encoding, errors)
+
+    def create_gnu_header(self, info, encoding, errors):
+        """Return the object as a GNU header block sequence.
+        """
+        info["magic"] = GNU_MAGIC
+
+        buf = b""
+        if len(info["linkname"]) > LENGTH_LINK:
+            buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
+
+        if len(info["name"]) > LENGTH_NAME:
+            buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
+
+        return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
+
+    def create_pax_header(self, info, encoding):
+        """Return the object as a ustar header block. If it cannot be
+           represented this way, prepend a pax extended header sequence
+           with supplement information.
+        """
+        info["magic"] = POSIX_MAGIC
+        pax_headers = self.pax_headers.copy()
+
+        # Test string fields for values that exceed the field length or cannot
+        # be represented in ASCII encoding.
+        for name, hname, length in (
+                ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
+                ("uname", "uname", 32), ("gname", "gname", 32)):
+
+            if hname in pax_headers:
+                # The pax header has priority.
+                continue
+
+            # Try to encode the string as ASCII.
+            try:
+                info[name].encode("ascii", "strict")
+            except UnicodeEncodeError:
+                pax_headers[hname] = info[name]
+                continue
+
+            if len(info[name]) > length:
+                pax_headers[hname] = info[name]
+
+        # Test number fields for values that exceed the field limit or
+        # that need to be stored as floats.
+        for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
+            if name in pax_headers:
+                # The pax header has priority. Avoid overflow.
+                info[name] = 0
+                continue
+
+            val = info[name]
+            if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
+                pax_headers[name] = str(val)
+                info[name] = 0
+
+        # Create a pax extended header if necessary.
+        if pax_headers:
+            buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
+        else:
+            buf = b""
+
+        return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
+
+    @classmethod
+    def create_pax_global_header(cls, pax_headers):
+        """Return the object as a pax global header block sequence.
+        """
+        return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
+
+    def _posix_split_name(self, name):
+        """Split a name longer than 100 chars into a prefix
+           and a name part.
+        """
+        prefix = name[:LENGTH_PREFIX + 1]
+        while prefix and prefix[-1] != "/":
+            prefix = prefix[:-1]
+
+        name = name[len(prefix):]
+        prefix = prefix[:-1]
+
+        if not prefix or len(name) > LENGTH_NAME:
+            raise ValueError("name is too long")
+        return prefix, name
+
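+    # Editor's note -- illustrative split (editorial addition): for a path
+    # longer than 100 characters the method above cuts at a "/" so that the
+    # prefix fits 155 bytes and the remainder fits 100, conceptually:
+    #   _posix_split_name("<152 chars>/archive/member.txt")
+    #       -> ("<152 chars>/archive", "member.txt")
+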
+    @staticmethod
+    def _create_header(info, format, encoding, errors):
+        """Return a header block. info is a dictionary with file
+           information, format must be one of the *_FORMAT constants.
+        """
+        parts = [
+            stn(info.get("name", ""), 100, encoding, errors),
+            itn(info.get("mode", 0) & 0o7777, 8, format),
+            itn(info.get("uid", 0), 8, format),
+            itn(info.get("gid", 0), 8, format),
+            itn(info.get("size", 0), 12, format),
+            itn(info.get("mtime", 0), 12, format),
+            b"        ", # checksum field
+            info.get("type", REGTYPE),
+            stn(info.get("linkname", ""), 100, encoding, errors),
+            info.get("magic", POSIX_MAGIC),
+            stn(info.get("uname", ""), 32, encoding, errors),
+            stn(info.get("gname", ""), 32, encoding, errors),
+            itn(info.get("devmajor", 0), 8, format),
+            itn(info.get("devminor", 0), 8, format),
+            stn(info.get("prefix", ""), 155, encoding, errors)
+        ]
+
+        buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
+        chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
+        buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
+        return buf
+
+    @staticmethod
+    def _create_payload(payload):
+        """Return the string payload filled with zero bytes
+           up to the next 512 byte border.
+        """
+        blocks, remainder = divmod(len(payload), BLOCKSIZE)
+        if remainder > 0:
+            payload += (BLOCKSIZE - remainder) * NUL
+        return payload
+
+    @classmethod
+    def _create_gnu_long_header(cls, name, type, encoding, errors):
+        """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
+           for name.
+        """
+        name = name.encode(encoding, errors) + NUL
+
+        info = {}
+        info["name"] = "././@LongLink"
+        info["type"] = type
+        info["size"] = len(name)
+        info["magic"] = GNU_MAGIC
+
+        # create extended header + name blocks.
+        return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
+                cls._create_payload(name)
+
+    @classmethod
+    def _create_pax_generic_header(cls, pax_headers, type, encoding):
+        """Return a POSIX.1-2008 extended or global header sequence
+           that contains a list of keyword, value pairs. The values
+           must be strings.
+        """
+        # Check if one of the fields contains surrogate characters and thereby
+        # forces hdrcharset=BINARY, see _proc_pax() for more information.
+        binary = False
+        for keyword, value in pax_headers.items():
+            try:
+                value.encode("utf8", "strict")
+            except UnicodeEncodeError:
+                binary = True
+                break
+
+        records = b""
+        if binary:
+            # Put the hdrcharset field at the beginning of the header.
+            records += b"21 hdrcharset=BINARY\n"
+
+        for keyword, value in pax_headers.items():
+            keyword = keyword.encode("utf8")
+            if binary:
+                # Try to restore the original byte representation of `value'.
+                # Needless to say, the encoding must match the string.
+                value = value.encode(encoding, "surrogateescape")
+            else:
+                value = value.encode("utf8")
+
+            l = len(keyword) + len(value) + 3   # ' ' + '=' + '\n'
+            # The decimal length prefix counts itself, so iterate until the
+            # total record length reaches a fixed point.
+            n = p = 0
+            while True:
+                n = l + len(str(p))
+                if n == p:
+                    break
+                p = n
+            records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
+
+        # We use a hardcoded "././@PaxHeader" name like star does
+        # instead of the one that POSIX recommends.
+        info = {}
+        info["name"] = "././@PaxHeader"
+        info["type"] = type
+        info["size"] = len(records)
+        info["magic"] = POSIX_MAGIC
+
+        # Create pax header + record blocks.
+        return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
+                cls._create_payload(records)
+
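+    # Editor's note -- worked example for the record-length loop above
+    # (editorial addition): for keyword b"path" and value b"foo",
+    # l = 4 + 3 + 3 = 10; p converges 0 -> 11 -> 12, and the emitted record
+    # b"12 path=foo\n" is indeed 12 bytes long including its length prefix.
+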
+    @classmethod
+    def frombuf(cls, buf, encoding, errors):
+        """Construct a TarInfo object from a 512 byte bytes object.
+        """
+        if len(buf) == 0:
+            raise EmptyHeaderError("empty header")
+        if len(buf) != BLOCKSIZE:
+            raise TruncatedHeaderError("truncated header")
+        if buf.count(NUL) == BLOCKSIZE:
+            raise EOFHeaderError("end of file header")
+
+        chksum = nti(buf[148:156])
+        if chksum not in calc_chksums(buf):
+            raise InvalidHeaderError("bad checksum")
+
+        obj = cls()
+        obj.name = nts(buf[0:100], encoding, errors)
+        obj.mode = nti(buf[100:108])
+        obj.uid = nti(buf[108:116])
+        obj.gid = nti(buf[116:124])
+        obj.size = nti(buf[124:136])
+        obj.mtime = nti(buf[136:148])
+        obj.chksum = chksum
+        obj.type = buf[156:157]
+        obj.linkname = nts(buf[157:257], encoding, errors)
+        obj.uname = nts(buf[265:297], encoding, errors)
+        obj.gname = nts(buf[297:329], encoding, errors)
+        obj.devmajor = nti(buf[329:337])
+        obj.devminor = nti(buf[337:345])
+        prefix = nts(buf[345:500], encoding, errors)
+
+        # Old V7 tar format represents a directory as a regular
+        # file with a trailing slash.
+        if obj.type == AREGTYPE and obj.name.endswith("/"):
+            obj.type = DIRTYPE
+
+        # The old GNU sparse format occupies some of the unused
+        # space in the buffer for up to 4 sparse structures.
+        # Save them for later processing in _proc_sparse().
+        if obj.type == GNUTYPE_SPARSE:
+            pos = 386
+            structs = []
+            for i in range(4):
+                try:
+                    offset = nti(buf[pos:pos + 12])
+                    numbytes = nti(buf[pos + 12:pos + 24])
+                except ValueError:
+                    break
+                structs.append((offset, numbytes))
+                pos += 24
+            isextended = bool(buf[482])
+            origsize = nti(buf[483:495])
+            obj._sparse_structs = (structs, isextended, origsize)
+
+        # Remove redundant slashes from directories.
+        if obj.isdir():
+            obj.name = obj.name.rstrip("/")
+
+        # Reconstruct a ustar longname.
+        if prefix and obj.type not in GNU_TYPES:
+            obj.name = prefix + "/" + obj.name
+        return obj
+
+    @classmethod
+    def fromtarfile(cls, tarfile):
+        """Return the next TarInfo object from TarFile object
+           tarfile.
+        """
+        buf = tarfile.fileobj.read(BLOCKSIZE)
+        obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
+        obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
+        return obj._proc_member(tarfile)
+
+    #--------------------------------------------------------------------------
+    # The following are methods that are called depending on the type of a
+    # member. The entry point is _proc_member() which can be overridden in a
+    # subclass to add custom _proc_*() methods. A _proc_*() method MUST
+    # implement the following operations:
+    # 1. Set self.offset_data to the position where the data blocks begin,
+    #    if there is data that follows.
+    # 2. Set tarfile.offset to the position where the next member's header will
+    #    begin.
+    # 3. Return self or another valid TarInfo object.
+    def _proc_member(self, tarfile):
+        """Choose the right processing method depending on
+           the type and call it.
+        """
+        if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
+            return self._proc_gnulong(tarfile)
+        elif self.type == GNUTYPE_SPARSE:
+            return self._proc_sparse(tarfile)
+        elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
+            return self._proc_pax(tarfile)
+        else:
+            return self._proc_builtin(tarfile)
+
+    def _proc_builtin(self, tarfile):
+        """Process a builtin type or an unknown type which
+           will be treated as a regular file.
+        """
+        self.offset_data = tarfile.fileobj.tell()
+        offset = self.offset_data
+        if self.isreg() or self.type not in SUPPORTED_TYPES:
+            # Skip the following data blocks.
+            offset += self._block(self.size)
+        tarfile.offset = offset
+
+        # Patch the TarInfo object with saved global
+        # header information.
+        self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
+
+        return self
+
+    def _proc_gnulong(self, tarfile):
+        """Process the blocks that hold a GNU longname
+           or longlink member.
+        """
+        buf = tarfile.fileobj.read(self._block(self.size))
+
+        # Fetch the next header and process it.
+        try:
+            next = self.fromtarfile(tarfile)
+        except HeaderError:
+            raise SubsequentHeaderError("missing or bad subsequent header")
+
+        # Patch the TarInfo object from the next header with
+        # the longname information.
+        next.offset = self.offset
+        if self.type == GNUTYPE_LONGNAME:
+            next.name = nts(buf, tarfile.encoding, tarfile.errors)
+        elif self.type == GNUTYPE_LONGLINK:
+            next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
+
+        return next
+
+    def _proc_sparse(self, tarfile):
+        """Process a GNU sparse header plus extra headers.
+        """
+        # We already collected some sparse structures in frombuf().
+        structs, isextended, origsize = self._sparse_structs
+        del self._sparse_structs
+
+        # Collect sparse structures from extended header blocks.
+        while isextended:
+            buf = tarfile.fileobj.read(BLOCKSIZE)
+            pos = 0
+            for i in range(21):
+                try:
+                    offset = nti(buf[pos:pos + 12])
+                    numbytes = nti(buf[pos + 12:pos + 24])
+                except ValueError:
+                    break
+                if offset and numbytes:
+                    structs.append((offset, numbytes))
+                pos += 24
+            isextended = bool(buf[504])
+        self.sparse = structs
+
+        self.offset_data = tarfile.fileobj.tell()
+        tarfile.offset = self.offset_data + self._block(self.size)
+        self.size = origsize
+        return self
+
+    def _proc_pax(self, tarfile):
+        """Process an extended or global header as described in
+           POSIX.1-2008.
+        """
+        # Read the header information.
+        buf = tarfile.fileobj.read(self._block(self.size))
+
+        # A pax header stores supplemental information for either
+        # the following file (extended) or all following files
+        # (global).
+        if self.type == XGLTYPE:
+            pax_headers = tarfile.pax_headers
+        else:
+            pax_headers = tarfile.pax_headers.copy()
+
+        # Check if the pax header contains a hdrcharset field. This tells us
+        # the encoding of the path, linkpath, uname and gname fields. Normally,
+        # these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
+        # implementations to store them as raw binary strings if the
+        # translation to UTF-8 fails.
+        match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
+        if match is not None:
+            pax_headers["hdrcharset"] = match.group(1).decode("utf8")
+
+        # For the time being, we don't care about anything other than "BINARY".
+        # The only other value that is currently allowed by the standard is
+        # "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
+        hdrcharset = pax_headers.get("hdrcharset")
+        if hdrcharset == "BINARY":
+            encoding = tarfile.encoding
+        else:
+            encoding = "utf8"
+
+        # Parse pax header information. A record looks like this:
+        # "%d %s=%s\n" % (length, keyword, value). length is the size
+        # of the complete record including the length field itself and
+        # the newline. keyword and value are both UTF-8 encoded strings.
+        regex = re.compile(br"(\d+) ([^=]+)=")
+        pos = 0
+        while True:
+            match = regex.match(buf, pos)
+            if not match:
+                break
+
+            length, keyword = match.groups()
+            length = int(length)
+            value = buf[match.end(2) + 1:match.start(1) + length - 1]
+
+            # Normally, we could just use "utf8" as the encoding and "strict"
+            # as the error handler, but we better not take the risk. For
+            # example, GNU tar <= 1.23 is known to store filenames it cannot
+            # translate to UTF-8 as raw strings (unfortunately without a
+            # hdrcharset=BINARY header).
+            # We first try the strict standard encoding, and if that fails we
+            # fall back on the user's encoding and error handler.
+            keyword = self._decode_pax_field(keyword, "utf8", "utf8",
+                    tarfile.errors)
+            if keyword in PAX_NAME_FIELDS:
+                value = self._decode_pax_field(value, encoding, tarfile.encoding,
+                        tarfile.errors)
+            else:
+                value = self._decode_pax_field(value, "utf8", "utf8",
+                        tarfile.errors)
+
+            pax_headers[keyword] = value
+            pos += length
+
+        # Fetch the next header.
+        try:
+            next = self.fromtarfile(tarfile)
+        except HeaderError:
+            raise SubsequentHeaderError("missing or bad subsequent header")
+
+        # Process GNU sparse information.
+        if "GNU.sparse.map" in pax_headers:
+            # GNU extended sparse format version 0.1.
+            self._proc_gnusparse_01(next, pax_headers)
+
+        elif "GNU.sparse.size" in pax_headers:
+            # GNU extended sparse format version 0.0.
+            self._proc_gnusparse_00(next, pax_headers, buf)
+
+        elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
+            # GNU extended sparse format version 1.0.
+            self._proc_gnusparse_10(next, pax_headers, tarfile)
+
+        if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
+            # Patch the TarInfo object with the extended header info.
+            next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
+            next.offset = self.offset
+
+            if "size" in pax_headers:
+                # If the extended header replaces the size field,
+                # we need to recalculate the offset where the next
+                # header starts.
+                offset = next.offset_data
+                if next.isreg() or next.type not in SUPPORTED_TYPES:
+                    offset += next._block(next.size)
+                tarfile.offset = offset
+
+        return next
+
+    def _proc_gnusparse_00(self, next, pax_headers, buf):
+        """Process a GNU tar extended sparse header, version 0.0.
+        """
+        offsets = []
+        for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
+            offsets.append(int(match.group(1)))
+        numbytes = []
+        for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
+            numbytes.append(int(match.group(1)))
+        next.sparse = list(zip(offsets, numbytes))
+
+    def _proc_gnusparse_01(self, next, pax_headers):
+        """Process a GNU tar extended sparse header, version 0.1.
+        """
+        sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
+        next.sparse = list(zip(sparse[::2], sparse[1::2]))
+
+    def _proc_gnusparse_10(self, next, pax_headers, tarfile):
+        """Process a GNU tar extended sparse header, version 1.0.
+        """
+        fields = None
+        sparse = []
+        buf = tarfile.fileobj.read(BLOCKSIZE)
+        fields, buf = buf.split(b"\n", 1)
+        fields = int(fields)
+        while len(sparse) < fields * 2:
+            if b"\n" not in buf:
+                buf += tarfile.fileobj.read(BLOCKSIZE)
+            number, buf = buf.split(b"\n", 1)
+            sparse.append(int(number))
+        next.offset_data = tarfile.fileobj.tell()
+        next.sparse = list(zip(sparse[::2], sparse[1::2]))
+
+    def _apply_pax_info(self, pax_headers, encoding, errors):
+        """Replace fields with supplemental information from a previous
+           pax extended or global header.
+        """
+        for keyword, value in pax_headers.items():
+            if keyword == "GNU.sparse.name":
+                setattr(self, "path", value)
+            elif keyword == "GNU.sparse.size":
+                setattr(self, "size", int(value))
+            elif keyword == "GNU.sparse.realsize":
+                setattr(self, "size", int(value))
+            elif keyword in PAX_FIELDS:
+                if keyword in PAX_NUMBER_FIELDS:
+                    try:
+                        value = PAX_NUMBER_FIELDS[keyword](value)
+                    except ValueError:
+                        value = 0
+                if keyword == "path":
+                    value = value.rstrip("/")
+                setattr(self, keyword, value)
+
+        self.pax_headers = pax_headers.copy()
+
+    def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
+        """Decode a single field from a pax record.
+        """
+        try:
+            return value.decode(encoding, "strict")
+        except UnicodeDecodeError:
+            return value.decode(fallback_encoding, fallback_errors)
+
+    def _block(self, count):
+        """Round up a byte count by BLOCKSIZE and return it,
+           e.g. _block(834) => 1024.
+        """
+        blocks, remainder = divmod(count, BLOCKSIZE)
+        if remainder:
+            blocks += 1
+        return blocks * BLOCKSIZE
+
+    def isreg(self):
+        return self.type in REGULAR_TYPES
+    def isfile(self):
+        return self.isreg()
+    def isdir(self):
+        return self.type == DIRTYPE
+    def issym(self):
+        return self.type == SYMTYPE
+    def islnk(self):
+        return self.type == LNKTYPE
+    def ischr(self):
+        return self.type == CHRTYPE
+    def isblk(self):
+        return self.type == BLKTYPE
+    def isfifo(self):
+        return self.type == FIFOTYPE
+    def issparse(self):
+        return self.sparse is not None
+    def isdev(self):
+        return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
+# class TarInfo
+
+class TarFile(object):
+    """The TarFile Class provides an interface to tar archives.
+    """
+
+    debug = 0                   # May be set from 0 (no msgs) to 3 (all msgs)
+
+    dereference = False         # If true, add content of linked file to the
+                                # tar file, else the link.
+
+    ignore_zeros = False        # If true, skips empty or invalid blocks and
+                                # continues processing.
+
+    errorlevel = 1              # If 0, fatal errors only appear in debug
+                                # messages (if debug >= 0). If > 0, errors
+                                # are passed to the caller as exceptions.
+
+    format = DEFAULT_FORMAT     # The format to use when creating an archive.
+
+    encoding = ENCODING         # Encoding for 8-bit character strings.
+
+    errors = None               # Error handler for unicode conversion.
+
+    tarinfo = TarInfo           # The default TarInfo class to use.
+
+    fileobject = ExFileObject   # The default ExFileObject class to use.
+
+    def __init__(self, name=None, mode="r", fileobj=None, format=None,
+            tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
+            errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
+        """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
+           read from an existing archive, 'a' to append data to an existing
+           file or 'w' to create a new file overwriting an existing one. `mode'
+           defaults to 'r'.
+           If `fileobj' is given, it is used for reading or writing data. If it
+           can be determined, `mode' is overridden by `fileobj's mode.
+           `fileobj' is not closed when TarFile is closed.
+        """
+        if len(mode) > 1 or mode not in "raw":
+            raise ValueError("mode must be 'r', 'a' or 'w'")
+        self.mode = mode
+        self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
+
+        if not fileobj:
+            if self.mode == "a" and not os.path.exists(name):
+                # Create nonexistent files in append mode.
+                self.mode = "w"
+                self._mode = "wb"
+            fileobj = bltn_open(name, self._mode)
+            self._extfileobj = False
+        else:
+            if name is None and hasattr(fileobj, "name"):
+                name = fileobj.name
+            if hasattr(fileobj, "mode"):
+                self._mode = fileobj.mode
+            self._extfileobj = True
+        self.name = os.path.abspath(name) if name else None
+        self.fileobj = fileobj
+
+        # Init attributes.
+        if format is not None:
+            self.format = format
+        if tarinfo is not None:
+            self.tarinfo = tarinfo
+        if dereference is not None:
+            self.dereference = dereference
+        if ignore_zeros is not None:
+            self.ignore_zeros = ignore_zeros
+        if encoding is not None:
+            self.encoding = encoding
+        self.errors = errors
+
+        if pax_headers is not None and self.format == PAX_FORMAT:
+            self.pax_headers = pax_headers
+        else:
+            self.pax_headers = {}
+
+        if debug is not None:
+            self.debug = debug
+        if errorlevel is not None:
+            self.errorlevel = errorlevel
+
+        # Init datastructures.
+        self.closed = False
+        self.members = []       # list of members as TarInfo objects
+        self._loaded = False    # flag if all members have been read
+        self.offset = self.fileobj.tell()
+                                # current position in the archive file
+        self.inodes = {}        # dictionary caching the inodes of
+                                # archive members already added
+
+        try:
+            if self.mode == "r":
+                self.firstmember = None
+                self.firstmember = self.next()
+
+            if self.mode == "a":
+                # Move to the end of the archive,
+                # before the first empty block.
+                while True:
+                    self.fileobj.seek(self.offset)
+                    try:
+                        tarinfo = self.tarinfo.fromtarfile(self)
+                        self.members.append(tarinfo)
+                    except EOFHeaderError:
+                        self.fileobj.seek(self.offset)
+                        break
+                    except HeaderError as e:
+                        raise ReadError(str(e))
+
+            if self.mode in "aw":
+                self._loaded = True
+
+                if self.pax_headers:
+                    buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
+                    self.fileobj.write(buf)
+                    self.offset += len(buf)
+        except:
+            if not self._extfileobj:
+                self.fileobj.close()
+            self.closed = True
+            raise
+
+    #--------------------------------------------------------------------------
+    # Below are the classmethods which act as alternate constructors to the
+    # TarFile class. The open() method is the only one that is needed for
+    # public use; it is the "super"-constructor and is able to select an
+    # adequate "sub"-constructor for a particular compression using the mapping
+    # from OPEN_METH.
+    #
+    # This concept allows one to subclass TarFile without losing the comfort of
+    # the super-constructor. A sub-constructor is registered and made available
+    # by adding it to the mapping in OPEN_METH.
+
+    @classmethod
+    def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
+        """Open a tar archive for reading, writing or appending. Return
+           an appropriate TarFile class.
+
+           mode:
+           'r' or 'r:*' open for reading with transparent compression
+           'r:'         open for reading exclusively uncompressed
+           'r:gz'       open for reading with gzip compression
+           'r:bz2'      open for reading with bzip2 compression
+           'a' or 'a:'  open for appending, creating the file if necessary
+           'w' or 'w:'  open for writing without compression
+           'w:gz'       open for writing with gzip compression
+           'w:bz2'      open for writing with bzip2 compression
+
+           'r|*'        open a stream of tar blocks with transparent compression
+           'r|'         open an uncompressed stream of tar blocks for reading
+           'r|gz'       open a gzip compressed stream of tar blocks
+           'r|bz2'      open a bzip2 compressed stream of tar blocks
+           'w|'         open an uncompressed stream for writing
+           'w|gz'       open a gzip compressed stream for writing
+           'w|bz2'      open a bzip2 compressed stream for writing
+        """
+
+        if not name and not fileobj:
+            raise ValueError("nothing to open")
+
+        if mode in ("r", "r:*"):
+            # Find out which *open() is appropriate for opening the file.
+            for comptype in cls.OPEN_METH:
+                func = getattr(cls, cls.OPEN_METH[comptype])
+                if fileobj is not None:
+                    saved_pos = fileobj.tell()
+                try:
+                    return func(name, "r", fileobj, **kwargs)
+                except (ReadError, CompressionError):
+                    if fileobj is not None:
+                        fileobj.seek(saved_pos)
+                    continue
+            raise ReadError("file could not be opened successfully")
+
+        elif ":" in mode:
+            filemode, comptype = mode.split(":", 1)
+            filemode = filemode or "r"
+            comptype = comptype or "tar"
+
+            # Select the *open() function according to
+            # given compression.
+            if comptype in cls.OPEN_METH:
+                func = getattr(cls, cls.OPEN_METH[comptype])
+            else:
+                raise CompressionError("unknown compression type %r" % comptype)
+            return func(name, filemode, fileobj, **kwargs)
+
+        elif "|" in mode:
+            filemode, comptype = mode.split("|", 1)
+            filemode = filemode or "r"
+            comptype = comptype or "tar"
+
+            if filemode not in "rw":
+                raise ValueError("mode must be 'r' or 'w'")
+
+            stream = _Stream(name, filemode, comptype, fileobj, bufsize)
+            try:
+                t = cls(name, filemode, stream, **kwargs)
+            except:
+                stream.close()
+                raise
+            t._extfileobj = False
+            return t
+
+        elif mode in "aw":
+            return cls.taropen(name, mode, fileobj, **kwargs)
+
+        raise ValueError("undiscernible mode")
+
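+    # Editor's note -- illustrative calls (editorial addition; file names and
+    # the socket file object are hypothetical):
+    #   t1 = TarFile.open("data.tar.gz")                # transparent read
+    #   t2 = TarFile.open("out.tar.bz2", "w:bz2")       # explicit compression
+    #   t3 = TarFile.open(fileobj=sockfile, mode="r|")  # uncompressed stream
+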
+    @classmethod
+    def taropen(cls, name, mode="r", fileobj=None, **kwargs):
+        """Open uncompressed tar archive name for reading or writing.
+        """
+        if len(mode) > 1 or mode not in "raw":
+            raise ValueError("mode must be 'r', 'a' or 'w'")
+        return cls(name, mode, fileobj, **kwargs)
+
+    @classmethod
+    def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
+        """Open gzip compressed tar archive name for reading or writing.
+           Appending is not allowed.
+        """
+        if len(mode) > 1 or mode not in "rw":
+            raise ValueError("mode must be 'r' or 'w'")
+
+        try:
+            import gzip
+            gzip.GzipFile
+        except (ImportError, AttributeError):
+            raise CompressionError("gzip module is not available")
+
+        extfileobj = fileobj is not None
+        try:
+            fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
+            t = cls.taropen(name, mode, fileobj, **kwargs)
+        except IOError:
+            if not extfileobj and fileobj is not None:
+                fileobj.close()
+            if fileobj is None:
+                raise
+            raise ReadError("not a gzip file")
+        except:
+            if not extfileobj and fileobj is not None:
+                fileobj.close()
+            raise
+        t._extfileobj = extfileobj
+        return t
+
+    @classmethod
+    def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
+        """Open bzip2 compressed tar archive name for reading or writing.
+           Appending is not allowed.
+        """
+        if len(mode) > 1 or mode not in "rw":
+            raise ValueError("mode must be 'r' or 'w'")
+
+        try:
+            import bz2
+        except ImportError:
+            raise CompressionError("bz2 module is not available")
+
+        if fileobj is not None:
+            fileobj = _BZ2Proxy(fileobj, mode)
+        else:
+            fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
+
+        try:
+            t = cls.taropen(name, mode, fileobj, **kwargs)
+        except (IOError, EOFError):
+            fileobj.close()
+            raise ReadError("not a bzip2 file")
+        t._extfileobj = False
+        return t
+
+    # All *open() methods are registered here.
+    OPEN_METH = {
+        "tar": "taropen",   # uncompressed tar
+        "gz":  "gzopen",    # gzip compressed tar
+        "bz2": "bz2open"    # bzip2 compressed tar
+    }
+
+    #--------------------------------------------------------------------------
+    # The public methods which TarFile provides:
+
+    def close(self):
+        """Close the TarFile. In write-mode, two finishing zero blocks are
+           appended to the archive.
+        """
+        if self.closed:
+            return
+
+        if self.mode in "aw":
+            self.fileobj.write(NUL * (BLOCKSIZE * 2))
+            self.offset += (BLOCKSIZE * 2)
+            # fill up the end with zero-blocks
+            # (like option -b20 for tar does)
+            blocks, remainder = divmod(self.offset, RECORDSIZE)
+            if remainder > 0:
+                self.fileobj.write(NUL * (RECORDSIZE - remainder))
+
+        if not self._extfileobj:
+            self.fileobj.close()
+        self.closed = True
+
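+    # Worked example for the padding in close() above: with BLOCKSIZE = 512
+    # and RECORDSIZE = 20 * 512, an archive at offset 3 * 512 after the two
+    # end-of-archive blocks gets 17 further NUL blocks, so the file ends on
+    # a whole 10240-byte record (like tar's -b20 default).
+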
+    def getmember(self, name):
+        """Return a TarInfo object for member `name'. If `name' can not be
+           found in the archive, KeyError is raised. If a member occurs more
+           than once in the archive, its last occurrence is assumed to be the
+           most up-to-date version.
+        """
+        tarinfo = self._getmember(name)
+        if tarinfo is None:
+            raise KeyError("filename %r not found" % name)
+        return tarinfo
+
+    def getmembers(self):
+        """Return the members of the archive as a list of TarInfo objects. The
+           list has the same order as the members in the archive.
+        """
+        self._check()
+        if not self._loaded:    # if we want to obtain a list of
+            self._load()        # all members, we first have to
+                                # scan the whole archive.
+        return self.members
+
+    def getnames(self):
+        """Return the members of the archive as a list of their names. It has
+           the same order as the list returned by getmembers().
+        """
+        return [tarinfo.name for tarinfo in self.getmembers()]
+
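+    # Sketch of the lookup methods above (archive name is hypothetical):
+    #
+    #     with TarFile.open("backup.tar") as tf:
+    #         names = tf.getnames()            # e.g. ["etc/hosts", ...]
+    #         info = tf.getmember(names[0])    # TarInfo; KeyError if absent
+    #         assert info in tf.getmembers()
+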
+    def gettarinfo(self, name=None, arcname=None, fileobj=None):
+        """Create a TarInfo object for either the file `name' or the file
+           object `fileobj' (using os.fstat on its file descriptor). You can
+           modify some of the TarInfo's attributes before you add it using
+           addfile(). If given, `arcname' specifies an alternative name for the
+           file in the archive.
+        """
+        self._check("aw")
+
+        # When fileobj is given, replace name by
+        # fileobj's real name.
+        if fileobj is not None:
+            name = fileobj.name
+
+        # Build the name of the member in the archive.
+        # Backslashes are converted to forward slashes, and absolute
+        # paths are turned into relative paths.
+        if arcname is None:
+            arcname = name
+        drv, arcname = os.path.splitdrive(arcname)
+        arcname = arcname.replace(os.sep, "/")
+        arcname = arcname.lstrip("/")
+
+        # Now, fill the TarInfo object with
+        # information specific for the file.
+        tarinfo = self.tarinfo()
+        tarinfo.tarfile = self
+
+        # Use os.stat or os.lstat, depending on platform
+        # and if symlinks shall be resolved.
+        if fileobj is None:
+            if hasattr(os, "lstat") and not self.dereference:
+                statres = os.lstat(name)
+            else:
+                statres = os.stat(name)
+        else:
+            statres = os.fstat(fileobj.fileno())
+        linkname = ""
+
+        stmd = statres.st_mode
+        if stat.S_ISREG(stmd):
+            inode = (statres.st_ino, statres.st_dev)
+            if not self.dereference and statres.st_nlink > 1 and \
+                    inode in self.inodes and arcname != self.inodes[inode]:
+                # Is it a hardlink to an already
+                # archived file?
+                type = LNKTYPE
+                linkname = self.inodes[inode]
+            else:
+                # The inode is added only if it's valid.
+                # For win32 it is always 0.
+                type = REGTYPE
+                if inode[0]:
+                    self.inodes[inode] = arcname
+        elif stat.S_ISDIR(stmd):
+            type = DIRTYPE
+        elif stat.S_ISFIFO(stmd):
+            type = FIFOTYPE
+        elif stat.S_ISLNK(stmd):
+            type = SYMTYPE
+            linkname = os.readlink(name)
+        elif stat.S_ISCHR(stmd):
+            type = CHRTYPE
+        elif stat.S_ISBLK(stmd):
+            type = BLKTYPE
+        else:
+            return None
+
+        # Fill the TarInfo object with all
+        # information we can get.
+        tarinfo.name = arcname
+        tarinfo.mode = stmd
+        tarinfo.uid = statres.st_uid
+        tarinfo.gid = statres.st_gid
+        if type == REGTYPE:
+            tarinfo.size = statres.st_size
+        else:
+            tarinfo.size = 0
+        tarinfo.mtime = statres.st_mtime
+        tarinfo.type = type
+        tarinfo.linkname = linkname
+        if pwd:
+            try:
+                tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
+            except KeyError:
+                pass
+        if grp:
+            try:
+                tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
+            except KeyError:
+                pass
+
+        if type in (CHRTYPE, BLKTYPE):
+            if hasattr(os, "major") and hasattr(os, "minor"):
+                tarinfo.devmajor = os.major(statres.st_rdev)
+                tarinfo.devminor = os.minor(statres.st_rdev)
+        return tarinfo
+
+    def list(self, verbose=True):
+        """Print a table of contents to sys.stdout. If `verbose' is False, only
+           the names of the members are printed. If it is True, an `ls -l'-like
+           output is produced.
+        """
+        self._check()
+
+        for tarinfo in self:
+            if verbose:
+                print(filemode(tarinfo.mode), end=' ')
+                print("%s/%s" % (tarinfo.uname or tarinfo.uid,
+                                 tarinfo.gname or tarinfo.gid), end=' ')
+                if tarinfo.ischr() or tarinfo.isblk():
+                    print("%10s" % ("%d,%d" \
+                                    % (tarinfo.devmajor, tarinfo.devminor)), end=' ')
+                else:
+                    print("%10d" % tarinfo.size, end=' ')
+                print("%d-%02d-%02d %02d:%02d:%02d" \
+                      % time.localtime(tarinfo.mtime)[:6], end=' ')
+
+            print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
+
+            if verbose:
+                if tarinfo.issym():
+                    print("->", tarinfo.linkname, end=' ')
+                if tarinfo.islnk():
+                    print("link to", tarinfo.linkname, end=' ')
+            print()
+
+    def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
+        """Add the file `name' to the archive. `name' may be any type of file
+           (directory, fifo, symbolic link, etc.). If given, `arcname'
+           specifies an alternative name for the file in the archive.
+           Directories are added recursively by default. This can be avoided by
+           setting `recursive' to False. `exclude' is a function that should
+           return True for each filename to be excluded. `filter' is a function
+           that expects a TarInfo object argument and returns the changed
+           TarInfo object; if it returns None, the TarInfo object will be
+           excluded from the archive.
+        """
+        self._check("aw")
+
+        if arcname is None:
+            arcname = name
+
+        # Exclude pathnames.
+        if exclude is not None:
+            import warnings
+            warnings.warn("use the filter argument instead",
+                    DeprecationWarning, 2)
+            if exclude(name):
+                self._dbg(2, "tarfile: Excluded %r" % name)
+                return
+
+        # Skip if somebody tries to archive the archive...
+        if self.name is not None and os.path.abspath(name) == self.name:
+            self._dbg(2, "tarfile: Skipped %r" % name)
+            return
+
+        self._dbg(1, name)
+
+        # Create a TarInfo object from the file.
+        tarinfo = self.gettarinfo(name, arcname)
+
+        if tarinfo is None:
+            self._dbg(1, "tarfile: Unsupported type %r" % name)
+            return
+
+        # Change or exclude the TarInfo object.
+        if filter is not None:
+            tarinfo = filter(tarinfo)
+            if tarinfo is None:
+                self._dbg(2, "tarfile: Excluded %r" % name)
+                return
+
+        # Append the tar header and data to the archive.
+        if tarinfo.isreg():
+            f = bltn_open(name, "rb")
+            self.addfile(tarinfo, f)
+            f.close()
+
+        elif tarinfo.isdir():
+            self.addfile(tarinfo)
+            if recursive:
+                for f in os.listdir(name):
+                    self.add(os.path.join(name, f), os.path.join(arcname, f),
+                            recursive, exclude, filter=filter)
+
+        else:
+            self.addfile(tarinfo)
+
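+    # Hedged sketch of the `filter' callback described in add()'s docstring;
+    # the predicate and paths below are assumptions for illustration:
+    #
+    #     def skip_pyc(tarinfo):
+    #         return None if tarinfo.name.endswith(".pyc") else tarinfo
+    #
+    #     with TarFile.open("src.tar", "w") as tf:
+    #         tf.add("project", filter=skip_pyc)
+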
+    def addfile(self, tarinfo, fileobj=None):
+        """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
+           given, tarinfo.size bytes are read from it and added to the archive.
+           You can create TarInfo objects using gettarinfo().
+           On Windows platforms, `fileobj' should always be opened with mode
+           'rb' to avoid irritation about the file size.
+        """
+        self._check("aw")
+
+        tarinfo = copy.copy(tarinfo)
+
+        buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
+        self.fileobj.write(buf)
+        self.offset += len(buf)
+
+        # If there's data to follow, append it.
+        if fileobj is not None:
+            copyfileobj(fileobj, self.fileobj, tarinfo.size)
+            blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
+            if remainder > 0:
+                self.fileobj.write(NUL * (BLOCKSIZE - remainder))
+                blocks += 1
+            self.offset += blocks * BLOCKSIZE
+
+        self.members.append(tarinfo)
+
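+    # addfile() also accepts synthetic members; a minimal sketch using an
+    # in-memory payload (requires `import io` at the call site; names are
+    # illustrative):
+    #
+    #     data = b"hello"
+    #     info = TarInfo(name="hello.txt")
+    #     info.size = len(data)
+    #     with TarFile.open("out.tar", "w") as tf:
+    #         tf.addfile(info, io.BytesIO(data))
+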
+    def extractall(self, path=".", members=None):
+        """Extract all members from the archive to the current working
+           directory and set owner, modification time and permissions on
+           directories afterwards. `path' specifies a different directory
+           to extract to. `members' is optional and must be a subset of the
+           list returned by getmembers().
+        """
+        directories = []
+
+        if members is None:
+            members = self
+
+        for tarinfo in members:
+            if tarinfo.isdir():
+                # Extract directories with a safe mode.
+                directories.append(tarinfo)
+                tarinfo = copy.copy(tarinfo)
+                tarinfo.mode = 0o700
+            # Do not set attributes on directories here; that is done further down.
+            self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
+
+        # Reverse sort directories.
+        directories.sort(key=lambda a: a.name)
+        directories.reverse()
+
+        # Set correct owner, mtime and filemode on directories.
+        for tarinfo in directories:
+            dirpath = os.path.join(path, tarinfo.name)
+            try:
+                self.chown(tarinfo, dirpath)
+                self.utime(tarinfo, dirpath)
+                self.chmod(tarinfo, dirpath)
+            except ExtractError as e:
+                if self.errorlevel > 1:
+                    raise
+                else:
+                    self._dbg(1, "tarfile: %s" % e)
+
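+    # Sketch: extracting only selected members (paths are illustrative):
+    #
+    #     with TarFile.open("backup.tar") as tf:
+    #         wanted = [m for m in tf.getmembers()
+    #                   if m.name.startswith("etc/")]
+    #         tf.extractall(path="restore", members=wanted)
+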
+    def extract(self, member, path="", set_attrs=True):
+        """Extract a member from the archive to the current working directory,
+           using its full name. Its file information is extracted as accurately
+           as possible. `member' may be a filename or a TarInfo object. You can
+           specify a different directory using `path'. File attributes (owner,
+           mtime, mode) are set unless `set_attrs' is False.
+        """
+        self._check("r")
+
+        if isinstance(member, str):
+            tarinfo = self.getmember(member)
+        else:
+            tarinfo = member
+
+        # Prepare the link target for makelink().
+        if tarinfo.islnk():
+            tarinfo._link_target = os.path.join(path, tarinfo.linkname)
+
+        try:
+            self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
+                                 set_attrs=set_attrs)
+        except EnvironmentError as e:
+            if self.errorlevel > 0:
+                raise
+            else:
+                if e.filename is None:
+                    self._dbg(1, "tarfile: %s" % e.strerror)
+                else:
+                    self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
+        except ExtractError as e:
+            if self.errorlevel > 1:
+                raise
+            else:
+                self._dbg(1, "tarfile: %s" % e)
+
+    def extractfile(self, member):
+        """Extract a member from the archive as a file object. `member' may be
+           a filename or a TarInfo object. If `member' is a regular file, a
+           file-like object is returned. If `member' is a link, a file-like
+           object is constructed from the link's target. If `member' is none of
+           the above, None is returned.
+           The file-like object is read-only and provides the following
+           methods: read(), readline(), readlines(), seek() and tell()
+        """
+        self._check("r")
+
+        if isinstance(member, str):
+            tarinfo = self.getmember(member)
+        else:
+            tarinfo = member
+
+        if tarinfo.isreg():
+            return self.fileobject(self, tarinfo)
+
+        elif tarinfo.type not in SUPPORTED_TYPES:
+            # If a member's type is unknown, it is treated as a
+            # regular file.
+            return self.fileobject(self, tarinfo)
+
+        elif tarinfo.islnk() or tarinfo.issym():
+            if isinstance(self.fileobj, _Stream):
+                # A small but ugly workaround for the case that someone tries
+                # to extract a (sym)link as a file-object from a non-seekable
+                # stream of tar blocks.
+                raise StreamError("cannot extract (sym)link as file object")
+            else:
+                # A (sym)link's file object is its target's file object.
+                return self.extractfile(self._find_link_target(tarinfo))
+        else:
+            # If there's no data associated with the member (directory, chrdev,
+            # blkdev, etc.), return None instead of a file object.
+            return None
+
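+    # Sketch of extractfile() for reading a member without touching disk
+    # (member name is hypothetical):
+    #
+    #     with TarFile.open("backup.tar") as tf:
+    #         f = tf.extractfile("etc/hosts")
+    #         if f is not None:            # None for dirs, devices, ...
+    #             data = f.read()
+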
+    def _extract_member(self, tarinfo, targetpath, set_attrs=True):
+        """Extract the TarInfo object tarinfo to a physical
+           file called targetpath.
+        """
+        # Build the destination pathname, replacing forward slashes
+        # with platform-specific separators.
+        targetpath = targetpath.rstrip("/")
+        targetpath = targetpath.replace("/", os.sep)
+
+        # Create all upper directories.
+        upperdirs = os.path.dirname(targetpath)
+        if upperdirs and not os.path.exists(upperdirs):
+            # Create directories that are not part of the archive with
+            # default permissions.
+            os.makedirs(upperdirs)
+
+        if tarinfo.islnk() or tarinfo.issym():
+            self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
+        else:
+            self._dbg(1, tarinfo.name)
+
+        if tarinfo.isreg():
+            self.makefile(tarinfo, targetpath)
+        elif tarinfo.isdir():
+            self.makedir(tarinfo, targetpath)
+        elif tarinfo.isfifo():
+            self.makefifo(tarinfo, targetpath)
+        elif tarinfo.ischr() or tarinfo.isblk():
+            self.makedev(tarinfo, targetpath)
+        elif tarinfo.islnk() or tarinfo.issym():
+            self.makelink(tarinfo, targetpath)
+        elif tarinfo.type not in SUPPORTED_TYPES:
+            self.makeunknown(tarinfo, targetpath)
+        else:
+            self.makefile(tarinfo, targetpath)
+
+        if set_attrs:
+            self.chown(tarinfo, targetpath)
+            if not tarinfo.issym():
+                self.chmod(tarinfo, targetpath)
+                self.utime(tarinfo, targetpath)
+
+    #--------------------------------------------------------------------------
+    # Below are the different file methods. They are called via
+    # _extract_member() when extract() is called. They can be replaced in a
+    # subclass to implement other functionality.
+
+    def makedir(self, tarinfo, targetpath):
+        """Make a directory called targetpath.
+        """
+        try:
+            # Use a safe mode for the directory, the real mode is set
+            # later in _extract_member().
+            os.mkdir(targetpath, 0o700)
+        except EnvironmentError as e:
+            if e.errno != errno.EEXIST:
+                raise
+
+    def makefile(self, tarinfo, targetpath):
+        """Make a file called targetpath.
+        """
+        source = self.fileobj
+        source.seek(tarinfo.offset_data)
+        target = bltn_open(targetpath, "wb")
+        if tarinfo.sparse is not None:
+            for offset, size in tarinfo.sparse:
+                target.seek(offset)
+                copyfileobj(source, target, size)
+        else:
+            copyfileobj(source, target, tarinfo.size)
+        target.seek(tarinfo.size)
+        target.truncate()
+        target.close()
+
+    def makeunknown(self, tarinfo, targetpath):
+        """Make a file from a TarInfo object with an unknown type
+           at targetpath.
+        """
+        self.makefile(tarinfo, targetpath)
+        self._dbg(1, "tarfile: Unknown file type %r, " \
+                     "extracted as regular file." % tarinfo.type)
+
+    def makefifo(self, tarinfo, targetpath):
+        """Make a fifo called targetpath.
+        """
+        if hasattr(os, "mkfifo"):
+            os.mkfifo(targetpath)
+        else:
+            raise ExtractError("fifo not supported by system")
+
+    def makedev(self, tarinfo, targetpath):
+        """Make a character or block device called targetpath.
+        """
+        if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
+            raise ExtractError("special devices not supported by system")
+
+        mode = tarinfo.mode
+        if tarinfo.isblk():
+            mode |= stat.S_IFBLK
+        else:
+            mode |= stat.S_IFCHR
+
+        os.mknod(targetpath, mode,
+                 os.makedev(tarinfo.devmajor, tarinfo.devminor))
+
+    def makelink(self, tarinfo, targetpath):
+        """Make a (symbolic) link called targetpath. If it cannot be created
+          (platform limitation), we try to make a copy of the referenced file
+          instead of a link.
+        """
+        try:
+            # For systems that support symbolic and hard links.
+            if tarinfo.issym():
+                os.symlink(tarinfo.linkname, targetpath)
+            else:
+                # See extract().
+                if os.path.exists(tarinfo._link_target):
+                    os.link(tarinfo._link_target, targetpath)
+                else:
+                    self._extract_member(self._find_link_target(tarinfo),
+                                         targetpath)
+        except symlink_exception:
+            # The platform cannot create the link: fall back to extracting
+            # a copy of the link's target from the archive instead.
+            try:
+                self._extract_member(self._find_link_target(tarinfo),
+                                     targetpath)
+            except KeyError:
+                raise ExtractError("unable to resolve link inside archive")
+
+    def chown(self, tarinfo, targetpath):
+        """Set owner of targetpath according to tarinfo.
+        """
+        if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
+            # We have to be root to do so.
+            try:
+                g = grp.getgrnam(tarinfo.gname)[2]
+            except KeyError:
+                g = tarinfo.gid
+            try:
+                u = pwd.getpwnam(tarinfo.uname)[2]
+            except KeyError:
+                u = tarinfo.uid
+            try:
+                if tarinfo.issym() and hasattr(os, "lchown"):
+                    os.lchown(targetpath, u, g)
+                else:
+                    if sys.platform != "os2emx":
+                        os.chown(targetpath, u, g)
+            except EnvironmentError as e:
+                raise ExtractError("could not change owner")
+
+    def chmod(self, tarinfo, targetpath):
+        """Set file permissions of targetpath according to tarinfo.
+        """
+        if hasattr(os, 'chmod'):
+            try:
+                os.chmod(targetpath, tarinfo.mode)
+            except EnvironmentError as e:
+                raise ExtractError("could not change mode")
+
+    def utime(self, tarinfo, targetpath):
+        """Set modification time of targetpath according to tarinfo.
+        """
+        if not hasattr(os, 'utime'):
+            return
+        try:
+            os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
+        except EnvironmentError as e:
+            raise ExtractError("could not change modification time")
+
+    #--------------------------------------------------------------------------
+    def next(self):
+        """Return the next member of the archive as a TarInfo object, when
+           TarFile is opened for reading. Return None if there are no
+           more members available.
+        """
+        self._check("ra")
+        if self.firstmember is not None:
+            m = self.firstmember
+            self.firstmember = None
+            return m
+
+        # Read the next block.
+        self.fileobj.seek(self.offset)
+        tarinfo = None
+        while True:
+            try:
+                tarinfo = self.tarinfo.fromtarfile(self)
+            except EOFHeaderError as e:
+                if self.ignore_zeros:
+                    self._dbg(2, "0x%X: %s" % (self.offset, e))
+                    self.offset += BLOCKSIZE
+                    continue
+            except InvalidHeaderError as e:
+                if self.ignore_zeros:
+                    self._dbg(2, "0x%X: %s" % (self.offset, e))
+                    self.offset += BLOCKSIZE
+                    continue
+                elif self.offset == 0:
+                    raise ReadError(str(e))
+            except EmptyHeaderError:
+                if self.offset == 0:
+                    raise ReadError("empty file")
+            except TruncatedHeaderError as e:
+                if self.offset == 0:
+                    raise ReadError(str(e))
+            except SubsequentHeaderError as e:
+                raise ReadError(str(e))
+            break
+
+        if tarinfo is not None:
+            self.members.append(tarinfo)
+        else:
+            self._loaded = True
+
+        return tarinfo
+
+    #--------------------------------------------------------------------------
+    # Little helper methods:
+
+    def _getmember(self, name, tarinfo=None, normalize=False):
+        """Find an archive member by name from bottom to top.
+           If tarinfo is given, it is used as the starting point.
+        """
+        # Ensure that all members have been loaded.
+        members = self.getmembers()
+
+        # Limit the member search list up to tarinfo.
+        if tarinfo is not None:
+            members = members[:members.index(tarinfo)]
+
+        if normalize:
+            name = os.path.normpath(name)
+
+        for member in reversed(members):
+            if normalize:
+                member_name = os.path.normpath(member.name)
+            else:
+                member_name = member.name
+
+            if name == member_name:
+                return member
+
+    def _load(self):
+        """Read through the entire archive file and look for readable
+           members.
+        """
+        while True:
+            tarinfo = self.next()
+            if tarinfo is None:
+                break
+        self._loaded = True
+
+    def _check(self, mode=None):
+        """Check if TarFile is still open, and if the operation's mode
+           corresponds to TarFile's mode.
+        """
+        if self.closed:
+            raise IOError("%s is closed" % self.__class__.__name__)
+        if mode is not None and self.mode not in mode:
+            raise IOError("bad operation for mode %r" % self.mode)
+
+    def _find_link_target(self, tarinfo):
+        """Find the target member of a symlink or hardlink member in the
+           archive.
+        """
+        if tarinfo.issym():
+            # Always search the entire archive.
+            linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
+            limit = None
+        else:
+            # Search the archive before the link, because a hard link is
+            # just a reference to an already archived file.
+            linkname = tarinfo.linkname
+            limit = tarinfo
+
+        member = self._getmember(linkname, tarinfo=limit, normalize=True)
+        if member is None:
+            raise KeyError("linkname %r not found" % linkname)
+        return member
+
+    def __iter__(self):
+        """Provide an iterator object.
+        """
+        if self._loaded:
+            return iter(self.members)
+        else:
+            return TarIter(self)
+
+    def _dbg(self, level, msg):
+        """Write debugging output to sys.stderr.
+        """
+        if level <= self.debug:
+            print(msg, file=sys.stderr)
+
+    def __enter__(self):
+        self._check()
+        return self
+
+    def __exit__(self, type, value, traceback):
+        if type is None:
+            self.close()
+        else:
+            # An exception occurred. We must not call close() because
+            # it would try to write end-of-archive blocks and padding.
+            if not self._extfileobj:
+                self.fileobj.close()
+            self.closed = True
+# class TarFile
+
+class TarIter(object):
+    """Iterator Class.
+
+       for tarinfo in TarFile(...):
+           suite...
+    """
+
+    def __init__(self, tarfile):
+        """Construct a TarIter object.
+        """
+        self.tarfile = tarfile
+        self.index = 0
+
+    def __iter__(self):
+        """Return iterator object.
+        """
+        return self
+
+    def __next__(self):
+        """Return the next item using TarFile's next() method.
+           When all members have been read, set TarFile as _loaded.
+        """
+        # Fix for SF #1100429: Under rare circumstances it can
+        # happen that getmembers() is called during iteration,
+        # which will cause TarIter to stop prematurely.
+        if not self.tarfile._loaded:
+            tarinfo = self.tarfile.next()
+            if not tarinfo:
+                self.tarfile._loaded = True
+                raise StopIteration
+        else:
+            try:
+                tarinfo = self.tarfile.members[self.index]
+            except IndexError:
+                raise StopIteration
+        self.index += 1
+        return tarinfo
+
+    next = __next__ # for Python 2.x
+
+#--------------------
+# exported functions
+#--------------------
+def is_tarfile(name):
+    """Return True if name points to a tar archive that we
+       are able to handle, else return False.
+    """
+    try:
+        t = open(name)
+        t.close()
+        return True
+    except TarError:
+        return False
+
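+# Usage sketch (the path is illustrative):
+#
+#     if is_tarfile("download.bin"):
+#         with TarFile.open("download.bin") as tf:
+#             tf.list()
+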
+bltn_open = open
+open = TarFile.open
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/compat.py b/lib/python3.7/site-packages/pip/_vendor/distlib/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff328c8ee491f9f3c943cf637dc7ea17ea33ee3b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/compat.py
@@ -0,0 +1,1120 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013-2017 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+from __future__ import absolute_import
+
+import os
+import re
+import sys
+
+try:
+    import ssl
+except ImportError:  # pragma: no cover
+    ssl = None
+
+if sys.version_info[0] < 3:  # pragma: no cover
+    from StringIO import StringIO
+    string_types = basestring,
+    text_type = unicode
+    from types import FileType as file_type
+    import __builtin__ as builtins
+    import ConfigParser as configparser
+    from ._backport import shutil
+    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
+    from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
+                        pathname2url, ContentTooShortError, splittype)
+
+    def quote(s):
+        if isinstance(s, unicode):
+            s = s.encode('utf-8')
+        return _quote(s)
+
+    import urllib2
+    from urllib2 import (Request, urlopen, URLError, HTTPError,
+                         HTTPBasicAuthHandler, HTTPPasswordMgr,
+                         HTTPHandler, HTTPRedirectHandler,
+                         build_opener)
+    if ssl:
+        from urllib2 import HTTPSHandler
+    import httplib
+    import xmlrpclib
+    import Queue as queue
+    from HTMLParser import HTMLParser
+    import htmlentitydefs
+    raw_input = raw_input
+    from itertools import ifilter as filter
+    from itertools import ifilterfalse as filterfalse
+
+    _userprog = None
+    def splituser(host):
+        """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
+        global _userprog
+        if _userprog is None:
+            import re
+            _userprog = re.compile('^(.*)@(.*)$')
+
+        match = _userprog.match(host)
+        if match: return match.group(1, 2)
+        return None, host
+
+else:  # pragma: no cover
+    from io import StringIO
+    string_types = str,
+    text_type = str
+    from io import TextIOWrapper as file_type
+    import builtins
+    import configparser
+    import shutil
+    from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote,
+                              unquote, urlsplit, urlunsplit, splittype)
+    from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
+                                pathname2url,
+                                HTTPBasicAuthHandler, HTTPPasswordMgr,
+                                HTTPHandler, HTTPRedirectHandler,
+                                build_opener)
+    if ssl:
+        from urllib.request import HTTPSHandler
+    from urllib.error import HTTPError, URLError, ContentTooShortError
+    import http.client as httplib
+    import urllib.request as urllib2
+    import xmlrpc.client as xmlrpclib
+    import queue
+    from html.parser import HTMLParser
+    import html.entities as htmlentitydefs
+    raw_input = input
+    from itertools import filterfalse
+    filter = filter
+
+try:
+    from ssl import match_hostname, CertificateError
+except ImportError: # pragma: no cover
+    class CertificateError(ValueError):
+        pass
+
+
+    def _dnsname_match(dn, hostname, max_wildcards=1):
+        """Matching according to RFC 6125, section 6.4.3
+
+        http://tools.ietf.org/html/rfc6125#section-6.4.3
+        """
+        pats = []
+        if not dn:
+            return False
+
+        parts = dn.split('.')
+        leftmost, remainder = parts[0], parts[1:]
+
+        wildcards = leftmost.count('*')
+        if wildcards > max_wildcards:
+            # Issue #17980: avoid denials of service by refusing more
+            # than one wildcard per fragment.  A survey of established
+            # policy among SSL implementations showed it to be a
+            # reasonable choice.
+            raise CertificateError(
+                "too many wildcards in certificate DNS name: " + repr(dn))
+
+        # speed up common case w/o wildcards
+        if not wildcards:
+            return dn.lower() == hostname.lower()
+
+        # RFC 6125, section 6.4.3, subitem 1.
+        # The client SHOULD NOT attempt to match a presented identifier in which
+        # the wildcard character comprises a label other than the left-most label.
+        if leftmost == '*':
+            # When '*' is a fragment by itself, it matches a non-empty dotless
+            # fragment.
+            pats.append('[^.]+')
+        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+            # RFC 6125, section 6.4.3, subitem 3.
+            # The client SHOULD NOT attempt to match a presented identifier
+            # where the wildcard character is embedded within an A-label or
+            # U-label of an internationalized domain name.
+            pats.append(re.escape(leftmost))
+        else:
+            # Otherwise, '*' matches any dotless string, e.g. www*
+            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+        # add the remaining fragments, ignore any wildcards
+        for frag in remainder:
+            pats.append(re.escape(frag))
+
+        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+        return pat.match(hostname)
+
+
+    def match_hostname(cert, hostname):
+        """Verify that *cert* (in decoded format as returned by
+        SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
+        rules are followed, but IP addresses are not accepted for *hostname*.
+
+        CertificateError is raised on failure. On success, the function
+        returns nothing.
+        """
+        if not cert:
+            raise ValueError("empty or no certificate, match_hostname needs a "
+                             "SSL socket or SSL context with either "
+                             "CERT_OPTIONAL or CERT_REQUIRED")
+        dnsnames = []
+        san = cert.get('subjectAltName', ())
+        for key, value in san:
+            if key == 'DNS':
+                if _dnsname_match(value, hostname):
+                    return
+                dnsnames.append(value)
+        if not dnsnames:
+            # The subject is only checked when there is no dNSName entry
+            # in subjectAltName
+            for sub in cert.get('subject', ()):
+                for key, value in sub:
+                    # XXX according to RFC 2818, the most specific Common Name
+                    # must be used.
+                    if key == 'commonName':
+                        if _dnsname_match(value, hostname):
+                            return
+                        dnsnames.append(value)
+        if len(dnsnames) > 1:
+            raise CertificateError("hostname %r "
+                "doesn't match either of %s"
+                % (hostname, ', '.join(map(repr, dnsnames))))
+        elif len(dnsnames) == 1:
+            raise CertificateError("hostname %r "
+                "doesn't match %r"
+                % (hostname, dnsnames[0]))
+        else:
+            raise CertificateError("no appropriate commonName or "
+                "subjectAltName fields were found")
+
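+    # Behaviour sketch for the fallback above (the certificate dict is
+    # synthetic):
+    #
+    #     cert = {'subjectAltName': (('DNS', '*.example.com'),)}
+    #     match_hostname(cert, 'www.example.com')   # returns None (match)
+    #     match_hostname(cert, 'example.com')       # raises CertificateError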
+
+try:
+    from types import SimpleNamespace as Container
+except ImportError:  # pragma: no cover
+    class Container(object):
+        """
+        A generic container for when multiple values need to be returned
+        """
+        def __init__(self, **kwargs):
+            self.__dict__.update(kwargs)
+
+
+try:
+    from shutil import which
+except ImportError:  # pragma: no cover
+    # Implementation from Python 3.3
+    def which(cmd, mode=os.F_OK | os.X_OK, path=None):
+        """Given a command, mode, and a PATH string, return the path which
+        conforms to the given mode on the PATH, or None if there is no such
+        file.
+
+        `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
+        of os.environ.get("PATH"), or can be overridden with a custom search
+        path.
+
+        """
+        # Check that a given file can be accessed with the correct mode.
+        # Additionally check that `file` is not a directory, as on Windows
+        # directories pass the os.access check.
+        def _access_check(fn, mode):
+            return (os.path.exists(fn) and os.access(fn, mode)
+                    and not os.path.isdir(fn))
+
+        # If we're given a path with a directory part, look it up directly rather
+        # than referring to PATH directories. This includes checking relative to the
+        # current directory, e.g. ./script
+        if os.path.dirname(cmd):
+            if _access_check(cmd, mode):
+                return cmd
+            return None
+
+        if path is None:
+            path = os.environ.get("PATH", os.defpath)
+        if not path:
+            return None
+        path = path.split(os.pathsep)
+
+        if sys.platform == "win32":
+            # The current directory takes precedence on Windows.
+            if not os.curdir in path:
+                path.insert(0, os.curdir)
+
+            # PATHEXT is necessary to check on Windows.
+            pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
+            # See if the given file matches any of the expected path extensions.
+            # This will allow us to short circuit when given "python.exe".
+            # If it does match, only test that one, otherwise we have to try
+            # others.
+            if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
+                files = [cmd]
+            else:
+                files = [cmd + ext for ext in pathext]
+        else:
+            # On other platforms you don't have things like PATHEXT to tell you
+            # what file suffixes are executable, so just pass on cmd as-is.
+            files = [cmd]
+
+        seen = set()
+        for dir in path:
+            normdir = os.path.normcase(dir)
+            if not normdir in seen:
+                seen.add(normdir)
+                for thefile in files:
+                    name = os.path.join(dir, thefile)
+                    if _access_check(name, mode):
+                        return name
+        return None
+
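+    # Usage sketch for the backport above (the command name is illustrative):
+    #
+    #     path = which("git")          # e.g. "/usr/bin/git", or None
+    #     if path is None:
+    #         raise RuntimeError("git not found on PATH")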
+
+# ZipFile is a context manager in 2.7, but not in 2.6
+
+from zipfile import ZipFile as BaseZipFile
+
+if hasattr(BaseZipFile, '__enter__'):  # pragma: no cover
+    ZipFile = BaseZipFile
+else:  # pragma: no cover
+    from zipfile import ZipExtFile as BaseZipExtFile
+
+    class ZipExtFile(BaseZipExtFile):
+        def __init__(self, base):
+            self.__dict__.update(base.__dict__)
+
+        def __enter__(self):
+            return self
+
+        def __exit__(self, *exc_info):
+            self.close()
+            # return None, so if an exception occurred, it will propagate
+
+    class ZipFile(BaseZipFile):
+        def __enter__(self):
+            return self
+
+        def __exit__(self, *exc_info):
+            self.close()
+            # return None, so if an exception occurred, it will propagate
+
+        def open(self, *args, **kwargs):
+            base = BaseZipFile.open(self, *args, **kwargs)
+            return ZipExtFile(base)
+
+try:
+    from platform import python_implementation
+except ImportError: # pragma: no cover
+    def python_implementation():
+        """Return a string identifying the Python implementation."""
+        if 'PyPy' in sys.version:
+            return 'PyPy'
+        if os.name == 'java':
+            return 'Jython'
+        if sys.version.startswith('IronPython'):
+            return 'IronPython'
+        return 'CPython'
+
+try:
+    import sysconfig
+except ImportError: # pragma: no cover
+    from ._backport import sysconfig
+
+try:
+    callable = callable
+except NameError:   # pragma: no cover
+    from collections import Callable
+
+    def callable(obj):
+        return isinstance(obj, Callable)
+
+
+try:
+    fsencode = os.fsencode
+    fsdecode = os.fsdecode
+except AttributeError:  # pragma: no cover
+    # Issue #99: on some systems (e.g. containerised),
+    # sys.getfilesystemencoding() returns None, and we need a real value,
+    # so fall back to utf-8. From the CPython 2.7 docs relating to Unix and
+    # sys.getfilesystemencoding(): the return value is "the user’s preference
+    # according to the result of nl_langinfo(CODESET), or None if the
+    # nl_langinfo(CODESET) failed."
+    _fsencoding = sys.getfilesystemencoding() or 'utf-8'
+    if _fsencoding == 'mbcs':
+        _fserrors = 'strict'
+    else:
+        _fserrors = 'surrogateescape'
+
+    def fsencode(filename):
+        if isinstance(filename, bytes):
+            return filename
+        elif isinstance(filename, text_type):
+            return filename.encode(_fsencoding, _fserrors)
+        else:
+            raise TypeError("expect bytes or str, not %s" %
+                            type(filename).__name__)
+
+    def fsdecode(filename):
+        if isinstance(filename, text_type):
+            return filename
+        elif isinstance(filename, bytes):
+            return filename.decode(_fsencoding, _fserrors)
+        else:
+            raise TypeError("expect bytes or str, not %s" %
+                            type(filename).__name__)
+
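+    # Round-trip sketch for the fallbacks above, assuming a utf-8
+    # filesystem encoding:
+    #
+    #     fsencode(u'caf\xe9')         # -> b'caf\xc3\xa9'
+    #     fsdecode(b'caf\xc3\xa9')     # -> u'caf\xe9'
+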
+try:
+    from tokenize import detect_encoding
+except ImportError: # pragma: no cover
+    from codecs import BOM_UTF8, lookup
+    import re
+
+    cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
+
+    def _get_normal_name(orig_enc):
+        """Imitates get_normal_name in tokenizer.c."""
+        # Only care about the first 12 characters.
+        enc = orig_enc[:12].lower().replace("_", "-")
+        if enc == "utf-8" or enc.startswith("utf-8-"):
+            return "utf-8"
+        if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+           enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+            return "iso-8859-1"
+        return orig_enc
+
+    def detect_encoding(readline):
+        """
+        The detect_encoding() function is used to detect the encoding that should
+        be used to decode a Python source file.  It requires one argument, readline,
+        in the same way as the tokenize() generator.
+
+        It will call readline a maximum of twice, and return the encoding used
+        (as a string) and a list of any lines (left as bytes) it has read in.
+
+        It detects the encoding from the presence of a utf-8 bom or an encoding
+        cookie as specified in pep-0263.  If both a bom and a cookie are present,
+        but disagree, a SyntaxError will be raised.  If the encoding cookie is an
+        invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
+        'utf-8-sig' is returned.
+
+        If no encoding is specified, then the default of 'utf-8' will be returned.
+        """
+        try:
+            filename = readline.__self__.name
+        except AttributeError:
+            filename = None
+        bom_found = False
+        encoding = None
+        default = 'utf-8'
+        def read_or_stop():
+            try:
+                return readline()
+            except StopIteration:
+                return b''
+
+        def find_cookie(line):
+            try:
+                # Decode as UTF-8. Either the line is an encoding declaration,
+                # in which case it should be pure ASCII, or it must be UTF-8
+                # per default encoding.
+                line_string = line.decode('utf-8')
+            except UnicodeDecodeError:
+                msg = "invalid or missing encoding declaration"
+                if filename is not None:
+                    msg = '{} for {!r}'.format(msg, filename)
+                raise SyntaxError(msg)
+
+            matches = cookie_re.findall(line_string)
+            if not matches:
+                return None
+            encoding = _get_normal_name(matches[0])
+            try:
+                codec = lookup(encoding)
+            except LookupError:
+                # This behaviour mimics the Python interpreter
+                if filename is None:
+                    msg = "unknown encoding: " + encoding
+                else:
+                    msg = "unknown encoding for {!r}: {}".format(filename,
+                            encoding)
+                raise SyntaxError(msg)
+
+            if bom_found:
+                if codec.name != 'utf-8':
+                    # This behaviour mimics the Python interpreter
+                    if filename is None:
+                        msg = 'encoding problem: utf-8'
+                    else:
+                        msg = 'encoding problem for {!r}: utf-8'.format(filename)
+                    raise SyntaxError(msg)
+                encoding += '-sig'
+            return encoding
+
+        first = read_or_stop()
+        if first.startswith(BOM_UTF8):
+            bom_found = True
+            first = first[3:]
+            default = 'utf-8-sig'
+        if not first:
+            return default, []
+
+        encoding = find_cookie(first)
+        if encoding:
+            return encoding, [first]
+
+        second = read_or_stop()
+        if not second:
+            return default, [first]
+
+        encoding = find_cookie(second)
+        if encoding:
+            return encoding, [first, second]
+
+        return default, [first, second]
+
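+    # Sketch of the fallback's contract (the file name is hypothetical):
+    #
+    #     with open("module.py", "rb") as f:
+    #         encoding, lines = detect_encoding(f.readline)
+    #     # encoding: e.g. "utf-8" or "utf-8-sig";
+    #     # lines: the raw byte lines consumed while detecting.
+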
+# For converting & <-> &amp; etc.
+try:
+    from html import escape
+except ImportError:
+    from cgi import escape
+if sys.version_info[:2] < (3, 4):
+    unescape = HTMLParser().unescape
+else:
+    from html import unescape
+
+try:
+    from collections import ChainMap
+except ImportError: # pragma: no cover
+    from collections import MutableMapping
+
+    try:
+        from reprlib import recursive_repr as _recursive_repr
+    except ImportError:
+        def _recursive_repr(fillvalue='...'):
+            '''
+            Decorator to make a repr function return fillvalue for a recursive
+            call
+            '''
+
+            def decorating_function(user_function):
+                repr_running = set()
+
+                def wrapper(self):
+                    key = id(self), get_ident()
+                    if key in repr_running:
+                        return fillvalue
+                    repr_running.add(key)
+                    try:
+                        result = user_function(self)
+                    finally:
+                        repr_running.discard(key)
+                    return result
+
+                # Can't use functools.wraps() here because of bootstrap issues
+                wrapper.__module__ = getattr(user_function, '__module__')
+                wrapper.__doc__ = getattr(user_function, '__doc__')
+                wrapper.__name__ = getattr(user_function, '__name__')
+                wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
+                return wrapper
+
+            return decorating_function
+
+    class ChainMap(MutableMapping):
+        ''' A ChainMap groups multiple dicts (or other mappings) together
+        to create a single, updateable view.
+
+        The underlying mappings are stored in a list.  That list is public and
+        can be accessed or updated using the *maps* attribute.  There is no
+        other state.
+
+        Lookups search the underlying mappings successively until a key is found.
+        In contrast, writes, updates, and deletions only operate on the first
+        mapping.
+
+        '''
+
+        def __init__(self, *maps):
+            '''Initialize a ChainMap by setting *maps* to the given mappings.
+            If no mappings are provided, a single empty dictionary is used.
+
+            '''
+            self.maps = list(maps) or [{}]          # always at least one map
+
+        def __missing__(self, key):
+            raise KeyError(key)
+
+        def __getitem__(self, key):
+            for mapping in self.maps:
+                try:
+                    return mapping[key]             # can't use 'key in mapping' with defaultdict
+                except KeyError:
+                    pass
+            return self.__missing__(key)            # support subclasses that define __missing__
+
+        def get(self, key, default=None):
+            return self[key] if key in self else default
+
+        def __len__(self):
+            return len(set().union(*self.maps))     # reuses stored hash values if possible
+
+        def __iter__(self):
+            return iter(set().union(*self.maps))
+
+        def __contains__(self, key):
+            return any(key in m for m in self.maps)
+
+        def __bool__(self):
+            return any(self.maps)
+
+        @_recursive_repr()
+        def __repr__(self):
+            return '{0.__class__.__name__}({1})'.format(
+                self, ', '.join(map(repr, self.maps)))
+
+        @classmethod
+        def fromkeys(cls, iterable, *args):
+            'Create a ChainMap with a single dict created from the iterable.'
+            return cls(dict.fromkeys(iterable, *args))
+
+        def copy(self):
+            'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
+            return self.__class__(self.maps[0].copy(), *self.maps[1:])
+
+        __copy__ = copy
+
+        def new_child(self):                        # like Django's Context.push()
+            'New ChainMap with a new dict followed by all previous maps.'
+            return self.__class__({}, *self.maps)
+
+        @property
+        def parents(self):                          # like Django's Context.pop()
+            'New ChainMap from maps[1:].'
+            return self.__class__(*self.maps[1:])
+
+        def __setitem__(self, key, value):
+            self.maps[0][key] = value
+
+        def __delitem__(self, key):
+            try:
+                del self.maps[0][key]
+            except KeyError:
+                raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+        def popitem(self):
+            'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
+            try:
+                return self.maps[0].popitem()
+            except KeyError:
+                raise KeyError('No keys found in the first mapping.')
+
+        def pop(self, key, *args):
+            'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
+            try:
+                return self.maps[0].pop(key, *args)
+            except KeyError:
+                raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+        def clear(self):
+            'Clear maps[0], leaving maps[1:] intact.'
+            self.maps[0].clear()
+
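+    # Usage sketch for the backport (the dicts are illustrative):
+    #
+    #     defaults = {'color': 'red', 'user': 'guest'}
+    #     overrides = {'user': 'admin'}
+    #     cm = ChainMap(overrides, defaults)
+    #     cm['user']          # 'admin' (first mapping wins on lookup)
+    #     cm['color']         # 'red'   (falls through to defaults)
+    #     cm['debug'] = True  # writes go to the first mapping only
+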
+try:
+    from importlib.util import cache_from_source  # Python >= 3.4
+except ImportError:  # pragma: no cover
+    try:
+        from imp import cache_from_source
+    except ImportError:  # pragma: no cover
+        def cache_from_source(path, debug_override=None):
+            assert path.endswith('.py')
+            if debug_override is None:
+                debug_override = __debug__
+            if debug_override:
+                suffix = 'c'
+            else:
+                suffix = 'o'
+            return path + suffix
+
+try:
+    from collections import OrderedDict
+except ImportError: # pragma: no cover
+## {{{ http://code.activestate.com/recipes/576693/ (r9)
+# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
+# Passes Python2.7's test suite and incorporates all the latest updates.
+    try:
+        from thread import get_ident as _get_ident
+    except ImportError:
+        from dummy_thread import get_ident as _get_ident
+
+    try:
+        from _abcoll import KeysView, ValuesView, ItemsView
+    except ImportError:
+        pass
+
+
+    class OrderedDict(dict):
+        'Dictionary that remembers insertion order'
+        # An inherited dict maps keys to values.
+        # The inherited dict provides __getitem__, __len__, __contains__, and get.
+        # The remaining methods are order-aware.
+        # Big-O running times for all methods are the same as for regular dictionaries.
+
+        # The internal self.__map dictionary maps keys to links in a doubly linked list.
+        # The circular doubly linked list starts and ends with a sentinel element.
+        # The sentinel element never gets deleted (this simplifies the algorithm).
+        # Each link is stored as a list of length three:  [PREV, NEXT, KEY].
+
+        def __init__(self, *args, **kwds):
+            '''Initialize an ordered dictionary.  Signature is the same as for
+            regular dictionaries, but keyword arguments are not recommended
+            because their insertion order is arbitrary.
+
+            '''
+            if len(args) > 1:
+                raise TypeError('expected at most 1 arguments, got %d' % len(args))
+            try:
+                self.__root
+            except AttributeError:
+                self.__root = root = []                     # sentinel node
+                root[:] = [root, root, None]
+                self.__map = {}
+            self.__update(*args, **kwds)
+
+        def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+            'od.__setitem__(i, y) <==> od[i]=y'
+            # Setting a new item creates a new link which goes at the end of the linked
+            # list, and the inherited dictionary is updated with the new key/value pair.
+            if key not in self:
+                root = self.__root
+                last = root[0]
+                last[1] = root[0] = self.__map[key] = [last, root, key]
+            dict_setitem(self, key, value)
+
+        def __delitem__(self, key, dict_delitem=dict.__delitem__):
+            'od.__delitem__(y) <==> del od[y]'
+            # Deleting an existing item uses self.__map to find the link which is
+            # then removed by updating the links in the predecessor and successor nodes.
+            dict_delitem(self, key)
+            link_prev, link_next, key = self.__map.pop(key)
+            link_prev[1] = link_next
+            link_next[0] = link_prev
+
+        def __iter__(self):
+            'od.__iter__() <==> iter(od)'
+            root = self.__root
+            curr = root[1]
+            while curr is not root:
+                yield curr[2]
+                curr = curr[1]
+
+        def __reversed__(self):
+            'od.__reversed__() <==> reversed(od)'
+            root = self.__root
+            curr = root[0]
+            while curr is not root:
+                yield curr[2]
+                curr = curr[0]
+
+        def clear(self):
+            'od.clear() -> None.  Remove all items from od.'
+            try:
+                for node in self.__map.itervalues():
+                    del node[:]
+                root = self.__root
+                root[:] = [root, root, None]
+                self.__map.clear()
+            except AttributeError:
+                pass
+            dict.clear(self)
+
+        def popitem(self, last=True):
+            '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+            Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+            '''
+            if not self:
+                raise KeyError('dictionary is empty')
+            root = self.__root
+            if last:
+                link = root[0]
+                link_prev = link[0]
+                link_prev[1] = root
+                root[0] = link_prev
+            else:
+                link = root[1]
+                link_next = link[1]
+                root[1] = link_next
+                link_next[0] = root
+            key = link[2]
+            del self.__map[key]
+            value = dict.pop(self, key)
+            return key, value
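+
+        # A doctest-style sketch of the LIFO/FIFO behaviour (illustrative only):
+        #     >>> od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
+        #     >>> od.popitem()            # LIFO by default
+        #     ('c', 3)
+        #     >>> od.popitem(last=False)  # FIFO when last is false
+        #     ('a', 1)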
+
+        # -- the following methods do not depend on the internal structure --
+
+        def keys(self):
+            'od.keys() -> list of keys in od'
+            return list(self)
+
+        def values(self):
+            'od.values() -> list of values in od'
+            return [self[key] for key in self]
+
+        def items(self):
+            'od.items() -> list of (key, value) pairs in od'
+            return [(key, self[key]) for key in self]
+
+        def iterkeys(self):
+            'od.iterkeys() -> an iterator over the keys in od'
+            return iter(self)
+
+        def itervalues(self):
+            'od.itervalues() -> an iterator over the values in od'
+            for k in self:
+                yield self[k]
+
+        def iteritems(self):
+            'od.iteritems() -> an iterator over the (key, value) items in od'
+            for k in self:
+                yield (k, self[k])
+
+        def update(*args, **kwds):
+            '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.
+
+            If E is a dict instance, does:           for k in E: od[k] = E[k]
+            If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
+            Or if E is an iterable of items, does:   for k, v in E: od[k] = v
+            In either case, this is followed by:     for k, v in F.items(): od[k] = v
+
+            '''
+            if len(args) > 2:
+                raise TypeError('update() takes at most 2 positional '
+                                'arguments (%d given)' % (len(args),))
+            elif not args:
+                raise TypeError('update() takes at least 1 argument (0 given)')
+            self = args[0]
+            # Make progressively weaker assumptions about "other"
+            other = ()
+            if len(args) == 2:
+                other = args[1]
+            if isinstance(other, dict):
+                for key in other:
+                    self[key] = other[key]
+            elif hasattr(other, 'keys'):
+                for key in other.keys():
+                    self[key] = other[key]
+            else:
+                for key, value in other:
+                    self[key] = value
+            for key, value in kwds.items():
+                self[key] = value
+
+        __update = update  # let subclasses override update without breaking __init__
+
+        __marker = object()
+
+        def pop(self, key, default=__marker):
+            '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+            If key is not found, d is returned if given, otherwise KeyError is raised.
+
+            '''
+            if key in self:
+                result = self[key]
+                del self[key]
+                return result
+            if default is self.__marker:
+                raise KeyError(key)
+            return default
+
+        def setdefault(self, key, default=None):
+            'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+            if key in self:
+                return self[key]
+            self[key] = default
+            return default
+
+        def __repr__(self, _repr_running={}):
+            'od.__repr__() <==> repr(od)'
+            # The shared mutable default is intentional: recursive calls on
+            # the same thread see the same dict, which is how self-referencing
+            # dictionaries are detected and rendered as '...'.
+            call_key = id(self), _get_ident()
+            if call_key in _repr_running:
+                return '...'
+            _repr_running[call_key] = 1
+            try:
+                if not self:
+                    return '%s()' % (self.__class__.__name__,)
+                return '%s(%r)' % (self.__class__.__name__, self.items())
+            finally:
+                del _repr_running[call_key]
+
+        def __reduce__(self):
+            'Return state information for pickling'
+            items = [[k, self[k]] for k in self]
+            inst_dict = vars(self).copy()
+            for k in vars(OrderedDict()):
+                inst_dict.pop(k, None)
+            if inst_dict:
+                return (self.__class__, (items,), inst_dict)
+            return self.__class__, (items,)
+
+        def copy(self):
+            'od.copy() -> a shallow copy of od'
+            return self.__class__(self)
+
+        @classmethod
+        def fromkeys(cls, iterable, value=None):
+            '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
+            and values equal to v (which defaults to None).
+
+            '''
+            d = cls()
+            for key in iterable:
+                d[key] = value
+            return d
+
+        def __eq__(self, other):
+            '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
+            while comparison to a regular mapping is order-insensitive.
+
+            '''
+            if isinstance(other, OrderedDict):
+                return len(self) == len(other) and self.items() == other.items()
+            return dict.__eq__(self, other)
+
+        def __ne__(self, other):
+            return not self == other
+
+        # -- the following methods are only used in Python 2.7 --
+
+        def viewkeys(self):
+            "od.viewkeys() -> a set-like object providing a view on od's keys"
+            return KeysView(self)
+
+        def viewvalues(self):
+            "od.viewvalues() -> an object providing a view on od's values"
+            return ValuesView(self)
+
+        def viewitems(self):
+            "od.viewitems() -> a set-like object providing a view on od's items"
+            return ItemsView(self)
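+
+        # Usage sketch for this fallback class (illustrative only):
+        #     >>> od = OrderedDict()
+        #     >>> od['z'] = 1
+        #     >>> od['a'] = 2
+        #     >>> od.keys()             # insertion order, not sorted order
+        #     ['z', 'a']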
+
+try:
+    from logging.config import BaseConfigurator, valid_ident
+except ImportError: # pragma: no cover
+    IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
+
+
+    def valid_ident(s):
+        m = IDENTIFIER.match(s)
+        if not m:
+            raise ValueError('Not a valid Python identifier: %r' % s)
+        return True
+
+
+    # The ConvertingXXX classes are wrappers around standard Python containers,
+    # and they serve to convert any suitable values in the container. The
+    # conversion converts base dicts, lists and tuples to their wrapped
+    # equivalents, whereas strings which match a conversion format are converted
+    # appropriately.
+    #
+    # Each wrapper should have a configurator attribute holding the actual
+    # configurator to use for conversion.
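+    #
+    # A minimal sketch of the intended flow (hypothetical config, not
+    # executed here):
+    #
+    #     cfg = BaseConfigurator({'handler': 'ext://logging.StreamHandler'})
+    #     cfg.config['handler']   # ConvertingDict.__getitem__ hands the raw
+    #                             # string to convert(); the ext:// prefix
+    #                             # routes it to ext_convert, so the result
+    #                             # is the StreamHandler class itself.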
+
+    class ConvertingDict(dict):
+        """A converting dictionary wrapper."""
+
+        def __getitem__(self, key):
+            value = dict.__getitem__(self, key)
+            result = self.configurator.convert(value)
+            #If the converted value is different, save for next time
+            if value is not result:
+                self[key] = result
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+        def get(self, key, default=None):
+            value = dict.get(self, key, default)
+            result = self.configurator.convert(value)
+            #If the converted value is different, save for next time
+            if value is not result:
+                self[key] = result
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+        def pop(self, key, default=None):
+            value = dict.pop(self, key, default)
+            result = self.configurator.convert(value)
+            # If the converted value is different, wire up parent/key so the
+            # converted child keeps its configurator context.
+            if value is not result:
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+    class ConvertingList(list):
+        """A converting list wrapper."""
+        def __getitem__(self, key):
+            value = list.__getitem__(self, key)
+            result = self.configurator.convert(value)
+            #If the converted value is different, save for next time
+            if value is not result:
+                self[key] = result
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+        def pop(self, idx=-1):
+            value = list.pop(self, idx)
+            result = self.configurator.convert(value)
+            if value is not result:
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+            return result
+
+    class ConvertingTuple(tuple):
+        """A converting tuple wrapper."""
+        def __getitem__(self, key):
+            value = tuple.__getitem__(self, key)
+            result = self.configurator.convert(value)
+            if value is not result:
+                if type(result) in (ConvertingDict, ConvertingList,
+                                    ConvertingTuple):
+                    result.parent = self
+                    result.key = key
+            return result
+
+    class BaseConfigurator(object):
+        """
+        The configurator base class which defines some useful defaults.
+        """
+
+        CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
+
+        WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
+        DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
+        INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
+        DIGIT_PATTERN = re.compile(r'^\d+$')
+
+        value_converters = {
+            'ext' : 'ext_convert',
+            'cfg' : 'cfg_convert',
+        }
+
+        # We might want to use a different one, e.g. importlib
+        importer = staticmethod(__import__)
+
+        def __init__(self, config):
+            self.config = ConvertingDict(config)
+            self.config.configurator = self
+
+        def resolve(self, s):
+            """
+            Resolve strings to objects using standard import and attribute
+            syntax.
+            """
+            name = s.split('.')
+            used = name.pop(0)
+            try:
+                found = self.importer(used)
+                for frag in name:
+                    used += '.' + frag
+                    try:
+                        found = getattr(found, frag)
+                    except AttributeError:
+                        self.importer(used)
+                        found = getattr(found, frag)
+                return found
+            except ImportError:
+                e, tb = sys.exc_info()[1:]
+                v = ValueError('Cannot resolve %r: %s' % (s, e))
+                v.__cause__, v.__traceback__ = e, tb
+                raise v
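+
+        # For example (illustrative only):
+        #     resolve('logging.handlers.RotatingFileHandler')
+        # imports 'logging', retries each dotted fragment as an import when
+        # attribute lookup fails, and returns the RotatingFileHandler class.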
+
+        def ext_convert(self, value):
+            """Default converter for the ext:// protocol."""
+            return self.resolve(value)
+
+        def cfg_convert(self, value):
+            """Default converter for the cfg:// protocol."""
+            rest = value
+            m = self.WORD_PATTERN.match(rest)
+            if m is None:
+                raise ValueError("Unable to convert %r" % value)
+            else:
+                rest = rest[m.end():]
+                d = self.config[m.groups()[0]]
+                while rest:
+                    m = self.DOT_PATTERN.match(rest)
+                    if m:
+                        d = d[m.groups()[0]]
+                    else:
+                        m = self.INDEX_PATTERN.match(rest)
+                        if m:
+                            idx = m.groups()[0]
+                            if not self.DIGIT_PATTERN.match(idx):
+                                d = d[idx]
+                            else:
+                                try:
+                                    n = int(idx) # try as number first (most likely)
+                                    d = d[n]
+                                except TypeError:
+                                    d = d[idx]
+                    if m:
+                        rest = rest[m.end():]
+                    else:
+                        raise ValueError('Unable to convert '
+                                         '%r at %r' % (value, rest))
+            #rest should be empty
+            return d
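+
+        # For example (illustrative only), given config
+        #     {'handlers': {'console': {'level': 'DEBUG'}}}
+        # cfg_convert('handlers.console.level') matches WORD_PATTERN to get
+        # 'handlers', then DOT_PATTERN for 'console' and 'level', and returns
+        # 'DEBUG'; an INDEX_PATTERN segment such as '[0]' indexes into lists.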
+
+        def convert(self, value):
+            """
+            Convert values to an appropriate type. dicts, lists and tuples are
+            replaced by their converting alternatives. Strings are checked to
+            see if they have a conversion format and are converted if they do.
+            """
+            if not isinstance(value, ConvertingDict) and isinstance(value, dict):
+                value = ConvertingDict(value)
+                value.configurator = self
+            elif not isinstance(value, ConvertingList) and isinstance(value, list):
+                value = ConvertingList(value)
+                value.configurator = self
+            elif not isinstance(value, ConvertingTuple) and\
+                     isinstance(value, tuple):
+                value = ConvertingTuple(value)
+                value.configurator = self
+            elif isinstance(value, string_types):
+                m = self.CONVERT_PATTERN.match(value)
+                if m:
+                    d = m.groupdict()
+                    prefix = d['prefix']
+                    converter = self.value_converters.get(prefix, None)
+                    if converter:
+                        suffix = d['suffix']
+                        converter = getattr(self, converter)
+                        value = converter(suffix)
+            return value
+
+        def configure_custom(self, config):
+            """Configure an object with a user-supplied factory."""
+            c = config.pop('()')
+            if not callable(c):
+                c = self.resolve(c)
+            props = config.pop('.', None)
+            # Check for valid identifiers
+            kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
+            result = c(**kwargs)
+            if props:
+                for name, value in props.items():
+                    setattr(result, name, value)
+            return result
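+
+        # A hypothetical config sketch (not from the source):
+        #     {'()': 'logging.StreamHandler', '.': {'terminator': ''}}
+        # resolves the '()' factory, calls it with the remaining valid-ident
+        # keys as keyword arguments, then applies the '.' entries via setattr.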
+
+        def as_tuple(self, value):
+            """Utility function which converts lists to tuples."""
+            if isinstance(value, list):
+                value = tuple(value)
+            return value
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/database.py b/lib/python3.7/site-packages/pip/_vendor/distlib/database.py
new file mode 100644
index 0000000000000000000000000000000000000000..b13cdac92b230b0048bf7705dc92e15cefc58c05
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/database.py
@@ -0,0 +1,1339 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2017 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""PEP 376 implementation."""
+
+from __future__ import unicode_literals
+
+import base64
+import codecs
+import contextlib
+import hashlib
+import logging
+import os
+import posixpath
+import sys
+import zipimport
+
+from . import DistlibException, resources
+from .compat import StringIO
+from .version import get_scheme, UnsupportedVersionError
+from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
+                       LEGACY_METADATA_FILENAME)
+from .util import (parse_requirement, cached_property, parse_name_and_version,
+                   read_exports, write_exports, CSVReader, CSVWriter)
+
+
+__all__ = ['Distribution', 'BaseInstalledDistribution',
+           'InstalledDistribution', 'EggInfoDistribution',
+           'DistributionPath']
+
+
+logger = logging.getLogger(__name__)
+
+EXPORTS_FILENAME = 'pydist-exports.json'
+COMMANDS_FILENAME = 'pydist-commands.json'
+
+DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
+              'RESOURCES', EXPORTS_FILENAME, 'SHARED')
+
+DISTINFO_EXT = '.dist-info'
+
+
+class _Cache(object):
+    """
+    A simple cache mapping names and .dist-info paths to distributions
+    """
+    def __init__(self):
+        """
+        Initialise an instance. There is normally one for each DistributionPath.
+        """
+        self.name = {}
+        self.path = {}
+        self.generated = False
+
+    def clear(self):
+        """
+        Clear the cache, setting it to its initial state.
+        """
+        self.name.clear()
+        self.path.clear()
+        self.generated = False
+
+    def add(self, dist):
+        """
+        Add a distribution to the cache.
+        :param dist: The distribution to add.
+        """
+        if dist.path not in self.path:
+            self.path[dist.path] = dist
+            self.name.setdefault(dist.key, []).append(dist)
+
+
+class DistributionPath(object):
+    """
+    Represents a set of distributions installed on a path (typically sys.path).
+    """
+    def __init__(self, path=None, include_egg=False):
+        """
+        Create an instance from a path, optionally including legacy (distutils/
+        setuptools/distribute) distributions.
+        :param path: The path to use, as a list of directories. If not specified,
+                     sys.path is used.
+        :param include_egg: If True, this instance will look for and return legacy
+                            distributions as well as those based on PEP 376.
+        """
+        if path is None:
+            path = sys.path
+        self.path = path
+        self._include_dist = True
+        self._include_egg = include_egg
+
+        self._cache = _Cache()
+        self._cache_egg = _Cache()
+        self._cache_enabled = True
+        self._scheme = get_scheme('default')
+
+    def _get_cache_enabled(self):
+        return self._cache_enabled
+
+    def _set_cache_enabled(self, value):
+        self._cache_enabled = value
+
+    cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
+
+    def clear_cache(self):
+        """
+        Clears the internal cache.
+        """
+        self._cache.clear()
+        self._cache_egg.clear()
+
+
+    def _yield_distributions(self):
+        """
+        Yield .dist-info and/or .egg(-info) distributions.
+        """
+        # We need to check if we've seen some resources already, because on
+        # some Linux systems (e.g. some Debian/Ubuntu variants) there are
+        # symlinks which alias other files in the environment.
+        seen = set()
+        for path in self.path:
+            finder = resources.finder_for_path(path)
+            if finder is None:
+                continue
+            r = finder.find('')
+            if not r or not r.is_container:
+                continue
+            rset = sorted(r.resources)
+            for entry in rset:
+                r = finder.find(entry)
+                if not r or r.path in seen:
+                    continue
+                if self._include_dist and entry.endswith(DISTINFO_EXT):
+                    possible_filenames = [METADATA_FILENAME,
+                                          WHEEL_METADATA_FILENAME,
+                                          LEGACY_METADATA_FILENAME]
+                    for metadata_filename in possible_filenames:
+                        metadata_path = posixpath.join(entry, metadata_filename)
+                        pydist = finder.find(metadata_path)
+                        if pydist:
+                            break
+                    else:
+                        continue
+
+                    with contextlib.closing(pydist.as_stream()) as stream:
+                        metadata = Metadata(fileobj=stream, scheme='legacy')
+                    logger.debug('Found %s', r.path)
+                    seen.add(r.path)
+                    yield new_dist_class(r.path, metadata=metadata,
+                                         env=self)
+                elif self._include_egg and entry.endswith(('.egg-info',
+                                                          '.egg')):
+                    logger.debug('Found %s', r.path)
+                    seen.add(r.path)
+                    yield old_dist_class(r.path, self)
+
+    def _generate_cache(self):
+        """
+        Scan the path for distributions and populate the cache with
+        those that are found.
+        """
+        gen_dist = not self._cache.generated
+        gen_egg = self._include_egg and not self._cache_egg.generated
+        if gen_dist or gen_egg:
+            for dist in self._yield_distributions():
+                if isinstance(dist, InstalledDistribution):
+                    self._cache.add(dist)
+                else:
+                    self._cache_egg.add(dist)
+
+            if gen_dist:
+                self._cache.generated = True
+            if gen_egg:
+                self._cache_egg.generated = True
+
+    @classmethod
+    def distinfo_dirname(cls, name, version):
+        """
+        The *name* and *version* parameters are converted into their
+        filename-escaped form, i.e. any ``'-'`` characters are replaced
+        with ``'_'`` other than the one in ``'dist-info'`` and the one
+        separating the name from the version number.
+
+        :parameter name: is converted to a standard distribution name by replacing
+                         any runs of non-alphanumeric characters with a single
+                         ``'-'``.
+        :type name: string
+        :parameter version: is converted to a standard version string. Spaces
+                            become dots, and all other non-alphanumeric characters
+                            (except dots) become dashes, with runs of multiple
+                            dashes condensed to a single dash.
+        :type version: string
+        :returns: directory name
+        :rtype: string"""
+        name = name.replace('-', '_')
+        return '-'.join([name, version]) + DISTINFO_EXT
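+
+    # For example (illustrative only):
+    #     >>> DistributionPath.distinfo_dirname('pip', '19.0.3')
+    #     'pip-19.0.3.dist-info'
+    #     >>> DistributionPath.distinfo_dirname('python-dateutil', '2.8')
+    #     'python_dateutil-2.8.dist-info'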
+
+    def get_distributions(self):
+        """
+        Provides an iterator that looks for distributions and returns
+        :class:`InstalledDistribution` or
+        :class:`EggInfoDistribution` instances for each one of them.
+
+        :rtype: iterator of :class:`InstalledDistribution` and
+                :class:`EggInfoDistribution` instances
+        """
+        if not self._cache_enabled:
+            for dist in self._yield_distributions():
+                yield dist
+        else:
+            self._generate_cache()
+
+            for dist in self._cache.path.values():
+                yield dist
+
+            if self._include_egg:
+                for dist in self._cache_egg.path.values():
+                    yield dist
+
+    def get_distribution(self, name):
+        """
+        Looks for a named distribution on the path.
+
+        This function only returns the first result found, as no more than one
+        value is expected. If nothing is found, ``None`` is returned.
+
+        :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
+                or ``None``
+        """
+        result = None
+        name = name.lower()
+        if not self._cache_enabled:
+            for dist in self._yield_distributions():
+                if dist.key == name:
+                    result = dist
+                    break
+        else:
+            self._generate_cache()
+
+            if name in self._cache.name:
+                result = self._cache.name[name][0]
+            elif self._include_egg and name in self._cache_egg.name:
+                result = self._cache_egg.name[name][0]
+        return result
+
+    def provides_distribution(self, name, version=None):
+        """
+        Iterate over all distributions and yield those that provide *name*.
+        If a *version* is provided, it is used to filter the results.
+
+        This is a generator: every matching distribution is yielded, and
+        nothing is yielded when no installed distribution provides *name*.
+
+        :parameter version: a version specifier that indicates the version
+                            required, conforming to the format in ``PEP-345``
+
+        :type name: string
+        :type version: string
+        """
+        matcher = None
+        if version is not None:
+            try:
+                matcher = self._scheme.matcher('%s (%s)' % (name, version))
+            except ValueError:
+                raise DistlibException('invalid name or version: %r, %r' %
+                                      (name, version))
+
+        for dist in self.get_distributions():
+            # We hit a problem on Travis where enum34 was installed and doesn't
+            # have a provides attribute ...
+            if not hasattr(dist, 'provides'):
+                logger.debug('No "provides": %s', dist)
+            else:
+                provided = dist.provides
+
+                for p in provided:
+                    p_name, p_ver = parse_name_and_version(p)
+                    if matcher is None:
+                        if p_name == name:
+                            yield dist
+                            break
+                    else:
+                        if p_name == name and matcher.match(p_ver):
+                            yield dist
+                            break
+
+    def get_file_path(self, name, relative_path):
+        """
+        Return the path to a resource file.
+        """
+        dist = self.get_distribution(name)
+        if dist is None:
+            raise LookupError('no distribution named %r found' % name)
+        return dist.get_resource_path(relative_path)
+
+    def get_exported_entries(self, category, name=None):
+        """
+        Return all of the exported entries in a particular category.
+
+        :param category: The category to search for entries.
+        :param name: If specified, only entries with that name are returned.
+        """
+        for dist in self.get_distributions():
+            r = dist.exports
+            if category in r:
+                d = r[category]
+                if name is not None:
+                    if name in d:
+                        yield d[name]
+                else:
+                    for v in d.values():
+                        yield v
+
+
+class Distribution(object):
+    """
+    A base class for distributions, whether installed or from indexes.
+    Either way, it must have some metadata, so that's all that's needed
+    for construction.
+    """
+
+    build_time_dependency = False
+    """
+    Set to True if it's known to be only a build-time dependency (i.e.
+    not needed after installation).
+    """
+
+    requested = False
+    """A boolean that indicates whether the ``REQUESTED`` metadata file is
+    present (in other words, whether the package was installed by user
+    request or it was installed as a dependency)."""
+
+    def __init__(self, metadata):
+        """
+        Initialise an instance.
+        :param metadata: The instance of :class:`Metadata` describing this
+        distribution.
+        """
+        self.metadata = metadata
+        self.name = metadata.name
+        self.key = self.name.lower()    # for case-insensitive comparisons
+        self.version = metadata.version
+        self.locator = None
+        self.digest = None
+        self.extras = None      # additional features requested
+        self.context = None     # environment marker overrides
+        self.download_urls = set()
+        self.digests = {}
+
+    @property
+    def source_url(self):
+        """
+        The source archive download URL for this distribution.
+        """
+        return self.metadata.source_url
+
+    download_url = source_url   # Backward compatibility
+
+    @property
+    def name_and_version(self):
+        """
+        A utility property which displays the name and version in parentheses.
+        """
+        return '%s (%s)' % (self.name, self.version)
+
+    @property
+    def provides(self):
+        """
+        A list of distribution names and versions provided by this distribution.
+        :return: A list of "name (version)" strings.
+        """
+        plist = self.metadata.provides
+        s = '%s (%s)' % (self.name, self.version)
+        if s not in plist:
+            plist.append(s)
+        return plist
+
+    def _get_requirements(self, req_attr):
+        md = self.metadata
+        logger.debug('Getting requirements from metadata %r', md.todict())
+        reqts = getattr(md, req_attr)
+        return set(md.get_requirements(reqts, extras=self.extras,
+                                       env=self.context))
+
+    @property
+    def run_requires(self):
+        return self._get_requirements('run_requires')
+
+    @property
+    def meta_requires(self):
+        return self._get_requirements('meta_requires')
+
+    @property
+    def build_requires(self):
+        return self._get_requirements('build_requires')
+
+    @property
+    def test_requires(self):
+        return self._get_requirements('test_requires')
+
+    @property
+    def dev_requires(self):
+        return self._get_requirements('dev_requires')
+
+    def matches_requirement(self, req):
+        """
+        Say if this instance matches (fulfills) a requirement.
+        :param req: The requirement to match.
+        :rtype req: str
+        :return: True if it matches, else False.
+        """
+        # Requirement may contain extras - parse to lose those
+        # from what's passed to the matcher
+        r = parse_requirement(req)
+        scheme = get_scheme(self.metadata.scheme)
+        try:
+            matcher = scheme.matcher(r.requirement)
+        except UnsupportedVersionError:
+            # XXX compat-mode if cannot read the version
+            logger.warning('could not read version %r - using name only',
+                           req)
+            name = req.split()[0]
+            matcher = scheme.matcher(name)
+
+        name = matcher.key   # case-insensitive
+
+        result = False
+        for p in self.provides:
+            p_name, p_ver = parse_name_and_version(p)
+            if p_name != name:
+                continue
+            try:
+                result = matcher.match(p_ver)
+                break
+            except UnsupportedVersionError:
+                pass
+        return result
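+
+    # For example (illustrative only), if this distribution is reportlab
+    # 3.5.13, matches_requirement('reportlab (>=3.0)') parses the name and
+    # version specifier, then checks each "name (version)" string in
+    # self.provides against the matcher and returns True on the first match.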
+
+    def __repr__(self):
+        """
+        Return a textual representation of this instance.
+        """
+        if self.source_url:
+            suffix = ' [%s]' % self.source_url
+        else:
+            suffix = ''
+        return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
+
+    def __eq__(self, other):
+        """
+        See if this distribution is the same as another.
+        :param other: The distribution to compare with. To be equal to one
+                      another, distributions must have the same type, name,
+                      version and source_url.
+        :return: True if it is the same, else False.
+        """
+        if type(other) is not type(self):
+            result = False
+        else:
+            result = (self.name == other.name and
+                      self.version == other.version and
+                      self.source_url == other.source_url)
+        return result
+
+    def __hash__(self):
+        """
+        Compute hash in a way which matches the equality test.
+        """
+        return hash(self.name) + hash(self.version) + hash(self.source_url)
+
+
+class BaseInstalledDistribution(Distribution):
+    """
+    This is the base class for installed distributions (whether PEP 376 or
+    legacy).
+    """
+
+    hasher = None
+
+    def __init__(self, metadata, path, env=None):
+        """
+        Initialise an instance.
+        :param metadata: An instance of :class:`Metadata` which describes the
+                         distribution. This will normally have been initialised
+                         from a metadata file in the ``path``.
+        :param path:     The path of the ``.dist-info`` or ``.egg-info``
+                         directory for the distribution.
+        :param env:      This is normally the :class:`DistributionPath`
+                         instance where this distribution was found.
+        """
+        super(BaseInstalledDistribution, self).__init__(metadata)
+        self.path = path
+        self.dist_path = env
+
+    def get_hash(self, data, hasher=None):
+        """
+        Get the hash of some data, using a particular hash algorithm, if
+        specified.
+
+        :param data: The data to be hashed.
+        :type data: bytes
+        :param hasher: The name of a hash implementation, supported by hashlib,
+                       or ``None``. Examples of valid values are ``'sha1'``,
+                       ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
+                       ``'sha512'``. If no hasher is specified, the ``hasher``
+                       attribute of the :class:`InstalledDistribution` instance
+                       is used. If the hasher is determined to be ``None``, MD5
+                       is used as the hashing algorithm.
+        :returns: The hash of the data. If a hasher was explicitly specified,
+                  the returned hash will be prefixed with the specified hasher
+                  followed by '='.
+        :rtype: str
+        """
+        if hasher is None:
+            hasher = self.hasher
+        if hasher is None:
+            hasher = hashlib.md5
+            prefix = ''
+        else:
+            # Prefix with the algorithm actually used, which may differ from
+            # self.hasher when a hasher name is passed in explicitly.
+            prefix = '%s=' % hasher
+            hasher = getattr(hashlib, hasher)
+        digest = hasher(data).digest()
+        digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
+        return '%s%s' % (prefix, digest)
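+
+    # For example (illustrative only), with hasher='sha256' the return value
+    # looks like 'sha256=<urlsafe-base64-digest-without-padding>', matching
+    # the hash format used in RECORD files.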
+
+
+class InstalledDistribution(BaseInstalledDistribution):
+    """
+    Created with the *path* of the ``.dist-info`` directory provided to the
+    constructor. It reads the metadata contained in ``pydist.json`` when it
+    is instantiated, or uses a passed-in Metadata instance (useful when
+    dry-run mode is being used).
+    """
+
+    hasher = 'sha256'
+
+    def __init__(self, path, metadata=None, env=None):
+        self.modules = []
+        self.finder = finder = resources.finder_for_path(path)
+        if finder is None:
+            raise ValueError('finder unavailable for %s' % path)
+        if env and env._cache_enabled and path in env._cache.path:
+            metadata = env._cache.path[path].metadata
+        elif metadata is None:
+            r = finder.find(METADATA_FILENAME)
+            # Temporary - for Wheel 0.23 support
+            if r is None:
+                r = finder.find(WHEEL_METADATA_FILENAME)
+            # Temporary - for legacy support
+            if r is None:
+                r = finder.find('METADATA')
+            if r is None:
+                raise ValueError('no %s found in %s' % (METADATA_FILENAME,
+                                                        path))
+            with contextlib.closing(r.as_stream()) as stream:
+                metadata = Metadata(fileobj=stream, scheme='legacy')
+
+        super(InstalledDistribution, self).__init__(metadata, path, env)
+
+        if env and env._cache_enabled:
+            env._cache.add(self)
+
+        r = finder.find('REQUESTED')
+        self.requested = r is not None
+        p = os.path.join(path, 'top_level.txt')
+        if os.path.exists(p):
+            with open(p, 'rb') as f:
+                data = f.read()
+            self.modules = data.splitlines()
+
+    def __repr__(self):
+        return '<InstalledDistribution %r %s at %r>' % (
+            self.name, self.version, self.path)
+
+    def __str__(self):
+        return "%s %s" % (self.name, self.version)
+
+    def _get_records(self):
+        """
+        Get the list of installed files for the distribution
+        :return: A list of tuples of path, hash and size. Note that hash and
+                 size might be ``None`` for some entries. The path is exactly
+                 as stored in the file (which is as in PEP 376).
+        """
+        results = []
+        r = self.get_distinfo_resource('RECORD')
+        with contextlib.closing(r.as_stream()) as stream:
+            with CSVReader(stream=stream) as record_reader:
+                # Base location is parent dir of .dist-info dir
+                #base_location = os.path.dirname(self.path)
+                #base_location = os.path.abspath(base_location)
+                for row in record_reader:
+                    missing = [None for i in range(len(row), 3)]
+                    path, checksum, size = row + missing
+                    #if not os.path.isabs(path):
+                    #    path = path.replace('/', os.sep)
+                    #    path = os.path.join(base_location, path)
+                    results.append((path, checksum, size))
+        return results
+
+    @cached_property
+    def exports(self):
+        """
+        Return the information exported by this distribution.
+        :return: A dictionary of exports, mapping an export category to a dict
+                 of :class:`ExportEntry` instances describing the individual
+                 export entries, and keyed by name.
+        """
+        result = {}
+        r = self.get_distinfo_resource(EXPORTS_FILENAME)
+        if r:
+            result = self.read_exports()
+        return result
+
+    def read_exports(self):
+        """
+        Read exports data from a file in .ini format.
+
+        :return: A dictionary of exports, mapping an export category to a list
+                 of :class:`ExportEntry` instances describing the individual
+                 export entries.
+        """
+        result = {}
+        r = self.get_distinfo_resource(EXPORTS_FILENAME)
+        if r:
+            with contextlib.closing(r.as_stream()) as stream:
+                result = read_exports(stream)
+        return result
+
+    def write_exports(self, exports):
+        """
+        Write a dictionary of exports to a file in .ini format.
+        :param exports: A dictionary of exports, mapping an export category to
+                        a list of :class:`ExportEntry` instances describing the
+                        individual export entries.
+        """
+        rf = self.get_distinfo_file(EXPORTS_FILENAME)
+        with open(rf, 'w') as f:
+            write_exports(exports, f)
+
+    def get_resource_path(self, relative_path):
+        """
+        NOTE: This API may change in the future.
+
+        Return the absolute path to a resource file with the given relative
+        path.
+
+        :param relative_path: The path, relative to .dist-info, of the resource
+                              of interest.
+        :return: The absolute path where the resource is to be found.
+        """
+        r = self.get_distinfo_resource('RESOURCES')
+        with contextlib.closing(r.as_stream()) as stream:
+            with CSVReader(stream=stream) as resources_reader:
+                for relative, destination in resources_reader:
+                    if relative == relative_path:
+                        return destination
+        raise KeyError('no resource file with relative path %r '
+                       'is installed' % relative_path)
+
+    def list_installed_files(self):
+        """
+        Iterates over the ``RECORD`` entries and returns a tuple
+        ``(path, hash, size)`` for each line.
+
+        :returns: iterator of (path, hash, size)
+        """
+        for result in self._get_records():
+            yield result
+
+    def write_installed_files(self, paths, prefix, dry_run=False):
+        """
+        Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
+        existing ``RECORD`` file is silently overwritten.
+
+        prefix is used to determine when to write absolute paths.
+        """
+        prefix = os.path.join(prefix, '')
+        base = os.path.dirname(self.path)
+        base_under_prefix = base.startswith(prefix)
+        base = os.path.join(base, '')
+        record_path = self.get_distinfo_file('RECORD')
+        logger.info('creating %s', record_path)
+        if dry_run:
+            return None
+        with CSVWriter(record_path) as writer:
+            for path in paths:
+                if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
+                    # do not put size and hash, as in PEP-376
+                    hash_value = size = ''
+                else:
+                    size = '%d' % os.path.getsize(path)
+                    with open(path, 'rb') as fp:
+                        hash_value = self.get_hash(fp.read())
+                if path.startswith(base) or (base_under_prefix and
+                                             path.startswith(prefix)):
+                    path = os.path.relpath(path, base)
+                writer.writerow((path, hash_value, size))
+
+            # add the RECORD file itself
+            if record_path.startswith(base):
+                record_path = os.path.relpath(record_path, base)
+            writer.writerow((record_path, '', ''))
+        return record_path
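+
+    # Each RECORD row written above has the form (illustrative only):
+    #     foo/__init__.py,sha256=AbC...xyz,1234
+    # with empty hash and size fields for directories and .pyc/.pyo files.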
+
+    def check_installed_files(self):
+        """
+        Checks that the hashes and sizes of the files in ``RECORD`` are
+        matched by the files themselves. Returns a (possibly empty) list of
+        mismatches. Each entry in the mismatch list will be a tuple consisting
+        of the path, 'exists', 'size' or 'hash' according to what didn't match
+        (existence is checked first, then size, then hash), the expected
+        value and the actual value.
+        """
+        mismatches = []
+        base = os.path.dirname(self.path)
+        record_path = self.get_distinfo_file('RECORD')
+        for path, hash_value, size in self.list_installed_files():
+            if not os.path.isabs(path):
+                path = os.path.join(base, path)
+            if path == record_path:
+                continue
+            if not os.path.exists(path):
+                mismatches.append((path, 'exists', True, False))
+            elif os.path.isfile(path):
+                actual_size = str(os.path.getsize(path))
+                if size and actual_size != size:
+                    mismatches.append((path, 'size', size, actual_size))
+                elif hash_value:
+                    if '=' in hash_value:
+                        hasher = hash_value.split('=', 1)[0]
+                    else:
+                        hasher = None
+
+                    with open(path, 'rb') as f:
+                        actual_hash = self.get_hash(f.read(), hasher)
+                        if actual_hash != hash_value:
+                            mismatches.append((path, 'hash', hash_value, actual_hash))
+        return mismatches
+
+    @cached_property
+    def shared_locations(self):
+        """
+        A dictionary of shared locations whose keys are in the set 'prefix',
+        'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
+        The corresponding value is the absolute path of that category for
+        this distribution, and takes into account any paths selected by the
+        user at installation time (e.g. via command-line arguments). In the
+        case of the 'namespace' key, this would be a list of absolute paths
+        for the roots of namespace packages in this distribution.
+
+        The first time this property is accessed, the relevant information is
+        read from the SHARED file in the .dist-info directory.
+        """
+        result = {}
+        shared_path = os.path.join(self.path, 'SHARED')
+        if os.path.isfile(shared_path):
+            with codecs.open(shared_path, 'r', encoding='utf-8') as f:
+                lines = f.read().splitlines()
+            for line in lines:
+                key, value = line.split('=', 1)
+                if key == 'namespace':
+                    result.setdefault(key, []).append(value)
+                else:
+                    result[key] = value
+        return result
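+
+    # A SHARED file is a simple key=value listing; a hypothetical example:
+    #     prefix=/usr/local
+    #     scripts=/usr/local/bin
+    #     namespace=/usr/local/lib/python/foo
+    # where repeated 'namespace' keys accumulate into a list.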
+
+    def write_shared_locations(self, paths, dry_run=False):
+        """
+        Write shared location information to the SHARED file in .dist-info.
+        :param paths: A dictionary as described in the documentation for
+        :meth:`shared_locations`.
+        :param dry_run: If True, the action is logged but no file is actually
+                        written.
+        :return: The path of the file written to.
+        """
+        shared_path = os.path.join(self.path, 'SHARED')
+        logger.info('creating %s', shared_path)
+        if dry_run:
+            return None
+        lines = []
+        for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
+            path = paths[key]
+            if os.path.isdir(paths[key]):
+                lines.append('%s=%s' % (key, path))
+        for ns in paths.get('namespace', ()):
+            lines.append('namespace=%s' % ns)
+
+        with codecs.open(shared_path, 'w', encoding='utf-8') as f:
+            f.write('\n'.join(lines))
+        return shared_path
+
+    def get_distinfo_resource(self, path):
+        if path not in DIST_FILES:
+            raise DistlibException('invalid path for a dist-info file: '
+                                   '%r at %r' % (path, self.path))
+        finder = resources.finder_for_path(self.path)
+        if finder is None:
+            raise DistlibException('Unable to get a finder for %s' % self.path)
+        return finder.find(path)
+
+    def get_distinfo_file(self, path):
+        """
+        Returns a path located under the ``.dist-info`` directory. Returns a
+        string representing the path.
+
+        :parameter path: a ``'/'``-separated path relative to the
+                         ``.dist-info`` directory or an absolute path;
+                         If *path* is an absolute path and doesn't start
+                         with the ``.dist-info`` directory path,
+                         a :class:`DistlibException` is raised
+        :type path: str
+        :rtype: str
+        """
+        # If *path* contains a separator, treat it as an absolute path and
+        # check that it points into this distribution's .dist-info directory.
+        # XXX use relpath, add tests
+        if path.find(os.sep) >= 0:
+            distinfo_dirname, path = path.split(os.sep)[-2:]
+            if distinfo_dirname != self.path.split(os.sep)[-1]:
+                raise DistlibException(
+                    'dist-info file %r does not belong to the %r %s '
+                    'distribution' % (path, self.name, self.version))
+
+        # The file must be relative
+        if path not in DIST_FILES:
+            raise DistlibException('invalid path for a dist-info file: '
+                                   '%r at %r' % (path, self.path))
+
+        return os.path.join(self.path, path)
+
+    def list_distinfo_files(self):
+        """
+        Iterates over the ``RECORD`` entries and returns paths for each line if
+        the path is pointing to a file located in the ``.dist-info`` directory
+        or one of its subdirectories.
+
+        :returns: iterator of paths
+        """
+        base = os.path.dirname(self.path)
+        for path, checksum, size in self._get_records():
+            # XXX add separator or use real relpath algo
+            if not os.path.isabs(path):
+                path = os.path.join(base, path)
+            if path.startswith(self.path):
+                yield path
+
+    def __eq__(self, other):
+        return (isinstance(other, InstalledDistribution) and
+                self.path == other.path)
+
+    # See http://docs.python.org/reference/datamodel#object.__hash__
+    __hash__ = object.__hash__
+
+
+class EggInfoDistribution(BaseInstalledDistribution):
+    """Created with the *path* of the ``.egg-info`` directory or file provided
+    to the constructor. It reads the metadata contained in the file itself, or
+    if the given path happens to be a directory, the metadata is read from the
+    file ``PKG-INFO`` under that directory."""
+
+    requested = True    # as we have no way of knowing, assume it was
+    shared_locations = {}
+
+    def __init__(self, path, env=None):
+        def set_name_and_version(s, n, v):
+            s.name = n
+            s.key = n.lower()   # for case-insensitive comparisons
+            s.version = v
+
+        self.path = path
+        self.dist_path = env
+        if env and env._cache_enabled and path in env._cache_egg.path:
+            metadata = env._cache_egg.path[path].metadata
+            set_name_and_version(self, metadata.name, metadata.version)
+        else:
+            metadata = self._get_metadata(path)
+
+            # Need to be set before caching
+            set_name_and_version(self, metadata.name, metadata.version)
+
+            if env and env._cache_enabled:
+                env._cache_egg.add(self)
+        super(EggInfoDistribution, self).__init__(metadata, path, env)
+
+    def _get_metadata(self, path):
+        requires = None
+
+        def parse_requires_data(data):
+            """Create a list of dependencies from a requires.txt file.
+
+            *data*: the contents of a setuptools-produced requires.txt file.
+            """
+            reqs = []
+            lines = data.splitlines()
+            for line in lines:
+                line = line.strip()
+                if line.startswith('['):
+                    logger.warning('Unexpected line: quitting requirement scan: %r',
+                                   line)
+                    break
+                r = parse_requirement(line)
+                if not r:
+                    logger.warning('Not recognised as a requirement: %r', line)
+                    continue
+                if r.extras:
+                    logger.warning('extra requirements in requires.txt are '
+                                   'not supported')
+                if not r.constraints:
+                    reqs.append(r.name)
+                else:
+                    cons = ', '.join('%s%s' % c for c in r.constraints)
+                    reqs.append('%s (%s)' % (r.name, cons))
+            return reqs
+
+        def parse_requires_path(req_path):
+            """Create a list of dependencies from a requires.txt file.
+
+            *req_path*: the path to a setuptools-produced requires.txt file.
+            """
+
+            reqs = []
+            try:
+                with codecs.open(req_path, 'r', 'utf-8') as fp:
+                    reqs = parse_requires_data(fp.read())
+            except IOError:
+                pass
+            return reqs
+
+        tl_path = tl_data = None
+        if path.endswith('.egg'):
+            if os.path.isdir(path):
+                p = os.path.join(path, 'EGG-INFO')
+                meta_path = os.path.join(p, 'PKG-INFO')
+                metadata = Metadata(path=meta_path, scheme='legacy')
+                req_path = os.path.join(p, 'requires.txt')
+                tl_path = os.path.join(p, 'top_level.txt')
+                requires = parse_requires_path(req_path)
+            else:
+                # FIXME handle the case where zipfile is not available
+                zipf = zipimport.zipimporter(path)
+                fileobj = StringIO(
+                    zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
+                metadata = Metadata(fileobj=fileobj, scheme='legacy')
+                try:
+                    data = zipf.get_data('EGG-INFO/requires.txt')
+                    tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8')
+                    requires = parse_requires_data(data.decode('utf-8'))
+                except IOError:
+                    requires = None
+        elif path.endswith('.egg-info'):
+            if os.path.isdir(path):
+                req_path = os.path.join(path, 'requires.txt')
+                requires = parse_requires_path(req_path)
+                # Compute tl_path before *path* is re-pointed at PKG-INFO, so
+                # that it names top_level.txt inside the .egg-info directory.
+                tl_path = os.path.join(path, 'top_level.txt')
+                path = os.path.join(path, 'PKG-INFO')
+            metadata = Metadata(path=path, scheme='legacy')
+        else:
+            raise DistlibException('path must end with .egg-info or .egg, '
+                                   'got %r' % path)
+
+        if requires:
+            metadata.add_requirements(requires)
+        # look for top-level modules in top_level.txt, if present
+        if tl_data is None:
+            if tl_path is not None and os.path.exists(tl_path):
+                with open(tl_path, 'rb') as f:
+                    tl_data = f.read().decode('utf-8')
+        if not tl_data:
+            tl_data = []
+        else:
+            tl_data = tl_data.splitlines()
+        self.modules = tl_data
+        return metadata
+
+    def __repr__(self):
+        return '<EggInfoDistribution %r %s at %r>' % (
+            self.name, self.version, self.path)
+
+    def __str__(self):
+        return "%s %s" % (self.name, self.version)
+
+    def check_installed_files(self):
+        """
+        Checks that the hashes and sizes of the files in ``RECORD`` are
+        matched by the files themselves. Returns a (possibly empty) list of
+        mismatches. Each entry in the mismatch list will be a tuple consisting
+        of the path, 'exists', 'size' or 'hash' according to what didn't match
+        (existence is checked first, then size, then hash), the expected
+        value and the actual value.
+        """
+        mismatches = []
+        record_path = os.path.join(self.path, 'installed-files.txt')
+        if os.path.exists(record_path):
+            for path, _, _ in self.list_installed_files():
+                if path == record_path:
+                    continue
+                if not os.path.exists(path):
+                    mismatches.append((path, 'exists', True, False))
+        return mismatches
+
+    def list_installed_files(self):
+        """
+        Iterates over the ``installed-files.txt`` entries and returns a tuple
+        ``(path, hash, size)`` for each line.
+
+        :returns: a list of (path, hash, size)
+        """
+
+        def _md5(path):
+            with open(path, 'rb') as f:
+                content = f.read()
+            return hashlib.md5(content).hexdigest()
+
+        def _size(path):
+            return os.stat(path).st_size
+
+        record_path = os.path.join(self.path, 'installed-files.txt')
+        result = []
+        if os.path.exists(record_path):
+            with codecs.open(record_path, 'r', encoding='utf-8') as f:
+                for line in f:
+                    line = line.strip()
+                    p = os.path.normpath(os.path.join(self.path, line))
+                    # "./" is present as a marker between installed files
+                    # and installation metadata files
+                    if not os.path.exists(p):
+                        logger.warning('Non-existent file: %s', p)
+                        if p.endswith(('.pyc', '.pyo')):
+                            continue
+                        #otherwise fall through and fail
+                    if not os.path.isdir(p):
+                        result.append((p, _md5(p), _size(p)))
+            result.append((record_path, None, None))
+        return result
+
+    def list_distinfo_files(self, absolute=False):
+        """
+        Iterates over the ``installed-files.txt`` entries and returns paths for
+        each line if the path is pointing to a file located in the
+        ``.egg-info`` directory or one of its subdirectories.
+
+        :parameter absolute: If *absolute* is ``True``, each returned path is
+                          transformed into a local absolute path. Otherwise the
+                          raw value from ``installed-files.txt`` is returned.
+        :type absolute: boolean
+        :returns: iterator of paths
+        """
+        record_path = os.path.join(self.path, 'installed-files.txt')
+        if os.path.exists(record_path):
+            skip = True
+            with codecs.open(record_path, 'r', encoding='utf-8') as f:
+                for line in f:
+                    line = line.strip()
+                    if line == './':
+                        skip = False
+                        continue
+                    if not skip:
+                        p = os.path.normpath(os.path.join(self.path, line))
+                        if p.startswith(self.path):
+                            if absolute:
+                                yield p
+                            else:
+                                yield line
+
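+    # A minimal sketch of the installed-files.txt layout consumed by the two
+    # methods above: paths are relative to the .egg-info directory, and the
+    # bare './' line marks where installed files end and installation
+    # metadata files begin. Illustrative contents only:
+    #
+    #   ../foo/__init__.py
+    #   ../foo/bar.py
+    #   ./
+    #   PKG-INFO
+    #   installed-files.txt
+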
+    def __eq__(self, other):
+        return (isinstance(other, EggInfoDistribution) and
+                self.path == other.path)
+
+    # See http://docs.python.org/reference/datamodel#object.__hash__
+    __hash__ = object.__hash__
+
+new_dist_class = InstalledDistribution
+old_dist_class = EggInfoDistribution
+
+
+class DependencyGraph(object):
+    """
+    Represents a dependency graph between distributions.
+
+    The dependency relationships are stored in an ``adjacency_list`` that maps
+    distributions to a list of ``(other, label)`` tuples where ``other``
+    is a distribution and the edge is labeled with ``label`` (i.e. the version
+    specifier, if such was provided). Also, for more efficient traversal, for
+    every distribution ``x``, a list of predecessors is kept in
+    ``reverse_list[x]``. An edge from distribution ``a`` to
+    distribution ``b`` means that ``a`` depends on ``b``. If any missing
+    dependencies are found, they are stored in ``missing``, which is a
+    dictionary that maps distributions to a list of requirements that were not
+    provided by any other distributions.
+    """
+
+    def __init__(self):
+        self.adjacency_list = {}
+        self.reverse_list = {}
+        self.missing = {}
+
+    def add_distribution(self, distribution):
+        """Add the *distribution* to the graph.
+
+        :type distribution: :class:`distutils2.database.InstalledDistribution`
+                            or :class:`distutils2.database.EggInfoDistribution`
+        """
+        self.adjacency_list[distribution] = []
+        self.reverse_list[distribution] = []
+        #self.missing[distribution] = []
+
+    def add_edge(self, x, y, label=None):
+        """Add an edge from distribution *x* to distribution *y* with the given
+        *label*.
+
+        :type x: :class:`distutils2.database.InstalledDistribution` or
+                 :class:`distutils2.database.EggInfoDistribution`
+        :type y: :class:`distutils2.database.InstalledDistribution` or
+                 :class:`distutils2.database.EggInfoDistribution`
+        :type label: ``str`` or ``None``
+        """
+        self.adjacency_list[x].append((y, label))
+        # multiple edges are allowed, so be careful
+        if x not in self.reverse_list[y]:
+            self.reverse_list[y].append(x)
+
+    def add_missing(self, distribution, requirement):
+        """
+        Add a missing *requirement* for the given *distribution*.
+
+        :type distribution: :class:`distutils2.database.InstalledDistribution`
+                            or :class:`distutils2.database.EggInfoDistribution`
+        :type requirement: ``str``
+        """
+        logger.debug('%s missing %r', distribution, requirement)
+        self.missing.setdefault(distribution, []).append(requirement)
+
+    def _repr_dist(self, dist):
+        return '%s %s' % (dist.name, dist.version)
+
+    def repr_node(self, dist, level=1):
+        """Return a string representation of the subgraph rooted at *dist*."""
+        output = [self._repr_dist(dist)]
+        for other, label in self.adjacency_list[dist]:
+            dist_repr = self._repr_dist(other)
+            if label is not None:
+                dist_repr = '%s [%s]' % (dist_repr, label)
+            output.append('    ' * level + dist_repr)
+            suboutput = self.repr_node(other, level + 1)
+            subs = suboutput.split('\n')
+            output.extend(subs[1:])
+        return '\n'.join(output)
+
+    def to_dot(self, f, skip_disconnected=True):
+        """Writes a DOT output for the graph to the provided file *f*.
+
+        If *skip_disconnected* is set to ``True``, then all distributions
+        that are not dependent on any other distribution are skipped.
+
+        :type f: a ``file``-like object (must support ``write``)
+        :type skip_disconnected: ``bool``
+        """
+        disconnected = []
+
+        f.write("digraph dependencies {\n")
+        for dist, adjs in self.adjacency_list.items():
+            if len(adjs) == 0 and not skip_disconnected:
+                disconnected.append(dist)
+            for other, label in adjs:
+                if label is not None:
+                    f.write('"%s" -> "%s" [label="%s"]\n' %
+                            (dist.name, other.name, label))
+                else:
+                    f.write('"%s" -> "%s"\n' % (dist.name, other.name))
+        if not skip_disconnected and len(disconnected) > 0:
+            f.write('subgraph disconnected {\n')
+            f.write('label = "Disconnected"\n')
+            f.write('bgcolor = red\n')
+
+            for dist in disconnected:
+                f.write('"%s"' % dist.name)
+                f.write('\n')
+            f.write('}\n')
+        f.write('}\n')
+
+    def topological_sort(self):
+        """
+        Perform a topological sort of the graph.
+        :return: A tuple, the first element of which is a topologically sorted
+                 list of distributions, and the second element of which is a
+                 list of distributions that cannot be sorted because they have
+                 circular dependencies and so form a cycle.
+        """
+        result = []
+        # Make a shallow copy of the adjacency list
+        alist = {}
+        for k, v in self.adjacency_list.items():
+            alist[k] = v[:]
+        while True:
+            # See what we can remove in this run
+            to_remove = []
+            for k, v in list(alist.items()):  # list() copies; safe to delete from alist
+                if not v:
+                    to_remove.append(k)
+                    del alist[k]
+            if not to_remove:
+                # What's left in alist (if anything) is a cycle.
+                break
+            # Remove from the adjacency list of others
+            for k, v in alist.items():
+                alist[k] = [(d, r) for d, r in v if d not in to_remove]
+            logger.debug('Moving to result: %s',
+                         ['%s (%s)' % (d.name, d.version) for d in to_remove])
+            result.extend(to_remove)
+        return result, list(alist.keys())
+
+    def __repr__(self):
+        """Representation of the graph"""
+        output = []
+        for dist, adjs in self.adjacency_list.items():
+            output.append(self.repr_node(dist))
+        return '\n'.join(output)
+
+
+def make_graph(dists, scheme='default'):
+    """Makes a dependency graph from the given distributions.
+
+    :parameter dists: a list of distributions
+    :type dists: list of :class:`distutils2.database.InstalledDistribution` and
+                 :class:`distutils2.database.EggInfoDistribution` instances
+    :rtype: a :class:`DependencyGraph` instance
+    """
+    scheme = get_scheme(scheme)
+    graph = DependencyGraph()
+    provided = {}  # maps names to lists of (version, dist) tuples
+
+    # first, build the graph and find out what's provided
+    for dist in dists:
+        graph.add_distribution(dist)
+
+        for p in dist.provides:
+            name, version = parse_name_and_version(p)
+            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
+            provided.setdefault(name, []).append((version, dist))
+
+    # now make the edges
+    for dist in dists:
+        requires = (dist.run_requires | dist.meta_requires |
+                    dist.build_requires | dist.dev_requires)
+        for req in requires:
+            try:
+                matcher = scheme.matcher(req)
+            except UnsupportedVersionError:
+                # XXX compat mode if we cannot read the version
+                logger.warning('could not read version %r - using name only',
+                               req)
+                name = req.split()[0]
+                matcher = scheme.matcher(name)
+
+            name = matcher.key   # case-insensitive
+
+            matched = False
+            if name in provided:
+                for version, provider in provided[name]:
+                    try:
+                        match = matcher.match(version)
+                    except UnsupportedVersionError:
+                        match = False
+
+                    if match:
+                        graph.add_edge(dist, provider, req)
+                        matched = True
+                        break
+            if not matched:
+                graph.add_missing(dist, req)
+    return graph
+
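+# A hedged usage sketch (assumes ``dists`` is a list of installed
+# distributions): build the graph, then order it so that every dependency
+# precedes its dependents.
+#
+#   graph = make_graph(dists)
+#   ordered, cyclic = graph.topological_sort()
+#   for d in ordered:
+#       print(d.name, d.version)
+#   if graph.missing:
+#       print('unsatisfied:', graph.missing)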
+
+def get_dependent_dists(dists, dist):
+    """Recursively generate a list of distributions from *dists* that are
+    dependent on *dist*.
+
+    :param dists: a list of distributions
+    :param dist: a distribution and member of *dists* whose dependents we are
+                 interested in
+    """
+    if dist not in dists:
+        raise DistlibException('given distribution %r is not a member '
+                               'of the list' % dist.name)
+    graph = make_graph(dists)
+
+    dep = [dist]  # dependent distributions
+    todo = graph.reverse_list[dist]  # list of nodes we should inspect
+
+    while todo:
+        d = todo.pop()
+        dep.append(d)
+        for succ in graph.reverse_list[d]:
+            if succ not in dep:
+                todo.append(succ)
+
+    dep.pop(0)  # remove dist from dep, was there to prevent infinite loops
+    return dep
+
+
+def get_required_dists(dists, dist):
+    """Recursively generate a list of distributions from *dists* that are
+    required by *dist*.
+
+    :param dists: a list of distributions
+    :param dist: a distribution and member of *dists* whose requirements we
+                 are interested in
+    """
+    if dist not in dists:
+        raise DistlibException('given distribution %r is not a member '
+                               'of the list' % dist.name)
+    graph = make_graph(dists)
+
+    req = []  # required distributions
+    todo = graph.adjacency_list[dist]  # list of nodes we should inspect
+
+    while todo:
+        d = todo.pop()[0]
+        req.append(d)
+        for succ in graph.adjacency_list[d]:
+            # succ is a (distribution, label) tuple; compare the distribution
+            # itself rather than the tuple when checking what is already seen.
+            if succ[0] not in req:
+                todo.append(succ)
+
+    return req
+
+
+def make_dist(name, version, **kwargs):
+    """
+    A convenience function for making a dist given just a name and version.
+    """
+    summary = kwargs.pop('summary', 'Placeholder for summary')
+    md = Metadata(**kwargs)
+    md.name = name
+    md.version = version
+    md.summary = summary or 'Placeholder for summary'
+    return Distribution(md)
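+
+# A minimal sketch of make_dist() for tests or ad-hoc graph building (the
+# name, version and summary are hypothetical):
+#
+#   dist = make_dist('foo', '1.0', summary='A test distribution')
+#   graph = make_graph([dist])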
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/index.py b/lib/python3.7/site-packages/pip/_vendor/distlib/index.py
new file mode 100644
index 0000000000000000000000000000000000000000..2406be2169ac4b1217d2589fd7efde548f4c4c3c
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/index.py
@@ -0,0 +1,516 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+import hashlib
+import logging
+import os
+import shutil
+import subprocess
+import tempfile
+try:
+    from threading import Thread
+except ImportError:
+    from dummy_threading import Thread
+
+from . import DistlibException
+from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
+                     urlparse, build_opener, string_types)
+from .util import cached_property, zip_dir, ServerProxy
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_INDEX = 'https://pypi.python.org/pypi'
+DEFAULT_REALM = 'pypi'
+
+class PackageIndex(object):
+    """
+    This class represents a package index compatible with PyPI, the Python
+    Package Index.
+    """
+
+    boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
+
+    def __init__(self, url=None):
+        """
+        Initialise an instance.
+
+        :param url: The URL of the index. If not specified, the URL for PyPI is
+                    used.
+        """
+        self.url = url or DEFAULT_INDEX
+        self.read_configuration()
+        scheme, netloc, path, params, query, frag = urlparse(self.url)
+        if params or query or frag or scheme not in ('http', 'https'):
+            raise DistlibException('invalid repository: %s' % self.url)
+        self.password_handler = None
+        self.ssl_verifier = None
+        self.gpg = None
+        self.gpg_home = None
+        with open(os.devnull, 'w') as sink:
+            # Use gpg by default rather than gpg2, as gpg2 insists on
+            # prompting for passwords
+            for s in ('gpg', 'gpg2'):
+                try:
+                    rc = subprocess.check_call([s, '--version'], stdout=sink,
+                                               stderr=sink)
+                    if rc == 0:
+                        self.gpg = s
+                        break
+                except (OSError, subprocess.CalledProcessError):
+                    # Binary missing, or present but exiting with failure.
+                    pass
+
+    def _get_pypirc_command(self):
+        """
+        Get the distutils command for interacting with PyPI configurations.
+        :return: the command.
+        """
+        from distutils.core import Distribution
+        from distutils.config import PyPIRCCommand
+        d = Distribution()
+        return PyPIRCCommand(d)
+
+    def read_configuration(self):
+        """
+        Read the PyPI access configuration as supported by distutils, getting
+        PyPI to do the actual work. This populates ``username``, ``password``,
+        ``realm`` and ``url`` attributes from the configuration.
+        """
+        # get distutils to do the work
+        c = self._get_pypirc_command()
+        c.repository = self.url
+        cfg = c._read_pypirc()
+        self.username = cfg.get('username')
+        self.password = cfg.get('password')
+        self.realm = cfg.get('realm', 'pypi')
+        self.url = cfg.get('repository', self.url)
+
+    def save_configuration(self):
+        """
+        Save the PyPI access configuration. You must have set ``username`` and
+        ``password`` attributes before calling this method.
+
+        Again, distutils is used to do the actual work.
+        """
+        self.check_credentials()
+        # get distutils to do the work
+        c = self._get_pypirc_command()
+        c._store_pypirc(self.username, self.password)
+
+    def check_credentials(self):
+        """
+        Check that ``username`` and ``password`` have been set, and raise an
+        exception if not.
+        """
+        if self.username is None or self.password is None:
+            raise DistlibException('username and password must be set')
+        pm = HTTPPasswordMgr()
+        _, netloc, _, _, _, _ = urlparse(self.url)
+        pm.add_password(self.realm, netloc, self.username, self.password)
+        self.password_handler = HTTPBasicAuthHandler(pm)
+
+    def register(self, metadata):
+        """
+        Register a distribution on PyPI, using the provided metadata.
+
+        :param metadata: A :class:`Metadata` instance defining at least a name
+                         and version number for the distribution to be
+                         registered.
+        :return: The HTTP response received from PyPI upon submission of the
+                request.
+        """
+        self.check_credentials()
+        metadata.validate()
+        d = metadata.todict()
+        d[':action'] = 'verify'
+        request = self.encode_request(d.items(), [])
+        response = self.send_request(request)
+        d[':action'] = 'submit'
+        request = self.encode_request(d.items(), [])
+        return self.send_request(request)
+
+    def _reader(self, name, stream, outbuf):
+        """
+        Thread runner for reading lines from a subprocess into a buffer.
+
+        :param name: The logical name of the stream (used for logging only).
+        :param stream: The stream to read from. This will typically be a pipe
+                       connected to the output stream of a subprocess.
+        :param outbuf: The list to append the read lines to.
+        """
+        while True:
+            s = stream.readline()
+            if not s:
+                break
+            s = s.decode('utf-8').rstrip()
+            outbuf.append(s)
+            logger.debug('%s: %s' % (name, s))
+        stream.close()
+
+    def get_sign_command(self, filename, signer, sign_password,
+                         keystore=None):
+        """
+        Return a suitable command for signing a file.
+
+        :param filename: The pathname to the file to be signed.
+        :param signer: The identifier of the signer of the file.
+        :param sign_password: The passphrase for the signer's
+                              private key used for signing.
+        :param keystore: The path to a directory which contains the keys
+                         used in verification. If not specified, the
+                         instance's ``gpg_home`` attribute is used instead.
+        :return: The signing command as a list suitable to be
+                 passed to :class:`subprocess.Popen`.
+        """
+        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
+        if keystore is None:
+            keystore = self.gpg_home
+        if keystore:
+            cmd.extend(['--homedir', keystore])
+        if sign_password is not None:
+            cmd.extend(['--batch', '--passphrase-fd', '0'])
+        td = tempfile.mkdtemp()
+        sf = os.path.join(td, os.path.basename(filename) + '.asc')
+        cmd.extend(['--detach-sign', '--armor', '--local-user',
+                    signer, '--output', sf, filename])
+        logger.debug('invoking: %s', ' '.join(cmd))
+        return cmd, sf
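+
+    # For illustration, with signer 'Jane Doe', a passphrase supplied and no
+    # keystore set, the returned command resembles:
+    #
+    #   ['gpg', '--status-fd', '2', '--no-tty', '--batch',
+    #    '--passphrase-fd', '0', '--detach-sign', '--armor',
+    #    '--local-user', 'Jane Doe', '--output', '<tmpdir>/pkg.tar.gz.asc',
+    #    'pkg.tar.gz']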
+
+    def run_command(self, cmd, input_data=None):
+        """
+        Run a command in a child process, passing it any input data specified.
+
+        :param cmd: The command to run.
+        :param input_data: If specified, this must be a byte string containing
+                           data to be sent to the child process.
+        :return: A tuple consisting of the subprocess' exit code, a list of
+                 lines read from the subprocess' ``stdout``, and a list of
+                 lines read from the subprocess' ``stderr``.
+        """
+        kwargs = {
+            'stdout': subprocess.PIPE,
+            'stderr': subprocess.PIPE,
+        }
+        if input_data is not None:
+            kwargs['stdin'] = subprocess.PIPE
+        stdout = []
+        stderr = []
+        p = subprocess.Popen(cmd, **kwargs)
+        # We don't use communicate() here because we may need to
+        # get clever with interacting with the command
+        t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
+        t1.start()
+        t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
+        t2.start()
+        if input_data is not None:
+            p.stdin.write(input_data)
+            p.stdin.close()
+
+        p.wait()
+        t1.join()
+        t2.join()
+        return p.returncode, stdout, stderr
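+
+    # A hedged usage sketch (the command and instance name are illustrative):
+    #
+    #   rc, out, err = index.run_command(['gpg', '--version'])
+    #   if rc != 0:
+    #       logger.error('gpg failed: %s', err)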
+
+    def sign_file(self, filename, signer, sign_password, keystore=None):
+        """
+        Sign a file.
+
+        :param filename: The pathname to the file to be signed.
+        :param signer: The identifier of the signer of the file.
+        :param sign_password: The passphrase for the signer's
+                              private key used for signing.
+        :param keystore: The path to a directory which contains the keys
+                         used in signing. If not specified, the instance's
+                         ``gpg_home`` attribute is used instead.
+        :return: The absolute pathname of the file where the signature is
+                 stored.
+        """
+        cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
+                                              keystore)
+        rc, stdout, stderr = self.run_command(cmd,
+                                              sign_password.encode('utf-8'))
+        if rc != 0:
+            raise DistlibException('sign command failed with error '
+                                   'code %s' % rc)
+        return sig_file
+
+    def upload_file(self, metadata, filename, signer=None, sign_password=None,
+                    filetype='sdist', pyversion='source', keystore=None):
+        """
+        Upload a release file to the index.
+
+        :param metadata: A :class:`Metadata` instance defining at least a name
+                         and version number for the file to be uploaded.
+        :param filename: The pathname of the file to be uploaded.
+        :param signer: The identifier of the signer of the file.
+        :param sign_password: The passphrase for the signer's
+                              private key used for signing.
+        :param filetype: The type of the file being uploaded. This is the
+                        distutils command which produced that file, e.g.
+                        ``sdist`` or ``bdist_wheel``.
+        :param pyversion: The version of Python which the release relates
+                          to. For code compatible with any Python, this would
+                          be ``source``, otherwise it would be e.g. ``3.2``.
+        :param keystore: The path to a directory which contains the keys
+                         used in signing. If not specified, the instance's
+                         ``gpg_home`` attribute is used instead.
+        :return: The HTTP response received from PyPI upon submission of the
+                request.
+        """
+        self.check_credentials()
+        if not os.path.exists(filename):
+            raise DistlibException('not found: %s' % filename)
+        metadata.validate()
+        d = metadata.todict()
+        sig_file = None
+        if signer:
+            if not self.gpg:
+                logger.warning('no signing program available - not signed')
+            else:
+                sig_file = self.sign_file(filename, signer, sign_password,
+                                          keystore)
+        with open(filename, 'rb') as f:
+            file_data = f.read()
+        md5_digest = hashlib.md5(file_data).hexdigest()
+        sha256_digest = hashlib.sha256(file_data).hexdigest()
+        d.update({
+            ':action': 'file_upload',
+            'protocol_version': '1',
+            'filetype': filetype,
+            'pyversion': pyversion,
+            'md5_digest': md5_digest,
+            'sha256_digest': sha256_digest,
+        })
+        files = [('content', os.path.basename(filename), file_data)]
+        if sig_file:
+            with open(sig_file, 'rb') as f:
+                sig_data = f.read()
+            files.append(('gpg_signature', os.path.basename(sig_file),
+                         sig_data))
+            shutil.rmtree(os.path.dirname(sig_file))
+        request = self.encode_request(d.items(), files)
+        return self.send_request(request)
+
+    def upload_documentation(self, metadata, doc_dir):
+        """
+        Upload documentation to the index.
+
+        :param metadata: A :class:`Metadata` instance defining at least a name
+                         and version number for the documentation to be
+                         uploaded.
+        :param doc_dir: The pathname of the directory which contains the
+                        documentation. This should be the directory that
+                        contains the ``index.html`` for the documentation.
+        :return: The HTTP response received from PyPI upon submission of the
+                request.
+        """
+        self.check_credentials()
+        if not os.path.isdir(doc_dir):
+            raise DistlibException('not a directory: %r' % doc_dir)
+        fn = os.path.join(doc_dir, 'index.html')
+        if not os.path.exists(fn):
+            raise DistlibException('not found: %r' % fn)
+        metadata.validate()
+        name, version = metadata.name, metadata.version
+        zip_data = zip_dir(doc_dir).getvalue()
+        fields = [(':action', 'doc_upload'),
+                  ('name', name), ('version', version)]
+        files = [('content', name, zip_data)]
+        request = self.encode_request(fields, files)
+        return self.send_request(request)
+
+    def get_verify_command(self, signature_filename, data_filename,
+                           keystore=None):
+        """
+        Return a suitable command for verifying a file.
+
+        :param signature_filename: The pathname to the file containing the
+                                   signature.
+        :param data_filename: The pathname to the file containing the
+                              signed data.
+        :param keystore: The path to a directory which contains the keys
+                         used in verification. If not specified, the
+                         instance's ``gpg_home`` attribute is used instead.
+        :return: The verifying command as a list suitable to be
+                 passed to :class:`subprocess.Popen`.
+        """
+        cmd = [self.gpg, '--status-fd', '2', '--no-tty']
+        if keystore is None:
+            keystore = self.gpg_home
+        if keystore:
+            cmd.extend(['--homedir', keystore])
+        cmd.extend(['--verify', signature_filename, data_filename])
+        logger.debug('invoking: %s', ' '.join(cmd))
+        return cmd
+
+    def verify_signature(self, signature_filename, data_filename,
+                         keystore=None):
+        """
+        Verify a signature for a file.
+
+        :param signature_filename: The pathname to the file containing the
+                                   signature.
+        :param data_filename: The pathname to the file containing the
+                              signed data.
+        :param keystore: The path to a directory which contains the keys
+                         used in verification. If not specified, the
+                         instance's ``gpg_home`` attribute is used instead.
+        :return: True if the signature was verified, else False.
+        """
+        if not self.gpg:
+            raise DistlibException('verification unavailable because gpg '
+                                   'unavailable')
+        cmd = self.get_verify_command(signature_filename, data_filename,
+                                      keystore)
+        rc, stdout, stderr = self.run_command(cmd)
+        if rc not in (0, 1):
+            raise DistlibException('verify command failed with error '
+                             'code %s' % rc)
+        return rc == 0
+
+    def download_file(self, url, destfile, digest=None, reporthook=None):
+        """
+        This is a convenience method for downloading a file from a URL.
+        Normally, this will be a file from the index, though currently
+        no check is made for this (i.e. a file can be downloaded from
+        anywhere).
+
+        The method is just like the :func:`urlretrieve` function in the
+        standard library, except that it allows digest computation to be
+        done during download and checking that the downloaded data
+        matches an expected value.
+
+        :param url: The URL of the file to be downloaded (assumed to be
+                    available via an HTTP GET request).
+        :param destfile: The pathname where the downloaded file is to be
+                         saved.
+        :param digest: If specified, this is either a (hasher, value)
+                       tuple, where ``hasher`` is the algorithm used (e.g.
+                       ``'md5'``), or a bare value, in which case MD5 is
+                       assumed; ``value`` is the expected hex digest.
+        :param reporthook: The same as for :func:`urlretrieve` in the
+                           standard library.
+        """
+        if digest is None:
+            digester = None
+            logger.debug('No digest specified')
+        else:
+            if isinstance(digest, (list, tuple)):
+                hasher, digest = digest
+            else:
+                hasher = 'md5'
+            digester = getattr(hashlib, hasher)()
+            logger.debug('Digest specified: %s' % digest)
+        # The following code is equivalent to urlretrieve.
+        # We need to do it this way so that we can compute the
+        # digest of the file as we go.
+        with open(destfile, 'wb') as dfp:
+            # addinfourl is not a context manager on 2.x
+            # so we have to use try/finally
+            sfp = self.send_request(Request(url))
+            try:
+                headers = sfp.info()
+                blocksize = 8192
+                size = -1
+                read = 0
+                blocknum = 0
+                if "content-length" in headers:
+                    size = int(headers["Content-Length"])
+                if reporthook:
+                    reporthook(blocknum, blocksize, size)
+                while True:
+                    block = sfp.read(blocksize)
+                    if not block:
+                        break
+                    read += len(block)
+                    dfp.write(block)
+                    if digester:
+                        digester.update(block)
+                    blocknum += 1
+                    if reporthook:
+                        reporthook(blocknum, blocksize, size)
+            finally:
+                sfp.close()
+
+        # check that we got the whole file, if we can
+        if size >= 0 and read < size:
+            raise DistlibException(
+                'retrieval incomplete: got only %d out of %d bytes'
+                % (read, size))
+        # if we have a digest, it must match.
+        if digester:
+            actual = digester.hexdigest()
+            if digest != actual:
+                raise DistlibException('%s digest mismatch for %s: expected '
+                                       '%s, got %s' % (hasher, destfile,
+                                                       digest, actual))
+            logger.debug('Digest verified: %s', digest)
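+
+    # A hedged usage sketch (URL, paths and digest value are hypothetical);
+    # the digest may be a bare MD5 hex string or a (hasher, value) tuple:
+    #
+    #   index.download_file('https://example.com/pkg-1.0.tar.gz',
+    #                       '/tmp/pkg-1.0.tar.gz',
+    #                       digest=('sha256', expected_sha256_hex))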
+
+    def send_request(self, req):
+        """
+        Send a standard library :class:`Request` to PyPI and return its
+        response.
+
+        :param req: The request to send.
+        :return: The HTTP response from PyPI (a standard library HTTPResponse).
+        """
+        handlers = []
+        if self.password_handler:
+            handlers.append(self.password_handler)
+        if self.ssl_verifier:
+            handlers.append(self.ssl_verifier)
+        opener = build_opener(*handlers)
+        return opener.open(req)
+
+    def encode_request(self, fields, files):
+        """
+        Encode fields and files for posting to an HTTP server.
+
+        :param fields: The fields to send as a list of (fieldname, value)
+                       tuples.
+        :param files: The files to send as a list of (fieldname, filename,
+                      file_bytes) tuples.
+        """
+        # Adapted from packaging, which in turn was adapted from
+        # http://code.activestate.com/recipes/146306
+
+        parts = []
+        boundary = self.boundary
+        for k, values in fields:
+            if not isinstance(values, (list, tuple)):
+                values = [values]
+
+            for v in values:
+                parts.extend((
+                    b'--' + boundary,
+                    ('Content-Disposition: form-data; name="%s"' %
+                     k).encode('utf-8'),
+                    b'',
+                    v.encode('utf-8')))
+        for key, filename, value in files:
+            parts.extend((
+                b'--' + boundary,
+                ('Content-Disposition: form-data; name="%s"; filename="%s"' %
+                 (key, filename)).encode('utf-8'),
+                b'',
+                value))
+
+        parts.extend((b'--' + boundary + b'--', b''))
+
+        body = b'\r\n'.join(parts)
+        ct = b'multipart/form-data; boundary=' + boundary
+        headers = {
+            'Content-type': ct,
+            'Content-length': str(len(body))
+        }
+        return Request(self.url, body, headers)
+
+    def search(self, terms, operator=None):
+        if isinstance(terms, string_types):
+            terms = {'name': terms}
+        rpc_proxy = ServerProxy(self.url, timeout=3.0)
+        try:
+            return rpc_proxy.search(terms, operator or 'and')
+        finally:
+            rpc_proxy('close')()
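+
+# A hedged usage sketch of PackageIndex.search() above (the query terms are
+# illustrative; a plain string is treated as a name search):
+#
+#   index = PackageIndex()
+#   hits = index.search('distlib')                  # {'name': 'distlib'}
+#   hits = index.search({'summary': 'database'}, operator='or')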
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/locators.py b/lib/python3.7/site-packages/pip/_vendor/distlib/locators.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c655c3e51ec93f892e66d520d39aca95c8f41a3
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/locators.py
@@ -0,0 +1,1295 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2015 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+
+import gzip
+from io import BytesIO
+import json
+import logging
+import os
+import posixpath
+import re
+try:
+    import threading
+except ImportError:  # pragma: no cover
+    import dummy_threading as threading
+import zlib
+
+from . import DistlibException
+from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
+                     queue, quote, unescape, string_types, build_opener,
+                     HTTPRedirectHandler as BaseRedirectHandler, text_type,
+                     Request, HTTPError, URLError)
+from .database import Distribution, DistributionPath, make_dist
+from .metadata import Metadata, MetadataInvalidError
+from .util import (cached_property, parse_credentials, ensure_slash,
+                   split_filename, get_project_data, parse_requirement,
+                   parse_name_and_version, ServerProxy, normalize_name)
+from .version import get_scheme, UnsupportedVersionError
+from .wheel import Wheel, is_compatible
+
+logger = logging.getLogger(__name__)
+
+HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
+CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
+HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
+DEFAULT_INDEX = 'https://pypi.python.org/pypi'
+
+def get_all_distribution_names(url=None):
+    """
+    Return all distribution names known by an index.
+    :param url: The URL of the index.
+    :return: A list of all known distribution names.
+    """
+    if url is None:
+        url = DEFAULT_INDEX
+    client = ServerProxy(url, timeout=3.0)
+    try:
+        return client.list_packages()
+    finally:
+        client('close')()
+
+class RedirectHandler(BaseRedirectHandler):
+    """
+    A class to work around a bug in some Python 3.2.x releases.
+    """
+    # There's a bug in the base version for some 3.2.x
+    # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
+    # returns e.g. /abc, it bails because it says the scheme ''
+    # is bogus, when actually it should use the request's
+    # URL for the scheme. See Python issue #13696.
+    def http_error_302(self, req, fp, code, msg, headers):
+        # Some servers (incorrectly) return multiple Location headers
+        # (so probably same goes for URI).  Use first header.
+        newurl = None
+        for key in ('location', 'uri'):
+            if key in headers:
+                newurl = headers[key]
+                break
+        if newurl is None:  # pragma: no cover
+            return
+        urlparts = urlparse(newurl)
+        if urlparts.scheme == '':
+            newurl = urljoin(req.get_full_url(), newurl)
+            if hasattr(headers, 'replace_header'):
+                headers.replace_header(key, newurl)
+            else:
+                headers[key] = newurl
+        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
+                                                  headers)
+
+    http_error_301 = http_error_303 = http_error_307 = http_error_302
+
+class Locator(object):
+    """
+    A base class for locators - things that locate distributions.
+    """
+    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
+    binary_extensions = ('.egg', '.exe', '.whl')
+    excluded_extensions = ('.pdf',)
+
+    # A list of tags indicating which wheels you want to match. The default
+    # value of None matches against the tags compatible with the running
+    # Python. If you want to match other values, set wheel_tags on a locator
+    # instance to a list of tuples (pyver, abi, arch) which you want to match.
+    wheel_tags = None
+
+    downloadable_extensions = source_extensions + ('.whl',)
+
+    def __init__(self, scheme='default'):
+        """
+        Initialise an instance.
+        :param scheme: Because locators look for most recent versions, they
+                       need to know the version scheme to use. This specifies
+                       the current PEP-recommended scheme - use ``'legacy'``
+                       if you need to support existing distributions on PyPI.
+        """
+        self._cache = {}
+        self.scheme = scheme
+        # Because of bugs in some of the handlers on some of the platforms,
+        # we use our own opener rather than just using urlopen.
+        self.opener = build_opener(RedirectHandler())
+        # If get_project() is called from locate(), the matcher instance
+        # is set from the requirement passed to locate(). See issue #18 for
+        # why this can be useful to know.
+        self.matcher = None
+        self.errors = queue.Queue()
+
+    def get_errors(self):
+        """
+        Return any errors which have occurred.
+        """
+        result = []
+        while not self.errors.empty():  # pragma: no cover
+            try:
+                e = self.errors.get(False)
+                result.append(e)
+            except queue.Empty:  # raised by get() when the queue is empty
+                continue
+            self.errors.task_done()
+        return result
+
+    def clear_errors(self):
+        """
+        Clear any errors which may have been logged.
+        """
+        # Just get the errors and throw them away
+        self.get_errors()
+
+    def clear_cache(self):
+        self._cache.clear()
+
+    def _get_scheme(self):
+        return self._scheme
+
+    def _set_scheme(self, value):
+        self._scheme = value
+
+    scheme = property(_get_scheme, _set_scheme)
+
+    def _get_project(self, name):
+        """
+        For a given project, get a dictionary mapping available versions to Distribution
+        instances.
+
+        This should be implemented in subclasses.
+
+        If called from a locate() request, self.matcher will be set to a
+        matcher for the requirement to satisfy, otherwise it will be None.
+        """
+        raise NotImplementedError('Please implement in the subclass')
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        raise NotImplementedError('Please implement in the subclass')
+
+    def get_project(self, name):
+        """
+        For a given project, get a dictionary mapping available versions to Distribution
+        instances.
+
+        This calls _get_project to do all the work, and just implements a caching layer on top.
+        """
+        if self._cache is None:  # pragma: no cover
+            result = self._get_project(name)
+        elif name in self._cache:
+            result = self._cache[name]
+        else:
+            self.clear_errors()
+            result = self._get_project(name)
+            self._cache[name] = result
+        return result
+
+    def score_url(self, url):
+        """
+        Give a URL a score which can be used to choose preferred URLs
+        for a given project release.
+        """
+        t = urlparse(url)
+        basename = posixpath.basename(t.path)
+        compatible = True
+        is_wheel = basename.endswith('.whl')
+        is_downloadable = basename.endswith(self.downloadable_extensions)
+        if is_wheel:
+            compatible = is_compatible(Wheel(basename), self.wheel_tags)
+        return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
+                is_downloadable, is_wheel, compatible, basename)
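+
+    # For illustration: the score is a plain tuple compared lexicographically,
+    # so an https, wheel-compatible PyPI URL outranks an http URL from
+    # elsewhere for the same basename.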
+
+    def prefer_url(self, url1, url2):
+        """
+        Choose one of two URLs where both are candidates for distribution
+        archives for the same version of a distribution (for example,
+        .tar.gz vs. zip).
+
+        The current implementation favours https:// URLs over http://, archives
+        from PyPI over those from other locations, wheel compatibility (if a
+        wheel) and then the archive name.
+        """
+        result = url2
+        if url1:
+            s1 = self.score_url(url1)
+            s2 = self.score_url(url2)
+            if s1 > s2:
+                result = url1
+            if result != url2:
+                logger.debug('Not replacing %r with %r', url1, url2)
+            else:
+                logger.debug('Replacing %r with %r', url1, url2)
+        return result
+
+    def split_filename(self, filename, project_name):
+        """
+        Attempt to split a filename into project name, version and Python version.
+        """
+        return split_filename(filename, project_name)
+
+    def convert_url_to_download_info(self, url, project_name):
+        """
+        See if a URL is a candidate for a download URL for a project (the URL
+        has typically been scraped from an HTML page).
+
+        If it is, a dictionary is returned with keys "name", "version",
+        "filename" and "url"; otherwise, None is returned.
+        """
+        def same_project(name1, name2):
+            return normalize_name(name1) == normalize_name(name2)
+
+        result = None
+        scheme, netloc, path, params, query, frag = urlparse(url)
+        if frag.lower().startswith('egg='):  # pragma: no cover
+            logger.debug('%s: version hint in fragment: %r',
+                         project_name, frag)
+        m = HASHER_HASH.match(frag)
+        if m:
+            algo, digest = m.groups()
+        else:
+            algo, digest = None, None
+        origpath = path
+        if path and path[-1] == '/':  # pragma: no cover
+            path = path[:-1]
+        if path.endswith('.whl'):
+            try:
+                wheel = Wheel(path)
+                if not is_compatible(wheel, self.wheel_tags):
+                    logger.debug('Wheel not compatible: %s', path)
+                else:
+                    if project_name is None:
+                        include = True
+                    else:
+                        include = same_project(wheel.name, project_name)
+                    if include:
+                        result = {
+                            'name': wheel.name,
+                            'version': wheel.version,
+                            'filename': wheel.filename,
+                            'url': urlunparse((scheme, netloc, origpath,
+                                               params, query, '')),
+                            'python-version': ', '.join(
+                                ['.'.join(list(v[2:])) for v in wheel.pyver]),
+                        }
+            except Exception as e:  # pragma: no cover
+                logger.warning('invalid path for wheel: %s', path)
+        elif not path.endswith(self.downloadable_extensions):  # pragma: no cover
+            logger.debug('Not downloadable: %s', path)
+        else:  # downloadable extension
+            path = filename = posixpath.basename(path)
+            for ext in self.downloadable_extensions:
+                if path.endswith(ext):
+                    path = path[:-len(ext)]
+                    t = self.split_filename(path, project_name)
+                    if not t:  # pragma: no cover
+                        logger.debug('No match for project/version: %s', path)
+                    else:
+                        name, version, pyver = t
+                        if not project_name or same_project(project_name, name):
+                            result = {
+                                'name': name,
+                                'version': version,
+                                'filename': filename,
+                                'url': urlunparse((scheme, netloc, origpath,
+                                                   params, query, '')),
+                                #'packagetype': 'sdist',
+                            }
+                            if pyver:  # pragma: no cover
+                                result['python-version'] = pyver
+                    break
+        if result and algo:
+            result['%s_digest' % algo] = digest
+        return result
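+
+    # For illustration, a matching sdist URL such as
+    # https://example.com/foo-1.0.tar.gz#sha256=abcd would yield roughly:
+    #
+    #   {'name': 'foo', 'version': '1.0', 'filename': 'foo-1.0.tar.gz',
+    #    'url': 'https://example.com/foo-1.0.tar.gz',
+    #    'sha256_digest': 'abcd'}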
+
+    def _get_digest(self, info):
+        """
+        Get a digest from a dictionary by looking at keys of the form
+        'algo_digest'.
+
+        Returns a 2-tuple (algo, digest) if found, else None. Currently
+        looks only for SHA256, then MD5.
+        """
+        result = None
+        for algo in ('sha256', 'md5'):
+            key = '%s_digest' % algo
+            if key in info:
+                result = (algo, info[key])
+                break
+        return result
+
+    def _update_version_data(self, result, info):
+        """
+        Update a result dictionary (the final result from _get_project) with a
+        dictionary for a specific version, which typically holds information
+        gleaned from a filename or URL for an archive for the distribution.
+        """
+        name = info.pop('name')
+        version = info.pop('version')
+        if version in result:
+            dist = result[version]
+            md = dist.metadata
+        else:
+            dist = make_dist(name, version, scheme=self.scheme)
+            md = dist.metadata
+        dist.digest = digest = self._get_digest(info)
+        url = info['url']
+        result['digests'][url] = digest
+        if md.source_url != info['url']:
+            md.source_url = self.prefer_url(md.source_url, url)
+            result['urls'].setdefault(version, set()).add(url)
+        dist.locator = self
+        result[version] = dist
+
+    def locate(self, requirement, prereleases=False):
+        """
+        Find the most recent distribution which matches the given
+        requirement.
+
+        :param requirement: A requirement of the form 'foo (1.0)' or perhaps
+                            'foo (>= 1.0, < 2.0, != 1.3)'
+        :param prereleases: If ``True``, allow pre-release versions
+                            to be located. Otherwise, pre-release versions
+                            are not returned.
+        :return: A :class:`Distribution` instance, or ``None`` if no such
+                 distribution could be located.
+        """
+        result = None
+        r = parse_requirement(requirement)
+        if r is None:  # pragma: no cover
+            raise DistlibException('Not a valid requirement: %r' % requirement)
+        scheme = get_scheme(self.scheme)
+        self.matcher = matcher = scheme.matcher(r.requirement)
+        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
+        versions = self.get_project(r.name)
+        if len(versions) > 2:   # urls and digests keys are present
+            # sometimes, versions are invalid
+            slist = []
+            vcls = matcher.version_class
+            for k in versions:
+                if k in ('urls', 'digests'):
+                    continue
+                try:
+                    if not matcher.match(k):
+                        logger.debug('%s did not match %r', matcher, k)
+                    else:
+                        if prereleases or not vcls(k).is_prerelease:
+                            slist.append(k)
+                        else:
+                            logger.debug('skipping pre-release '
+                                         'version %s of %s', k, matcher.name)
+                except Exception:  # pragma: no cover
+                    logger.warning('error matching %s with %r', matcher, k)
+                    pass # slist.append(k)
+            if len(slist) > 1:
+                slist = sorted(slist, key=scheme.key)
+            if slist:
+                logger.debug('sorted list: %s', slist)
+                version = slist[-1]
+                result = versions[version]
+        if result:
+            if r.extras:
+                result.extras = r.extras
+            result.download_urls = versions.get('urls', {}).get(version, set())
+            d = {}
+            sd = versions.get('digests', {})
+            for url in result.download_urls:
+                if url in sd:  # pragma: no cover
+                    d[url] = sd[url]
+            result.digests = d
+        self.matcher = None
+        return result
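+
+    # A hedged usage sketch (any concrete Locator subclass works the same
+    # way; the requirement string is illustrative):
+    #
+    #   dist = locator.locate('foo (>= 1.0, < 2.0)')
+    #   if dist is not None:
+    #       print(dist.name, dist.version, dist.download_urls)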
+
+
+class PyPIRPCLocator(Locator):
+    """
+    This locator uses XML-RPC to locate distributions. It therefore
+    cannot be used with simple mirrors (that only mirror file content).
+    """
+    def __init__(self, url, **kwargs):
+        """
+        Initialise an instance.
+
+        :param url: The URL to use for XML-RPC.
+        :param kwargs: Passed to the superclass constructor.
+        """
+        super(PyPIRPCLocator, self).__init__(**kwargs)
+        self.base_url = url
+        self.client = ServerProxy(url, timeout=3.0)
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        return set(self.client.list_packages())
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        versions = self.client.package_releases(name, True)
+        for v in versions:
+            urls = self.client.release_urls(name, v)
+            data = self.client.release_data(name, v)
+            metadata = Metadata(scheme=self.scheme)
+            metadata.name = data['name']
+            metadata.version = data['version']
+            metadata.license = data.get('license')
+            metadata.keywords = data.get('keywords', [])
+            metadata.summary = data.get('summary')
+            dist = Distribution(metadata)
+            if urls:
+                info = urls[0]
+                metadata.source_url = info['url']
+                dist.digest = self._get_digest(info)
+                dist.locator = self
+                result[v] = dist
+                for info in urls:
+                    url = info['url']
+                    digest = self._get_digest(info)
+                    result['urls'].setdefault(v, set()).add(url)
+                    result['digests'][url] = digest
+        return result
+
+class PyPIJSONLocator(Locator):
+    """
+    This locator uses PyPI's JSON interface. It's very limited in functionality
+    and probably not worth using.
+    """
+    def __init__(self, url, **kwargs):
+        super(PyPIJSONLocator, self).__init__(**kwargs)
+        self.base_url = ensure_slash(url)
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        raise NotImplementedError('Not available from this locator')
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        url = urljoin(self.base_url, '%s/json' % quote(name))
+        try:
+            resp = self.opener.open(url)
+            data = resp.read().decode() # for now
+            d = json.loads(data)
+            md = Metadata(scheme=self.scheme)
+            data = d['info']
+            md.name = data['name']
+            md.version = data['version']
+            md.license = data.get('license')
+            md.keywords = data.get('keywords', [])
+            md.summary = data.get('summary')
+            dist = Distribution(md)
+            dist.locator = self
+            urls = d['urls']
+            result[md.version] = dist
+            for info in d['urls']:
+                url = info['url']
+                dist.download_urls.add(url)
+                dist.digests[url] = self._get_digest(info)
+                result['urls'].setdefault(md.version, set()).add(url)
+                result['digests'][url] = self._get_digest(info)
+            # Now get other releases
+            for version, infos in d['releases'].items():
+                if version == md.version:
+                    continue    # already done
+                omd = Metadata(scheme=self.scheme)
+                omd.name = md.name
+                omd.version = version
+                odist = Distribution(omd)
+                odist.locator = self
+                result[version] = odist
+                for info in infos:
+                    url = info['url']
+                    odist.download_urls.add(url)
+                    odist.digests[url] = self._get_digest(info)
+                    result['urls'].setdefault(version, set()).add(url)
+                    result['digests'][url] = self._get_digest(info)
+#            for info in urls:
+#                md.source_url = info['url']
+#                dist.digest = self._get_digest(info)
+#                dist.locator = self
+#                for info in urls:
+#                    url = info['url']
+#                    result['urls'].setdefault(md.version, set()).add(url)
+#                    result['digests'][url] = self._get_digest(info)
+        except Exception as e:
+            self.errors.put(text_type(e))
+            logger.exception('JSON fetch failed: %s', e)
+        return result
+
+
+class Page(object):
+    """
+    This class represents a scraped HTML page.
+    """
+    # The following slightly hairy-looking regex just looks for the contents of
+    # an anchor link, which has an attribute "href" either immediately preceded
+    # or immediately followed by a "rel" attribute. The attribute values can be
+    # declared with double quotes, single quotes or no quotes - which leads to
+    # the length of the expression.
+    _href = re.compile("""
+(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
+href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
+(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
+""", re.I | re.S | re.X)
+    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
+
+    def __init__(self, data, url):
+        """
+        Initialise an instance with the Unicode page contents and the URL they
+        came from.
+        """
+        self.data = data
+        self.base_url = self.url = url
+        m = self._base.search(self.data)
+        if m:
+            self.base_url = m.group(1)
+
+    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
+
+    @cached_property
+    def links(self):
+        """
+        Return the URLs of all the links on a page together with information
+        about their "rel" attribute, for determining which ones to treat as
+        downloads and which ones to queue for further scraping.
+        """
+        def clean(url):
+            "Tidy up an URL."
+            scheme, netloc, path, params, query, frag = urlparse(url)
+            return urlunparse((scheme, netloc, quote(path),
+                               params, query, frag))
+
+        result = set()
+        for match in self._href.finditer(self.data):
+            d = match.groupdict('')
+            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
+                   d['rel4'] or d['rel5'] or d['rel6'])
+            url = d['url1'] or d['url2'] or d['url3']
+            url = urljoin(self.base_url, url)
+            url = unescape(url)
+            url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
+            result.add((url, rel))
+        # We sort the result, hoping to bring the most recent versions
+        # to the front
+        result = sorted(result, key=lambda t: t[0], reverse=True)
+        return result
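+
+    # For illustration, the _href regex above matches anchors in any of these
+    # forms (double-quoted, single-quoted or unquoted attribute values, with
+    # an optional rel attribute on either side of href):
+    #
+    #   <a rel="download" href="https://example.com/foo-1.0.tar.gz">
+    #   <a href='../foo-1.0.zip' rel=homepage>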
+
+
+class SimpleScrapingLocator(Locator):
+    """
+    A locator which scrapes HTML pages to locate downloads for a distribution.
+    This runs multiple threads to do the I/O; performance is at least as good
+    as pip's PackageFinder, which works in an analogous fashion.
+    """
+
+    # These are used to deal with various Content-Encoding schemes.
+    decoders = {
+        'deflate': zlib.decompress,
+        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
+        'none': lambda b: b,
+    }
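+    # The table above is consulted in get_page() as
+    #   decoder = self.decoders[encoding]
+    # where 'encoding' comes from the Content-Encoding response header; an
+    # unrecognised encoding therefore raises KeyError.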
+
+    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
+        """
+        Initialise an instance.
+        :param url: The root URL to use for scraping.
+        :param timeout: The timeout, in seconds, to be applied to requests.
+                        This defaults to ``None`` (no timeout specified).
+        :param num_workers: The number of worker threads to use for I/O.
+                            This defaults to 10.
+        :param kwargs: Passed to the superclass.
+        """
+        super(SimpleScrapingLocator, self).__init__(**kwargs)
+        self.base_url = ensure_slash(url)
+        self.timeout = timeout
+        self._page_cache = {}
+        self._seen = set()
+        self._to_fetch = queue.Queue()
+        self._bad_hosts = set()
+        self.skip_externals = False
+        self.num_workers = num_workers
+        self._lock = threading.RLock()
+        # See issue #45: we need to be resilient when the locator is used
+        # in a thread, e.g. with concurrent.futures. We can't use self._lock
+        # as it is for coordinating our internal threads - the ones created
+        # in _prepare_threads.
+        self._gplock = threading.RLock()
+        self.platform_check = False  # See issue #112
+
+    def _prepare_threads(self):
+        """
+        Threads are created only when get_project is called, and terminate
+        before it returns. They are there primarily to parallelise I/O (i.e.
+        fetching web pages).
+        """
+        self._threads = []
+        for i in range(self.num_workers):
+            t = threading.Thread(target=self._fetch)
+            t.daemon = True
+            t.start()
+            self._threads.append(t)
+
+    def _wait_threads(self):
+        """
+        Tell all the threads to terminate (by sending a sentinel value) and
+        wait for them to do so.
+        """
+        # Note that you need two loops, since you can't say which
+        # thread will get each sentinel
+        for t in self._threads:
+            self._to_fetch.put(None)    # sentinel
+        for t in self._threads:
+            t.join()
+        self._threads = []
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        with self._gplock:
+            self.result = result
+            self.project_name = name
+            url = urljoin(self.base_url, '%s/' % quote(name))
+            self._seen.clear()
+            self._page_cache.clear()
+            self._prepare_threads()
+            try:
+                logger.debug('Queueing %s', url)
+                self._to_fetch.put(url)
+                self._to_fetch.join()
+            finally:
+                self._wait_threads()
+            del self.result
+        return result
+
+    platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
+                                    r'win(32|_amd64)|macosx_?\d+)\b', re.I)
+
+    def _is_platform_dependent(self, url):
+        """
+        Does a URL refer to a platform-specific download?
+        """
+        return self.platform_dependent.search(url)
+
+    def _process_download(self, url):
+        """
+        See if a URL is a suitable download for a project.
+
+        If it is, register information in the result dictionary (for
+        _get_project) about the specific version it's for.
+
+        Note that the return value isn't actually used other than as a boolean
+        value.
+        """
+        if self.platform_check and self._is_platform_dependent(url):
+            info = None
+        else:
+            info = self.convert_url_to_download_info(url, self.project_name)
+        logger.debug('process_download: %s -> %s', url, info)
+        if info:
+            with self._lock:    # needed because self.result is shared
+                self._update_version_data(self.result, info)
+        return info
+
+    def _should_queue(self, link, referrer, rel):
+        """
+        Determine whether a link URL from a referring page and with a
+        particular "rel" attribute should be queued for scraping.
+        """
+        scheme, netloc, path, _, _, _ = urlparse(link)
+        if path.endswith(self.source_extensions + self.binary_extensions +
+                         self.excluded_extensions):
+            result = False
+        elif self.skip_externals and not link.startswith(self.base_url):
+            result = False
+        elif not referrer.startswith(self.base_url):
+            result = False
+        elif rel not in ('homepage', 'download'):
+            result = False
+        elif scheme not in ('http', 'https', 'ftp'):
+            result = False
+        elif self._is_platform_dependent(link):
+            result = False
+        else:
+            host = netloc.split(':', 1)[0]
+            if host.lower() == 'localhost':
+                result = False
+            else:
+                result = True
+        logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
+                     referrer, result)
+        return result
+
+    def _fetch(self):
+        """
+        Get a URL to fetch from the work queue, get the HTML page, examine its
+        links for download candidates and candidates for further scraping.
+
+        This is a handy method to run in a thread.
+        """
+        while True:
+            url = self._to_fetch.get()
+            try:
+                if url:
+                    page = self.get_page(url)
+                    if page is None:    # e.g. after an error
+                        continue
+                    for link, rel in page.links:
+                        if link not in self._seen:
+                            try:
+                                self._seen.add(link)
+                                if (not self._process_download(link) and
+                                    self._should_queue(link, url, rel)):
+                                    logger.debug('Queueing %s from %s', link, url)
+                                    self._to_fetch.put(link)
+                            except MetadataInvalidError:  # e.g. invalid versions
+                                pass
+            except Exception as e:  # pragma: no cover
+                self.errors.put(text_type(e))
+            finally:
+                # always do this, to avoid hangs :-)
+                self._to_fetch.task_done()
+            if not url:
+                # a sentinel was seen - terminate this worker
+                break
+
+    def get_page(self, url):
+        """
+        Get the HTML for a URL, possibly from an in-memory cache.
+
+        XXX TODO Note: this cache is never actually cleared. It's assumed that
+        the data won't get stale over the lifetime of a locator instance (not
+        necessarily true for the default_locator).
+        """
+        # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
+        scheme, netloc, path, _, _, _ = urlparse(url)
+        if scheme == 'file' and os.path.isdir(url2pathname(path)):
+            url = urljoin(ensure_slash(url), 'index.html')
+
+        if url in self._page_cache:
+            result = self._page_cache[url]
+            logger.debug('Returning %s from cache: %s', url, result)
+        else:
+            host = netloc.split(':', 1)[0]
+            result = None
+            if host in self._bad_hosts:
+                logger.debug('Skipping %s due to bad host %s', url, host)
+            else:
+                req = Request(url, headers={'Accept-encoding': 'identity'})
+                try:
+                    logger.debug('Fetching %s', url)
+                    resp = self.opener.open(req, timeout=self.timeout)
+                    logger.debug('Fetched %s', url)
+                    headers = resp.info()
+                    content_type = headers.get('Content-Type', '')
+                    if HTML_CONTENT_TYPE.match(content_type):
+                        final_url = resp.geturl()
+                        data = resp.read()
+                        encoding = headers.get('Content-Encoding')
+                        if encoding:
+                            decoder = self.decoders[encoding]   # fail if not found
+                            data = decoder(data)
+                        encoding = 'utf-8'
+                        m = CHARSET.search(content_type)
+                        if m:
+                            encoding = m.group(1)
+                        try:
+                            data = data.decode(encoding)
+                        except UnicodeError:  # pragma: no cover
+                            data = data.decode('latin-1')    # fallback
+                        result = Page(data, final_url)
+                        self._page_cache[final_url] = result
+                except HTTPError as e:
+                    if e.code != 404:
+                        logger.exception('Fetch failed: %s: %s', url, e)
+                except URLError as e:  # pragma: no cover
+                    logger.exception('Fetch failed: %s: %s', url, e)
+                    with self._lock:
+                        self._bad_hosts.add(host)
+                except Exception as e:  # pragma: no cover
+                    logger.exception('Fetch failed: %s: %s', url, e)
+                finally:
+                    self._page_cache[url] = result   # even if None (failure)
+        return result
+
+    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        result = set()
+        page = self.get_page(self.base_url)
+        if not page:
+            raise DistlibException('Unable to get %s' % self.base_url)
+        for match in self._distname_re.finditer(page.data):
+            result.add(match.group(1))
+        return result
+
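+# Example (illustrative - assumes outbound network access to a PEP 503
+# 'simple'-style index):
+#
+#   locator = SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0)
+#   locator.get_project('pip')  # -> {'urls': {...}, 'digests': {...}, ...}
+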
+class DirectoryLocator(Locator):
+    """
+    This class locates distributions in a directory tree.
+    """
+
+    def __init__(self, path, **kwargs):
+        """
+        Initialise an instance.
+        :param path: The root of the directory tree to search.
+        :param kwargs: Passed to the superclass constructor,
+                       except for:
+                       * recursive - if True (the default), subdirectories are
+                         recursed into. If False, only the top-level directory
+                         is searched.
+        """
+        self.recursive = kwargs.pop('recursive', True)
+        super(DirectoryLocator, self).__init__(**kwargs)
+        path = os.path.abspath(path)
+        if not os.path.isdir(path):  # pragma: no cover
+            raise DistlibException('Not a directory: %r' % path)
+        self.base_dir = path
+
+    def should_include(self, filename, parent):
+        """
+        Should a filename be considered as a candidate for a distribution
+        archive? As well as the filename, the directory which contains it
+        is provided, though not used by the current implementation.
+        """
+        return filename.endswith(self.downloadable_extensions)
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        for root, dirs, files in os.walk(self.base_dir):
+            for fn in files:
+                if self.should_include(fn, root):
+                    fn = os.path.join(root, fn)
+                    url = urlunparse(('file', '',
+                                      pathname2url(os.path.abspath(fn)),
+                                      '', '', ''))
+                    info = self.convert_url_to_download_info(url, name)
+                    if info:
+                        self._update_version_data(result, info)
+            if not self.recursive:
+                break
+        return result
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        result = set()
+        for root, dirs, files in os.walk(self.base_dir):
+            for fn in files:
+                if self.should_include(fn, root):
+                    fn = os.path.join(root, fn)
+                    url = urlunparse(('file', '',
+                                      pathname2url(os.path.abspath(fn)),
+                                      '', '', ''))
+                    info = self.convert_url_to_download_info(url, None)
+                    if info:
+                        result.add(info['name'])
+            if not self.recursive:
+                break
+        return result
+
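+# Example (illustrative - the directory path is hypothetical):
+#
+#   locator = DirectoryLocator('/srv/wheelhouse', recursive=False)
+#   locator.get_distribution_names()  # names parsed from the archives found
+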
+class JSONLocator(Locator):
+    """
+    This locator uses special extended metadata (not available on PyPI) and is
+    the basis of performant dependency resolution in distlib. Other locators
+    require archive downloads before dependencies can be determined! As you
+    might imagine, that can be slow.
+    """
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        raise NotImplementedError('Not available from this locator')
+
+    def _get_project(self, name):
+        result = {'urls': {}, 'digests': {}}
+        data = get_project_data(name)
+        if data:
+            for info in data.get('files', []):
+                if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
+                    continue
+                # We don't store summary in project metadata as it makes
+                # the data bigger for no benefit during dependency
+                # resolution
+                dist = make_dist(data['name'], info['version'],
+                                 summary=data.get('summary',
+                                                  'Placeholder for summary'),
+                                 scheme=self.scheme)
+                md = dist.metadata
+                md.source_url = info['url']
+                # TODO SHA256 digest
+                if 'digest' in info and info['digest']:
+                    dist.digest = ('md5', info['digest'])
+                md.dependencies = info.get('requirements', {})
+                dist.exports = info.get('exports', {})
+                result[dist.version] = dist
+                result['urls'].setdefault(dist.version, set()).add(info['url'])
+        return result
+
+class DistPathLocator(Locator):
+    """
+    This locator finds installed distributions in a path. It can be useful for
+    adding to an :class:`AggregatingLocator`.
+    """
+    def __init__(self, distpath, **kwargs):
+        """
+        Initialise an instance.
+
+        :param distpath: A :class:`DistributionPath` instance to search.
+        """
+        super(DistPathLocator, self).__init__(**kwargs)
+        assert isinstance(distpath, DistributionPath)
+        self.distpath = distpath
+
+    def _get_project(self, name):
+        dist = self.distpath.get_distribution(name)
+        if dist is None:
+            result = {'urls': {}, 'digests': {}}
+        else:
+            result = {
+                dist.version: dist,
+                'urls': {dist.version: set([dist.source_url])},
+                'digests': {dist.version: set([None])}
+            }
+        return result
+
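+# Example (illustrative): exposing the installed distributions on a
+# DistributionPath through the locator API.
+#
+#   from distlib.database import DistributionPath
+#   locator = DistPathLocator(DistributionPath())
+#   locator.get_project('pip')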
+
+class AggregatingLocator(Locator):
+    """
+    This class allows you to chain and/or merge a list of locators.
+    """
+    def __init__(self, *locators, **kwargs):
+        """
+        Initialise an instance.
+
+        :param locators: The list of locators to search.
+        :param kwargs: Passed to the superclass constructor,
+                       except for:
+                       * merge - if False (the default), the first successful
+                         search from any of the locators is returned. If True,
+                         the results from all locators are merged (this can be
+                         slow).
+        """
+        self.merge = kwargs.pop('merge', False)
+        self.locators = locators
+        super(AggregatingLocator, self).__init__(**kwargs)
+
+    def clear_cache(self):
+        super(AggregatingLocator, self).clear_cache()
+        for locator in self.locators:
+            locator.clear_cache()
+
+    def _set_scheme(self, value):
+        self._scheme = value
+        for locator in self.locators:
+            locator.scheme = value
+
+    scheme = property(Locator.scheme.fget, _set_scheme)
+
+    def _get_project(self, name):
+        result = {}
+        for locator in self.locators:
+            d = locator.get_project(name)
+            if d:
+                if self.merge:
+                    files = result.get('urls', {})
+                    digests = result.get('digests', {})
+                    # next line could overwrite result['urls'], result['digests']
+                    result.update(d)
+                    df = result.get('urls')
+                    if files and df:
+                        for k, v in files.items():
+                            if k in df:
+                                df[k] |= v
+                            else:
+                                df[k] = v
+                    dd = result.get('digests')
+                    if digests and dd:
+                        dd.update(digests)
+                else:
+                    # See issue #18. If any dists are found and we're looking
+                    # for specific constraints, we only return something if
+                    # a match is found. For example, if a DirectoryLocator
+                    # returns just foo (1.0) while we're looking for
+                    # foo (>= 2.0), we'll pretend there was nothing there so
+                    # that subsequent locators can be queried. Otherwise we
+                    # would just return foo (1.0) which would then lead to a
+                    # failure to find foo (>= 2.0), because other locators
+                    # weren't searched. Note that this only matters when
+                    # merge=False.
+                    if self.matcher is None:
+                        found = True
+                    else:
+                        found = False
+                        for k in d:
+                            if self.matcher.match(k):
+                                found = True
+                                break
+                    if found:
+                        result = d
+                        break
+        return result
+
+    def get_distribution_names(self):
+        """
+        Return all the distribution names known to this locator.
+        """
+        result = set()
+        for locator in self.locators:
+            try:
+                result |= locator.get_distribution_names()
+            except NotImplementedError:
+                pass
+        return result
+
+
+# We use a legacy scheme simply because most of the dists on PyPI use legacy
+# versions which don't conform to PEP 426 / PEP 440.
+default_locator = AggregatingLocator(
+                    JSONLocator(),
+                    SimpleScrapingLocator('https://pypi.python.org/simple/',
+                                          timeout=3.0),
+                    scheme='legacy')
+
+locate = default_locator.locate
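+
+# Example (illustrative - needs network access via default_locator):
+#
+#   dist = locate('requests (>= 2.0)')  # -> Distribution instance, or None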
+
+NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
+                             r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
+
+class DependencyFinder(object):
+    """
+    Locate dependencies for distributions.
+    """
+
+    def __init__(self, locator=None):
+        """
+        Initialise an instance, using the specified locator
+        to locate distributions.
+        """
+        self.locator = locator or default_locator
+        self.scheme = get_scheme(self.locator.scheme)
+
+    def add_distribution(self, dist):
+        """
+        Add a distribution to the finder. This will update internal information
+        about who provides what.
+        :param dist: The distribution to add.
+        """
+        logger.debug('adding distribution %s', dist)
+        name = dist.key
+        self.dists_by_name[name] = dist
+        self.dists[(name, dist.version)] = dist
+        for p in dist.provides:
+            name, version = parse_name_and_version(p)
+            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
+            self.provided.setdefault(name, set()).add((version, dist))
+
+    def remove_distribution(self, dist):
+        """
+        Remove a distribution from the finder. This will update internal
+        information about who provides what.
+        :param dist: The distribution to remove.
+        """
+        logger.debug('removing distribution %s', dist)
+        name = dist.key
+        del self.dists_by_name[name]
+        del self.dists[(name, dist.version)]
+        for p in dist.provides:
+            name, version = parse_name_and_version(p)
+            logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
+            s = self.provided[name]
+            s.remove((version, dist))
+            if not s:
+                del self.provided[name]
+
+    def get_matcher(self, reqt):
+        """
+        Get a version matcher for a requirement.
+        :param reqt: The requirement
+        :type reqt: str
+        :return: A version matcher (an instance of
+                 :class:`distlib.version.Matcher`).
+        """
+        try:
+            matcher = self.scheme.matcher(reqt)
+        except UnsupportedVersionError:  # pragma: no cover
+            # XXX compat-mode if cannot read the version
+            name = reqt.split()[0]
+            matcher = self.scheme.matcher(name)
+        return matcher
+
+    def find_providers(self, reqt):
+        """
+        Find the distributions which can fulfill a requirement.
+
+        :param reqt: The requirement.
+        :type reqt: str
+        :return: A set of distributions which can fulfill the requirement.
+        """
+        matcher = self.get_matcher(reqt)
+        name = matcher.key   # case-insensitive
+        result = set()
+        provided = self.provided
+        if name in provided:
+            for version, provider in provided[name]:
+                try:
+                    match = matcher.match(version)
+                except UnsupportedVersionError:
+                    match = False
+
+                if match:
+                    result.add(provider)
+                    break
+        return result
+
+    def try_to_replace(self, provider, other, problems):
+        """
+        Attempt to replace one provider with another. This is typically used
+        when resolving dependencies from multiple sources, e.g. A requires
+        (B >= 1.0) while C requires (B >= 1.1).
+
+        For successful replacement, ``provider`` must meet all the requirements
+        which ``other`` fulfills.
+
+        :param provider: The provider we are trying to replace with.
+        :param other: The provider we're trying to replace.
+        :param problems: If False is returned, this will contain what
+                         problems prevented replacement. This is currently
+                         a tuple of the literal string 'cantreplace',
+                         ``provider``, ``other`` and the set of requirements
+                         that ``provider`` couldn't fulfill.
+        :return: True if we can replace ``other`` with ``provider``, else
+                 False.
+        """
+        rlist = self.reqts[other]
+        unmatched = set()
+        for s in rlist:
+            matcher = self.get_matcher(s)
+            if not matcher.match(provider.version):
+                unmatched.add(s)
+        if unmatched:
+            # can't replace other with provider
+            problems.add(('cantreplace', provider, other,
+                          frozenset(unmatched)))
+            result = False
+        else:
+            # can replace other with provider
+            self.remove_distribution(other)
+            del self.reqts[other]
+            for s in rlist:
+                self.reqts.setdefault(provider, set()).add(s)
+            self.add_distribution(provider)
+            result = True
+        return result
+
+    def find(self, requirement, meta_extras=None, prereleases=False):
+        """
+        Find a distribution and all distributions it depends on.
+
+        :param requirement: The requirement specifying the distribution to
+                            find, or a Distribution instance.
+        :param meta_extras: A list of meta extras such as :test:, :build: and
+                            so on.
+        :param prereleases: If ``True``, allow pre-release versions to be
+                            returned - otherwise, don't return prereleases
+                            unless they're all that's available.
+
+        Return a set of :class:`Distribution` instances and a set of
+        problems.
+
+        The distributions returned should be such that they have the
+        :attr:`requested` attribute set to ``True`` if they were
+        from the ``requirement`` passed to ``find()``, and they have the
+        :attr:`build_time_dependency` attribute set to ``True`` unless they
+        are post-installation dependencies of the ``requirement``.
+
+        The problems are a set of tuples, each consisting of a string code
+        (e.g. ``'unsatisfied'``) and the requirement which couldn't be
+        satisfied by any distribution known to the locator.
+        """
+
+        self.provided = {}
+        self.dists = {}
+        self.dists_by_name = {}
+        self.reqts = {}
+
+        meta_extras = set(meta_extras or [])
+        if ':*:' in meta_extras:
+            meta_extras.remove(':*:')
+            # :meta: and :run: are implicitly included
+            meta_extras |= set([':test:', ':build:', ':dev:'])
+
+        if isinstance(requirement, Distribution):
+            dist = odist = requirement
+            logger.debug('passed %s as requirement', odist)
+        else:
+            dist = odist = self.locator.locate(requirement,
+                                               prereleases=prereleases)
+            if dist is None:
+                raise DistlibException('Unable to locate %r' % requirement)
+            logger.debug('located %s', odist)
+        dist.requested = True
+        problems = set()
+        todo = set([dist])
+        install_dists = set([odist])
+        while todo:
+            dist = todo.pop()
+            name = dist.key     # case-insensitive
+            if name not in self.dists_by_name:
+                self.add_distribution(dist)
+            else:
+                other = self.dists_by_name[name]
+                if other != dist:
+                    self.try_to_replace(dist, other, problems)
+
+            ireqts = dist.run_requires | dist.meta_requires
+            sreqts = dist.build_requires
+            ereqts = set()
+            if meta_extras and dist in install_dists:
+                for key in ('test', 'build', 'dev'):
+                    e = ':%s:' % key
+                    if e in meta_extras:
+                        ereqts |= getattr(dist, '%s_requires' % key)
+            all_reqts = ireqts | sreqts | ereqts
+            for r in all_reqts:
+                providers = self.find_providers(r)
+                if not providers:
+                    logger.debug('No providers found for %r', r)
+                    provider = self.locator.locate(r, prereleases=prereleases)
+                    # If no provider is found and we didn't consider
+                    # prereleases, consider them now.
+                    if provider is None and not prereleases:
+                        provider = self.locator.locate(r, prereleases=True)
+                    if provider is None:
+                        logger.debug('Cannot satisfy %r', r)
+                        problems.add(('unsatisfied', r))
+                    else:
+                        n, v = provider.key, provider.version
+                        if (n, v) not in self.dists:
+                            todo.add(provider)
+                        providers.add(provider)
+                        if r in ireqts and dist in install_dists:
+                            install_dists.add(provider)
+                            logger.debug('Adding %s to install_dists',
+                                         provider.name_and_version)
+                for p in providers:
+                    name = p.key
+                    if name not in self.dists_by_name:
+                        self.reqts.setdefault(p, set()).add(r)
+                    else:
+                        other = self.dists_by_name[name]
+                        if other != p:
+                            # see if other can be replaced by p
+                            self.try_to_replace(p, other, problems)
+
+        dists = set(self.dists.values())
+        for dist in dists:
+            dist.build_time_dependency = dist not in install_dists
+            if dist.build_time_dependency:
+                logger.debug('%s is a build-time dependency only.',
+                             dist.name_and_version)
+        logger.debug('find done for %s', odist)
+        return dists, problems
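+
+# Example (illustrative sketch - the requirement string is made up and
+# network access through the default locator is assumed):
+#
+#   finder = DependencyFinder()
+#   dists, problems = finder.find('flask (>= 1.0)', meta_extras=[':test:'])
+#   for problem in problems:
+#       print(problem)  # e.g. ('unsatisfied', '<requirement>')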
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/manifest.py b/lib/python3.7/site-packages/pip/_vendor/distlib/manifest.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca0fe442d9ca499466df9438df16eca405c5f102
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/manifest.py
@@ -0,0 +1,393 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2013 Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""
+Class representing the list of files in a distribution.
+
+Equivalent to distutils.filelist, but fixes some problems.
+"""
+import fnmatch
+import logging
+import os
+import re
+import sys
+
+from . import DistlibException
+from .compat import fsdecode
+from .util import convert_path
+
+
+__all__ = ['Manifest']
+
+logger = logging.getLogger(__name__)
+
+# a \ followed by some spaces + EOL
+_COLLAPSE_PATTERN = re.compile(r'\\\s*\n', re.M)
+_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S)
+
+#
+# fnmatch.translate returns different results across Python versions, so we
+# need slightly different processing for Python 2.7 / 3.2 than for later
+# versions; the distinction became necessary again from Python 3.6 onwards.
+#
+_PYTHON_VERSION = sys.version_info[:2]
+
+class Manifest(object):
+    """A list of files built by on exploring the filesystem and filtered by
+    applying various patterns to what we find there.
+    """
+
+    def __init__(self, base=None):
+        """
+        Initialise an instance.
+
+        :param base: The base directory to explore under.
+        """
+        self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))
+        self.prefix = self.base + os.sep
+        self.allfiles = None
+        self.files = set()
+
+    #
+    # Public API
+    #
+
+    def findall(self):
+        """Find all files under the base and set ``allfiles`` to the absolute
+        pathnames of files found.
+        """
+        from stat import S_ISREG, S_ISDIR, S_ISLNK
+
+        self.allfiles = allfiles = []
+        root = self.base
+        stack = [root]
+        pop = stack.pop
+        push = stack.append
+
+        while stack:
+            root = pop()
+            names = os.listdir(root)
+
+            for name in names:
+                fullname = os.path.join(root, name)
+
+                # Avoid excess stat calls -- just one will do, thank you!
+                stat = os.stat(fullname)
+                mode = stat.st_mode
+                if S_ISREG(mode):
+                    allfiles.append(fsdecode(fullname))
+                elif S_ISDIR(mode) and not S_ISLNK(mode):
+                    push(fullname)
+
+    def add(self, item):
+        """
+        Add a file to the manifest.
+
+        :param item: The pathname to add. This can be relative to the base.
+        """
+        if not item.startswith(self.prefix):
+            item = os.path.join(self.base, item)
+        self.files.add(os.path.normpath(item))
+
+    def add_many(self, items):
+        """
+        Add a list of files to the manifest.
+
+        :param items: The pathnames to add. These can be relative to the base.
+        """
+        for item in items:
+            self.add(item)
+
+    def sorted(self, wantdirs=False):
+        """
+        Return the files in the manifest, sorted in directory order. If
+        ``wantdirs`` is true, the directories containing them are included.
+        """
+
+        def add_dir(dirs, d):
+            dirs.add(d)
+            logger.debug('add_dir added %s', d)
+            if d != self.base:
+                parent, _ = os.path.split(d)
+                assert parent not in ('', '/')
+                add_dir(dirs, parent)
+
+        result = set(self.files)    # make a copy!
+        if wantdirs:
+            dirs = set()
+            for f in result:
+                add_dir(dirs, os.path.dirname(f))
+            result |= dirs
+        return [os.path.join(*path_tuple) for path_tuple in
+                sorted(os.path.split(path) for path in result)]
+
+    def clear(self):
+        """Clear all collected files."""
+        self.files = set()
+        self.allfiles = []
+
+    def process_directive(self, directive):
+        """
+        Process a directive which either adds some files from ``allfiles`` to
+        ``files``, or removes some files from ``files``.
+
+        :param directive: The directive to process. This should be in a format
+                     compatible with distutils ``MANIFEST.in`` files:
+
+                     http://docs.python.org/distutils/sourcedist.html#commands
+        """
+        # Parse the line: split it up, make sure the right number of words
+        # is there, and return the relevant words.  'action' is always
+        # defined: it's the first word of the line.  Which of the other
+        # three are defined depends on the action; it'll be either
+        # patterns, (dir and patterns), or (dirpattern).
+        action, patterns, thedir, dirpattern = self._parse_directive(directive)
+
+        # OK, now we know that the action is valid and we have the
+        # right number of words on the line for that action -- so we
+        # can proceed with minimal error-checking.
+        if action == 'include':
+            for pattern in patterns:
+                if not self._include_pattern(pattern, anchor=True):
+                    logger.warning('no files found matching %r', pattern)
+
+        elif action == 'exclude':
+            for pattern in patterns:
+                found = self._exclude_pattern(pattern, anchor=True)
+                #if not found:
+                #    logger.warning('no previously-included files '
+                #                   'found matching %r', pattern)
+
+        elif action == 'global-include':
+            for pattern in patterns:
+                if not self._include_pattern(pattern, anchor=False):
+                    logger.warning('no files found matching %r '
+                                   'anywhere in distribution', pattern)
+
+        elif action == 'global-exclude':
+            for pattern in patterns:
+                found = self._exclude_pattern(pattern, anchor=False)
+                #if not found:
+                #    logger.warning('no previously-included files '
+                #                   'matching %r found anywhere in '
+                #                   'distribution', pattern)
+
+        elif action == 'recursive-include':
+            for pattern in patterns:
+                if not self._include_pattern(pattern, prefix=thedir):
+                    logger.warning('no files found matching %r '
+                                   'under directory %r', pattern, thedir)
+
+        elif action == 'recursive-exclude':
+            for pattern in patterns:
+                found = self._exclude_pattern(pattern, prefix=thedir)
+                #if not found:
+                #    logger.warning('no previously-included files '
+                #                   'matching %r found under directory %r',
+                #                   pattern, thedir)
+
+        elif action == 'graft':
+            if not self._include_pattern(None, prefix=dirpattern):
+                logger.warning('no directories found matching %r',
+                               dirpattern)
+
+        elif action == 'prune':
+            if not self._exclude_pattern(None, prefix=dirpattern):
+                logger.warning('no previously-included directories found '
+                               'matching %r', dirpattern)
+        else:   # pragma: no cover
+            # This should never happen, as it should be caught in
+            # _parse_template_line
+            raise DistlibException(
+                'invalid action %r' % action)
+
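+    # Example (illustrative - the project path and patterns are hypothetical):
+    #
+    #   m = Manifest('/path/to/project')
+    #   m.process_directive('include *.txt')
+    #   m.process_directive('recursive-include docs *.rst')
+    #   m.process_directive('prune build')
+    #   files = m.sorted()
+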
+    #
+    # Private API
+    #
+
+    def _parse_directive(self, directive):
+        """
+        Validate a directive.
+        :param directive: The directive to validate.
+        :return: A tuple of action, patterns, thedir, dir_patterns
+        """
+        words = directive.split()
+        if len(words) == 1 and words[0] not in ('include', 'exclude',
+                                                'global-include',
+                                                'global-exclude',
+                                                'recursive-include',
+                                                'recursive-exclude',
+                                                'graft', 'prune'):
+            # no action given, let's use the default 'include'
+            words.insert(0, 'include')
+
+        action = words[0]
+        patterns = thedir = dir_pattern = None
+
+        if action in ('include', 'exclude',
+                      'global-include', 'global-exclude'):
+            if len(words) < 2:
+                raise DistlibException(
+                    '%r expects <pattern1> <pattern2> ...' % action)
+
+            patterns = [convert_path(word) for word in words[1:]]
+
+        elif action in ('recursive-include', 'recursive-exclude'):
+            if len(words) < 3:
+                raise DistlibException(
+                    '%r expects <dir> <pattern1> <pattern2> ...' % action)
+
+            thedir = convert_path(words[1])
+            patterns = [convert_path(word) for word in words[2:]]
+
+        elif action in ('graft', 'prune'):
+            if len(words) != 2:
+                raise DistlibException(
+                    '%r expects a single <dir_pattern>' % action)
+
+            dir_pattern = convert_path(words[1])
+
+        else:
+            raise DistlibException('unknown action %r' % action)
+
+        return action, patterns, thedir, dir_pattern
+
+    def _include_pattern(self, pattern, anchor=True, prefix=None,
+                         is_regex=False):
+        """Select strings (presumably filenames) from 'self.files' that
+        match 'pattern', a Unix-style wildcard (glob) pattern.
+
+        Patterns are not quite the same as implemented by the 'fnmatch'
+        module: '*' and '?'  match non-special characters, where "special"
+        is platform-dependent: slash on Unix; colon, slash, and backslash on
+        DOS/Windows; and colon on Mac OS.
+
+        If 'anchor' is true (the default), then the pattern match is more
+        stringent: "*.py" will match "foo.py" but not "foo/bar.py".  If
+        'anchor' is false, both of these will match.
+
+        If 'prefix' is supplied, then only filenames starting with 'prefix'
+        (itself a pattern) and ending with 'pattern', with anything in between
+        them, will match.  'anchor' is ignored in this case.
+
+        If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
+        'pattern' is assumed to be either a string containing a regex or a
+        regex object -- no translation is done, the regex is just compiled
+        and used as-is.
+
+        Selected strings will be added to self.files.
+
+        Return True if files are found.
+        """
+        # XXX docstring lying about what the special chars are?
+        found = False
+        pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
+
+        # delayed loading of allfiles list
+        if self.allfiles is None:
+            self.findall()
+
+        for name in self.allfiles:
+            if pattern_re.search(name):
+                self.files.add(name)
+                found = True
+        return found
+
+    def _exclude_pattern(self, pattern, anchor=True, prefix=None,
+                         is_regex=False):
+        """Remove strings (presumably filenames) from 'files' that match
+        'pattern'.
+
+        Other parameters are the same as for 'include_pattern()', above.
+        The list 'self.files' is modified in place. Return True if files are
+        found.
+
+        This is used, for example, to exclude SCM subdirectories when
+        packaging source distributions.
+        """
+        found = False
+        pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
+        for f in list(self.files):
+            if pattern_re.search(f):
+                self.files.remove(f)
+                found = True
+        return found
+
+    def _translate_pattern(self, pattern, anchor=True, prefix=None,
+                           is_regex=False):
+        """Translate a shell-like wildcard pattern to a compiled regular
+        expression.
+
+        Return the compiled regex.  If 'is_regex' true,
+        then 'pattern' is directly compiled to a regex (if it's a string)
+        or just returned as-is (assumes it's a regex object).
+        """
+        if is_regex:
+            if isinstance(pattern, str):
+                return re.compile(pattern)
+            else:
+                return pattern
+
+        if _PYTHON_VERSION > (3, 2):
+            # ditch start and end characters
+            start, _, end = self._glob_to_re('_').partition('_')
+
+        if pattern:
+            pattern_re = self._glob_to_re(pattern)
+            if _PYTHON_VERSION > (3, 2):
+                assert pattern_re.startswith(start) and pattern_re.endswith(end)
+        else:
+            pattern_re = ''
+
+        base = re.escape(os.path.join(self.base, ''))
+        if prefix is not None:
+            # ditch end of pattern character
+            if _PYTHON_VERSION <= (3, 2):
+                empty_pattern = self._glob_to_re('')
+                prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
+            else:
+                prefix_re = self._glob_to_re(prefix)
+                assert prefix_re.startswith(start) and prefix_re.endswith(end)
+                prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
+            sep = os.sep
+            if os.sep == '\\':
+                sep = r'\\'
+            if _PYTHON_VERSION <= (3, 2):
+                pattern_re = '^' + base + sep.join((prefix_re,
+                                                    '.*' + pattern_re))
+            else:
+                pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
+                pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep,
+                                                  pattern_re, end)
+        else:  # no prefix -- respect anchor flag
+            if anchor:
+                if _PYTHON_VERSION <= (3, 2):
+                    pattern_re = '^' + base + pattern_re
+                else:
+                    pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):])
+
+        return re.compile(pattern_re)
+
+    def _glob_to_re(self, pattern):
+        """Translate a shell-like glob pattern to a regular expression.
+
+        Return a string containing the regex.  Differs from
+        'fnmatch.translate()' in that '*' does not match "special characters"
+        (which are platform-specific).
+        """
+        pattern_re = fnmatch.translate(pattern)
+
+        # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
+        # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
+        # and by extension they shouldn't match such "special characters" under
+        # any OS.  So change all non-escaped dots in the RE to match any
+        # character except the special characters (currently: just os.sep).
+        sep = os.sep
+        if os.sep == '\\':
+            # we're using a regex to manipulate a regex, so we need
+            # to escape the backslash twice
+            sep = r'\\\\'
+        escaped = r'\1[^%s]' % sep
+        pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
+        return pattern_re
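+
+# Example (illustrative): unlike fnmatch.translate, the translation above
+# stops '*' matching across path separators, so on POSIX:
+#
+#   Manifest()._glob_to_re('*.py')  # matches 'a.py' but not 'a/b.py'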
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/markers.py b/lib/python3.7/site-packages/pip/_vendor/distlib/markers.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee1f3e23655b53293cc2d5bf50e835ac2662cc7f
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/markers.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2017 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""
+Parser for the environment markers micro-language defined in PEP 508.
+"""
+
+# Note: In PEP 345, the micro-language was Python compatible, so the ast
+# module could be used to parse it. However, PEP 508 introduced operators such
+# as ~= and === which aren't in Python, necessitating a different approach.
+
+import os
+import sys
+import platform
+import re
+
+from .compat import python_implementation, urlparse, string_types
+from .util import in_venv, parse_marker
+
+__all__ = ['interpret']
+
+def _is_literal(o):
+    if not isinstance(o, string_types) or not o:
+        return False
+    return o[0] in '\'"'
+
+class Evaluator(object):
+    """
+    This class is used to evaluate marker expressions.
+    """
+
+    operations = {
+        '==': lambda x, y: x == y,
+        '===': lambda x, y: x == y,
+        '~=': lambda x, y: x == y or x > y,
+        '!=': lambda x, y: x != y,
+        '<':  lambda x, y: x < y,
+        '<=':  lambda x, y: x == y or x < y,
+        '>':  lambda x, y: x > y,
+        '>=':  lambda x, y: x == y or x > y,
+        'and': lambda x, y: x and y,
+        'or': lambda x, y: x or y,
+        'in': lambda x, y: x in y,
+        'not in': lambda x, y: x not in y,
+    }
+
+    def evaluate(self, expr, context):
+        """
+        Evaluate a marker expression returned by the :func:`parse_marker`
+        function in the specified context.
+        """
+        if isinstance(expr, string_types):
+            if expr[0] in '\'"':
+                result = expr[1:-1]
+            else:
+                if expr not in context:
+                    raise SyntaxError('unknown variable: %s' % expr)
+                result = context[expr]
+        else:
+            assert isinstance(expr, dict)
+            op = expr['op']
+            if op not in self.operations:
+                raise NotImplementedError('op not implemented: %s' % op)
+            elhs = expr['lhs']
+            erhs = expr['rhs']
+            if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):
+                raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))
+
+            lhs = self.evaluate(elhs, context)
+            rhs = self.evaluate(erhs, context)
+            result = self.operations[op](lhs, rhs)
+        return result
+
+def default_context():
+    def format_full_version(info):
+        version = '%s.%s.%s' % (info.major, info.minor, info.micro)
+        kind = info.releaselevel
+        if kind != 'final':
+            version += kind[0] + str(info.serial)
+        return version
+
+    if hasattr(sys, 'implementation'):
+        implementation_version = format_full_version(sys.implementation.version)
+        implementation_name = sys.implementation.name
+    else:
+        implementation_version = '0'
+        implementation_name = ''
+
+    result = {
+        'implementation_name': implementation_name,
+        'implementation_version': implementation_version,
+        'os_name': os.name,
+        'platform_machine': platform.machine(),
+        'platform_python_implementation': platform.python_implementation(),
+        'platform_release': platform.release(),
+        'platform_system': platform.system(),
+        'platform_version': platform.version(),
+        'platform_in_venv': str(in_venv()),
+        'python_full_version': platform.python_version(),
+        'python_version': '.'.join(platform.python_version_tuple()[:2]),
+        'sys_platform': sys.platform,
+    }
+    return result
+
+DEFAULT_CONTEXT = default_context()
+del default_context
+
+evaluator = Evaluator()
+
+def interpret(marker, execution_context=None):
+    """
+    Interpret a marker and return a result depending on environment.
+
+    :param marker: The marker to interpret.
+    :type marker: str
+    :param execution_context: The context used for name lookup.
+    :type execution_context: mapping
+    """
+    try:
+        expr, rest = parse_marker(marker)
+    except Exception as e:
+        raise SyntaxError('Unable to interpret marker syntax: %s: %s' % (marker, e))
+    if rest and rest[0] != '#':
+        raise SyntaxError('unexpected trailing data in marker: %s: %s' % (marker, rest))
+    context = dict(DEFAULT_CONTEXT)
+    if execution_context:
+        context.update(execution_context)
+    return evaluator.evaluate(expr, context)
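+
+# Examples (illustrative - results depend on the running interpreter):
+#
+#   interpret('python_version >= "3.4"')             # True on CPython 3.7
+#   interpret('extra == "test"', {'extra': 'test'})  # True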
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/metadata.py b/lib/python3.7/site-packages/pip/_vendor/distlib/metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..77eed7f96836986c76981f2fb41c6ee389aba83b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/metadata.py
@@ -0,0 +1,1094 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""Implementation of the Metadata for Python packages PEPs.
+
+Supports all metadata formats (1.0, 1.1, 1.2, 1.3/2.1 and 2.0 experimental).
+"""
+from __future__ import unicode_literals
+
+import codecs
+from email import message_from_file
+import json
+import logging
+import re
+
+
+from . import DistlibException, __version__
+from .compat import StringIO, string_types, text_type
+from .markers import interpret
+from .util import extract_by_key, get_extras
+from .version import get_scheme, PEP440_VERSION_RE
+
+logger = logging.getLogger(__name__)
+
+
+class MetadataMissingError(DistlibException):
+    """A required metadata is missing"""
+
+
+class MetadataConflictError(DistlibException):
+    """Attempt to read or write metadata fields that are conflictual."""
+
+
+class MetadataUnrecognizedVersionError(DistlibException):
+    """Unknown metadata version number."""
+
+
+class MetadataInvalidError(DistlibException):
+    """A metadata value is invalid"""
+
+# public API of this module
+__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
+
+# Encoding used for the PKG-INFO files
+PKG_INFO_ENCODING = 'utf-8'
+
+# preferred version. Hopefully will be changed
+# to 1.2 once PEP 345 is supported everywhere
+PKG_INFO_PREFERRED_VERSION = '1.1'
+
+_LINE_PREFIX_1_2 = re.compile('\n       \\|')
+_LINE_PREFIX_PRE_1_2 = re.compile('\n        ')
+_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
+               'Summary', 'Description',
+               'Keywords', 'Home-page', 'Author', 'Author-email',
+               'License')
+
+_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
+               'Supported-Platform', 'Summary', 'Description',
+               'Keywords', 'Home-page', 'Author', 'Author-email',
+               'License', 'Classifier', 'Download-URL', 'Obsoletes',
+               'Provides', 'Requires')
+
+_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
+                'Download-URL')
+
+_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
+               'Supported-Platform', 'Summary', 'Description',
+               'Keywords', 'Home-page', 'Author', 'Author-email',
+               'Maintainer', 'Maintainer-email', 'License',
+               'Classifier', 'Download-URL', 'Obsoletes-Dist',
+               'Project-URL', 'Provides-Dist', 'Requires-Dist',
+               'Requires-Python', 'Requires-External')
+
+_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
+                'Obsoletes-Dist', 'Requires-External', 'Maintainer',
+                'Maintainer-email', 'Project-URL')
+
+_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
+               'Supported-Platform', 'Summary', 'Description',
+               'Keywords', 'Home-page', 'Author', 'Author-email',
+               'Maintainer', 'Maintainer-email', 'License',
+               'Classifier', 'Download-URL', 'Obsoletes-Dist',
+               'Project-URL', 'Provides-Dist', 'Requires-Dist',
+               'Requires-Python', 'Requires-External', 'Private-Version',
+               'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
+               'Provides-Extra')
+
+_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
+                'Setup-Requires-Dist', 'Extension')
+
+# See issue #106: Sometimes 'Requires' occurs wrongly in the metadata. Include
+# it in the tuple literal below to allow it (for now)
+_566_FIELDS = _426_FIELDS + ('Description-Content-Type', 'Requires')
+
+_566_MARKERS = ('Description-Content-Type',)
+
+_ALL_FIELDS = set()
+_ALL_FIELDS.update(_241_FIELDS)
+_ALL_FIELDS.update(_314_FIELDS)
+_ALL_FIELDS.update(_345_FIELDS)
+_ALL_FIELDS.update(_426_FIELDS)
+_ALL_FIELDS.update(_566_FIELDS)
+
+EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
+
+
+def _version2fieldlist(version):
+    if version == '1.0':
+        return _241_FIELDS
+    elif version == '1.1':
+        return _314_FIELDS
+    elif version == '1.2':
+        return _345_FIELDS
+    elif version in ('1.3', '2.1'):
+        return _345_FIELDS + _566_FIELDS
+    elif version == '2.0':
+        return _426_FIELDS
+    raise MetadataUnrecognizedVersionError(version)
+
+
+def _best_version(fields):
+    """Detect the best version depending on the fields used."""
+    def _has_marker(keys, markers):
+        for marker in markers:
+            if marker in keys:
+                return True
+        return False
+
+    keys = []
+    for key, value in fields.items():
+        if value in ([], 'UNKNOWN', None):
+            continue
+        keys.append(key)
+
+    possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.0', '2.1']
+
+    # first, remove the versions that don't include one of the fields used
+    for key in keys:
+        if key not in _241_FIELDS and '1.0' in possible_versions:
+            possible_versions.remove('1.0')
+            logger.debug('Removed 1.0 due to %s', key)
+        if key not in _314_FIELDS and '1.1' in possible_versions:
+            possible_versions.remove('1.1')
+            logger.debug('Removed 1.1 due to %s', key)
+        if key not in _345_FIELDS and '1.2' in possible_versions:
+            possible_versions.remove('1.2')
+            logger.debug('Removed 1.2 due to %s', key)
+        if key not in _566_FIELDS and '1.3' in possible_versions:
+            possible_versions.remove('1.3')
+            logger.debug('Removed 1.3 due to %s', key)
+        if key not in _566_FIELDS and '2.1' in possible_versions:
+            if key != 'Description':  # In 2.1, description allowed after headers
+                possible_versions.remove('2.1')
+                logger.debug('Removed 2.1 due to %s', key)
+        if key not in _426_FIELDS and '2.0' in possible_versions:
+            possible_versions.remove('2.0')
+            logger.debug('Removed 2.0 due to %s', key)
+
+    # possible_versions now contains only the qualifying versions
+    if len(possible_versions) == 1:
+        return possible_versions[0]   # found !
+    elif len(possible_versions) == 0:
+        logger.debug('Out of options - unknown metadata set: %s', fields)
+        raise MetadataConflictError('Unknown metadata set')
+
+    # let's see if one unique marker is found
+    is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
+    is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
+    is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS)
+    is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
+    if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_0) > 1:
+        raise MetadataConflictError('You used incompatible 1.1/1.2/2.0/2.1 fields')
+
+    # we have a choice among 1.0, 1.1, 1.2, 2.0 and 2.1:
+    #   - 1.0 has a broken Summary field but works with all tools
+    #   - 1.1 is to avoid
+    #   - 1.2 fixes Summary but has little adoption
+    #   - 2.0 adds more features and is very new
+    if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_0:
+        # we couldn't find any specific marker
+        if PKG_INFO_PREFERRED_VERSION in possible_versions:
+            return PKG_INFO_PREFERRED_VERSION
+    if is_1_1:
+        return '1.1'
+    if is_1_2:
+        return '1.2'
+    if is_2_1:
+        return '2.1'
+
+    return '2.0'
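+
+# A quick illustration of the detection above (a sketch derived from the
+# field tables in this module, not from upstream docs): a 2.x-only marker
+# field such as Provides-Extra pins the version once the older versions
+# have been ruled out.
+#
+#     >>> _best_version({'Name': 'foo', 'Version': '1.0',
+#     ...                'Provides-Extra': ['test']})
+#     '2.0'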
+
+_ATTR2FIELD = {
+    'metadata_version': 'Metadata-Version',
+    'name': 'Name',
+    'version': 'Version',
+    'platform': 'Platform',
+    'supported_platform': 'Supported-Platform',
+    'summary': 'Summary',
+    'description': 'Description',
+    'keywords': 'Keywords',
+    'home_page': 'Home-page',
+    'author': 'Author',
+    'author_email': 'Author-email',
+    'maintainer': 'Maintainer',
+    'maintainer_email': 'Maintainer-email',
+    'license': 'License',
+    'classifier': 'Classifier',
+    'download_url': 'Download-URL',
+    'obsoletes_dist': 'Obsoletes-Dist',
+    'provides_dist': 'Provides-Dist',
+    'requires_dist': 'Requires-Dist',
+    'setup_requires_dist': 'Setup-Requires-Dist',
+    'requires_python': 'Requires-Python',
+    'requires_external': 'Requires-External',
+    'requires': 'Requires',
+    'provides': 'Provides',
+    'obsoletes': 'Obsoletes',
+    'project_url': 'Project-URL',
+    'private_version': 'Private-Version',
+    'obsoleted_by': 'Obsoleted-By',
+    'extension': 'Extension',
+    'provides_extra': 'Provides-Extra',
+}
+
+_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
+_VERSIONS_FIELDS = ('Requires-Python',)
+_VERSION_FIELDS = ('Version',)
+_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
+               'Requires', 'Provides', 'Obsoletes-Dist',
+               'Provides-Dist', 'Requires-Dist', 'Requires-External',
+               'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
+               'Provides-Extra', 'Extension')
+_LISTTUPLEFIELDS = ('Project-URL',)
+
+_ELEMENTSFIELD = ('Keywords',)
+
+_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
+
+_MISSING = object()
+
+_FILESAFE = re.compile('[^A-Za-z0-9.]+')
+
+
+def _get_name_and_version(name, version, for_filename=False):
+    """Return the distribution name with version.
+
+    If for_filename is true, return a filename-escaped form."""
+    if for_filename:
+        # For both name and version any runs of non-alphanumeric or '.'
+        # characters are replaced with a single '-'.  Additionally any
+        # spaces in the version string become '.'
+        name = _FILESAFE.sub('-', name)
+        version = _FILESAFE.sub('-', version.replace(' ', '.'))
+    return '%s-%s' % (name, version)
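+
+# For example (derived from the substitutions above): runs of characters
+# outside [A-Za-z0-9.] collapse to '-', and spaces in the version become '.':
+#
+#     >>> _get_name_and_version('my tool', '1.0 beta', for_filename=True)
+#     'my-tool-1.0.beta'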
+
+
+class LegacyMetadata(object):
+    """The legacy metadata of a release.
+
+    Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can
+    instantiate the class with one of these arguments (or none):
+    - *path*, the path to a metadata file
+    - *fileobj*, a file-like object with metadata as content
+    - *mapping*, a dict-like object
+    - *scheme*, a version scheme name
+    """
+    # TODO document the mapping API and UNKNOWN default key
+
+    def __init__(self, path=None, fileobj=None, mapping=None,
+                 scheme='default'):
+        if [path, fileobj, mapping].count(None) < 2:
+            raise TypeError('path, fileobj and mapping are exclusive')
+        self._fields = {}
+        self.requires_files = []
+        self._dependencies = None
+        self.scheme = scheme
+        if path is not None:
+            self.read(path)
+        elif fileobj is not None:
+            self.read_file(fileobj)
+        elif mapping is not None:
+            self.update(mapping)
+            self.set_metadata_version()
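+
+    # A minimal usage sketch (illustrative names, not from upstream):
+    # attribute-style keys in the mapping are converted to PKG-INFO field
+    # names via _ATTR2FIELD.
+    #
+    #     >>> md = LegacyMetadata(mapping={'name': 'foo', 'version': '1.0'})
+    #     >>> md['Name']
+    #     'foo'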
+
+    def set_metadata_version(self):
+        self._fields['Metadata-Version'] = _best_version(self._fields)
+
+    def _write_field(self, fileobj, name, value):
+        fileobj.write('%s: %s\n' % (name, value))
+
+    def __getitem__(self, name):
+        return self.get(name)
+
+    def __setitem__(self, name, value):
+        return self.set(name, value)
+
+    def __delitem__(self, name):
+        field_name = self._convert_name(name)
+        try:
+            del self._fields[field_name]
+        except KeyError:
+            raise KeyError(name)
+
+    def __contains__(self, name):
+        return (name in self._fields or
+                self._convert_name(name) in self._fields)
+
+    def _convert_name(self, name):
+        if name in _ALL_FIELDS:
+            return name
+        name = name.replace('-', '_').lower()
+        return _ATTR2FIELD.get(name, name)
+
+    def _default_value(self, name):
+        if name in _LISTFIELDS or name in _ELEMENTSFIELD:
+            return []
+        return 'UNKNOWN'
+
+    def _remove_line_prefix(self, value):
+        if self.metadata_version in ('1.0', '1.1'):
+            return _LINE_PREFIX_PRE_1_2.sub('\n', value)
+        else:
+            return _LINE_PREFIX_1_2.sub('\n', value)
+
+    def __getattr__(self, name):
+        if name in _ATTR2FIELD:
+            return self[name]
+        raise AttributeError(name)
+
+    #
+    # Public API
+    #
+
+#    dependencies = property(_get_dependencies, _set_dependencies)
+
+    def get_fullname(self, filesafe=False):
+        """Return the distribution name with version.
+
+        If filesafe is true, return a filename-escaped form."""
+        return _get_name_and_version(self['Name'], self['Version'], filesafe)
+
+    def is_field(self, name):
+        """return True if name is a valid metadata key"""
+        name = self._convert_name(name)
+        return name in _ALL_FIELDS
+
+    def is_multi_field(self, name):
+        name = self._convert_name(name)
+        return name in _LISTFIELDS
+
+    def read(self, filepath):
+        """Read the metadata values from a file path."""
+        fp = codecs.open(filepath, 'r', encoding='utf-8')
+        try:
+            self.read_file(fp)
+        finally:
+            fp.close()
+
+    def read_file(self, fileob):
+        """Read the metadata values from a file object."""
+        msg = message_from_file(fileob)
+        self._fields['Metadata-Version'] = msg['metadata-version']
+
+        # When reading, get all the fields we can
+        for field in _ALL_FIELDS:
+            if field not in msg:
+                continue
+            if field in _LISTFIELDS:
+                # we can have multiple lines
+                values = msg.get_all(field)
+                if field in _LISTTUPLEFIELDS and values is not None:
+                    values = [tuple(value.split(',')) for value in values]
+                self.set(field, values)
+            else:
+                # single line
+                value = msg[field]
+                if value is not None and value != 'UNKNOWN':
+                    self.set(field, value)
+        # logger.debug('Attempting to set metadata for %s', self)
+        # self.set_metadata_version()
+
+    def write(self, filepath, skip_unknown=False):
+        """Write the metadata fields to filepath."""
+        fp = codecs.open(filepath, 'w', encoding='utf-8')
+        try:
+            self.write_file(fp, skip_unknown)
+        finally:
+            fp.close()
+
+    def write_file(self, fileobject, skip_unknown=False):
+        """Write the PKG-INFO format data to a file object."""
+        self.set_metadata_version()
+
+        for field in _version2fieldlist(self['Metadata-Version']):
+            values = self.get(field)
+            if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
+                continue
+            if field in _ELEMENTSFIELD:
+                self._write_field(fileobject, field, ','.join(values))
+                continue
+            if field not in _LISTFIELDS:
+                if field == 'Description':
+                    if self.metadata_version in ('1.0', '1.1'):
+                        values = values.replace('\n', '\n        ')
+                    else:
+                        values = values.replace('\n', '\n       |')
+                values = [values]
+
+            if field in _LISTTUPLEFIELDS:
+                values = [','.join(value) for value in values]
+
+            for value in values:
+                self._write_field(fileobject, field, value)
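+
+    # The result is the usual PKG-INFO header block, one 'Field: value'
+    # line per entry, with list fields repeated per value, e.g. (sketch,
+    # values hypothetical):
+    #
+    #     Metadata-Version: 1.0
+    #     Name: foo
+    #     Version: 1.0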
+
+    def update(self, other=None, **kwargs):
+        """Set metadata values from the given iterable `other` and kwargs.
+
+        Behavior is like `dict.update`: If `other` has a ``keys`` method,
+        they are looped over and ``self[key]`` is assigned ``other[key]``.
+        Else, ``other`` is an iterable of ``(key, value)`` iterables.
+
+        Keys that don't match a metadata field or that have an empty value are
+        dropped.
+        """
+        def _set(key, value):
+            if key in _ATTR2FIELD and value:
+                self.set(self._convert_name(key), value)
+
+        if not other:
+            # other is None or empty container
+            pass
+        elif hasattr(other, 'keys'):
+            for k in other.keys():
+                _set(k, other[k])
+        else:
+            for k, v in other:
+                _set(k, v)
+
+        if kwargs:
+            for k, v in kwargs.items():
+                _set(k, v)
+
+    def set(self, name, value):
+        """Control then set a metadata field."""
+        name = self._convert_name(name)
+
+        if ((name in _ELEMENTSFIELD or name == 'Platform') and
+            not isinstance(value, (list, tuple))):
+            if isinstance(value, string_types):
+                value = [v.strip() for v in value.split(',')]
+            else:
+                value = []
+        elif (name in _LISTFIELDS and
+              not isinstance(value, (list, tuple))):
+            if isinstance(value, string_types):
+                value = [value]
+            else:
+                value = []
+
+        if logger.isEnabledFor(logging.WARNING):
+            project_name = self['Name']
+
+            scheme = get_scheme(self.scheme)
+            if name in _PREDICATE_FIELDS and value is not None:
+                for v in value:
+                    # check that the values are valid
+                    if not scheme.is_valid_matcher(v.split(';')[0]):
+                        logger.warning(
+                            "'%s': '%s' is not valid (field '%s')",
+                            project_name, v, name)
+            # FIXME this rejects UNKNOWN, is that right?
+            elif name in _VERSIONS_FIELDS and value is not None:
+                if not scheme.is_valid_constraint_list(value):
+                    logger.warning("'%s': '%s' is not a valid version (field '%s')",
+                                   project_name, value, name)
+            elif name in _VERSION_FIELDS and value is not None:
+                if not scheme.is_valid_version(value):
+                    logger.warning("'%s': '%s' is not a valid version (field '%s')",
+                                   project_name, value, name)
+
+        if name in _UNICODEFIELDS:
+            if name == 'Description':
+                value = self._remove_line_prefix(value)
+
+        self._fields[name] = value
+
+    def get(self, name, default=_MISSING):
+        """Get a metadata field."""
+        name = self._convert_name(name)
+        if name not in self._fields:
+            if default is _MISSING:
+                default = self._default_value(name)
+            return default
+        if name in _UNICODEFIELDS:
+            value = self._fields[name]
+            return value
+        elif name in _LISTFIELDS:
+            value = self._fields[name]
+            if value is None:
+                return []
+            res = []
+            for val in value:
+                if name not in _LISTTUPLEFIELDS:
+                    res.append(val)
+                else:
+                    # That's for Project-URL
+                    res.append((val[0], val[1]))
+            return res
+
+        elif name in _ELEMENTSFIELD:
+            value = self._fields[name]
+            if isinstance(value, string_types):
+                return value.split(',')
+        return self._fields[name]
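+
+    # List fields default to [] and scalar fields to 'UNKNOWN' when unset
+    # (sketch, names illustrative):
+    #
+    #     >>> md = LegacyMetadata(mapping={'name': 'foo', 'version': '1.0'})
+    #     >>> md.get('Requires-Dist')
+    #     []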
+
+    def check(self, strict=False):
+        """Check if the metadata is compliant. If strict is True then raise if
+        no Name or Version are provided"""
+        self.set_metadata_version()
+
+        # XXX should check the versions (if the file was loaded)
+        missing, warnings = [], []
+
+        for attr in ('Name', 'Version'):  # required by PEP 345
+            if attr not in self:
+                missing.append(attr)
+
+        if strict and missing != []:
+            msg = 'missing required metadata: %s' % ', '.join(missing)
+            raise MetadataMissingError(msg)
+
+        for attr in ('Home-page', 'Author'):
+            if attr not in self:
+                missing.append(attr)
+
+        # checking metadata 1.2 (XXX needs to check 1.1, 1.0)
+        if self['Metadata-Version'] != '1.2':
+            return missing, warnings
+
+        scheme = get_scheme(self.scheme)
+
+        def are_valid_constraints(value):
+            for v in value:
+                if not scheme.is_valid_matcher(v.split(';')[0]):
+                    return False
+            return True
+
+        for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
+                                   (_VERSIONS_FIELDS,
+                                    scheme.is_valid_constraint_list),
+                                   (_VERSION_FIELDS,
+                                    scheme.is_valid_version)):
+            for field in fields:
+                value = self.get(field, None)
+                if value is not None and not controller(value):
+                    warnings.append("Wrong value for '%s': %s" % (field, value))
+
+        return missing, warnings
+
+    def todict(self, skip_missing=False):
+        """Return fields as a dict.
+
+        Field names will be converted to use the underscore-lowercase style
+        instead of hyphen-mixed case (i.e. home_page instead of Home-page).
+        """
+        self.set_metadata_version()
+
+        mapping_1_0 = (
+            ('metadata_version', 'Metadata-Version'),
+            ('name', 'Name'),
+            ('version', 'Version'),
+            ('summary', 'Summary'),
+            ('home_page', 'Home-page'),
+            ('author', 'Author'),
+            ('author_email', 'Author-email'),
+            ('license', 'License'),
+            ('description', 'Description'),
+            ('keywords', 'Keywords'),
+            ('platform', 'Platform'),
+            ('classifiers', 'Classifier'),
+            ('download_url', 'Download-URL'),
+        )
+
+        data = {}
+        for key, field_name in mapping_1_0:
+            if not skip_missing or field_name in self._fields:
+                data[key] = self[field_name]
+
+        if self['Metadata-Version'] == '1.2':
+            mapping_1_2 = (
+                ('requires_dist', 'Requires-Dist'),
+                ('requires_python', 'Requires-Python'),
+                ('requires_external', 'Requires-External'),
+                ('provides_dist', 'Provides-Dist'),
+                ('obsoletes_dist', 'Obsoletes-Dist'),
+                ('project_url', 'Project-URL'),
+                ('maintainer', 'Maintainer'),
+                ('maintainer_email', 'Maintainer-email'),
+            )
+            for key, field_name in mapping_1_2:
+                if not skip_missing or field_name in self._fields:
+                    if key != 'project_url':
+                        data[key] = self[field_name]
+                    else:
+                        data[key] = [','.join(u) for u in self[field_name]]
+
+        elif self['Metadata-Version'] == '1.1':
+            mapping_1_1 = (
+                ('provides', 'Provides'),
+                ('requires', 'Requires'),
+                ('obsoletes', 'Obsoletes'),
+            )
+            for key, field_name in mapping_1_1:
+                if not skip_missing or field_name in self._fields:
+                    data[key] = self[field_name]
+
+        return data
+
+    def add_requirements(self, requirements):
+        if self['Metadata-Version'] == '1.1':
+            # we can't have 1.1 metadata *and* Setuptools requires
+            for field in ('Obsoletes', 'Requires', 'Provides'):
+                if field in self:
+                    del self[field]
+        self['Requires-Dist'] += requirements
+
+    # Mapping API
+    # TODO could add iter* variants
+
+    def keys(self):
+        return list(_version2fieldlist(self['Metadata-Version']))
+
+    def __iter__(self):
+        for key in self.keys():
+            yield key
+
+    def values(self):
+        return [self[key] for key in self.keys()]
+
+    def items(self):
+        return [(key, self[key]) for key in self.keys()]
+
+    def __repr__(self):
+        return '<%s %s %s>' % (self.__class__.__name__, self.name,
+                               self.version)
+
+
+METADATA_FILENAME = 'pydist.json'
+WHEEL_METADATA_FILENAME = 'metadata.json'
+LEGACY_METADATA_FILENAME = 'METADATA'
+
+
+class Metadata(object):
+    """
+    The metadata of a release. This implementation uses 2.0 (JSON)
+    metadata where possible. If not possible, it wraps a LegacyMetadata
+    instance which handles the key-value metadata format.
+    """
+
+    METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$')
+
+    NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
+
+    VERSION_MATCHER = PEP440_VERSION_RE
+
+    SUMMARY_MATCHER = re.compile('.{1,2047}')
+
+    METADATA_VERSION = '2.0'
+
+    GENERATOR = 'distlib (%s)' % __version__
+
+    MANDATORY_KEYS = {
+        'name': (),
+        'version': (),
+        'summary': ('legacy',),
+    }
+
+    INDEX_KEYS = ('name version license summary description author '
+                  'author_email keywords platform home_page classifiers '
+                  'download_url')
+
+    DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '
+                       'dev_requires provides meta_requires obsoleted_by '
+                       'supports_environments')
+
+    SYNTAX_VALIDATORS = {
+        'metadata_version': (METADATA_VERSION_MATCHER, ()),
+        'name': (NAME_MATCHER, ('legacy',)),
+        'version': (VERSION_MATCHER, ('legacy',)),
+        'summary': (SUMMARY_MATCHER, ('legacy',)),
+    }
+
+    __slots__ = ('_legacy', '_data', 'scheme')
+
+    def __init__(self, path=None, fileobj=None, mapping=None,
+                 scheme='default'):
+        if [path, fileobj, mapping].count(None) < 2:
+            raise TypeError('path, fileobj and mapping are exclusive')
+        self._legacy = None
+        self._data = None
+        self.scheme = scheme
+        if mapping is not None:
+            try:
+                self._validate_mapping(mapping, scheme)
+                self._data = mapping
+            except MetadataUnrecognizedVersionError:
+                self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)
+                self.validate()
+        else:
+            data = None
+            if path:
+                with open(path, 'rb') as f:
+                    data = f.read()
+            elif fileobj:
+                data = fileobj.read()
+            if data is None:
+                # Initialised with no args - fields to be added later
+                self._data = {
+                    'metadata_version': self.METADATA_VERSION,
+                    'generator': self.GENERATOR,
+                }
+            else:
+                if not isinstance(data, text_type):
+                    data = data.decode('utf-8')
+                try:
+                    self._data = json.loads(data)
+                    self._validate_mapping(self._data, scheme)
+                except ValueError:
+                    # Note: MetadataUnrecognizedVersionError does not
+                    # inherit from ValueError (it's a DistlibException,
+                    # which should not inherit from ValueError).
+                    # The ValueError comes from the json.load - if that
+                    # succeeds and we get a validation error, we want
+                    # that to propagate
+                    self._legacy = LegacyMetadata(fileobj=StringIO(data),
+                                                  scheme=scheme)
+                    self.validate()
+
+    common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))
+
+    none_list = (None, list)
+    none_dict = (None, dict)
+
+    mapped_keys = {
+        'run_requires': ('Requires-Dist', list),
+        'build_requires': ('Setup-Requires-Dist', list),
+        'dev_requires': none_list,
+        'test_requires': none_list,
+        'meta_requires': none_list,
+        'extras': ('Provides-Extra', list),
+        'modules': none_list,
+        'namespaces': none_list,
+        'exports': none_dict,
+        'commands': none_dict,
+        'classifiers': ('Classifier', list),
+        'source_url': ('Download-URL', None),
+        'metadata_version': ('Metadata-Version', None),
+    }
+
+    del none_list, none_dict
+
+    def __getattribute__(self, key):
+        common = object.__getattribute__(self, 'common_keys')
+        mapped = object.__getattribute__(self, 'mapped_keys')
+        if key in mapped:
+            lk, maker = mapped[key]
+            if self._legacy:
+                if lk is None:
+                    result = None if maker is None else maker()
+                else:
+                    result = self._legacy.get(lk)
+            else:
+                value = None if maker is None else maker()
+                if key not in ('commands', 'exports', 'modules', 'namespaces',
+                               'classifiers'):
+                    result = self._data.get(key, value)
+                else:
+                    # special cases for PEP 459
+                    sentinel = object()
+                    result = sentinel
+                    d = self._data.get('extensions')
+                    if d:
+                        if key == 'commands':
+                            result = d.get('python.commands', value)
+                        elif key == 'classifiers':
+                            d = d.get('python.details')
+                            if d:
+                                result = d.get(key, value)
+                        else:
+                            d = d.get('python.exports')
+                            if not d:
+                                d = self._data.get('python.exports')
+                            if d:
+                                result = d.get(key, value)
+                    if result is sentinel:
+                        result = value
+        elif key not in common:
+            result = object.__getattribute__(self, key)
+        elif self._legacy:
+            result = self._legacy.get(key)
+        else:
+            result = self._data.get(key)
+        return result
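+
+    # A sketch of the routing above (field values illustrative): unset
+    # mapped keys fall back to maker(), e.g. list().
+    #
+    #     >>> md = Metadata(mapping={'metadata_version': '2.0', 'name': 'foo',
+    #     ...                        'version': '1.0', 'summary': 'A demo'})
+    #     >>> md.run_requires
+    #     []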
+
+    def _validate_value(self, key, value, scheme=None):
+        if key in self.SYNTAX_VALIDATORS:
+            pattern, exclusions = self.SYNTAX_VALIDATORS[key]
+            if (scheme or self.scheme) not in exclusions:
+                m = pattern.match(value)
+                if not m:
+                    raise MetadataInvalidError("'%s' is an invalid value for "
+                                               "the '%s' property" % (value,
+                                                                    key))
+
+    def __setattr__(self, key, value):
+        self._validate_value(key, value)
+        common = object.__getattribute__(self, 'common_keys')
+        mapped = object.__getattribute__(self, 'mapped_keys')
+        if key in mapped:
+            lk, _ = mapped[key]
+            if self._legacy:
+                if lk is None:
+                    raise NotImplementedError
+                self._legacy[lk] = value
+            elif key not in ('commands', 'exports', 'modules', 'namespaces',
+                             'classifiers'):
+                self._data[key] = value
+            else:
+                # special cases for PEP 459
+                d = self._data.setdefault('extensions', {})
+                if key == 'commands':
+                    d['python.commands'] = value
+                elif key == 'classifiers':
+                    d = d.setdefault('python.details', {})
+                    d[key] = value
+                else:
+                    d = d.setdefault('python.exports', {})
+                    d[key] = value
+        elif key not in common:
+            object.__setattr__(self, key, value)
+        else:
+            if key == 'keywords':
+                if isinstance(value, string_types):
+                    value = value.strip()
+                    if value:
+                        value = value.split()
+                    else:
+                        value = []
+            if self._legacy:
+                self._legacy[key] = value
+            else:
+                self._data[key] = value
+
+    @property
+    def name_and_version(self):
+        return _get_name_and_version(self.name, self.version, True)
+
+    @property
+    def provides(self):
+        if self._legacy:
+            result = self._legacy['Provides-Dist']
+        else:
+            result = self._data.setdefault('provides', [])
+        s = '%s (%s)' % (self.name, self.version)
+        if s not in result:
+            result.append(s)
+        return result
+
+    @provides.setter
+    def provides(self, value):
+        if self._legacy:
+            self._legacy['Provides-Dist'] = value
+        else:
+            self._data['provides'] = value
+
+    def get_requirements(self, reqts, extras=None, env=None):
+        """
+        Base method to get dependencies, given a set of extras
+        to satisfy and an optional environment context.
+        :param reqts: A list of sometimes-wanted dependencies,
+                      perhaps dependent on extras and environment.
+        :param extras: A list of optional components being requested.
+        :param env: An optional environment for marker evaluation.
+        """
+        if self._legacy:
+            result = reqts
+        else:
+            result = []
+            extras = get_extras(extras or [], self.extras)
+            for d in reqts:
+                if 'extra' not in d and 'environment' not in d:
+                    # unconditional
+                    include = True
+                else:
+                    if 'extra' not in d:
+                        # Not extra-dependent - only environment-dependent
+                        include = True
+                    else:
+                        include = d.get('extra') in extras
+                    if include:
+                        # Not excluded because of extras, check environment
+                        marker = d.get('environment')
+                        if marker:
+                            include = interpret(marker, env)
+                if include:
+                    result.extend(d['requires'])
+            for key in ('build', 'dev', 'test'):
+                e = ':%s:' % key
+                if e in extras:
+                    extras.remove(e)
+                    # A recursive call, but it should terminate since 'test'
+                    # has been removed from the extras
+                    reqts = self._data.get('%s_requires' % key, [])
+                    result.extend(self.get_requirements(reqts, extras=extras,
+                                                        env=env))
+        return result
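+
+    # Shape of the 2.0 requirement entries this walks (values illustrative):
+    #
+    #     reqts = [
+    #         {'requires': ['requests']},                        # unconditional
+    #         {'requires': ['pytest'], 'extra': 'test'},         # extra-gated
+    #         {'requires': ['pywin32'],
+    #          'environment': "sys_platform == 'win32'"},        # marker-gated
+    #     ]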
+
+    @property
+    def dictionary(self):
+        if self._legacy:
+            return self._from_legacy()
+        return self._data
+
+    @property
+    def dependencies(self):
+        if self._legacy:
+            raise NotImplementedError
+        else:
+            return extract_by_key(self._data, self.DEPENDENCY_KEYS)
+
+    @dependencies.setter
+    def dependencies(self, value):
+        if self._legacy:
+            raise NotImplementedError
+        else:
+            self._data.update(value)
+
+    def _validate_mapping(self, mapping, scheme):
+        if mapping.get('metadata_version') != self.METADATA_VERSION:
+            raise MetadataUnrecognizedVersionError()
+        missing = []
+        for key, exclusions in self.MANDATORY_KEYS.items():
+            if key not in mapping:
+                if scheme not in exclusions:
+                    missing.append(key)
+        if missing:
+            msg = 'Missing metadata items: %s' % ', '.join(missing)
+            raise MetadataMissingError(msg)
+        for k, v in mapping.items():
+            self._validate_value(k, v, scheme)
+
+    def validate(self):
+        if self._legacy:
+            missing, warnings = self._legacy.check(True)
+            if missing or warnings:
+                logger.warning('Metadata: missing: %s, warnings: %s',
+                               missing, warnings)
+        else:
+            self._validate_mapping(self._data, self.scheme)
+
+    def todict(self):
+        if self._legacy:
+            return self._legacy.todict(True)
+        else:
+            result = extract_by_key(self._data, self.INDEX_KEYS)
+            return result
+
+    def _from_legacy(self):
+        assert self._legacy and not self._data
+        result = {
+            'metadata_version': self.METADATA_VERSION,
+            'generator': self.GENERATOR,
+        }
+        lmd = self._legacy.todict(True)     # skip missing ones
+        for k in ('name', 'version', 'license', 'summary', 'description',
+                  'classifier'):
+            if k in lmd:
+                if k == 'classifier':
+                    nk = 'classifiers'
+                else:
+                    nk = k
+                result[nk] = lmd[k]
+        kw = lmd.get('Keywords', [])
+        if kw == ['']:
+            kw = []
+        result['keywords'] = kw
+        keys = (('requires_dist', 'run_requires'),
+                ('setup_requires_dist', 'build_requires'))
+        for ok, nk in keys:
+            if ok in lmd and lmd[ok]:
+                result[nk] = [{'requires': lmd[ok]}]
+        result['provides'] = self.provides
+        author = {}
+        maintainer = {}
+        return result
+
+    LEGACY_MAPPING = {
+        'name': 'Name',
+        'version': 'Version',
+        'license': 'License',
+        'summary': 'Summary',
+        'description': 'Description',
+        'classifiers': 'Classifier',
+    }
+
+    def _to_legacy(self):
+        def process_entries(entries):
+            reqts = set()
+            for e in entries:
+                extra = e.get('extra')
+                env = e.get('environment')
+                rlist = e['requires']
+                for r in rlist:
+                    if not env and not extra:
+                        reqts.add(r)
+                    else:
+                        marker = ''
+                        if extra:
+                            marker = 'extra == "%s"' % extra
+                        if env:
+                            if marker:
+                                marker = '(%s) and %s' % (env, marker)
+                            else:
+                                marker = env
+                        reqts.add(';'.join((r, marker)))
+            return reqts
+
+        assert self._data and not self._legacy
+        result = LegacyMetadata()
+        nmd = self._data
+        for nk, ok in self.LEGACY_MAPPING.items():
+            if nk in nmd:
+                result[ok] = nmd[nk]
+        r1 = process_entries(self.run_requires + self.meta_requires)
+        r2 = process_entries(self.build_requires + self.dev_requires)
+        if self.extras:
+            result['Provides-Extra'] = sorted(self.extras)
+        result['Requires-Dist'] = sorted(r1)
+        result['Setup-Requires-Dist'] = sorted(r2)
+        # TODO: other fields such as contacts
+        return result
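+
+    # process_entries renders each gated entry as 'requirement;marker', so a
+    # hypothetical {'requires': ['pytest'], 'extra': 'test'} entry becomes:
+    #
+    #     pytest;extra == "test"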
+
+    def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
+        if [path, fileobj].count(None) != 1:
+            raise ValueError('Exactly one of path and fileobj is needed')
+        self.validate()
+        if legacy:
+            if self._legacy:
+                legacy_md = self._legacy
+            else:
+                legacy_md = self._to_legacy()
+            if path:
+                legacy_md.write(path, skip_unknown=skip_unknown)
+            else:
+                legacy_md.write_file(fileobj, skip_unknown=skip_unknown)
+        else:
+            if self._legacy:
+                d = self._from_legacy()
+            else:
+                d = self._data
+            if fileobj:
+                json.dump(d, fileobj, ensure_ascii=True, indent=2,
+                          sort_keys=True)
+            else:
+                with codecs.open(path, 'w', 'utf-8') as f:
+                    json.dump(d, f, ensure_ascii=True, indent=2,
+                              sort_keys=True)
+
+    def add_requirements(self, requirements):
+        if self._legacy:
+            self._legacy.add_requirements(requirements)
+        else:
+            run_requires = self._data.setdefault('run_requires', [])
+            always = None
+            for entry in run_requires:
+                if 'environment' not in entry and 'extra' not in entry:
+                    always = entry
+                    break
+            if always is None:
+                always = {'requires': requirements}
+                run_requires.insert(0, always)
+            else:
+                rset = set(always['requires']) | set(requirements)
+                always['requires'] = sorted(rset)
+
+    def __repr__(self):
+        name = self.name or '(no name)'
+        version = self.version or 'no version'
+        return '<%s %s %s (%s)>' % (self.__class__.__name__,
+                                    self.metadata_version, name, version)
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/resources.py b/lib/python3.7/site-packages/pip/_vendor/distlib/resources.py
new file mode 100644
index 0000000000000000000000000000000000000000..18840167a9e3ea1cf443b0803230e41481df41d2
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/resources.py
@@ -0,0 +1,355 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013-2017 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+from __future__ import unicode_literals
+
+import bisect
+import io
+import logging
+import os
+import pkgutil
+import shutil
+import sys
+import types
+import zipimport
+
+from . import DistlibException
+from .util import cached_property, get_cache_base, path_to_cache_dir, Cache
+
+logger = logging.getLogger(__name__)
+
+
+cache = None    # created when needed
+
+
+class ResourceCache(Cache):
+    def __init__(self, base=None):
+        if base is None:
+            # Use native string to avoid issues on 2.x: see Python #20140.
+            base = os.path.join(get_cache_base(), str('resource-cache'))
+        super(ResourceCache, self).__init__(base)
+
+    def is_stale(self, resource, path):
+        """
+        Is the cache stale for the given resource?
+
+        :param resource: The :class:`Resource` being cached.
+        :param path: The path of the resource in the cache.
+        :return: True if the cache is stale.
+        """
+        # Cache invalidation is a hard problem :-)
+        return True
+
+    def get(self, resource):
+        """
+        Get a resource into the cache.
+
+        :param resource: A :class:`Resource` instance.
+        :return: The pathname of the resource in the cache.
+        """
+        prefix, path = resource.finder.get_cache_info(resource)
+        if prefix is None:
+            result = path
+        else:
+            result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
+            dirname = os.path.dirname(result)
+            if not os.path.isdir(dirname):
+                os.makedirs(dirname)
+            if not os.path.exists(result):
+                stale = True
+            else:
+                stale = self.is_stale(resource, path)
+            if stale:
+                # write the bytes of the resource to the cache location
+                with open(result, 'wb') as f:
+                    f.write(resource.bytes)
+        return result
+
+
+class ResourceBase(object):
+    def __init__(self, finder, name):
+        self.finder = finder
+        self.name = name
+
+
+class Resource(ResourceBase):
+    """
+    A class representing an in-package resource, such as a data file. This is
+    not normally instantiated by user code, but rather by a
+    :class:`ResourceFinder` which manages the resource.
+    """
+    is_container = False        # Backwards compatibility
+
+    def as_stream(self):
+        """
+        Get the resource as a stream.
+
+        This is not a property to make it obvious that it returns a new stream
+        each time.
+        """
+        return self.finder.get_stream(self)
+
+    @cached_property
+    def file_path(self):
+        global cache
+        if cache is None:
+            cache = ResourceCache()
+        return cache.get(self)
+
+    @cached_property
+    def bytes(self):
+        return self.finder.get_bytes(self)
+
+    @cached_property
+    def size(self):
+        return self.finder.get_size(self)
+
+
+class ResourceContainer(ResourceBase):
+    is_container = True     # Backwards compatibility
+
+    @cached_property
+    def resources(self):
+        return self.finder.get_resources(self)
+
+
+class ResourceFinder(object):
+    """
+    Resource finder for file system resources.
+    """
+
+    if sys.platform.startswith('java'):
+        skipped_extensions = ('.pyc', '.pyo', '.class')
+    else:
+        skipped_extensions = ('.pyc', '.pyo')
+
+    def __init__(self, module):
+        self.module = module
+        self.loader = getattr(module, '__loader__', None)
+        self.base = os.path.dirname(getattr(module, '__file__', ''))
+
+    def _adjust_path(self, path):
+        return os.path.realpath(path)
+
+    def _make_path(self, resource_name):
+        # Issue #50: need to preserve type of path on Python 2.x
+        # like os.path._get_sep
+        if isinstance(resource_name, bytes):    # should only happen on 2.x
+            sep = b'/'
+        else:
+            sep = '/'
+        parts = resource_name.split(sep)
+        parts.insert(0, self.base)
+        result = os.path.join(*parts)
+        return self._adjust_path(result)
+
+    def _find(self, path):
+        return os.path.exists(path)
+
+    def get_cache_info(self, resource):
+        return None, resource.path
+
+    def find(self, resource_name):
+        path = self._make_path(resource_name)
+        if not self._find(path):
+            result = None
+        else:
+            if self._is_directory(path):
+                result = ResourceContainer(self, resource_name)
+            else:
+                result = Resource(self, resource_name)
+            result.path = path
+        return result
+
+    def get_stream(self, resource):
+        return open(resource.path, 'rb')
+
+    def get_bytes(self, resource):
+        with open(resource.path, 'rb') as f:
+            return f.read()
+
+    def get_size(self, resource):
+        return os.path.getsize(resource.path)
+
+    def get_resources(self, resource):
+        def allowed(f):
+            return (f != '__pycache__' and not
+                    f.endswith(self.skipped_extensions))
+        return set([f for f in os.listdir(resource.path) if allowed(f)])
+
+    def is_container(self, resource):
+        return self._is_directory(resource.path)
+
+    _is_directory = staticmethod(os.path.isdir)
+
+    def iterator(self, resource_name):
+        resource = self.find(resource_name)
+        if resource is not None:
+            todo = [resource]
+            while todo:
+                resource = todo.pop(0)
+                yield resource
+                if resource.is_container:
+                    rname = resource.name
+                    for name in resource.resources:
+                        if not rname:
+                            new_name = name
+                        else:
+                            new_name = '/'.join([rname, name])
+                        child = self.find(new_name)
+                        if child.is_container:
+                            todo.append(child)
+                        else:
+                            yield child
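+
+    # A hedged traversal sketch (package and resource names hypothetical):
+    #
+    #     f = finder('mypkg')               # see finder() further down
+    #     for r in f.iterator(''):          # yields files and containers
+    #         print(r.name, r.is_container)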
+
+
+class ZipResourceFinder(ResourceFinder):
+    """
+    Resource finder for resources in .zip files.
+    """
+    def __init__(self, module):
+        super(ZipResourceFinder, self).__init__(module)
+        archive = self.loader.archive
+        self.prefix_len = 1 + len(archive)
+        # PyPy doesn't have a _files attr on zipimporter, and you can't set one
+        if hasattr(self.loader, '_files'):
+            self._files = self.loader._files
+        else:
+            self._files = zipimport._zip_directory_cache[archive]
+        self.index = sorted(self._files)
+
+    def _adjust_path(self, path):
+        return path
+
+    def _find(self, path):
+        path = path[self.prefix_len:]
+        if path in self._files:
+            result = True
+        else:
+            if path and path[-1] != os.sep:
+                path = path + os.sep
+            i = bisect.bisect(self.index, path)
+            try:
+                result = self.index[i].startswith(path)
+            except IndexError:
+                result = False
+        if not result:
+            logger.debug('_find failed: %r %r', path, self.loader.prefix)
+        else:
+            logger.debug('_find worked: %r %r', path, self.loader.prefix)
+        return result
+
+    def get_cache_info(self, resource):
+        prefix = self.loader.archive
+        path = resource.path[1 + len(prefix):]
+        return prefix, path
+
+    def get_bytes(self, resource):
+        return self.loader.get_data(resource.path)
+
+    def get_stream(self, resource):
+        return io.BytesIO(self.get_bytes(resource))
+
+    def get_size(self, resource):
+        path = resource.path[self.prefix_len:]
+        return self._files[path][3]
+
+    def get_resources(self, resource):
+        path = resource.path[self.prefix_len:]
+        if path and path[-1] != os.sep:
+            path += os.sep
+        plen = len(path)
+        result = set()
+        i = bisect.bisect(self.index, path)
+        while i < len(self.index):
+            if not self.index[i].startswith(path):
+                break
+            s = self.index[i][plen:]
+            result.add(s.split(os.sep, 1)[0])   # only immediate children
+            i += 1
+        return result
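+
+    # Both get_resources() and _is_directory() lean on the sorted name index:
+    # bisect locates the first entry >= the prefix, and a directory exists
+    # exactly when that entry extends the prefix (sketch, assuming '/' as
+    # os.sep):
+    #
+    #     index = ['pkg/__init__.py', 'pkg/data/defaults.conf']
+    #     i = bisect.bisect(index, 'pkg/data/')      # -> 1
+    #     index[i].startswith('pkg/data/')           # -> True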
+
+    def _is_directory(self, path):
+        path = path[self.prefix_len:]
+        if path and path[-1] != os.sep:
+            path += os.sep
+        i = bisect.bisect(self.index, path)
+        try:
+            result = self.index[i].startswith(path)
+        except IndexError:
+            result = False
+        return result
+
+_finder_registry = {
+    type(None): ResourceFinder,
+    zipimport.zipimporter: ZipResourceFinder
+}
+
+try:
+    # In Python 3.6, _frozen_importlib -> _frozen_importlib_external
+    try:
+        import _frozen_importlib_external as _fi
+    except ImportError:
+        import _frozen_importlib as _fi
+    _finder_registry[_fi.SourceFileLoader] = ResourceFinder
+    _finder_registry[_fi.FileFinder] = ResourceFinder
+    del _fi
+except (ImportError, AttributeError):
+    pass
+
+
+def register_finder(loader, finder_maker):
+    _finder_registry[type(loader)] = finder_maker
+
+_finder_cache = {}
+
+
+def finder(package):
+    """
+    Return a resource finder for a package.
+    :param package: The name of the package.
+    :return: A :class:`ResourceFinder` instance for the package.
+    """
+    if package in _finder_cache:
+        result = _finder_cache[package]
+    else:
+        if package not in sys.modules:
+            __import__(package)
+        module = sys.modules[package]
+        path = getattr(module, '__path__', None)
+        if path is None:
+            raise DistlibException('You cannot get a finder for a module, '
+                                   'only for a package')
+        loader = getattr(module, '__loader__', None)
+        finder_maker = _finder_registry.get(type(loader))
+        if finder_maker is None:
+            raise DistlibException('Unable to locate finder for %r' % package)
+        result = finder_maker(module)
+        _finder_cache[package] = result
+    return result
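+
+# Typical use (package and resource names hypothetical):
+#
+#     f = finder('mypkg')
+#     r = f.find('defaults.conf')      # None if the resource is absent
+#     if r is not None:
+#         data = r.bytes               # or r.as_stream() / r.file_path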
+
+
+_dummy_module = types.ModuleType(str('__dummy__'))
+
+
+def finder_for_path(path):
+    """
+    Return a resource finder for a path, which should represent a container.
+
+    :param path: The path.
+    :return: A :class:`ResourceFinder` instance for the path.
+    """
+    result = None
+    # calls any path hooks, gets importer into cache
+    pkgutil.get_importer(path)
+    loader = sys.path_importer_cache.get(path)
+    finder = _finder_registry.get(type(loader))
+    if finder:
+        module = _dummy_module
+        module.__file__ = os.path.join(path, '')
+        module.__loader__ = loader
+        result = finder(module)
+    return result
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/scripts.py b/lib/python3.7/site-packages/pip/_vendor/distlib/scripts.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e22cb9163d9a8047c8f311757c0ff648d9c4f0f
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/scripts.py
@@ -0,0 +1,417 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013-2015 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+from io import BytesIO
+import logging
+import os
+import re
+import struct
+import sys
+
+from .compat import sysconfig, detect_encoding, ZipFile
+from .resources import finder
+from .util import (FileOperator, get_export_entry, convert_path,
+                   get_executable, in_venv)
+
+logger = logging.getLogger(__name__)
+
+_DEFAULT_MANIFEST = '''
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+ <assemblyIdentity version="1.0.0.0"
+ processorArchitecture="X86"
+ name="%s"
+ type="win32"/>
+
+ <!-- Identify the application security requirements. -->
+ <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+ <security>
+ <requestedPrivileges>
+ <requestedExecutionLevel level="asInvoker" uiAccess="false"/>
+ </requestedPrivileges>
+ </security>
+ </trustInfo>
+</assembly>'''.strip()
+
+# check if Python is called on the first line with this expression
+FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
+SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*-
+if __name__ == '__main__':
+    import sys, re
+
+    def _resolve(module, func):
+        __import__(module)
+        mod = sys.modules[module]
+        parts = func.split('.')
+        result = getattr(mod, parts.pop(0))
+        for p in parts:
+            result = getattr(result, p)
+        return result
+
+    try:
+        sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+
+        func = _resolve('%(module)s', '%(func)s')
+        rc = func() # None interpreted as 0
+    except Exception as e:  # only supporting Python >= 2.6
+        sys.stderr.write('%%s\n' %% e)
+        rc = 1
+    sys.exit(rc)
+'''
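+
+# For an export entry such as 'foo = pkg.mod:main' (hypothetical), the
+# template is filled as below; the generated script resolves pkg.mod.main
+# and exits with its return code:
+#
+#     SCRIPT_TEMPLATE % {'module': 'pkg.mod', 'func': 'main'}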
+
+
+def _enquote_executable(executable):
+    if ' ' in executable:
+        # make sure we quote only the executable in case of env
+        # for example /usr/bin/env "/dir with spaces/bin/jython"
+        # instead of "/usr/bin/env /dir with spaces/bin/jython"
+        # otherwise the whole command line would end up quoted
+        if executable.startswith('/usr/bin/env '):
+            env, _executable = executable.split(' ', 1)
+            if ' ' in _executable and not _executable.startswith('"'):
+                executable = '%s "%s"' % (env, _executable)
+        else:
+            if not executable.startswith('"'):
+                executable = '"%s"' % executable
+    return executable
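+
+# For example (path hypothetical):
+#
+#     >>> _enquote_executable('/usr/bin/env /dir with spaces/bin/jython')
+#     '/usr/bin/env "/dir with spaces/bin/jython"'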
+
+
+class ScriptMaker(object):
+    """
+    A class to copy or create scripts from source scripts or callable
+    specifications.
+    """
+    script_template = SCRIPT_TEMPLATE
+
+    executable = None  # for shebangs
+
+    def __init__(self, source_dir, target_dir, add_launchers=True,
+                 dry_run=False, fileop=None):
+        self.source_dir = source_dir
+        self.target_dir = target_dir
+        self.add_launchers = add_launchers
+        self.force = False
+        self.clobber = False
+        # It only makes sense to set mode bits on POSIX.
+        self.set_mode = (os.name == 'posix') or (os.name == 'java' and
+                                                 os._name == 'posix')
+        self.variants = set(('', 'X.Y'))
+        self._fileop = fileop or FileOperator(dry_run)
+
+        self._is_nt = os.name == 'nt' or (
+            os.name == 'java' and os._name == 'nt')
+
+    def _get_alternate_executable(self, executable, options):
+        if options.get('gui', False) and self._is_nt:  # pragma: no cover
+            dn, fn = os.path.split(executable)
+            fn = fn.replace('python', 'pythonw')
+            executable = os.path.join(dn, fn)
+        return executable
+
+    if sys.platform.startswith('java'):  # pragma: no cover
+        def _is_shell(self, executable):
+            """
+            Determine if the specified executable is a script
+            (contains a #! line)
+            """
+            try:
+                with open(executable) as fp:
+                    return fp.read(2) == '#!'
+            except (OSError, IOError):
+                logger.warning('Failed to open %s', executable)
+                return False
+
+        def _fix_jython_executable(self, executable):
+            if self._is_shell(executable):
+                # The workaround for Jython is not needed on Linux systems.
+                import java
+
+                if java.lang.System.getProperty('os.name') == 'Linux':
+                    return executable
+            elif executable.lower().endswith('jython.exe'):
+                # Use wrapper exe for Jython on Windows
+                return executable
+            return '/usr/bin/env %s' % executable
+
+    def _build_shebang(self, executable, post_interp):
+        """
+        Build a shebang line. In the simple case (on Windows, or a shebang
+        line which is not too long and contains no spaces) use a simple
+        formulation for the shebang. Otherwise, use /bin/sh as the
+        executable, with a contrived
+        shebang which allows the script to run either under Python or sh, using
+        suitable quoting. Thanks to Harald Nordgren for his input.
+
+        See also: http://www.in-ulm.de/~mascheck/various/shebang/#length
+                  https://hg.mozilla.org/mozilla-central/file/tip/mach
+        """
+        if os.name != 'posix':
+            simple_shebang = True
+        else:
+            # Add 3 for '#!' prefix and newline suffix.
+            shebang_length = len(executable) + len(post_interp) + 3
+            if sys.platform == 'darwin':
+                max_shebang_length = 512
+            else:
+                max_shebang_length = 127
+            simple_shebang = ((b' ' not in executable) and
+                              (shebang_length <= max_shebang_length))
+
+        if simple_shebang:
+            result = b'#!' + executable + post_interp + b'\n'
+        else:
+            result = b'#!/bin/sh\n'
+            result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
+            result += b"' '''"
+        return result
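+
+    # The contrived form renders as (sketch, path hypothetical):
+    #
+    #     #!/bin/sh
+    #     '''exec' /long/path/to/python "$0" "$@"
+    #     ' '''
+    #
+    # sh executes the exec line, while Python parses lines 2-3 as a
+    # harmless string literal.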
+
+    def _get_shebang(self, encoding, post_interp=b'', options=None):
+        enquote = True
+        if self.executable:
+            executable = self.executable
+            enquote = False     # assume this will be taken care of
+        elif not sysconfig.is_python_build():
+            executable = get_executable()
+        elif in_venv():  # pragma: no cover
+            executable = os.path.join(sysconfig.get_path('scripts'),
+                            'python%s' % sysconfig.get_config_var('EXE'))
+        else:  # pragma: no cover
+            executable = os.path.join(
+                sysconfig.get_config_var('BINDIR'),
+               'python%s%s' % (sysconfig.get_config_var('VERSION'),
+                               sysconfig.get_config_var('EXE')))
+        if options:
+            executable = self._get_alternate_executable(executable, options)
+
+        if sys.platform.startswith('java'):  # pragma: no cover
+            executable = self._fix_jython_executable(executable)
+        # Normalise case for Windows
+        executable = os.path.normcase(executable)
+        # If the user didn't specify an executable, it may be necessary to
+        # cater for executable paths with spaces (not uncommon on Windows)
+        if enquote:
+            executable = _enquote_executable(executable)
+        # Issue #51: don't use fsencode, since we later try to
+        # check that the shebang is decodable using utf-8.
+        executable = executable.encode('utf-8')
+        # in case of IronPython, play safe and enable frames support
+        # post_interp is bytes at this point, so compare bytes literals
+        if (sys.platform == 'cli' and b'-X:Frames' not in post_interp
+            and b'-X:FullFrames' not in post_interp):  # pragma: no cover
+            post_interp += b' -X:Frames'
+        shebang = self._build_shebang(executable, post_interp)
+        # Python parser starts to read a script using UTF-8 until
+        # it gets a #coding:xxx cookie. The shebang has to be the
+        # first line of a file, the #coding:xxx cookie cannot be
+        # written before. So the shebang has to be decodable from
+        # UTF-8.
+        try:
+            shebang.decode('utf-8')
+        except UnicodeDecodeError:  # pragma: no cover
+            raise ValueError(
+                'The shebang (%r) is not decodable from utf-8' % shebang)
+        # If the script is encoded to a custom encoding (use a
+        # #coding:xxx cookie), the shebang has to be decodable from
+        # the script encoding too.
+        if encoding != 'utf-8':
+            try:
+                shebang.decode(encoding)
+            except UnicodeDecodeError:  # pragma: no cover
+                raise ValueError(
+                    'The shebang (%r) is not decodable '
+                    'from the script encoding (%r)' % (shebang, encoding))
+        return shebang
+
+    def _get_script_text(self, entry):
+        return self.script_template % dict(module=entry.prefix,
+                                           func=entry.suffix)
+
+    manifest = _DEFAULT_MANIFEST
+
+    def get_manifest(self, exename):
+        base = os.path.basename(exename)
+        return self.manifest % base
+
+    def _write_script(self, names, shebang, script_bytes, filenames, ext):
+        use_launcher = self.add_launchers and self._is_nt
+        linesep = os.linesep.encode('utf-8')
+        if not shebang.endswith(linesep):
+            shebang += linesep
+        if not use_launcher:
+            script_bytes = shebang + script_bytes
+        else:  # pragma: no cover
+            if ext == 'py':
+                launcher = self._get_launcher('t')
+            else:
+                launcher = self._get_launcher('w')
+            stream = BytesIO()
+            with ZipFile(stream, 'w') as zf:
+                zf.writestr('__main__.py', script_bytes)
+            zip_data = stream.getvalue()
+            script_bytes = launcher + shebang + zip_data
+        for name in names:
+            outname = os.path.join(self.target_dir, name)
+            if use_launcher:  # pragma: no cover
+                n, e = os.path.splitext(outname)
+                if e.startswith('.py'):
+                    outname = n
+                outname = '%s.exe' % outname
+                try:
+                    self._fileop.write_binary_file(outname, script_bytes)
+                except Exception:
+                    # Failed writing an executable - it might be in use.
+                    logger.warning('Failed to write executable - trying to '
+                                   'use .deleteme logic')
+                    dfname = '%s.deleteme' % outname
+                    if os.path.exists(dfname):
+                        os.remove(dfname)       # Not allowed to fail here
+                    os.rename(outname, dfname)  # nor here
+                    self._fileop.write_binary_file(outname, script_bytes)
+                    logger.debug('Able to replace executable using '
+                                 '.deleteme logic')
+                    try:
+                        os.remove(dfname)
+                    except Exception:
+                        pass    # still in use - ignore error
+            else:
+                if self._is_nt and not outname.endswith('.' + ext):  # pragma: no cover
+                    outname = '%s.%s' % (outname, ext)
+                if os.path.exists(outname) and not self.clobber:
+                    logger.warning('Skipping existing file %s', outname)
+                    continue
+                self._fileop.write_binary_file(outname, script_bytes)
+                if self.set_mode:
+                    self._fileop.set_executable_mode([outname])
+            filenames.append(outname)
+
+    def _make_script(self, entry, filenames, options=None):
+        post_interp = b''
+        if options:
+            args = options.get('interpreter_args', [])
+            if args:
+                args = ' %s' % ' '.join(args)
+                post_interp = args.encode('utf-8')
+        shebang = self._get_shebang('utf-8', post_interp, options=options)
+        script = self._get_script_text(entry).encode('utf-8')
+        name = entry.name
+        scriptnames = set()
+        if '' in self.variants:
+            scriptnames.add(name)
+        if 'X' in self.variants:
+            scriptnames.add('%s%s' % (name, sys.version_info[0]))
+        if 'X.Y' in self.variants:
+            # use version_info rather than sys.version[:3], which breaks for
+            # two-digit minor versions such as 3.10
+            scriptnames.add('%s-%s.%s' % (name, sys.version_info[0],
+                                          sys.version_info[1]))
+        if options and options.get('gui', False):
+            ext = 'pyw'
+        else:
+            ext = 'py'
+        self._write_script(scriptnames, shebang, script, filenames, ext)
+
+    def _copy_script(self, script, filenames):
+        adjust = False
+        script = os.path.join(self.source_dir, convert_path(script))
+        outname = os.path.join(self.target_dir, os.path.basename(script))
+        if not self.force and not self._fileop.newer(script, outname):
+            logger.debug('not copying %s (up-to-date)', script)
+            return
+
+        # Always open the file, but ignore failures in dry-run mode --
+        # that way, we'll get accurate feedback if we can read the
+        # script.
+        try:
+            f = open(script, 'rb')
+        except IOError:  # pragma: no cover
+            if not self.dry_run:
+                raise
+            f = None
+        else:
+            first_line = f.readline()
+            if not first_line:  # pragma: no cover
+                logger.warning('%s: %s is an empty file (skipping)',
+                               self.get_command_name(), script)
+                return
+
+            match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
+            if match:
+                adjust = True
+                post_interp = match.group(1) or b''
+
+        if not adjust:
+            if f:
+                f.close()
+            self._fileop.copy_file(script, outname)
+            if self.set_mode:
+                self._fileop.set_executable_mode([outname])
+            filenames.append(outname)
+        else:
+            logger.info('copying and adjusting %s -> %s', script,
+                        self.target_dir)
+            if not self._fileop.dry_run:
+                encoding, lines = detect_encoding(f.readline)
+                f.seek(0)
+                shebang = self._get_shebang(encoding, post_interp)
+                if b'pythonw' in first_line:  # pragma: no cover
+                    ext = 'pyw'
+                else:
+                    ext = 'py'
+                n = os.path.basename(outname)
+                self._write_script([n], shebang, f.read(), filenames, ext)
+            if f:
+                f.close()
+
+    @property
+    def dry_run(self):
+        return self._fileop.dry_run
+
+    @dry_run.setter
+    def dry_run(self, value):
+        self._fileop.dry_run = value
+
+    if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'):  # pragma: no cover
+        # Executable launcher support.
+        # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
+
+        def _get_launcher(self, kind):
+            if struct.calcsize('P') == 8:   # 64-bit
+                bits = '64'
+            else:
+                bits = '32'
+            name = '%s%s.exe' % (kind, bits)
+            # Issue 31: don't hardcode an absolute package name, but
+            # determine it relative to the current package
+            distlib_package = __name__.rsplit('.', 1)[0]
+            result = finder(distlib_package).find(name).bytes
+            return result
+
+    # Public API follows
+
+    def make(self, specification, options=None):
+        """
+        Make a script.
+
+        :param specification: The specification, which is either a valid export
+                              entry specification (to make a script from a
+                              callable) or a filename (to make a script by
+                              copying from a source location).
+        :param options: A dictionary of options controlling script generation.
+        :return: A list of all absolute pathnames written to.
+        """
+        filenames = []
+        entry = get_export_entry(specification)
+        if entry is None:
+            self._copy_script(specification, filenames)
+        else:
+            self._make_script(entry, filenames, options=options)
+        return filenames
+
+    def make_multiple(self, specifications, options=None):
+        """
+        Take a list of specifications and make scripts from them.
+
+        :param specifications: A list of specifications.
+        :param options: A dictionary of options controlling script generation.
+        :return: A list of all absolute pathnames written to.
+        """
+        filenames = []
+        for specification in specifications:
+            filenames.extend(self.make(specification, options))
+        return filenames
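+
+# Example (illustrative sketch; assumes the ScriptMaker class defined above
+# in this module):
+#
+#   maker = ScriptMaker(source_dir=None, target_dir='/tmp/bin')
+#   maker.make('foo = foo.cli:main')
+#   # -> list of paths written, e.g. ['/tmp/bin/foo', '/tmp/bin/foo-3.7']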
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/t32.exe b/lib/python3.7/site-packages/pip/_vendor/distlib/t32.exe
new file mode 100644
index 0000000000000000000000000000000000000000..a09d926872d84ae22a617dfe9ebb560d420b37de
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/t32.exe differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/t64.exe b/lib/python3.7/site-packages/pip/_vendor/distlib/t64.exe
new file mode 100644
index 0000000000000000000000000000000000000000..9da9b40de922fb203df6b9a1d0ad4139af536850
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/t64.exe differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/util.py b/lib/python3.7/site-packages/pip/_vendor/distlib/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d4bfd3bec7936f3af82aaa4447691b3192eb568
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/util.py
@@ -0,0 +1,1756 @@
+#
+# Copyright (C) 2012-2017 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+import codecs
+from collections import deque
+import contextlib
+import csv
+from glob import iglob as std_iglob
+import io
+import json
+import logging
+import os
+import py_compile
+import re
+import socket
+try:
+    import ssl
+except ImportError:  # pragma: no cover
+    ssl = None
+import subprocess
+import sys
+import tarfile
+import tempfile
+import textwrap
+
+try:
+    import threading
+except ImportError:  # pragma: no cover
+    import dummy_threading as threading
+import time
+
+from . import DistlibException
+from .compat import (string_types, text_type, shutil, raw_input, StringIO,
+                     cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
+                     splittype, HTTPHandler, BaseConfigurator, valid_ident,
+                     Container, configparser, URLError, ZipFile, fsdecode,
+                     unquote, urlparse)
+
+logger = logging.getLogger(__name__)
+
+#
+# Requirement parsing code as per PEP 508
+#
+
+IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
+VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
+COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
+MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
+OR = re.compile(r'^or\b\s*')
+AND = re.compile(r'^and\b\s*')
+NON_SPACE = re.compile(r'(\S+)\s*')
+STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
+
+
+def parse_marker(marker_string):
+    """
+    Parse a marker string and return an (expression, remaining) tuple, where
+    remaining is any unparsed trailing input and the expression is a dictionary
+    (or a plain string for a bare variable or literal).
+
+    The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
+    the expression grammar, or strings. A string contained in quotes is to be
+    interpreted as a literal string, and a string not contained in quotes is a
+    variable (such as os_name).
+    """
+    def marker_var(remaining):
+        # either identifier, or literal string
+        m = IDENTIFIER.match(remaining)
+        if m:
+            result = m.groups()[0]
+            remaining = remaining[m.end():]
+        elif not remaining:
+            raise SyntaxError('unexpected end of input')
+        else:
+            q = remaining[0]
+            if q not in '\'"':
+                raise SyntaxError('invalid expression: %s' % remaining)
+            oq = '\'"'.replace(q, '')
+            remaining = remaining[1:]
+            parts = [q]
+            while remaining:
+                # either a string chunk, or oq, or q to terminate
+                if remaining[0] == q:
+                    break
+                elif remaining[0] == oq:
+                    parts.append(oq)
+                    remaining = remaining[1:]
+                else:
+                    m = STRING_CHUNK.match(remaining)
+                    if not m:
+                        raise SyntaxError('error in string literal: %s' % remaining)
+                    parts.append(m.groups()[0])
+                    remaining = remaining[m.end():]
+            else:
+                s = ''.join(parts)
+                raise SyntaxError('unterminated string: %s' % s)
+            parts.append(q)
+            result = ''.join(parts)
+            remaining = remaining[1:].lstrip() # skip past closing quote
+        return result, remaining
+
+    def marker_expr(remaining):
+        if remaining and remaining[0] == '(':
+            result, remaining = marker(remaining[1:].lstrip())
+            if remaining[0] != ')':
+                raise SyntaxError('unterminated parenthesis: %s' % remaining)
+            remaining = remaining[1:].lstrip()
+        else:
+            lhs, remaining = marker_var(remaining)
+            while remaining:
+                m = MARKER_OP.match(remaining)
+                if not m:
+                    break
+                op = m.groups()[0]
+                remaining = remaining[m.end():]
+                rhs, remaining = marker_var(remaining)
+                lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
+            result = lhs
+        return result, remaining
+
+    def marker_and(remaining):
+        lhs, remaining = marker_expr(remaining)
+        while remaining:
+            m = AND.match(remaining)
+            if not m:
+                break
+            remaining = remaining[m.end():]
+            rhs, remaining = marker_expr(remaining)
+            lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
+        return lhs, remaining
+
+    def marker(remaining):
+        lhs, remaining = marker_and(remaining)
+        while remaining:
+            m = OR.match(remaining)
+            if not m:
+                break
+            remaining = remaining[m.end():]
+            rhs, remaining = marker_and(remaining)
+            lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
+        return lhs, remaining
+
+    return marker(marker_string)
+
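+# Example (illustrative): parse_marker returns the parsed expression plus any
+# unconsumed input, e.g.
+#
+#   parse_marker('python_version >= "3.6" and os_name == "posix"')
+#   # -> ({'op': 'and',
+#   #      'lhs': {'op': '>=', 'lhs': 'python_version', 'rhs': '"3.6"'},
+#   #      'rhs': {'op': '==', 'lhs': 'os_name', 'rhs': '"posix"'}}, '')
+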
+
+def parse_requirement(req):
+    """
+    Parse a requirement passed in as a string. Return a Container
+    whose attributes contain the various parts of the requirement.
+    """
+    remaining = req.strip()
+    if not remaining or remaining.startswith('#'):
+        return None
+    m = IDENTIFIER.match(remaining)
+    if not m:
+        raise SyntaxError('name expected: %s' % remaining)
+    distname = m.groups()[0]
+    remaining = remaining[m.end():]
+    extras = mark_expr = versions = uri = None
+    if remaining and remaining[0] == '[':
+        i = remaining.find(']', 1)
+        if i < 0:
+            raise SyntaxError('unterminated extra: %s' % remaining)
+        s = remaining[1:i]
+        remaining = remaining[i + 1:].lstrip()
+        extras = []
+        while s:
+            m = IDENTIFIER.match(s)
+            if not m:
+                raise SyntaxError('malformed extra: %s' % s)
+            extras.append(m.groups()[0])
+            s = s[m.end():]
+            if not s:
+                break
+            if s[0] != ',':
+                raise SyntaxError('comma expected in extras: %s' % s)
+            s = s[1:].lstrip()
+        if not extras:
+            extras = None
+    if remaining:
+        if remaining[0] == '@':
+            # it's a URI
+            remaining = remaining[1:].lstrip()
+            m = NON_SPACE.match(remaining)
+            if not m:
+                raise SyntaxError('invalid URI: %s' % remaining)
+            uri = m.groups()[0]
+            t = urlparse(uri)
+            # there are issues with Python and URL parsing, so this test
+            # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
+            # always parse invalid URLs correctly - it should raise
+            # exceptions for malformed URLs
+            if not (t.scheme and t.netloc):
+                raise SyntaxError('Invalid URL: %s' % uri)
+            remaining = remaining[m.end():].lstrip()
+        else:
+
+            def get_versions(ver_remaining):
+                """
+                Return a list of operator, version tuples if any are
+                specified, else None.
+                """
+                m = COMPARE_OP.match(ver_remaining)
+                versions = None
+                if m:
+                    versions = []
+                    while True:
+                        op = m.groups()[0]
+                        ver_remaining = ver_remaining[m.end():]
+                        m = VERSION_IDENTIFIER.match(ver_remaining)
+                        if not m:
+                            raise SyntaxError('invalid version: %s' % ver_remaining)
+                        v = m.groups()[0]
+                        versions.append((op, v))
+                        ver_remaining = ver_remaining[m.end():]
+                        if not ver_remaining or ver_remaining[0] != ',':
+                            break
+                        ver_remaining = ver_remaining[1:].lstrip()
+                        m = COMPARE_OP.match(ver_remaining)
+                        if not m:
+                            raise SyntaxError('invalid constraint: %s' % ver_remaining)
+                    if not versions:
+                        versions = None
+                return versions, ver_remaining
+
+            if remaining[0] != '(':
+                versions, remaining = get_versions(remaining)
+            else:
+                i = remaining.find(')', 1)
+                if i < 0:
+                    raise SyntaxError('unterminated parenthesis: %s' % remaining)
+                s = remaining[1:i]
+                remaining = remaining[i + 1:].lstrip()
+                # As a special diversion from PEP 508, allow a version number
+                # a.b.c in parentheses as a synonym for ~= a.b.c (because this
+                # is allowed in earlier PEPs)
+                if COMPARE_OP.match(s):
+                    versions, _ = get_versions(s)
+                else:
+                    m = VERSION_IDENTIFIER.match(s)
+                    if not m:
+                        raise SyntaxError('invalid constraint: %s' % s)
+                    v = m.groups()[0]
+                    s = s[m.end():].lstrip()
+                    if s:
+                        raise SyntaxError('invalid constraint: %s' % s)
+                    versions = [('~=', v)]
+
+    if remaining:
+        if remaining[0] != ';':
+            raise SyntaxError('invalid requirement: %s' % remaining)
+        remaining = remaining[1:].lstrip()
+
+        mark_expr, remaining = parse_marker(remaining)
+
+    if remaining and remaining[0] != '#':
+        raise SyntaxError('unexpected trailing data: %s' % remaining)
+
+    if not versions:
+        rs = distname
+    else:
+        rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
+    return Container(name=distname, extras=extras, constraints=versions,
+                     marker=mark_expr, url=uri, requirement=rs)
+
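+# Example (illustrative):
+#
+#   r = parse_requirement('requests[security] (>= 2.8.1); python_version < "3"')
+#   # r.name        -> 'requests'
+#   # r.extras      -> ['security']
+#   # r.constraints -> [('>=', '2.8.1')]
+#   # r.marker      -> {'op': '<', 'lhs': 'python_version', 'rhs': '"3"'}
+#   # r.requirement -> 'requests >= 2.8.1'
+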
+
+def get_resources_dests(resources_root, rules):
+    """Find destinations for resources files"""
+
+    def get_rel_path(root, path):
+        # normalizes and returns a lstripped-/-separated path
+        root = root.replace(os.path.sep, '/')
+        path = path.replace(os.path.sep, '/')
+        assert path.startswith(root)
+        return path[len(root):].lstrip('/')
+
+    destinations = {}
+    for base, suffix, dest in rules:
+        prefix = os.path.join(resources_root, base)
+        for abs_base in iglob(prefix):
+            abs_glob = os.path.join(abs_base, suffix)
+            for abs_path in iglob(abs_glob):
+                resource_file = get_rel_path(resources_root, abs_path)
+                if dest is None:  # remove the entry if it was here
+                    destinations.pop(resource_file, None)
+                else:
+                    rel_path = get_rel_path(abs_base, abs_path)
+                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
+                    destinations[resource_file] = rel_dest + '/' + rel_path
+    return destinations
+
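+# Example (illustrative): with a layout resources_root/doc/a.txt, the rule
+# ('doc', '*.txt', 'help') maps 'doc/a.txt' to 'help/a.txt':
+#
+#   get_resources_dests('/path/to/resources_root', [('doc', '*.txt', 'help')])
+#   # -> {'doc/a.txt': 'help/a.txt'}
+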
+
+def in_venv():
+    if hasattr(sys, 'real_prefix'):
+        # virtualenv venvs
+        result = True
+    else:
+        # PEP 405 venvs
+        result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
+    return result
+
+
+def get_executable():
+# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
+# changes to the stub launcher mean that sys.executable always points
+# to the stub on OS X
+#    if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
+#                                     in os.environ):
+#        result =  os.environ['__PYVENV_LAUNCHER__']
+#    else:
+#        result = sys.executable
+#    return result
+    result = os.path.normcase(sys.executable)
+    if not isinstance(result, text_type):
+        result = fsdecode(result)
+    return result
+
+
+def proceed(prompt, allowed_chars, error_prompt=None, default=None):
+    p = prompt
+    while True:
+        s = raw_input(p)
+        p = prompt
+        if not s and default:
+            s = default
+        if s:
+            c = s[0].lower()
+            if c in allowed_chars:
+                break
+            if error_prompt:
+                p = '%c: %s\n%s' % (c, error_prompt, prompt)
+    return c
+
+
+def extract_by_key(d, keys):
+    if isinstance(keys, string_types):
+        keys = keys.split()
+    result = {}
+    for key in keys:
+        if key in d:
+            result[key] = d[key]
+    return result
+
+def read_exports(stream):
+    if sys.version_info[0] >= 3:
+        # needs to be a text stream
+        stream = codecs.getreader('utf-8')(stream)
+    # Try to load as JSON, falling back on legacy format
+    data = stream.read()
+    stream = StringIO(data)
+    try:
+        jdata = json.load(stream)
+        result = jdata['extensions']['python.exports']['exports']
+        for group, entries in result.items():
+            for k, v in entries.items():
+                s = '%s = %s' % (k, v)
+                entry = get_export_entry(s)
+                assert entry is not None
+                entries[k] = entry
+        return result
+    except Exception:
+        stream.seek(0, 0)
+
+    def read_stream(cp, stream):
+        if hasattr(cp, 'read_file'):
+            cp.read_file(stream)
+        else:
+            cp.readfp(stream)
+
+    cp = configparser.ConfigParser()
+    try:
+        read_stream(cp, stream)
+    except configparser.MissingSectionHeaderError:
+        stream.close()
+        data = textwrap.dedent(data)
+        stream = StringIO(data)
+        read_stream(cp, stream)
+
+    result = {}
+    for key in cp.sections():
+        result[key] = entries = {}
+        for name, value in cp.items(key):
+            s = '%s = %s' % (name, value)
+            entry = get_export_entry(s)
+            assert entry is not None
+            #entry.dist = self
+            entries[name] = entry
+    return result
+
+
+def write_exports(exports, stream):
+    if sys.version_info[0] >= 3:
+        # needs to be a text stream
+        stream = codecs.getwriter('utf-8')(stream)
+    cp = configparser.ConfigParser()
+    for k, v in exports.items():
+        # TODO check k, v for valid values
+        cp.add_section(k)
+        for entry in v.values():
+            if entry.suffix is None:
+                s = entry.prefix
+            else:
+                s = '%s:%s' % (entry.prefix, entry.suffix)
+            if entry.flags:
+                s = '%s [%s]' % (s, ', '.join(entry.flags))
+            cp.set(k, entry.name, s)
+    cp.write(stream)
+
+
+@contextlib.contextmanager
+def tempdir():
+    td = tempfile.mkdtemp()
+    try:
+        yield td
+    finally:
+        shutil.rmtree(td)
+
+@contextlib.contextmanager
+def chdir(d):
+    cwd = os.getcwd()
+    try:
+        os.chdir(d)
+        yield
+    finally:
+        os.chdir(cwd)
+
+
+@contextlib.contextmanager
+def socket_timeout(seconds=15):
+    cto = socket.getdefaulttimeout()
+    try:
+        socket.setdefaulttimeout(seconds)
+        yield
+    finally:
+        socket.setdefaulttimeout(cto)
+
+
+class cached_property(object):
+    def __init__(self, func):
+        self.func = func
+        #for attr in ('__name__', '__module__', '__doc__'):
+        #    setattr(self, attr, getattr(func, attr, None))
+
+    def __get__(self, obj, cls=None):
+        if obj is None:
+            return self
+        value = self.func(obj)
+        object.__setattr__(obj, self.func.__name__, value)
+        #obj.__dict__[self.func.__name__] = value = self.func(obj)
+        return value
+
+def convert_path(pathname):
+    """Return 'pathname' as a name that will work on the native filesystem.
+
+    The path is split on '/' and put back together again using the current
+    directory separator.  Needed because filenames in the setup script are
+    always supplied in Unix style, and have to be converted to the local
+    convention before we can actually use them in the filesystem.  Raises
+    ValueError on non-Unix-ish systems if 'pathname' either starts or
+    ends with a slash.
+    """
+    if os.sep == '/':
+        return pathname
+    if not pathname:
+        return pathname
+    if pathname[0] == '/':
+        raise ValueError("path '%s' cannot be absolute" % pathname)
+    if pathname[-1] == '/':
+        raise ValueError("path '%s' cannot end with '/'" % pathname)
+
+    paths = pathname.split('/')
+    while os.curdir in paths:
+        paths.remove(os.curdir)
+    if not paths:
+        return os.curdir
+    return os.path.join(*paths)
+
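+# Example (illustrative): on Windows, convert_path('pkg/data/cfg.ini') returns
+# 'pkg\\data\\cfg.ini'; on POSIX the path is returned unchanged.
+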
+
+class FileOperator(object):
+    def __init__(self, dry_run=False):
+        self.dry_run = dry_run
+        self.ensured = set()
+        self._init_record()
+
+    def _init_record(self):
+        self.record = False
+        self.files_written = set()
+        self.dirs_created = set()
+
+    def record_as_written(self, path):
+        if self.record:
+            self.files_written.add(path)
+
+    def newer(self, source, target):
+        """Tell if the target is newer than the source.
+
+        Returns true if 'source' exists and is more recently modified than
+        'target', or if 'source' exists and 'target' doesn't.
+
+        Returns false if both exist and 'target' is the same age or younger
+        than 'source'. Raises DistlibException if 'source' does not exist.
+
+        Note that this test is not very accurate: files created in the same
+        second will have the same "age".
+        """
+        if not os.path.exists(source):
+            raise DistlibException("file '%r' does not exist" %
+                                   os.path.abspath(source))
+        if not os.path.exists(target):
+            return True
+
+        return os.stat(source).st_mtime > os.stat(target).st_mtime
+
+    def copy_file(self, infile, outfile, check=True):
+        """Copy a file respecting dry-run and force flags.
+        """
+        self.ensure_dir(os.path.dirname(outfile))
+        logger.info('Copying %s to %s', infile, outfile)
+        if not self.dry_run:
+            msg = None
+            if check:
+                if os.path.islink(outfile):
+                    msg = '%s is a symlink' % outfile
+                elif os.path.exists(outfile) and not os.path.isfile(outfile):
+                    msg = '%s is a non-regular file' % outfile
+            if msg:
+                raise ValueError(msg + ' which would be overwritten')
+            shutil.copyfile(infile, outfile)
+        self.record_as_written(outfile)
+
+    def copy_stream(self, instream, outfile, encoding=None):
+        assert not os.path.isdir(outfile)
+        self.ensure_dir(os.path.dirname(outfile))
+        logger.info('Copying stream %s to %s', instream, outfile)
+        if not self.dry_run:
+            if encoding is None:
+                outstream = open(outfile, 'wb')
+            else:
+                outstream = codecs.open(outfile, 'w', encoding=encoding)
+            try:
+                shutil.copyfileobj(instream, outstream)
+            finally:
+                outstream.close()
+        self.record_as_written(outfile)
+
+    def write_binary_file(self, path, data):
+        self.ensure_dir(os.path.dirname(path))
+        if not self.dry_run:
+            if os.path.exists(path):
+                os.remove(path)
+            with open(path, 'wb') as f:
+                f.write(data)
+        self.record_as_written(path)
+
+    def write_text_file(self, path, data, encoding):
+        self.write_binary_file(path, data.encode(encoding))
+
+    def set_mode(self, bits, mask, files):
+        if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
+            # Set the executable bits (owner, group, and world) on
+            # all the files specified.
+            for f in files:
+                if self.dry_run:
+                    logger.info("changing mode of %s", f)
+                else:
+                    mode = (os.stat(f).st_mode | bits) & mask
+                    logger.info("changing mode of %s to %o", f, mode)
+                    os.chmod(f, mode)
+
+    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
+
+    def ensure_dir(self, path):
+        path = os.path.abspath(path)
+        if path not in self.ensured and not os.path.exists(path):
+            self.ensured.add(path)
+            d, f = os.path.split(path)
+            self.ensure_dir(d)
+            logger.info('Creating %s', path)
+            if not self.dry_run:
+                os.mkdir(path)
+            if self.record:
+                self.dirs_created.add(path)
+
+    def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
+        dpath = cache_from_source(path, not optimize)
+        logger.info('Byte-compiling %s to %s', path, dpath)
+        if not self.dry_run:
+            if force or self.newer(path, dpath):
+                if not prefix:
+                    diagpath = None
+                else:
+                    assert path.startswith(prefix)
+                    diagpath = path[len(prefix):]
+                compile_kwargs = {}
+                if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
+                    compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
+                # compile only when needed; diagpath is unbound outside this
+                # branch, so calling py_compile here avoids a NameError when
+                # the cached file is already up to date
+                py_compile.compile(path, dpath, diagpath, True, **compile_kwargs)     # raise error
+        self.record_as_written(dpath)
+        return dpath
+
+    def ensure_removed(self, path):
+        if os.path.exists(path):
+            if os.path.isdir(path) and not os.path.islink(path):
+                logger.debug('Removing directory tree at %s', path)
+                if not self.dry_run:
+                    shutil.rmtree(path)
+                if self.record:
+                    if path in self.dirs_created:
+                        self.dirs_created.remove(path)
+            else:
+                if os.path.islink(path):
+                    s = 'link'
+                else:
+                    s = 'file'
+                logger.debug('Removing %s %s', s, path)
+                if not self.dry_run:
+                    os.remove(path)
+                if self.record:
+                    if path in self.files_written:
+                        self.files_written.remove(path)
+
+    def is_writable(self, path):
+        result = False
+        while not result:
+            if os.path.exists(path):
+                result = os.access(path, os.W_OK)
+                break
+            parent = os.path.dirname(path)
+            if parent == path:
+                break
+            path = parent
+        return result
+
+    def commit(self):
+        """
+        Commit recorded changes, turn off recording, return
+        changes.
+        """
+        assert self.record
+        result = self.files_written, self.dirs_created
+        self._init_record()
+        return result
+
+    def rollback(self):
+        if not self.dry_run:
+            for f in list(self.files_written):
+                if os.path.exists(f):
+                    os.remove(f)
+            # dirs should all be empty now, except perhaps for
+            # __pycache__ subdirs
+            # reverse so that subdirs appear before their parents
+            dirs = sorted(self.dirs_created, reverse=True)
+            for d in dirs:
+                flist = os.listdir(d)
+                if flist:
+                    assert flist == ['__pycache__']
+                    sd = os.path.join(d, flist[0])
+                    os.rmdir(sd)
+                os.rmdir(d)     # should fail if non-empty
+        self._init_record()
+
+def resolve(module_name, dotted_path):
+    if module_name in sys.modules:
+        mod = sys.modules[module_name]
+    else:
+        mod = __import__(module_name)
+    if dotted_path is None:
+        result = mod
+    else:
+        parts = dotted_path.split('.')
+        result = getattr(mod, parts.pop(0))
+        for p in parts:
+            result = getattr(result, p)
+    return result
+
+
+class ExportEntry(object):
+    def __init__(self, name, prefix, suffix, flags):
+        self.name = name
+        self.prefix = prefix
+        self.suffix = suffix
+        self.flags = flags
+
+    @cached_property
+    def value(self):
+        return resolve(self.prefix, self.suffix)
+
+    def __repr__(self):  # pragma: no cover
+        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
+                                                self.suffix, self.flags)
+
+    def __eq__(self, other):
+        if not isinstance(other, ExportEntry):
+            result = False
+        else:
+            result = (self.name == other.name and
+                      self.prefix == other.prefix and
+                      self.suffix == other.suffix and
+                      self.flags == other.flags)
+        return result
+
+    __hash__ = object.__hash__
+
+
+ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
+                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
+                      \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
+                      ''', re.VERBOSE)
+
+def get_export_entry(specification):
+    m = ENTRY_RE.search(specification)
+    if not m:
+        result = None
+        if '[' in specification or ']' in specification:
+            raise DistlibException("Invalid specification "
+                                   "'%s'" % specification)
+    else:
+        d = m.groupdict()
+        name = d['name']
+        path = d['callable']
+        colons = path.count(':')
+        if colons == 0:
+            prefix, suffix = path, None
+        else:
+            if colons != 1:
+                raise DistlibException("Invalid specification "
+                                       "'%s'" % specification)
+            prefix, suffix = path.split(':')
+        flags = d['flags']
+        if flags is None:
+            if '[' in specification or ']' in specification:
+                raise DistlibException("Invalid specification "
+                                       "'%s'" % specification)
+            flags = []
+        else:
+            flags = [f.strip() for f in flags.split(',')]
+        result = ExportEntry(name, prefix, suffix, flags)
+    return result
+
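+# Example (illustrative):
+#
+#   e = get_export_entry('foo = foo.cli:main [flag1, flag2]')
+#   # e.name   -> 'foo'
+#   # e.prefix -> 'foo.cli'
+#   # e.suffix -> 'main'
+#   # e.flags  -> ['flag1', 'flag2']
+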
+
+def get_cache_base(suffix=None):
+    """
+    Return the default base location for distlib caches. If the directory does
+    not exist, it is created. Use the suffix provided for the base directory,
+    and default to '.distlib' if it isn't provided.
+
+    On Windows, if LOCALAPPDATA is defined in the environment, then it is
+    assumed to be a directory, and will be the parent directory of the result.
+    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
+    directory - using os.path.expanduser('~') - will be the parent directory of
+    the result.
+
+    The result is just the directory '.distlib' in the parent directory as
+    determined above, or with the name specified with ``suffix``.
+    """
+    if suffix is None:
+        suffix = '.distlib'
+    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
+        result = os.path.expandvars('$localappdata')
+    else:
+        # Assume posix, or old Windows
+        result = os.path.expanduser('~')
+    # we use 'isdir' instead of 'exists', because we want to
+    # fail if there's a file with that name
+    if os.path.isdir(result):
+        usable = os.access(result, os.W_OK)
+        if not usable:
+            logger.warning('Directory exists but is not writable: %s', result)
+    else:
+        try:
+            os.makedirs(result)
+            usable = True
+        except OSError:
+            logger.warning('Unable to create %s', result, exc_info=True)
+            usable = False
+    if not usable:
+        result = tempfile.mkdtemp()
+        logger.warning('Default location unusable, using %s', result)
+    return os.path.join(result, suffix)
+
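+# Example (illustrative): on POSIX this typically resolves to ~/.distlib, e.g.
+#
+#   get_cache_base()             # -> '/home/user/.distlib'
+#   get_cache_base('.mycache')   # -> '/home/user/.mycache'
+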
+
+def path_to_cache_dir(path):
+    """
+    Convert an absolute path to a directory name for use in a cache.
+
+    The algorithm used is:
+
+    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
+    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
+    #. ``'.cache'`` is appended.
+    """
+    d, p = os.path.splitdrive(os.path.abspath(path))
+    if d:
+        d = d.replace(':', '---')
+    p = p.replace(os.sep, '--')
+    return d + p + '.cache'
+
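+# Example (illustrative): on POSIX,
+#
+#   path_to_cache_dir('/home/user/lib/foo.so')
+#   # -> '--home--user--lib--foo.so.cache'
+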
+
+def ensure_slash(s):
+    if not s.endswith('/'):
+        return s + '/'
+    return s
+
+
+def parse_credentials(netloc):
+    username = password = None
+    if '@' in netloc:
+        prefix, netloc = netloc.split('@', 1)
+        if ':' not in prefix:
+            username = prefix
+        else:
+            username, password = prefix.split(':', 1)
+    return username, password, netloc
+
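+# Example (illustrative):
+#
+#   parse_credentials('user:secret@pypi.example.com')
+#   # -> ('user', 'secret', 'pypi.example.com')
+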
+
+def get_process_umask():
+    result = os.umask(0o22)
+    os.umask(result)
+    return result
+
+def is_string_sequence(seq):
+    result = True
+    i = None
+    for i, s in enumerate(seq):
+        if not isinstance(s, string_types):
+            result = False
+            break
+    assert i is not None
+    return result
+
+PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
+                                      '([a-z0-9_.+-]+)', re.I)
+PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
+
+
+def split_filename(filename, project_name=None):
+    """
+    Extract name, version, python version from a filename (no extension)
+
+    Return name, version, pyver or None
+    """
+    result = None
+    pyver = None
+    filename = unquote(filename).replace(' ', '-')
+    m = PYTHON_VERSION.search(filename)
+    if m:
+        pyver = m.group(1)
+        filename = filename[:m.start()]
+    if project_name and len(filename) > len(project_name) + 1:
+        m = re.match(re.escape(project_name) + r'\b', filename)
+        if m:
+            n = m.end()
+            result = filename[:n], filename[n + 1:], pyver
+    if result is None:
+        m = PROJECT_NAME_AND_VERSION.match(filename)
+        if m:
+            result = m.group(1), m.group(3), pyver
+    return result
+
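+# Example (illustrative):
+#
+#   split_filename('Django-1.9.4-py2')   # -> ('Django', '1.9.4', '2')
+#   split_filename('nonsense')           # -> None
+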
+# Allow spaces in name because of legacy dists like "Twisted Core"
+NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
+                             r'\(\s*(?P<ver>[^\s)]+)\)$')
+
+def parse_name_and_version(p):
+    """
+    A utility method used to get name and version from a string.
+
+    From e.g. a Provides-Dist value.
+
+    :param p: A value in a form 'foo (1.0)'
+    :return: The name and version as a tuple.
+    """
+    m = NAME_VERSION_RE.match(p)
+    if not m:
+        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
+    d = m.groupdict()
+    return d['name'].strip().lower(), d['ver']
+
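+# Example (illustrative):
+#
+#   parse_name_and_version('Twisted Core (15.0.0)')
+#   # -> ('twisted core', '15.0.0')
+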
+def get_extras(requested, available):
+    result = set()
+    requested = set(requested or [])
+    available = set(available or [])
+    if '*' in requested:
+        requested.remove('*')
+        result |= available
+    for r in requested:
+        if r == '-':
+            result.add(r)
+        elif r.startswith('-'):
+            unwanted = r[1:]
+            if unwanted not in available:
+                logger.warning('undeclared extra: %s', unwanted)
+            if unwanted in result:
+                result.remove(unwanted)
+        else:
+            if r not in available:
+                logger.warning('undeclared extra: %s', r)
+            result.add(r)
+    return result
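+
+# Example (illustrative): '*' pulls in every available extra, and a leading
+# '-' removes one again, e.g.
+#
+#   get_extras(['*', '-tests'], ['ssl', 'tests'])   # -> {'ssl'}
+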
+#
+# Extended metadata functionality
+#
+
+def _get_external_data(url):
+    result = {}
+    try:
+        # urlopen might fail if it runs into redirections,
+        # because of Python issue #13696. Fixed in locators
+        # using a custom redirect handler.
+        resp = urlopen(url)
+        headers = resp.info()
+        ct = headers.get('Content-Type')
+        if not ct.startswith('application/json'):
+            logger.debug('Unexpected response for JSON request: %s', ct)
+        else:
+            reader = codecs.getreader('utf-8')(resp)
+            #data = reader.read().decode('utf-8')
+            #result = json.loads(data)
+            result = json.load(reader)
+    except Exception as e:
+        logger.exception('Failed to get external data for %s: %s', url, e)
+    return result
+
+_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
+
+def get_project_data(name):
+    url = '%s/%s/project.json' % (name[0].upper(), name)
+    url = urljoin(_external_data_base_url, url)
+    result = _get_external_data(url)
+    return result
+
+def get_package_data(name, version):
+    url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
+    url = urljoin(_external_data_base_url, url)
+    return _get_external_data(url)
+
+
+class Cache(object):
+    """
+    A class implementing a cache for resources that need to live in the file
+    system, e.g. shared libraries. This class was moved here from resources
+    because it could be used by other modules, e.g. the wheel module.
+    """
+
+    def __init__(self, base):
+        """
+        Initialise an instance.
+
+        :param base: The base directory where the cache should be located.
+        """
+        # we use 'isdir' instead of 'exists', because we want to
+        # fail if there's a file with that name
+        if not os.path.isdir(base):  # pragma: no cover
+            os.makedirs(base)
+        if (os.stat(base).st_mode & 0o77) != 0:
+            logger.warning('Directory \'%s\' is not private', base)
+        self.base = os.path.abspath(os.path.normpath(base))
+
+    def prefix_to_dir(self, prefix):
+        """
+        Converts a resource prefix to a directory name in the cache.
+        """
+        return path_to_cache_dir(prefix)
+
+    def clear(self):
+        """
+        Clear the cache.
+        """
+        not_removed = []
+        for fn in os.listdir(self.base):
+            fn = os.path.join(self.base, fn)
+            try:
+                if os.path.islink(fn) or os.path.isfile(fn):
+                    os.remove(fn)
+                elif os.path.isdir(fn):
+                    shutil.rmtree(fn)
+            except Exception:
+                not_removed.append(fn)
+        return not_removed
+
+
+class EventMixin(object):
+    """
+    A very simple publish/subscribe system.
+    """
+    def __init__(self):
+        self._subscribers = {}
+
+    def add(self, event, subscriber, append=True):
+        """
+        Add a subscriber for an event.
+
+        :param event: The name of an event.
+        :param subscriber: The subscriber to be added (and called when the
+                           event is published).
+        :param append: Whether to append or prepend the subscriber to an
+                       existing subscriber list for the event.
+        """
+        subs = self._subscribers
+        if event not in subs:
+            subs[event] = deque([subscriber])
+        else:
+            sq = subs[event]
+            if append:
+                sq.append(subscriber)
+            else:
+                sq.appendleft(subscriber)
+
+    def remove(self, event, subscriber):
+        """
+        Remove a subscriber for an event.
+
+        :param event: The name of an event.
+        :param subscriber: The subscriber to be removed.
+        """
+        subs = self._subscribers
+        if event not in subs:
+            raise ValueError('No subscribers: %r' % event)
+        subs[event].remove(subscriber)
+
+    def get_subscribers(self, event):
+        """
+        Return an iterator for the subscribers for an event.
+        :param event: The event to return subscribers for.
+        """
+        return iter(self._subscribers.get(event, ()))
+
+    def publish(self, event, *args, **kwargs):
+        """
+        Publish an event and return a list of values returned by its
+        subscribers.
+
+        :param event: The event to publish.
+        :param args: The positional arguments to pass to the event's
+                     subscribers.
+        :param kwargs: The keyword arguments to pass to the event's
+                       subscribers.
+        """
+        result = []
+        for subscriber in self.get_subscribers(event):
+            try:
+                value = subscriber(event, *args, **kwargs)
+            except Exception:
+                logger.exception('Exception during event publication')
+                value = None
+            result.append(value)
+        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
+                     event, args, kwargs, result)
+        return result
+
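+# Example (illustrative): a minimal sketch of the publish/subscribe flow,
+# assuming a subclass that inherits EventMixin.__init__:
+#
+#   class Publisher(EventMixin):
+#       pass
+#
+#   p = Publisher()
+#   p.add('done', lambda event, **kwargs: kwargs.get('rc'))
+#   p.publish('done', rc=0)   # -> [0]
+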
+#
+# Simple sequencing
+#
+class Sequencer(object):
+    def __init__(self):
+        self._preds = {}
+        self._succs = {}
+        self._nodes = set()     # nodes with no preds/succs
+
+    def add_node(self, node):
+        self._nodes.add(node)
+
+    def remove_node(self, node, edges=False):
+        if node in self._nodes:
+            self._nodes.remove(node)
+        if edges:
+            for p in set(self._preds.get(node, ())):
+                self.remove(p, node)
+            for s in set(self._succs.get(node, ())):
+                self.remove(node, s)
+            # Remove empties
+            for k, v in list(self._preds.items()):
+                if not v:
+                    del self._preds[k]
+            for k, v in list(self._succs.items()):
+                if not v:
+                    del self._succs[k]
+
+    def add(self, pred, succ):
+        assert pred != succ
+        self._preds.setdefault(succ, set()).add(pred)
+        self._succs.setdefault(pred, set()).add(succ)
+
+    def remove(self, pred, succ):
+        assert pred != succ
+        try:
+            preds = self._preds[succ]
+            succs = self._succs[pred]
+        except KeyError:  # pragma: no cover
+            raise ValueError('%r not a successor of anything' % succ)
+        try:
+            preds.remove(pred)
+            succs.remove(succ)
+        except KeyError:  # pragma: no cover
+            raise ValueError('%r not a successor of %r' % (succ, pred))
+
+    def is_step(self, step):
+        return (step in self._preds or step in self._succs or
+                step in self._nodes)
+
+    def get_steps(self, final):
+        if not self.is_step(final):
+            raise ValueError('Unknown: %r' % final)
+        result = []
+        todo = []
+        seen = set()
+        todo.append(final)
+        while todo:
+            step = todo.pop(0)
+            if step in seen:
+                # if a step was already seen,
+                # move it to the end (so it will appear earlier
+                # when reversed on return) ... but not for the
+                # final step, as that would be confusing for
+                # users
+                if step != final:
+                    result.remove(step)
+                    result.append(step)
+            else:
+                seen.add(step)
+                result.append(step)
+                preds = self._preds.get(step, ())
+                todo.extend(preds)
+        return reversed(result)
+
+    @property
+    def strong_connections(self):
+        # http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
+        index_counter = [0]
+        stack = []
+        lowlinks = {}
+        index = {}
+        result = []
+
+        graph = self._succs
+
+        def strongconnect(node):
+            # set the depth index for this node to the smallest unused index
+            index[node] = index_counter[0]
+            lowlinks[node] = index_counter[0]
+            index_counter[0] += 1
+            stack.append(node)
+
+            # Consider successors
+            try:
+                successors = graph[node]
+            except Exception:
+                successors = []
+            for successor in successors:
+                if successor not in lowlinks:
+                    # Successor has not yet been visited
+                    strongconnect(successor)
+                    lowlinks[node] = min(lowlinks[node],lowlinks[successor])
+                elif successor in stack:
+                    # the successor is in the stack and hence in the current
+                    # strongly connected component (SCC)
+                    lowlinks[node] = min(lowlinks[node],index[successor])
+
+            # If `node` is a root node, pop the stack and generate an SCC
+            if lowlinks[node] == index[node]:
+                connected_component = []
+
+                while True:
+                    successor = stack.pop()
+                    connected_component.append(successor)
+                    if successor == node: break
+                component = tuple(connected_component)
+                # storing the result
+                result.append(component)
+
+        for node in graph:
+            if node not in lowlinks:
+                strongconnect(node)
+
+        return result
+
+    @property
+    def dot(self):
+        result = ['digraph G {']
+        for succ in self._preds:
+            preds = self._preds[succ]
+            for pred in preds:
+                result.append('  %s -> %s;' % (pred, succ))
+        for node in self._nodes:
+            result.append('  %s;' % node)
+        result.append('}')
+        return '\n'.join(result)
+
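+# Example (illustrative): edges express "pred must run before succ", e.g.
+#
+#   seq = Sequencer()
+#   seq.add('build', 'test')
+#   seq.add('test', 'release')
+#   list(seq.get_steps('release'))   # -> ['build', 'test', 'release']
+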
+#
+# Unarchiving functionality for zip, tar, tgz, tbz, whl
+#
+
+ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
+                      '.tgz', '.tbz', '.whl')
+
+def unarchive(archive_filename, dest_dir, format=None, check=True):
+
+    def check_path(path):
+        if not isinstance(path, text_type):
+            path = path.decode('utf-8')
+        p = os.path.abspath(os.path.join(dest_dir, path))
+        # the len() guard avoids an IndexError when a member resolves to
+        # dest_dir itself (e.g. a '.' entry)
+        if not p.startswith(dest_dir) or (len(p) > plen and p[plen] != os.sep):
+            raise ValueError('path outside destination: %r' % p)
+
+    dest_dir = os.path.abspath(dest_dir)
+    plen = len(dest_dir)
+    archive = None
+    if format is None:
+        if archive_filename.endswith(('.zip', '.whl')):
+            format = 'zip'
+        elif archive_filename.endswith(('.tar.gz', '.tgz')):
+            format = 'tgz'
+            mode = 'r:gz'
+        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
+            format = 'tbz'
+            mode = 'r:bz2'
+        elif archive_filename.endswith('.tar'):
+            format = 'tar'
+            mode = 'r'
+        else:  # pragma: no cover
+            raise ValueError('Unknown format for %r' % archive_filename)
+    try:
+        if format == 'zip':
+            archive = ZipFile(archive_filename, 'r')
+            if check:
+                names = archive.namelist()
+                for name in names:
+                    check_path(name)
+        else:
+            archive = tarfile.open(archive_filename, mode)
+            if check:
+                names = archive.getnames()
+                for name in names:
+                    check_path(name)
+        if format != 'zip' and sys.version_info[0] < 3:
+            # See Python issue 17153. If the dest path contains Unicode,
+            # tarfile extraction fails on Python 2.x if a member path name
+            # contains non-ASCII characters - it leads to an implicit
+            # bytes -> unicode conversion using ASCII to decode.
+            for tarinfo in archive.getmembers():
+                if not isinstance(tarinfo.name, text_type):
+                    tarinfo.name = tarinfo.name.decode('utf-8')
+        archive.extractall(dest_dir)
+
+    finally:
+        if archive:
+            archive.close()
+
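+# Example (illustrative): the format is inferred from the extension, and
+# member paths are checked so they cannot escape the destination, e.g.
+#
+#   unarchive('dist/pkg-1.0.tar.gz', '/tmp/unpacked')
+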
+
+def zip_dir(directory):
+    """zip a directory tree into a BytesIO object"""
+    result = io.BytesIO()
+    dlen = len(directory)
+    with ZipFile(result, "w") as zf:
+        for root, dirs, files in os.walk(directory):
+            for name in files:
+                full = os.path.join(root, name)
+                rel = root[dlen:]
+                dest = os.path.join(rel, name)
+                zf.write(full, dest)
+    return result
+
+#
+# Simple progress bar
+#
+
+UNITS = ('', 'K', 'M', 'G', 'T', 'P')
+
+
+class Progress(object):
+    unknown = 'UNKNOWN'
+
+    def __init__(self, minval=0, maxval=100):
+        assert maxval is None or maxval >= minval
+        self.min = self.cur = minval
+        self.max = maxval
+        self.started = None
+        self.elapsed = 0
+        self.done = False
+
+    def update(self, curval):
+        assert self.min <= curval
+        assert self.max is None or curval <= self.max
+        self.cur = curval
+        now = time.time()
+        if self.started is None:
+            self.started = now
+        else:
+            self.elapsed = now - self.started
+
+    def increment(self, incr):
+        assert incr >= 0
+        self.update(self.cur + incr)
+
+    def start(self):
+        self.update(self.min)
+        return self
+
+    def stop(self):
+        if self.max is not None:
+            self.update(self.max)
+        self.done = True
+
+    @property
+    def maximum(self):
+        return self.unknown if self.max is None else self.max
+
+    @property
+    def percentage(self):
+        if self.done:
+            result = '100 %'
+        elif self.max is None:
+            result = ' ?? %'
+        else:
+            v = 100.0 * (self.cur - self.min) / (self.max - self.min)
+            result = '%3d %%' % v
+        return result
+
+    def format_duration(self, duration):
+        if ((duration <= 0) and self.max is None) or self.cur == self.min:
+            result = '??:??:??'
+        #elif duration < 1:
+        #    result = '--:--:--'
+        else:
+            result = time.strftime('%H:%M:%S', time.gmtime(duration))
+        return result
+
+    @property
+    def ETA(self):
+        if self.done:
+            prefix = 'Done'
+            t = self.elapsed
+            #import pdb; pdb.set_trace()
+        else:
+            prefix = 'ETA '
+            if self.max is None:
+                t = -1
+            elif self.elapsed == 0 or (self.cur == self.min):
+                t = 0
+            else:
+                #import pdb; pdb.set_trace()
+                t = float(self.max - self.min)
+                t /= self.cur - self.min
+                t = (t - 1) * self.elapsed
+        return '%s: %s' % (prefix, self.format_duration(t))
+
+    @property
+    def speed(self):
+        if self.elapsed == 0:
+            result = 0.0
+        else:
+            result = (self.cur - self.min) / self.elapsed
+        for unit in UNITS:
+            if result < 1000:
+                break
+            result /= 1000.0
+        return '%d %sB/s' % (result, unit)
+
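+# Example (illustrative):
+#
+#   p = Progress(maxval=200).start()
+#   p.increment(50)
+#   p.percentage   # -> ' 25 %'
+#   p.stop()
+#   p.percentage   # -> '100 %'
+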
+#
+# Glob functionality
+#
+
+RICH_GLOB = re.compile(r'\{([^}]*)\}')
+_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
+_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
+
+
+def iglob(path_glob):
+    """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
+    if _CHECK_RECURSIVE_GLOB.search(path_glob):
+        msg = """invalid glob %r: recursive glob "**" must be used alone"""
+        raise ValueError(msg % path_glob)
+    if _CHECK_MISMATCH_SET.search(path_glob):
+        msg = """invalid glob %r: mismatching set marker '{' or '}'"""
+        raise ValueError(msg % path_glob)
+    return _iglob(path_glob)
+
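+# Example (illustrative): both extensions can be combined, e.g.
+#
+#   list(iglob('src/**/*.py'))         # all .py files under src, recursively
+#   list(iglob('{docs,tests}/*.rst'))  # .rst files in either directory
+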
+
+def _iglob(path_glob):
+    rich_path_glob = RICH_GLOB.split(path_glob, 1)
+    if len(rich_path_glob) > 1:
+        assert len(rich_path_glob) == 3, rich_path_glob
+        prefix, set_group, suffix = rich_path_glob
+        for item in set_group.split(','):
+            for path in _iglob(''.join((prefix, item, suffix))):
+                yield path
+    else:
+        if '**' not in path_glob:
+            for item in std_iglob(path_glob):
+                yield item
+        else:
+            prefix, radical = path_glob.split('**', 1)
+            if prefix == '':
+                prefix = '.'
+            if radical == '':
+                radical = '*'
+            else:
+                # we support both
+                radical = radical.lstrip('/')
+                radical = radical.lstrip('\\')
+            for path, dir, files in os.walk(prefix):
+                path = os.path.normpath(path)
+                for fn in _iglob(os.path.join(path, radical)):
+                    yield fn
+
+if ssl:
+    from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
+                         CertificateError)
+
+
+#
+# HTTPSConnection which verifies certificates/matches domains
+#
+
+    class HTTPSConnection(httplib.HTTPSConnection):
+        ca_certs = None # set this to the path to the certs file (.pem)
+        check_domain = True # only used if ca_certs is not None
+
+        # noinspection PyPropertyAccess
+        def connect(self):
+            sock = socket.create_connection((self.host, self.port), self.timeout)
+            if getattr(self, '_tunnel_host', False):
+                self.sock = sock
+                self._tunnel()
+
+            if not hasattr(ssl, 'SSLContext'):
+                # For 2.x
+                if self.ca_certs:
+                    cert_reqs = ssl.CERT_REQUIRED
+                else:
+                    cert_reqs = ssl.CERT_NONE
+                self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
+                                            cert_reqs=cert_reqs,
+                                            ssl_version=ssl.PROTOCOL_SSLv23,
+                                            ca_certs=self.ca_certs)
+            else:  # pragma: no cover
+                context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+                context.options |= ssl.OP_NO_SSLv2
+                if self.cert_file:
+                    context.load_cert_chain(self.cert_file, self.key_file)
+                kwargs = {}
+                if self.ca_certs:
+                    context.verify_mode = ssl.CERT_REQUIRED
+                    context.load_verify_locations(cafile=self.ca_certs)
+                    if getattr(ssl, 'HAS_SNI', False):
+                        kwargs['server_hostname'] = self.host
+                self.sock = context.wrap_socket(sock, **kwargs)
+            if self.ca_certs and self.check_domain:
+                try:
+                    match_hostname(self.sock.getpeercert(), self.host)
+                    logger.debug('Host verified: %s', self.host)
+                except CertificateError:  # pragma: no cover
+                    self.sock.shutdown(socket.SHUT_RDWR)
+                    self.sock.close()
+                    raise
+
+    class HTTPSHandler(BaseHTTPSHandler):
+        def __init__(self, ca_certs, check_domain=True):
+            BaseHTTPSHandler.__init__(self)
+            self.ca_certs = ca_certs
+            self.check_domain = check_domain
+
+        def _conn_maker(self, *args, **kwargs):
+            """
+            This is called to create a connection instance. Normally you'd
+            pass a connection class to do_open, but it doesn't actually check for
+            a class, and just expects a callable. As long as our callable
+            behaves as a constructor would, we should be OK. If it ever changes so that
+            we *must* pass a class, we'll create an UnsafeHTTPSConnection class
+            which just sets check_domain to False in the class definition, and
+            choose which one to pass to do_open.
+            """
+            result = HTTPSConnection(*args, **kwargs)
+            if self.ca_certs:
+                result.ca_certs = self.ca_certs
+                result.check_domain = self.check_domain
+            return result
+
+        def https_open(self, req):
+            try:
+                return self.do_open(self._conn_maker, req)
+            except URLError as e:
+                if 'certificate verify failed' in str(e.reason):
+                    raise CertificateError('Unable to verify server certificate '
+                                           'for %s' % req.host)
+                else:
+                    raise
+
+    #
+    # To guard against mixing HTTP traffic with HTTPS (examples: a man-in-the-
+    # middle proxy using HTTP listens on port 443, or an index mistakenly serves
+    # HTML containing an http://xyz link when it should be https://xyz),
+    # you can use the following handler class, which does not allow HTTP traffic.
+    #
+    # It works by inheriting from HTTPHandler - so build_opener won't add a
+    # handler for HTTP itself.
+    #
+    class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
+        def http_open(self, req):
+            raise URLError('Unexpected HTTP request on what should be a secure '
+                           'connection: %s' % req)
+
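+    # Usage sketch (illustrative, not part of the original source; build_opener
+    # is the one from urllib2 / urllib.request):
+    #
+    #   opener = build_opener(HTTPSOnlyHandler('/path/to/ca-bundle.pem'))
+    #   opener.open('https://example.com/')  # any http:// URL raises URLError
+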
+#
+# XML-RPC with timeouts
+#
+
+_ver_info = sys.version_info[:2]
+
+if _ver_info == (2, 6):
+    class HTTP(httplib.HTTP):
+        def __init__(self, host='', port=None, **kwargs):
+            if port == 0:   # 0 means use port 0, not the default port
+                port = None
+            self._setup(self._connection_class(host, port, **kwargs))
+
+
+    if ssl:
+        class HTTPS(httplib.HTTPS):
+            def __init__(self, host='', port=None, **kwargs):
+                if port == 0:   # 0 means use port 0, not the default port
+                    port = None
+                self._setup(self._connection_class(host, port, **kwargs))
+
+
+class Transport(xmlrpclib.Transport):
+    def __init__(self, timeout, use_datetime=0):
+        self.timeout = timeout
+        xmlrpclib.Transport.__init__(self, use_datetime)
+
+    def make_connection(self, host):
+        h, eh, x509 = self.get_host_info(host)
+        if _ver_info == (2, 6):
+            result = HTTP(h, timeout=self.timeout)
+        else:
+            if not self._connection or host != self._connection[0]:
+                self._extra_headers = eh
+                self._connection = host, httplib.HTTPConnection(h)
+            result = self._connection[1]
+        return result
+
+if ssl:
+    class SafeTransport(xmlrpclib.SafeTransport):
+        def __init__(self, timeout, use_datetime=0):
+            self.timeout = timeout
+            xmlrpclib.SafeTransport.__init__(self, use_datetime)
+
+        def make_connection(self, host):
+            h, eh, kwargs = self.get_host_info(host)
+            if not kwargs:
+                kwargs = {}
+            kwargs['timeout'] = self.timeout
+            if _ver_info == (2, 6):
+                result = HTTPS(host, None, **kwargs)
+            else:
+                if not self._connection or host != self._connection[0]:
+                    self._extra_headers = eh
+                    self._connection = host, httplib.HTTPSConnection(h, None,
+                                                                     **kwargs)
+                result = self._connection[1]
+            return result
+
+
+class ServerProxy(xmlrpclib.ServerProxy):
+    def __init__(self, uri, **kwargs):
+        self.timeout = timeout = kwargs.pop('timeout', None)
+        # The above classes only come into play if a timeout
+        # is specified
+        if timeout is not None:
+            scheme, _ = splittype(uri)
+            use_datetime = kwargs.get('use_datetime', 0)
+            if scheme == 'https':
+                tcls = SafeTransport
+            else:
+                tcls = Transport
+            kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
+            self.transport = t
+        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
+
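+# Usage sketch (illustrative, not part of the original source):
+#
+#   proxy = ServerProxy('https://pypi.org/pypi', timeout=5.0)
+#
+# Passing a timeout swaps in the Transport/SafeTransport subclasses above, so
+# the underlying HTTP(S) connection honours the five-second socket timeout.
+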
+#
+# CSV functionality. This is provided because on 2.x, the csv module can't
+# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
+#
+
+def _csv_open(fn, mode, **kwargs):
+    if sys.version_info[0] < 3:
+        mode += 'b'
+    else:
+        kwargs['newline'] = ''
+        # Python 3 determines encoding from locale. Force 'utf-8'
+        # file encoding to match other forced utf-8 encoding
+        kwargs['encoding'] = 'utf-8'
+    return open(fn, mode, **kwargs)
+
+
+class CSVBase(object):
+    defaults = {
+        'delimiter': str(','),      # The strs are used because we need native
+        'quotechar': str('"'),      # str in the csv API (2.x won't take
+        'lineterminator': str('\n') # Unicode)
+    }
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *exc_info):
+        self.stream.close()
+
+
+class CSVReader(CSVBase):
+    def __init__(self, **kwargs):
+        if 'stream' in kwargs:
+            stream = kwargs['stream']
+            if sys.version_info[0] >= 3:
+                # needs to be a text stream
+                stream = codecs.getreader('utf-8')(stream)
+            self.stream = stream
+        else:
+            self.stream = _csv_open(kwargs['path'], 'r')
+        self.reader = csv.reader(self.stream, **self.defaults)
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        result = next(self.reader)
+        if sys.version_info[0] < 3:
+            for i, item in enumerate(result):
+                if not isinstance(item, text_type):
+                    result[i] = item.decode('utf-8')
+        return result
+
+    __next__ = next
+
+class CSVWriter(CSVBase):
+    def __init__(self, fn, **kwargs):
+        self.stream = _csv_open(fn, 'w')
+        self.writer = csv.writer(self.stream, **self.defaults)
+
+    def writerow(self, row):
+        if sys.version_info[0] < 3:
+            r = []
+            for item in row:
+                if isinstance(item, text_type):
+                    item = item.encode('utf-8')
+                r.append(item)
+            row = r
+        self.writer.writerow(row)
+
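+# Usage sketch (illustrative, not part of the original source):
+#
+#   with CSVWriter('RECORD') as writer:
+#       writer.writerow(('pkg/__init__.py', 'sha256=abc', '1024'))
+#   with CSVReader(path='RECORD') as reader:
+#       rows = list(reader)   # rows come back as lists of text
+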
+#
+#   Configurator functionality
+#
+
+class Configurator(BaseConfigurator):
+
+    value_converters = dict(BaseConfigurator.value_converters)
+    value_converters['inc'] = 'inc_convert'
+
+    def __init__(self, config, base=None):
+        super(Configurator, self).__init__(config)
+        self.base = base or os.getcwd()
+
+    def configure_custom(self, config):
+        def convert(o):
+            if isinstance(o, (list, tuple)):
+                result = type(o)([convert(i) for i in o])
+            elif isinstance(o, dict):
+                if '()' in o:
+                    result = self.configure_custom(o)
+                else:
+                    result = {}
+                    for k in o:
+                        result[k] = convert(o[k])
+            else:
+                result = self.convert(o)
+            return result
+
+        c = config.pop('()')
+        if not callable(c):
+            c = self.resolve(c)
+        props = config.pop('.', None)
+        # Check for valid identifiers
+        args = config.pop('[]', ())
+        if args:
+            args = tuple([convert(o) for o in args])
+        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
+        kwargs = dict(items)
+        result = c(*args, **kwargs)
+        if props:
+            for n, v in props.items():
+                setattr(result, n, convert(v))
+        return result
+
+    def __getitem__(self, key):
+        result = self.config[key]
+        if isinstance(result, dict) and '()' in result:
+            self.config[key] = result = self.configure_custom(result)
+        return result
+
+    def inc_convert(self, value):
+        """Default converter for the inc:// protocol."""
+        if not os.path.isabs(value):
+            value = os.path.join(self.base, value)
+        with codecs.open(value, 'r', encoding='utf-8') as f:
+            result = json.load(f)
+        return result
+
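+# Usage sketch (illustrative, not part of the original source):
+#
+#   cfg = Configurator({
+#       'logger': {
+#           '()': 'logging.getLogger',   # factory, resolved by dotted path
+#           '[]': ['my.app'],            # positional arguments
+#       },
+#   })
+#   log = cfg['logger']   # instantiated lazily on first access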
+
+class SubprocessMixin(object):
+    """
+    Mixin for running subprocesses and capturing their output
+    """
+    def __init__(self, verbose=False, progress=None):
+        self.verbose = verbose
+        self.progress = progress
+
+    def reader(self, stream, context):
+        """
+        Read lines from a subprocess' output stream and either pass them to a
+        progress callable (if specified) or write progress information to
+        sys.stderr.
+        """
+        progress = self.progress
+        verbose = self.verbose
+        while True:
+            s = stream.readline()
+            if not s:
+                break
+            if progress is not None:
+                progress(s, context)
+            else:
+                if not verbose:
+                    sys.stderr.write('.')
+                else:
+                    sys.stderr.write(s.decode('utf-8'))
+                sys.stderr.flush()
+        stream.close()
+
+    def run_command(self, cmd, **kwargs):
+        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE, **kwargs)
+        t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
+        t1.start()
+        t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
+        t2.start()
+        p.wait()
+        t1.join()
+        t2.join()
+        if self.progress is not None:
+            self.progress('done.', 'main')
+        elif self.verbose:
+            sys.stderr.write('done.\n')
+        return p
+
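+# Usage sketch (illustrative, not part of the original source):
+#
+#   class Runner(SubprocessMixin):
+#       pass
+#
+#   Runner(verbose=True).run_command(['git', '--version'])
+#
+# With verbose=True each output line is echoed to sys.stderr; with a progress
+# callable, each line is handed to it as (line, 'stdout') or (line, 'stderr').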
+
+def normalize_name(name):
+    """Normalize a python package name a la PEP 503"""
+    # https://www.python.org/dev/peps/pep-0503/#normalized-names
+    return re.sub('[-_.]+', '-', name).lower()
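+
+# Illustrative example (not part of the original source):
+#   normalize_name('Django_REST.framework') -> 'django-rest-framework'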
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/version.py b/lib/python3.7/site-packages/pip/_vendor/distlib/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..3eebe18ee849dcafe39246ce2f4cabbe8e5a2b33
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/version.py
@@ -0,0 +1,736 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2017 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""
+Implementation of a flexible versioning scheme providing support for PEP-440,
+setuptools-compatible and semantic versioning.
+"""
+
+import logging
+import re
+
+from .compat import string_types
+from .util import parse_requirement
+
+__all__ = ['NormalizedVersion', 'NormalizedMatcher',
+           'LegacyVersion', 'LegacyMatcher',
+           'SemanticVersion', 'SemanticMatcher',
+           'UnsupportedVersionError', 'get_scheme']
+
+logger = logging.getLogger(__name__)
+
+
+class UnsupportedVersionError(ValueError):
+    """This is an unsupported version."""
+    pass
+
+
+class Version(object):
+    def __init__(self, s):
+        self._string = s = s.strip()
+        self._parts = parts = self.parse(s)
+        assert isinstance(parts, tuple)
+        assert len(parts) > 0
+
+    def parse(self, s):
+        raise NotImplementedError('please implement in a subclass')
+
+    def _check_compatible(self, other):
+        if type(self) != type(other):
+            raise TypeError('cannot compare %r and %r' % (self, other))
+
+    def __eq__(self, other):
+        self._check_compatible(other)
+        return self._parts == other._parts
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __lt__(self, other):
+        self._check_compatible(other)
+        return self._parts < other._parts
+
+    def __gt__(self, other):
+        return not (self.__lt__(other) or self.__eq__(other))
+
+    def __le__(self, other):
+        return self.__lt__(other) or self.__eq__(other)
+
+    def __ge__(self, other):
+        return self.__gt__(other) or self.__eq__(other)
+
+    # See http://docs.python.org/reference/datamodel#object.__hash__
+    def __hash__(self):
+        return hash(self._parts)
+
+    def __repr__(self):
+        return "%s('%s')" % (self.__class__.__name__, self._string)
+
+    def __str__(self):
+        return self._string
+
+    @property
+    def is_prerelease(self):
+        raise NotImplementedError('Please implement in subclasses.')
+
+
+class Matcher(object):
+    version_class = None
+
+    # value is either a callable or the name of a method
+    _operators = {
+        '<': lambda v, c, p: v < c,
+        '>': lambda v, c, p: v > c,
+        '<=': lambda v, c, p: v == c or v < c,
+        '>=': lambda v, c, p: v == c or v > c,
+        '==': lambda v, c, p: v == c,
+        '===': lambda v, c, p: v == c,
+        # by default, compatible => >=.
+        '~=': lambda v, c, p: v == c or v > c,
+        '!=': lambda v, c, p: v != c,
+    }
+
+    # this is a method only to support alternative implementations
+    # via overriding
+    def parse_requirement(self, s):
+        return parse_requirement(s)
+
+    def __init__(self, s):
+        if self.version_class is None:
+            raise ValueError('Please specify a version class')
+        self._string = s = s.strip()
+        r = self.parse_requirement(s)
+        if not r:
+            raise ValueError('Not valid: %r' % s)
+        self.name = r.name
+        self.key = self.name.lower()    # for case-insensitive comparisons
+        clist = []
+        if r.constraints:
+            # import pdb; pdb.set_trace()
+            for op, s in r.constraints:
+                if s.endswith('.*'):
+                    if op not in ('==', '!='):
+                        raise ValueError('\'.*\' not allowed for '
+                                         '%r constraints' % op)
+                    # Could be a partial version (e.g. for '2.*') which
+                    # won't parse as a version, so keep it as a string
+                    vn, prefix = s[:-2], True
+                    # Just to check that vn is a valid version
+                    self.version_class(vn)
+                else:
+                    # Should parse as a version, so we can create an
+                    # instance for the comparison
+                    vn, prefix = self.version_class(s), False
+                clist.append((op, vn, prefix))
+        self._parts = tuple(clist)
+
+    def match(self, version):
+        """
+        Check if the provided version matches the constraints.
+
+        :param version: The version to match against this instance.
+        :type version: String or :class:`Version` instance.
+        """
+        if isinstance(version, string_types):
+            version = self.version_class(version)
+        for operator, constraint, prefix in self._parts:
+            f = self._operators.get(operator)
+            if isinstance(f, string_types):
+                f = getattr(self, f)
+            if not f:
+                msg = ('%r not implemented '
+                       'for %s' % (operator, self.__class__.__name__))
+                raise NotImplementedError(msg)
+            if not f(version, constraint, prefix):
+                return False
+        return True
+
+    @property
+    def exact_version(self):
+        result = None
+        if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
+            result = self._parts[0][1]
+        return result
+
+    def _check_compatible(self, other):
+        if type(self) != type(other) or self.name != other.name:
+            raise TypeError('cannot compare %s and %s' % (self, other))
+
+    def __eq__(self, other):
+        self._check_compatible(other)
+        return self.key == other.key and self._parts == other._parts
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    # See http://docs.python.org/reference/datamodel#object.__hash__
+    def __hash__(self):
+        return hash(self.key) + hash(self._parts)
+
+    def __repr__(self):
+        return "%s(%r)" % (self.__class__.__name__, self._string)
+
+    def __str__(self):
+        return self._string
+
+
+PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
+                               r'(\.(post)(\d+))?(\.(dev)(\d+))?'
+                               r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
+
+
+def _pep_440_key(s):
+    s = s.strip()
+    m = PEP440_VERSION_RE.match(s)
+    if not m:
+        raise UnsupportedVersionError('Not a valid version: %s' % s)
+    groups = m.groups()
+    nums = tuple(int(v) for v in groups[1].split('.'))
+    while len(nums) > 1 and nums[-1] == 0:
+        nums = nums[:-1]
+
+    if not groups[0]:
+        epoch = 0
+    else:
+        epoch = int(groups[0])
+    pre = groups[4:6]
+    post = groups[7:9]
+    dev = groups[10:12]
+    local = groups[13]
+    if pre == (None, None):
+        pre = ()
+    else:
+        pre = pre[0], int(pre[1])
+    if post == (None, None):
+        post = ()
+    else:
+        post = post[0], int(post[1])
+    if dev == (None, None):
+        dev = ()
+    else:
+        dev = dev[0], int(dev[1])
+    if local is None:
+        local = ()
+    else:
+        parts = []
+        for part in local.split('.'):
+            # to ensure that numeric segments compare as greater than
+            # lexicographic ones, avoid comparing them directly: encode
+            # each part as a tuple which ensures correct sorting
+            if part.isdigit():
+                part = (1, int(part))
+            else:
+                part = (0, part)
+            parts.append(part)
+        local = tuple(parts)
+    if not pre:
+        # either before pre-release, or final release and after
+        if not post and dev:
+            # before pre-release
+            pre = ('a', -1)     # to sort before a0
+        else:
+            pre = ('z',)        # to sort after all pre-releases
+    # now look at the state of post and dev.
+    if not post:
+        post = ('_',)   # sort before 'a'
+    if not dev:
+        dev = ('final',)
+
+    #print('%s -> %s' % (s, m.groups()))
+    return epoch, nums, pre, post, dev, local
+
+
+_normalized_key = _pep_440_key
+
+
+class NormalizedVersion(Version):
+    """A rational version.
+
+    Good:
+        1.2         # equivalent to "1.2.0"
+        1.2.0
+        1.2a1
+        1.2.3a2
+        1.2.3b1
+        1.2.3c1
+        1.2.3.4
+        TODO: fill this out
+
+    Bad:
+        1           # minimum two numbers
+        1.2a        # release level must have a release serial
+        1.2.3b
+    """
+    def parse(self, s):
+        result = _normalized_key(s)
+        # _normalized_key loses trailing zeroes in the release
+        # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
+        # However, PEP 440 prefix matching needs it: for example,
+        # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
+        m = PEP440_VERSION_RE.match(s)      # must succeed
+        groups = m.groups()
+        self._release_clause = tuple(int(v) for v in groups[1].split('.'))
+        return result
+
+    PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
+
+    @property
+    def is_prerelease(self):
+        return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
+
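+# Illustrative comparisons (not part of the original source):
+#
+#   NormalizedVersion('1.2') == NormalizedVersion('1.2.0')   # True
+#   NormalizedVersion('1.2a1') < NormalizedVersion('1.2')    # True
+#   NormalizedVersion('1.2.dev3').is_prerelease              # True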
+
+def _match_prefix(x, y):
+    x = str(x)
+    y = str(y)
+    if x == y:
+        return True
+    if not x.startswith(y):
+        return False
+    n = len(y)
+    return x[n] == '.'
+
+
+class NormalizedMatcher(Matcher):
+    version_class = NormalizedVersion
+
+    # value is either a callable or the name of a method
+    _operators = {
+        '~=': '_match_compatible',
+        '<': '_match_lt',
+        '>': '_match_gt',
+        '<=': '_match_le',
+        '>=': '_match_ge',
+        '==': '_match_eq',
+        '===': '_match_arbitrary',
+        '!=': '_match_ne',
+    }
+
+    def _adjust_local(self, version, constraint, prefix):
+        if prefix:
+            strip_local = '+' not in constraint and version._parts[-1]
+        else:
+            # both constraint and version are
+            # NormalizedVersion instances.
+            # If constraint does not have a local component,
+            # ensure the version doesn't, either.
+            strip_local = not constraint._parts[-1] and version._parts[-1]
+        if strip_local:
+            s = version._string.split('+', 1)[0]
+            version = self.version_class(s)
+        return version, constraint
+
+    def _match_lt(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        if version >= constraint:
+            return False
+        release_clause = constraint._release_clause
+        pfx = '.'.join([str(i) for i in release_clause])
+        return not _match_prefix(version, pfx)
+
+    def _match_gt(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        if version <= constraint:
+            return False
+        release_clause = constraint._release_clause
+        pfx = '.'.join([str(i) for i in release_clause])
+        return not _match_prefix(version, pfx)
+
+    def _match_le(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        return version <= constraint
+
+    def _match_ge(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        return version >= constraint
+
+    def _match_eq(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        if not prefix:
+            result = (version == constraint)
+        else:
+            result = _match_prefix(version, constraint)
+        return result
+
+    def _match_arbitrary(self, version, constraint, prefix):
+        return str(version) == str(constraint)
+
+    def _match_ne(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        if not prefix:
+            result = (version != constraint)
+        else:
+            result = not _match_prefix(version, constraint)
+        return result
+
+    def _match_compatible(self, version, constraint, prefix):
+        version, constraint = self._adjust_local(version, constraint, prefix)
+        if version == constraint:
+            return True
+        if version < constraint:
+            return False
+#        if not prefix:
+#            return True
+        release_clause = constraint._release_clause
+        if len(release_clause) > 1:
+            release_clause = release_clause[:-1]
+        pfx = '.'.join([str(i) for i in release_clause])
+        return _match_prefix(version, pfx)
+
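+# Illustrative matching (not part of the original source):
+#
+#   m = NormalizedMatcher('requests (>= 2.0, < 3.0)')
+#   m.match('2.4.1')   # True
+#   m.match('3.0')     # False
+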
+_REPLACEMENTS = (
+    (re.compile('[.+-]$'), ''),                     # remove trailing puncts
+    (re.compile(r'^[.](\d)'), r'0.\1'),             # .N -> 0.N at start
+    (re.compile('^[.-]'), ''),                      # remove leading puncts
+    (re.compile(r'^\((.*)\)$'), r'\1'),             # remove parentheses
+    (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'),    # remove leading v(ersion)
+    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'),        # remove leading r(ev)
+    (re.compile('[.]{2,}'), '.'),                   # multiple runs of '.'
+    (re.compile(r'\b(alfa|apha)\b'), 'alpha'),      # misspelt alpha
+    (re.compile(r'\b(pre-alpha|prealpha)\b'),
+                'pre.alpha'),                       # standardise
+    (re.compile(r'\(beta\)$'), 'beta'),             # remove parentheses
+)
+
+_SUFFIX_REPLACEMENTS = (
+    (re.compile('^[:~._+-]+'), ''),                   # remove leading puncts
+    (re.compile('[,*")([\\]]'), ''),                  # remove unwanted chars
+    (re.compile('[~:+_ -]'), '.'),                    # replace illegal chars
+    (re.compile('[.]{2,}'), '.'),                   # multiple runs of '.'
+    (re.compile(r'\.$'), ''),                       # trailing '.'
+)
+
+_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
+
+
+def _suggest_semantic_version(s):
+    """
+    Try to suggest a semantic form for a version for which
+    _suggest_normalized_version couldn't come up with anything.
+    """
+    result = s.strip().lower()
+    for pat, repl in _REPLACEMENTS:
+        result = pat.sub(repl, result)
+    if not result:
+        result = '0.0.0'
+
+    # Now look for numeric prefix, and separate it out from
+    # the rest.
+    #import pdb; pdb.set_trace()
+    m = _NUMERIC_PREFIX.match(result)
+    if not m:
+        prefix = '0.0.0'
+        suffix = result
+    else:
+        prefix = m.groups()[0].split('.')
+        prefix = [int(i) for i in prefix]
+        while len(prefix) < 3:
+            prefix.append(0)
+        if len(prefix) == 3:
+            suffix = result[m.end():]
+        else:
+            suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
+            prefix = prefix[:3]
+        prefix = '.'.join([str(i) for i in prefix])
+        suffix = suffix.strip()
+    if suffix:
+        #import pdb; pdb.set_trace()
+        # massage the suffix.
+        for pat, repl in _SUFFIX_REPLACEMENTS:
+            suffix = pat.sub(repl, suffix)
+
+    if not suffix:
+        result = prefix
+    else:
+        sep = '-' if 'dev' in suffix else '+'
+        result = prefix + sep + suffix
+    if not is_semver(result):
+        result = None
+    return result
+
+
+def _suggest_normalized_version(s):
+    """Suggest a normalized version close to the given version string.
+
+    If you have a version string that isn't rational (i.e. NormalizedVersion
+    doesn't like it) then you might be able to get an equivalent (or close)
+    rational version from this function.
+
+    This does a number of simple normalizations to the given string, based
+    on observation of versions currently in use on PyPI. Given a dump of
+    those versions during PyCon 2009, 4287 of them:
+    - 2312 (53.93%) match NormalizedVersion without change
+    - 3474 (81.04%) match when using this suggestion method
+
+    @param s {str} An irrational version string.
+    @returns A rational version string, or None if one couldn't be determined.
+    """
+    try:
+        _normalized_key(s)
+        return s   # already rational
+    except UnsupportedVersionError:
+        pass
+
+    rs = s.lower()
+
+    # part of this could use maketrans
+    for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
+                       ('beta', 'b'), ('rc', 'c'), ('-final', ''),
+                       ('-pre', 'c'),
+                       ('-release', ''), ('.release', ''), ('-stable', ''),
+                       ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
+                       ('final', '')):
+        rs = rs.replace(orig, repl)
+
+    # if something ends with dev or pre, we add a 0
+    rs = re.sub(r"pre$", r"pre0", rs)
+    rs = re.sub(r"dev$", r"dev0", rs)
+
+    # if we have something like "b-2" or "a.2" at the end of the
+    # version, that is probably beta, alpha, etc
+    # let's remove the dash or dot
+    rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
+
+    # 1.0-dev-r371 -> 1.0.dev371
+    # 0.1-dev-r79 -> 0.1.dev79
+    rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
+
+    # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
+    rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
+
+    # Clean: v0.3, v1.0
+    if rs.startswith('v'):
+        rs = rs[1:]
+
+    # Clean leading '0's on numbers.
+    #TODO: unintended side-effect on, e.g., "2003.05.09"
+    # PyPI stats: 77 (~2%) better
+    rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
+
+    # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
+    # zero.
+    # PyPI stats: 245 (7.56%) better
+    rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
+
+    # the 'dev-rNNN' tag is a dev tag
+    rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
+
+    # clean the - when used as a pre delimiter
+    rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
+
+    # a terminal "dev" or "devel" can be changed into ".dev0"
+    rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
+
+    # a terminal "dev" can be changed into ".dev0"
+    rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
+
+    # a terminal "final" or "stable" can be removed
+    rs = re.sub(r"(final|stable)$", "", rs)
+
+    # The 'r' and the '-' tags are post release tags
+    #   0.4a1.r10       ->  0.4a1.post10
+    #   0.9.33-17222    ->  0.9.33.post17222
+    #   0.9.33-r17222   ->  0.9.33.post17222
+    rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
+
+    # Clean 'r' instead of 'dev' usage:
+    #   0.9.33+r17222   ->  0.9.33.dev17222
+    #   1.0dev123       ->  1.0.dev123
+    #   1.0.git123      ->  1.0.dev123
+    #   1.0.bzr123      ->  1.0.dev123
+    #   0.1a0dev.123    ->  0.1a0.dev123
+    # PyPI stats:  ~150 (~4%) better
+    rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
+
+    # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
+    #   0.2.pre1        ->  0.2c1
+    #   0.2-c1         ->  0.2c1
+    #   1.0preview123   ->  1.0c123
+    # PyPI stats: ~21 (0.62%) better
+    rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
+
+    # Tcl/Tk uses "px" for their post release markers
+    rs = re.sub(r"p(\d+)$", r".post\1", rs)
+
+    try:
+        _normalized_key(rs)
+    except UnsupportedVersionError:
+        rs = None
+    return rs
+
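+# Illustrative suggestions (not part of the original source):
+#
+#   _suggest_normalized_version('1.0-alpha-2')    # -> '1.0a2'
+#   _suggest_normalized_version('0.9.33-r17222')  # -> '0.9.33.post17222'
+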
+#
+#   Legacy version processing (distribute-compatible)
+#
+
+_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
+_VERSION_REPLACE = {
+    'pre': 'c',
+    'preview': 'c',
+    '-': 'final-',
+    'rc': 'c',
+    'dev': '@',
+    '': None,
+    '.': None,
+}
+
+
+def _legacy_key(s):
+    def get_parts(s):
+        result = []
+        for p in _VERSION_PART.split(s.lower()):
+            p = _VERSION_REPLACE.get(p, p)
+            if p:
+                if '0' <= p[:1] <= '9':
+                    p = p.zfill(8)
+                else:
+                    p = '*' + p
+                result.append(p)
+        result.append('*final')
+        return result
+
+    result = []
+    for p in get_parts(s):
+        if p.startswith('*'):
+            if p < '*final':
+                while result and result[-1] == '*final-':
+                    result.pop()
+            while result and result[-1] == '00000000':
+                result.pop()
+        result.append(p)
+    return tuple(result)
+
+
+class LegacyVersion(Version):
+    def parse(self, s):
+        return _legacy_key(s)
+
+    @property
+    def is_prerelease(self):
+        result = False
+        for x in self._parts:
+            if (isinstance(x, string_types) and x.startswith('*') and
+                x < '*final'):
+                result = True
+                break
+        return result
+
+
+class LegacyMatcher(Matcher):
+    version_class = LegacyVersion
+
+    _operators = dict(Matcher._operators)
+    _operators['~='] = '_match_compatible'
+
+    numeric_re = re.compile(r'^(\d+(\.\d+)*)')
+
+    def _match_compatible(self, version, constraint, prefix):
+        if version < constraint:
+            return False
+        m = self.numeric_re.match(str(constraint))
+        if not m:
+            logger.warning('Cannot compute compatible match for version %s '
+                           'and constraint %s', version, constraint)
+            return True
+        s = m.groups()[0]
+        if '.' in s:
+            s = s.rsplit('.', 1)[0]
+        return _match_prefix(version, s)
+
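+# Illustrative legacy behaviour (not part of the original source):
+#
+#   LegacyVersion('1.0') < LegacyVersion('1.0.post1')   # True
+#   LegacyVersion('1.0.dev1').is_prerelease             # True
+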
+#
+#   Semantic versioning
+#
+
+_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
+                        r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
+                        r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
+
+
+def is_semver(s):
+    return _SEMVER_RE.match(s)
+
+
+def _semantic_key(s):
+    def make_tuple(s, absent):
+        if s is None:
+            result = (absent,)
+        else:
+            parts = s[1:].split('.')
+            # We can't compare ints and strings on Python 3, so fudge it
+            # by zero-filling numeric values to simulate a numeric comparison
+            result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
+        return result
+
+    m = is_semver(s)
+    if not m:
+        raise UnsupportedVersionError(s)
+    groups = m.groups()
+    major, minor, patch = [int(i) for i in groups[:3]]
+    # choose the '|' and '*' so that versions sort correctly
+    pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
+    return (major, minor, patch), pre, build
+
+
+class SemanticVersion(Version):
+    def parse(self, s):
+        return _semantic_key(s)
+
+    @property
+    def is_prerelease(self):
+        return self._parts[1][0] != '|'
+
+
+class SemanticMatcher(Matcher):
+    version_class = SemanticVersion
+
+
+class VersionScheme(object):
+    def __init__(self, key, matcher, suggester=None):
+        self.key = key
+        self.matcher = matcher
+        self.suggester = suggester
+
+    def is_valid_version(self, s):
+        try:
+            self.matcher.version_class(s)
+            result = True
+        except UnsupportedVersionError:
+            result = False
+        return result
+
+    def is_valid_matcher(self, s):
+        try:
+            self.matcher(s)
+            result = True
+        except UnsupportedVersionError:
+            result = False
+        return result
+
+    def is_valid_constraint_list(self, s):
+        """
+        Used for processing some metadata fields
+        """
+        return self.is_valid_matcher('dummy_name (%s)' % s)
+
+    def suggest(self, s):
+        if self.suggester is None:
+            result = None
+        else:
+            result = self.suggester(s)
+        return result
+
+_SCHEMES = {
+    'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
+                                _suggest_normalized_version),
+    'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda s: s),
+    'semantic': VersionScheme(_semantic_key, SemanticMatcher,
+                              _suggest_semantic_version),
+}
+
+_SCHEMES['default'] = _SCHEMES['normalized']
+
+
+def get_scheme(name):
+    if name not in _SCHEMES:
+        raise ValueError('unknown scheme name: %r' % name)
+    return _SCHEMES[name]
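+
+# Illustrative use (not part of the original source):
+#
+#   scheme = get_scheme('default')
+#   scheme.is_valid_version('1.0.post1')   # True under PEP 440 rules
+#   scheme.suggest('1.0-alpha-2')          # '1.0a2'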
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/w32.exe b/lib/python3.7/site-packages/pip/_vendor/distlib/w32.exe
new file mode 100644
index 0000000000000000000000000000000000000000..732215a9d34ccb7b417d637a7646d9b843ecafa8
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/w32.exe differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/w64.exe b/lib/python3.7/site-packages/pip/_vendor/distlib/w64.exe
new file mode 100644
index 0000000000000000000000000000000000000000..c41bd0a011fd760ce20ba795d9e535e0d2c39876
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/distlib/w64.exe differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/distlib/wheel.py b/lib/python3.7/site-packages/pip/_vendor/distlib/wheel.py
new file mode 100644
index 0000000000000000000000000000000000000000..b04bfaefe9c2ffce788d519d6d49e59e4f2ae87f
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distlib/wheel.py
@@ -0,0 +1,988 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013-2017 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+from __future__ import unicode_literals
+
+import base64
+import codecs
+import datetime
+import distutils.util
+from email import message_from_file
+import hashlib
+import imp
+import json
+import logging
+import os
+import posixpath
+import re
+import shutil
+import sys
+import tempfile
+import zipfile
+
+from . import __version__, DistlibException
+from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
+from .database import InstalledDistribution
+from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME
+from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
+                   cached_property, get_cache_base, read_exports, tempdir)
+from .version import NormalizedVersion, UnsupportedVersionError
+
+logger = logging.getLogger(__name__)
+
+cache = None    # created when needed
+
+if hasattr(sys, 'pypy_version_info'):  # pragma: no cover
+    IMP_PREFIX = 'pp'
+elif sys.platform.startswith('java'):  # pragma: no cover
+    IMP_PREFIX = 'jy'
+elif sys.platform == 'cli':  # pragma: no cover
+    IMP_PREFIX = 'ip'
+else:
+    IMP_PREFIX = 'cp'
+
+VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
+if not VER_SUFFIX:   # pragma: no cover
+    VER_SUFFIX = '%s%s' % sys.version_info[:2]
+PYVER = 'py' + VER_SUFFIX
+IMPVER = IMP_PREFIX + VER_SUFFIX
+
+ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')
+
+ABI = sysconfig.get_config_var('SOABI')
+if ABI and ABI.startswith('cpython-'):
+    ABI = ABI.replace('cpython-', 'cp')
+else:
+    def _derive_abi():
+        parts = ['cp', VER_SUFFIX]
+        if sysconfig.get_config_var('Py_DEBUG'):
+            parts.append('d')
+        if sysconfig.get_config_var('WITH_PYMALLOC'):
+            parts.append('m')
+        if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4:
+            parts.append('u')
+        return ''.join(parts)
+    ABI = _derive_abi()
+    del _derive_abi
+
+FILENAME_RE = re.compile(r'''
+(?P<nm>[^-]+)
+-(?P<vn>\d+[^-]*)
+(-(?P<bn>\d+[^-]*))?
+-(?P<py>\w+\d+(\.\w+\d+)*)
+-(?P<bi>\w+)
+-(?P<ar>\w+(\.\w+)*)
+\.whl$
+''', re.IGNORECASE | re.VERBOSE)
+
+NAME_VERSION_RE = re.compile(r'''
+(?P<nm>[^-]+)
+-(?P<vn>\d+[^-]*)
+(-(?P<bn>\d+[^-]*))?$
+''', re.IGNORECASE | re.VERBOSE)
+
+SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
+SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
+SHEBANG_PYTHON = b'#!python'
+SHEBANG_PYTHONW = b'#!pythonw'
+
+if os.sep == '/':
+    to_posix = lambda o: o
+else:
+    to_posix = lambda o: o.replace(os.sep, '/')
+
+
+class Mounter(object):
+    def __init__(self):
+        self.impure_wheels = {}
+        self.libs = {}
+
+    def add(self, pathname, extensions):
+        self.impure_wheels[pathname] = extensions
+        self.libs.update(extensions)
+
+    def remove(self, pathname):
+        extensions = self.impure_wheels.pop(pathname)
+        for k, v in extensions:
+            if k in self.libs:
+                del self.libs[k]
+
+    def find_module(self, fullname, path=None):
+        if fullname in self.libs:
+            result = self
+        else:
+            result = None
+        return result
+
+    def load_module(self, fullname):
+        if fullname in sys.modules:
+            result = sys.modules[fullname]
+        else:
+            if fullname not in self.libs:
+                raise ImportError('unable to find extension for %s' % fullname)
+            result = imp.load_dynamic(fullname, self.libs[fullname])
+            result.__loader__ = self
+            parts = fullname.rsplit('.', 1)
+            if len(parts) > 1:
+                result.__package__ = parts[0]
+        return result
+
+_hook = Mounter()
+
+
+class Wheel(object):
+    """
+    Class to build and install from Wheel files (PEP 427).
+    """
+
+    wheel_version = (1, 1)
+    hash_kind = 'sha256'
+
+    def __init__(self, filename=None, sign=False, verify=False):
+        """
+        Initialise an instance using a (valid) filename.
+        """
+        self.sign = sign
+        self.should_verify = verify
+        self.buildver = ''
+        self.pyver = [PYVER]
+        self.abi = ['none']
+        self.arch = ['any']
+        self.dirname = os.getcwd()
+        if filename is None:
+            self.name = 'dummy'
+            self.version = '0.1'
+            self._filename = self.filename
+        else:
+            m = NAME_VERSION_RE.match(filename)
+            if m:
+                info = m.groupdict('')
+                self.name = info['nm']
+                # Reinstate the local version separator
+                self.version = info['vn'].replace('_', '-')
+                self.buildver = info['bn']
+                self._filename = self.filename
+            else:
+                dirname, filename = os.path.split(filename)
+                m = FILENAME_RE.match(filename)
+                if not m:
+                    raise DistlibException('Invalid name or '
+                                           'filename: %r' % filename)
+                if dirname:
+                    self.dirname = os.path.abspath(dirname)
+                self._filename = filename
+                info = m.groupdict('')
+                self.name = info['nm']
+                self.version = info['vn']
+                self.buildver = info['bn']
+                self.pyver = info['py'].split('.')
+                self.abi = info['bi'].split('.')
+                self.arch = info['ar'].split('.')
+
+    @property
+    def filename(self):
+        """
+        Build and return a filename from the various components.
+        """
+        if self.buildver:
+            buildver = '-' + self.buildver
+        else:
+            buildver = ''
+        pyver = '.'.join(self.pyver)
+        abi = '.'.join(self.abi)
+        arch = '.'.join(self.arch)
+        # replace - with _ as a local version separator
+        version = self.version.replace('-', '_')
+        return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
+                                         pyver, abi, arch)
+
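+    # Illustrative example (not part of the original source): with name='foo',
+    # version='1.0-1', pyver=['cp37'], abi=['cp37m'] and arch=['linux_x86_64'],
+    # ``filename`` is 'foo-1.0_1-cp37-cp37m-linux_x86_64.whl' ('-' becomes '_'
+    # in the version component).
+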
+    @property
+    def exists(self):
+        path = os.path.join(self.dirname, self.filename)
+        return os.path.isfile(path)
+
+    @property
+    def tags(self):
+        for pyver in self.pyver:
+            for abi in self.abi:
+                for arch in self.arch:
+                    yield pyver, abi, arch
+
+    @cached_property
+    def metadata(self):
+        pathname = os.path.join(self.dirname, self.filename)
+        name_ver = '%s-%s' % (self.name, self.version)
+        info_dir = '%s.dist-info' % name_ver
+        wrapper = codecs.getreader('utf-8')
+        with ZipFile(pathname, 'r') as zf:
+            wheel_metadata = self.get_wheel_metadata(zf)
+            wv = wheel_metadata['Wheel-Version'].split('.', 1)
+            file_version = tuple([int(i) for i in wv])
+            if file_version < (1, 1):
+                fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME, 'METADATA']
+            else:
+                fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME]
+            result = None
+            for fn in fns:
+                try:
+                    metadata_filename = posixpath.join(info_dir, fn)
+                    with zf.open(metadata_filename) as bf:
+                        wf = wrapper(bf)
+                        result = Metadata(fileobj=wf)
+                        if result:
+                            break
+                except KeyError:
+                    pass
+            if not result:
+                raise ValueError('Invalid wheel, because metadata is '
+                                 'missing: looked in %s' % ', '.join(fns))
+        return result
+
+    def get_wheel_metadata(self, zf):
+        name_ver = '%s-%s' % (self.name, self.version)
+        info_dir = '%s.dist-info' % name_ver
+        metadata_filename = posixpath.join(info_dir, 'WHEEL')
+        with zf.open(metadata_filename) as bf:
+            wf = codecs.getreader('utf-8')(bf)
+            message = message_from_file(wf)
+        return dict(message)
+
+    @cached_property
+    def info(self):
+        pathname = os.path.join(self.dirname, self.filename)
+        with ZipFile(pathname, 'r') as zf:
+            result = self.get_wheel_metadata(zf)
+        return result
+
+    def process_shebang(self, data):
+        m = SHEBANG_RE.match(data)
+        if m:
+            end = m.end()
+            shebang, data_after_shebang = data[:end], data[end:]
+            # Preserve any arguments after the interpreter
+            if b'pythonw' in shebang.lower():
+                shebang_python = SHEBANG_PYTHONW
+            else:
+                shebang_python = SHEBANG_PYTHON
+            m = SHEBANG_DETAIL_RE.match(shebang)
+            if m:
+                args = b' ' + m.groups()[-1]
+            else:
+                args = b''
+            shebang = shebang_python + args
+            data = shebang + data_after_shebang
+        else:
+            cr = data.find(b'\r')
+            lf = data.find(b'\n')
+            if cr < 0 or cr > lf:
+                term = b'\n'
+            else:
+                if data[cr:cr + 2] == b'\r\n':
+                    term = b'\r\n'
+                else:
+                    term = b'\r'
+            data = SHEBANG_PYTHON + term + data
+        return data
+
+    def get_hash(self, data, hash_kind=None):
+        if hash_kind is None:
+            hash_kind = self.hash_kind
+        try:
+            hasher = getattr(hashlib, hash_kind)
+        except AttributeError:
+            raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
+        result = hasher(data).digest()
+        result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
+        return hash_kind, result
+
+    def write_record(self, records, record_path, base):
+        records = list(records) # make a copy for sorting
+        p = to_posix(os.path.relpath(record_path, base))
+        records.append((p, '', ''))
+        records.sort()
+        with CSVWriter(record_path) as writer:
+            for row in records:
+                writer.writerow(row)
+
+    def write_records(self, info, libdir, archive_paths):
+        records = []
+        distinfo, info_dir = info
+        hasher = getattr(hashlib, self.hash_kind)
+        for ap, p in archive_paths:
+            with open(p, 'rb') as f:
+                data = f.read()
+            digest = '%s=%s' % self.get_hash(data)
+            size = os.path.getsize(p)
+            records.append((ap, digest, size))
+
+        p = os.path.join(distinfo, 'RECORD')
+        self.write_record(records, p, libdir)
+        ap = to_posix(os.path.join(info_dir, 'RECORD'))
+        archive_paths.append((ap, p))
+
+    def build_zip(self, pathname, archive_paths):
+        with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
+            for ap, p in archive_paths:
+                logger.debug('Wrote %s to %s in wheel', p, ap)
+                zf.write(p, ap)
+
+    def build(self, paths, tags=None, wheel_version=None):
+        """
+        Build a wheel from files in specified paths, and use any specified tags
+        when determining the name of the wheel.
+        """
+        if tags is None:
+            tags = {}
+
+        libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
+        if libkey == 'platlib':
+            is_pure = 'false'
+            default_pyver = [IMPVER]
+            default_abi = [ABI]
+            default_arch = [ARCH]
+        else:
+            is_pure = 'true'
+            default_pyver = [PYVER]
+            default_abi = ['none']
+            default_arch = ['any']
+
+        self.pyver = tags.get('pyver', default_pyver)
+        self.abi = tags.get('abi', default_abi)
+        self.arch = tags.get('arch', default_arch)
+
+        libdir = paths[libkey]
+
+        name_ver = '%s-%s' % (self.name, self.version)
+        data_dir = '%s.data' % name_ver
+        info_dir = '%s.dist-info' % name_ver
+
+        archive_paths = []
+
+        # First, stuff which is not in site-packages
+        for key in ('data', 'headers', 'scripts'):
+            if key not in paths:
+                continue
+            path = paths[key]
+            if os.path.isdir(path):
+                for root, dirs, files in os.walk(path):
+                    for fn in files:
+                        p = fsdecode(os.path.join(root, fn))
+                        rp = os.path.relpath(p, path)
+                        ap = to_posix(os.path.join(data_dir, key, rp))
+                        archive_paths.append((ap, p))
+                        if key == 'scripts' and not p.endswith('.exe'):
+                            with open(p, 'rb') as f:
+                                data = f.read()
+                            data = self.process_shebang(data)
+                            with open(p, 'wb') as f:
+                                f.write(data)
+
+        # Now, stuff which is in site-packages, other than the
+        # distinfo stuff.
+        path = libdir
+        distinfo = None
+        for root, dirs, files in os.walk(path):
+            if root == path:
+                # At the top level only, save distinfo for later
+                # and skip it for now
+                for i, dn in enumerate(dirs):
+                    dn = fsdecode(dn)
+                    if dn.endswith('.dist-info'):
+                        distinfo = os.path.join(root, dn)
+                        del dirs[i]
+                        break
+                assert distinfo, '.dist-info directory expected, not found'
+
+            for fn in files:
+                # comment out next suite to leave .pyc files in
+                if fsdecode(fn).endswith(('.pyc', '.pyo')):
+                    continue
+                p = os.path.join(root, fn)
+                rp = to_posix(os.path.relpath(p, path))
+                archive_paths.append((rp, p))
+
+        # Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
+        files = os.listdir(distinfo)
+        for fn in files:
+            if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
+                p = fsdecode(os.path.join(distinfo, fn))
+                ap = to_posix(os.path.join(info_dir, fn))
+                archive_paths.append((ap, p))
+
+        wheel_metadata = [
+            'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
+            'Generator: distlib %s' % __version__,
+            'Root-Is-Purelib: %s' % is_pure,
+        ]
+        for pyver, abi, arch in self.tags:
+            wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
+        p = os.path.join(distinfo, 'WHEEL')
+        with open(p, 'w') as f:
+            f.write('\n'.join(wheel_metadata))
+        ap = to_posix(os.path.join(info_dir, 'WHEEL'))
+        archive_paths.append((ap, p))
+
+        # Now, at last, RECORD.
+        # Paths in here are archive paths - nothing else makes sense.
+        self.write_records((distinfo, info_dir), libdir, archive_paths)
+        # Now, ready to build the zip file
+        pathname = os.path.join(self.dirname, self.filename)
+        self.build_zip(pathname, archive_paths)
+        return pathname
+
+    def install(self, paths, maker, **kwargs):
+        """
+        Install a wheel to the specified paths. If kwarg ``warner`` is
+        specified, it should be a callable, which will be called with two
+        tuples indicating the wheel version of this software and the wheel
+        version in the file, if there is a discrepancy in the versions.
+        This can be used to issue any warnings or raise any exceptions.
+        If kwarg ``lib_only`` is True, only the purelib/platlib files are
+        installed, and the headers, scripts, data and dist-info metadata are
+        not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
+        bytecode will try to use file-hash based invalidation (PEP-552) on
+        supported interpreter versions (CPython 3.7+).
+
+        The return value is an :class:`InstalledDistribution` instance unless
+        ``lib_only`` is True, in which case the return value is ``None``.
+        """
+
+        dry_run = maker.dry_run
+        warner = kwargs.get('warner')
+        lib_only = kwargs.get('lib_only', False)
+        bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)
+
+        pathname = os.path.join(self.dirname, self.filename)
+        name_ver = '%s-%s' % (self.name, self.version)
+        data_dir = '%s.data' % name_ver
+        info_dir = '%s.dist-info' % name_ver
+
+        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
+        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
+        record_name = posixpath.join(info_dir, 'RECORD')
+
+        wrapper = codecs.getreader('utf-8')
+
+        with ZipFile(pathname, 'r') as zf:
+            with zf.open(wheel_metadata_name) as bwf:
+                wf = wrapper(bwf)
+                message = message_from_file(wf)
+            wv = message['Wheel-Version'].split('.', 1)
+            file_version = tuple([int(i) for i in wv])
+            if (file_version != self.wheel_version) and warner:
+                warner(self.wheel_version, file_version)
+
+            if message['Root-Is-Purelib'] == 'true':
+                libdir = paths['purelib']
+            else:
+                libdir = paths['platlib']
+
+            records = {}
+            with zf.open(record_name) as bf:
+                with CSVReader(stream=bf) as reader:
+                    for row in reader:
+                        p = row[0]
+                        records[p] = row
+
+            data_pfx = posixpath.join(data_dir, '')
+            info_pfx = posixpath.join(info_dir, '')
+            script_pfx = posixpath.join(data_dir, 'scripts', '')
+
+            # make a new instance rather than a copy of maker's,
+            # as we mutate it
+            fileop = FileOperator(dry_run=dry_run)
+            fileop.record = True    # so we can rollback if needed
+
+            bc = not sys.dont_write_bytecode    # Double negatives. Lovely!
+
+            outfiles = []   # for RECORD writing
+
+            # for script copying/shebang processing
+            workdir = tempfile.mkdtemp()
+            # set target dir later
+            # we default add_launchers to False, as the
+            # Python Launcher should be used instead
+            maker.source_dir = workdir
+            maker.target_dir = None
+            try:
+                for zinfo in zf.infolist():
+                    arcname = zinfo.filename
+                    if isinstance(arcname, text_type):
+                        u_arcname = arcname
+                    else:
+                        u_arcname = arcname.decode('utf-8')
+                    # The signature file won't be in RECORD,
+                    # and we don't currently do anything with it
+                    if u_arcname.endswith('/RECORD.jws'):
+                        continue
+                    row = records[u_arcname]
+                    if row[2] and str(zinfo.file_size) != row[2]:
+                        raise DistlibException('size mismatch for '
+                                               '%s' % u_arcname)
+                    if row[1]:
+                        kind, value = row[1].split('=', 1)
+                        with zf.open(arcname) as bf:
+                            data = bf.read()
+                        _, digest = self.get_hash(data, kind)
+                        if digest != value:
+                            raise DistlibException('digest mismatch for '
+                                                   '%s' % arcname)
+
+                    if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
+                        logger.debug('lib_only: skipping %s', u_arcname)
+                        continue
+                    is_script = (u_arcname.startswith(script_pfx)
+                                 and not u_arcname.endswith('.exe'))
+
+                    if u_arcname.startswith(data_pfx):
+                        _, where, rp = u_arcname.split('/', 2)
+                        outfile = os.path.join(paths[where], convert_path(rp))
+                    else:
+                        # meant for site-packages.
+                        if u_arcname in (wheel_metadata_name, record_name):
+                            continue
+                        outfile = os.path.join(libdir, convert_path(u_arcname))
+                    if not is_script:
+                        with zf.open(arcname) as bf:
+                            fileop.copy_stream(bf, outfile)
+                        outfiles.append(outfile)
+                        # Double check the digest of the written file
+                        if not dry_run and row[1]:
+                            with open(outfile, 'rb') as bf:
+                                data = bf.read()
+                                _, newdigest = self.get_hash(data, kind)
+                                if newdigest != digest:
+                                    raise DistlibException('digest mismatch '
+                                                           'on write for '
+                                                           '%s' % outfile)
+                        if bc and outfile.endswith('.py'):
+                            try:
+                                pyc = fileop.byte_compile(outfile,
+                                                          hashed_invalidation=bc_hashed_invalidation)
+                                outfiles.append(pyc)
+                            except Exception:
+                                # Don't give up if byte-compilation fails,
+                                # but log it and perhaps warn the user
+                                logger.warning('Byte-compilation failed',
+                                               exc_info=True)
+                    else:
+                        fn = os.path.basename(convert_path(arcname))
+                        workname = os.path.join(workdir, fn)
+                        with zf.open(arcname) as bf:
+                            fileop.copy_stream(bf, workname)
+
+                        dn, fn = os.path.split(outfile)
+                        maker.target_dir = dn
+                        filenames = maker.make(fn)
+                        fileop.set_executable_mode(filenames)
+                        outfiles.extend(filenames)
+
+                if lib_only:
+                    logger.debug('lib_only: returning None')
+                    dist = None
+                else:
+                    # Generate scripts
+
+                    # Try to get pydist.json so we can see if there are
+                    # any commands to generate. If this fails (e.g. because
+                    # of a legacy wheel), log a warning but don't give up.
+                    commands = None
+                    file_version = self.info['Wheel-Version']
+                    if file_version == '1.0':
+                        # Use legacy info
+                        ep = posixpath.join(info_dir, 'entry_points.txt')
+                        try:
+                            with zf.open(ep) as bwf:
+                                epdata = read_exports(bwf)
+                            commands = {}
+                            for key in ('console', 'gui'):
+                                k = '%s_scripts' % key
+                                if k in epdata:
+                                    commands['wrap_%s' % key] = d = {}
+                                    for v in epdata[k].values():
+                                        s = '%s:%s' % (v.prefix, v.suffix)
+                                        if v.flags:
+                                            s += ' %s' % v.flags
+                                        d[v.name] = s
+                        except Exception:
+                            logger.warning('Unable to read legacy script '
+                                           'metadata, so cannot generate '
+                                           'scripts')
+                    else:
+                        try:
+                            with zf.open(metadata_name) as bwf:
+                                wf = wrapper(bwf)
+                                commands = json.load(wf).get('extensions')
+                                if commands:
+                                    commands = commands.get('python.commands')
+                        except Exception:
+                            logger.warning('Unable to read JSON metadata, so '
+                                           'cannot generate scripts')
+                    if commands:
+                        console_scripts = commands.get('wrap_console', {})
+                        gui_scripts = commands.get('wrap_gui', {})
+                        if console_scripts or gui_scripts:
+                            script_dir = paths.get('scripts', '')
+                            if not os.path.isdir(script_dir):
+                                raise ValueError('Valid script path not '
+                                                 'specified')
+                            maker.target_dir = script_dir
+                            for k, v in console_scripts.items():
+                                script = '%s = %s' % (k, v)
+                                filenames = maker.make(script)
+                                fileop.set_executable_mode(filenames)
+
+                            if gui_scripts:
+                                options = {'gui': True}
+                                for k, v in gui_scripts.items():
+                                    script = '%s = %s' % (k, v)
+                                    filenames = maker.make(script, options)
+                                    fileop.set_executable_mode(filenames)
+
+                    p = os.path.join(libdir, info_dir)
+                    dist = InstalledDistribution(p)
+
+                    # Write SHARED
+                    paths = dict(paths)     # don't change passed in dict
+                    del paths['purelib']
+                    del paths['platlib']
+                    paths['lib'] = libdir
+                    p = dist.write_shared_locations(paths, dry_run)
+                    if p:
+                        outfiles.append(p)
+
+                    # Write RECORD
+                    dist.write_installed_files(outfiles, paths['prefix'],
+                                               dry_run)
+                return dist
+            except Exception:  # pragma: no cover
+                logger.exception('installation failed.')
+                fileop.rollback()
+                raise
+            finally:
+                shutil.rmtree(workdir)
+
+    def _get_dylib_cache(self):
+        global cache
+        if cache is None:
+            # Use native string to avoid issues on 2.x: see Python #20140.
+            base = os.path.join(get_cache_base(), str('dylib-cache'),
+                                sys.version[:3])
+            cache = Cache(base)
+        return cache
+
+    def _get_extensions(self):
+        pathname = os.path.join(self.dirname, self.filename)
+        name_ver = '%s-%s' % (self.name, self.version)
+        info_dir = '%s.dist-info' % name_ver
+        arcname = posixpath.join(info_dir, 'EXTENSIONS')
+        wrapper = codecs.getreader('utf-8')
+        result = []
+        with ZipFile(pathname, 'r') as zf:
+            try:
+                with zf.open(arcname) as bf:
+                    wf = wrapper(bf)
+                    extensions = json.load(wf)
+                    cache = self._get_dylib_cache()
+                    prefix = cache.prefix_to_dir(pathname)
+                    cache_base = os.path.join(cache.base, prefix)
+                    if not os.path.isdir(cache_base):
+                        os.makedirs(cache_base)
+                    for name, relpath in extensions.items():
+                        dest = os.path.join(cache_base, convert_path(relpath))
+                        if not os.path.exists(dest):
+                            extract = True
+                        else:
+                            file_time = os.stat(dest).st_mtime
+                            file_time = datetime.datetime.fromtimestamp(file_time)
+                            info = zf.getinfo(relpath)
+                            wheel_time = datetime.datetime(*info.date_time)
+                            extract = wheel_time > file_time
+                        if extract:
+                            zf.extract(relpath, cache_base)
+                        result.append((name, dest))
+            except KeyError:
+                pass
+        return result
+
+    def is_compatible(self):
+        """
+        Determine if a wheel is compatible with the running system.
+        """
+        return is_compatible(self)
+
+    def is_mountable(self):
+        """
+        Determine if a wheel is asserted as mountable by its metadata.
+        """
+        return True # for now - metadata details TBD
+
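+    # Illustrative mount/unmount sketch (hypothetical package name):
+    # mounting adds the wheel file itself to sys.path so its contents
+    # become importable without a full install.
+    #
+    #   w = Wheel('foo-1.0-py3-none-any.whl')
+    #   w.mount()      # inserts the wheel path at the front of sys.path
+    #   import foo     # hypothetical module shipped in the wheel
+    #   w.unmount()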
+    def mount(self, append=False):
+        pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
+        if not self.is_compatible():
+            msg = 'Wheel %s not compatible with this Python.' % pathname
+            raise DistlibException(msg)
+        if not self.is_mountable():
+            msg = 'Wheel %s is marked as not mountable.' % pathname
+            raise DistlibException(msg)
+        if pathname in sys.path:
+            logger.debug('%s already in path', pathname)
+        else:
+            if append:
+                sys.path.append(pathname)
+            else:
+                sys.path.insert(0, pathname)
+            extensions = self._get_extensions()
+            if extensions:
+                if _hook not in sys.meta_path:
+                    sys.meta_path.append(_hook)
+                _hook.add(pathname, extensions)
+
+    def unmount(self):
+        pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
+        if pathname not in sys.path:
+            logger.debug('%s not in path', pathname)
+        else:
+            sys.path.remove(pathname)
+            if pathname in _hook.impure_wheels:
+                _hook.remove(pathname)
+            if not _hook.impure_wheels:
+                if _hook in sys.meta_path:
+                    sys.meta_path.remove(_hook)
+
+    def verify(self):
+        pathname = os.path.join(self.dirname, self.filename)
+        name_ver = '%s-%s' % (self.name, self.version)
+        data_dir = '%s.data' % name_ver
+        info_dir = '%s.dist-info' % name_ver
+
+        metadata_name = posixpath.join(info_dir, METADATA_FILENAME)
+        wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
+        record_name = posixpath.join(info_dir, 'RECORD')
+
+        wrapper = codecs.getreader('utf-8')
+
+        with ZipFile(pathname, 'r') as zf:
+            with zf.open(wheel_metadata_name) as bwf:
+                wf = wrapper(bwf)
+                message = message_from_file(wf)
+            wv = message['Wheel-Version'].split('.', 1)
+            file_version = tuple([int(i) for i in wv])
+            # TODO version verification
+
+            records = {}
+            with zf.open(record_name) as bf:
+                with CSVReader(stream=bf) as reader:
+                    for row in reader:
+                        p = row[0]
+                        records[p] = row
+
+            for zinfo in zf.infolist():
+                arcname = zinfo.filename
+                if isinstance(arcname, text_type):
+                    u_arcname = arcname
+                else:
+                    u_arcname = arcname.decode('utf-8')
+                if '..' in u_arcname:
+                    raise DistlibException('invalid entry in '
+                                           'wheel: %r' % u_arcname)
+
+                # The signature file won't be in RECORD,
+                # and we don't currently do anything with it.
+                if u_arcname.endswith('/RECORD.jws'):
+                    continue
+                row = records[u_arcname]
+                if row[2] and str(zinfo.file_size) != row[2]:
+                    raise DistlibException('size mismatch for '
+                                           '%s' % u_arcname)
+                if row[1]:
+                    kind, value = row[1].split('=', 1)
+                    with zf.open(arcname) as bf:
+                        data = bf.read()
+                    _, digest = self.get_hash(data, kind)
+                    if digest != value:
+                        raise DistlibException('digest mismatch for '
+                                               '%s' % arcname)
+
+    def update(self, modifier, dest_dir=None, **kwargs):
+        """
+        Update the contents of a wheel in a generic way. The modifier should
+        be a callable which expects a dictionary argument: its keys are
+        archive-entry paths, and its values are absolute filesystem paths
+        where the contents of the corresponding archive entries can be found. The
+        modifier is free to change the contents of the files pointed to, add
+        new entries and remove entries, before returning. This method will
+        extract the entire contents of the wheel to a temporary location, call
+        the modifier, and then use the passed (and possibly updated)
+        dictionary to write a new wheel. If ``dest_dir`` is specified, the new
+        wheel is written there -- otherwise, the original wheel is overwritten.
+
+        The modifier should return True if it updated the wheel, else False.
+        This method returns the same value the modifier returns.
+        """
+
+        def get_version(path_map, info_dir):
+            version = path = None
+            key = '%s/%s' % (info_dir, METADATA_FILENAME)
+            if key not in path_map:
+                key = '%s/PKG-INFO' % info_dir
+            if key in path_map:
+                path = path_map[key]
+                version = Metadata(path=path).version
+            return version, path
+
+        def update_version(version, path):
+            updated = None
+            try:
+                v = NormalizedVersion(version)
+                i = version.find('-')
+                if i < 0:
+                    updated = '%s+1' % version
+                else:
+                    parts = [int(s) for s in version[i + 1:].split('.')]
+                    parts[-1] += 1
+                    updated = '%s+%s' % (version[:i],
+                                         '.'.join(str(i) for i in parts))
+            except UnsupportedVersionError:
+                logger.debug('Cannot update non-compliant (PEP-440) '
+                             'version %r', version)
+            if updated:
+                md = Metadata(path=path)
+                md.version = updated
+                legacy = not path.endswith(METADATA_FILENAME)
+                md.write(path=path, legacy=legacy)
+                logger.debug('Version updated from %r to %r', version,
+                             updated)
+
+        pathname = os.path.join(self.dirname, self.filename)
+        name_ver = '%s-%s' % (self.name, self.version)
+        info_dir = '%s.dist-info' % name_ver
+        record_name = posixpath.join(info_dir, 'RECORD')
+        with tempdir() as workdir:
+            with ZipFile(pathname, 'r') as zf:
+                path_map = {}
+                for zinfo in zf.infolist():
+                    arcname = zinfo.filename
+                    if isinstance(arcname, text_type):
+                        u_arcname = arcname
+                    else:
+                        u_arcname = arcname.decode('utf-8')
+                    if u_arcname == record_name:
+                        continue
+                    if '..' in u_arcname:
+                        raise DistlibException('invalid entry in '
+                                               'wheel: %r' % u_arcname)
+                    zf.extract(zinfo, workdir)
+                    path = os.path.join(workdir, convert_path(u_arcname))
+                    path_map[u_arcname] = path
+
+            # Remember the version.
+            original_version, _ = get_version(path_map, info_dir)
+            # Files extracted. Call the modifier.
+            modified = modifier(path_map, **kwargs)
+            if modified:
+                # Something changed - need to build a new wheel.
+                current_version, path = get_version(path_map, info_dir)
+                if current_version and (current_version == original_version):
+                    # Add or update local version to signify changes.
+                    update_version(current_version, path)
+                # Decide where the new wheel goes.
+                if dest_dir is None:
+                    fd, newpath = tempfile.mkstemp(suffix='.whl',
+                                                   prefix='wheel-update-',
+                                                   dir=workdir)
+                    os.close(fd)
+                else:
+                    if not os.path.isdir(dest_dir):
+                        raise DistlibException('Not a directory: %r' % dest_dir)
+                    newpath = os.path.join(dest_dir, self.filename)
+                archive_paths = list(path_map.items())
+                distinfo = os.path.join(workdir, info_dir)
+                info = distinfo, info_dir
+                self.write_records(info, workdir, archive_paths)
+                self.build_zip(newpath, archive_paths)
+                if dest_dir is None:
+                    shutil.copyfile(newpath, pathname)
+        return modified
+
+def compatible_tags():
+    """
+    Return (pyver, abi, arch) tuples compatible with this Python.
+    """
+    versions = [VER_SUFFIX]
+    major = VER_SUFFIX[0]
+    for minor in range(sys.version_info[1] - 1, -1, -1):
+        versions.append(''.join([major, str(minor)]))
+
+    abis = []
+    for suffix, _, _ in imp.get_suffixes():
+        if suffix.startswith('.abi'):
+            abis.append(suffix.split('.', 2)[1])
+    abis.sort()
+    if ABI != 'none':
+        abis.insert(0, ABI)
+    abis.append('none')
+    result = []
+
+    arches = [ARCH]
+    if sys.platform == 'darwin':
+        m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
+        if m:
+            name, major, minor, arch = m.groups()
+            minor = int(minor)
+            matches = [arch]
+            if arch in ('i386', 'ppc'):
+                matches.append('fat')
+            if arch in ('i386', 'ppc', 'x86_64'):
+                matches.append('fat3')
+            if arch in ('ppc64', 'x86_64'):
+                matches.append('fat64')
+            if arch in ('i386', 'x86_64'):
+                matches.append('intel')
+            if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
+                matches.append('universal')
+            while minor >= 0:
+                for match in matches:
+                    s = '%s_%s_%s_%s' % (name, major, minor, match)
+                    if s != ARCH:   # already there
+                        arches.append(s)
+                minor -= 1
+
+    # Most specific - our Python version, ABI and arch
+    for abi in abis:
+        for arch in arches:
+            result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
+
+    # where no ABI / arch dependency, but IMP_PREFIX dependency
+    for i, version in enumerate(versions):
+        result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
+        if i == 0:
+            result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
+
+    # no IMP_PREFIX, ABI or arch dependency
+    for i, version in enumerate(versions):
+        result.append((''.join(('py', version)), 'none', 'any'))
+        if i == 0:
+            result.append((''.join(('py', version[0])), 'none', 'any'))
+    return set(result)
+
+
+COMPATIBLE_TAGS = compatible_tags()
+
+del compatible_tags
+
+
+def is_compatible(wheel, tags=None):
+    if not isinstance(wheel, Wheel):
+        wheel = Wheel(wheel)    # assume it's a filename
+    result = False
+    if tags is None:
+        tags = COMPATIBLE_TAGS
+    for ver, abi, arch in tags:
+        if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
+            result = True
+            break
+    return result
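+
+
+# Illustrative check (hypothetical filename): a pure-Python wheel tagged
+# py2.py3-none-any should match one of the ('py*', 'none', 'any') tags.
+#   is_compatible('foo-1.0-py2.py3-none-any.whl')  # -> True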
diff --git a/lib/python3.7/site-packages/pip/_vendor/distro.py b/lib/python3.7/site-packages/pip/_vendor/distro.py
new file mode 100644
index 0000000000000000000000000000000000000000..33061633efb6b03f6ca845ec5b0e69c90c25fdb8
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/distro.py
@@ -0,0 +1,1216 @@
+# Copyright 2015,2016,2017 Nir Cohen
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+The ``distro`` package (``distro`` stands for Linux Distribution) provides
+information about the Linux distribution it runs on, such as a reliable
+machine-readable distro ID, or version information.
+
+It is the recommended replacement for Python's original
+:py:func:`platform.linux_distribution` function, but it provides much more
+functionality. An alternative implementation became necessary because Python
+3.5 deprecated this function, and Python 3.8 will remove it altogether.
+Its predecessor function :py:func:`platform.dist` has been deprecated since
+Python 2.6 and will also be removed in Python 3.8.
+Still, there are many cases in which access to OS distribution information
+is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
+more information.
+"""
+
+import os
+import re
+import sys
+import json
+import shlex
+import logging
+import argparse
+import subprocess
+
+
+_UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc')
+_OS_RELEASE_BASENAME = 'os-release'
+
+#: Translation table for normalizing the "ID" attribute defined in os-release
+#: files, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as defined in the os-release file, translated to lower case,
+#:   with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_OS_ID = {
+    'ol': 'oracle',  # Oracle Enterprise Linux
+}
+
+#: Translation table for normalizing the "Distributor ID" attribute returned by
+#: the lsb_release command, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as returned by the lsb_release command, translated to lower
+#:   case, with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_LSB_ID = {
+    'enterpriseenterprise': 'oracle',  # Oracle Enterprise Linux
+    'redhatenterpriseworkstation': 'rhel',  # RHEL 6, 7 Workstation
+    'redhatenterpriseserver': 'rhel',  # RHEL 6, 7 Server
+}
+
+#: Translation table for normalizing the distro ID derived from the file name
+#: of distro release files, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as derived from the file name of a distro release file,
+#:   translated to lower case, with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_DISTRO_ID = {
+    'redhat': 'rhel',  # RHEL 6.x, 7.x
+}
+
+# Pattern for content of distro release file (reversed)
+_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
+    r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')
+
+# Pattern for base file name of distro release file
+_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
+    r'(\w+)[-_](release|version)$')
+
+# Base file names to be ignored when searching for distro release file
+_DISTRO_RELEASE_IGNORE_BASENAMES = (
+    'debian_version',
+    'lsb-release',
+    'oem-release',
+    _OS_RELEASE_BASENAME,
+    'system-release'
+)
+
+
+def linux_distribution(full_distribution_name=True):
+    """
+    Return information about the current OS distribution as a tuple
+    ``(id_name, version, codename)`` with items as follows:
+
+    * ``id_name``:  If *full_distribution_name* is false, the result of
+      :func:`distro.id`. Otherwise, the result of :func:`distro.name`.
+
+    * ``version``:  The result of :func:`distro.version`.
+
+    * ``codename``:  The result of :func:`distro.codename`.
+
+    The interface of this function is compatible with the original
+    :py:func:`platform.linux_distribution` function, supporting a subset of
+    its parameters.
+
+    The data it returns may not be exactly the same, because it uses more data
+    sources than the original function, and that may lead to different data if
+    the OS distribution is not consistent across the multiple data sources it
+    provides (there are indeed such distributions ...).
+
+    Another reason for differences is the fact that the :func:`distro.id`
+    method normalizes the distro ID string to a reliable machine-readable value
+    for a number of popular OS distributions.
+    """
+    return _distro.linux_distribution(full_distribution_name)
+
+
+def id():
+    """
+    Return the distro ID of the current distribution, as a
+    machine-readable string.
+
+    For a number of OS distributions, the returned distro ID value is
+    *reliable*, in the sense that it is documented and that it does not change
+    across releases of the distribution.
+
+    This package maintains the following reliable distro ID values:
+
+    ==============  =========================================
+    Distro ID       Distribution
+    ==============  =========================================
+    "ubuntu"        Ubuntu
+    "debian"        Debian
+    "rhel"          RedHat Enterprise Linux
+    "centos"        CentOS
+    "fedora"        Fedora
+    "sles"          SUSE Linux Enterprise Server
+    "opensuse"      openSUSE
+    "amazon"        Amazon Linux
+    "arch"          Arch Linux
+    "cloudlinux"    CloudLinux OS
+    "exherbo"       Exherbo Linux
+    "gentoo"        GenToo Linux
+    "ibm_powerkvm"  IBM PowerKVM
+    "kvmibm"        KVM for IBM z Systems
+    "linuxmint"     Linux Mint
+    "mageia"        Mageia
+    "mandriva"      Mandriva Linux
+    "parallels"     Parallels
+    "pidora"        Pidora
+    "raspbian"      Raspbian
+    "oracle"        Oracle Linux (and Oracle Enterprise Linux)
+    "scientific"    Scientific Linux
+    "slackware"     Slackware
+    "xenserver"     XenServer
+    "openbsd"       OpenBSD
+    "netbsd"        NetBSD
+    "freebsd"       FreeBSD
+    ==============  =========================================
+
+    If you have a need to get distros for reliable IDs added into this set,
+    or if you find that the :func:`distro.id` function returns a different
+    distro ID for one of the listed distros, please create an issue in the
+    `distro issue tracker`_.
+
+    **Lookup hierarchy and transformations:**
+
+    First, the ID is obtained from the following sources, in the specified
+    order. The first available and non-empty value is used:
+
+    * the value of the "ID" attribute of the os-release file,
+
+    * the value of the "Distributor ID" attribute returned by the lsb_release
+      command,
+
+    * the first part of the file name of the distro release file.
+
+    The ID value determined in this way then undergoes the following
+    transformations before it is returned by this method:
+
+    * it is translated to lower case,
+
+    * blanks (which should not be there anyway) are translated to underscores,
+
+    * a normalization of the ID is performed, based upon
+      `normalization tables`_. The purpose of this normalization is to ensure
+      that the ID is as reliable as possible, even across incompatible changes
+      in the OS distributions. A common reason for an incompatible change is
+      the addition of an os-release file, or the addition of the lsb_release
+      command, with ID values that differ from what was previously determined
+      from the distro release file name.
+    """
+    return _distro.id()
+
+
+def name(pretty=False):
+    """
+    Return the name of the current OS distribution, as a human-readable
+    string.
+
+    If *pretty* is false, the name is returned without version or codename.
+    (e.g. "CentOS Linux")
+
+    If *pretty* is true, the version and codename are appended.
+    (e.g. "CentOS Linux 7.1.1503 (Core)")
+
+    **Lookup hierarchy:**
+
+    The name is obtained from the following sources, in the specified order.
+    The first available and non-empty value is used:
+
+    * If *pretty* is false:
+
+      - the value of the "NAME" attribute of the os-release file,
+
+      - the value of the "Distributor ID" attribute returned by the lsb_release
+        command,
+
+      - the value of the "<name>" field of the distro release file.
+
+    * If *pretty* is true:
+
+      - the value of the "PRETTY_NAME" attribute of the os-release file,
+
+      - the value of the "Description" attribute returned by the lsb_release
+        command,
+
+      - the value of the "<name>" field of the distro release file, appended
+        with the value of the pretty version ("<version_id>" and "<codename>"
+        fields) of the distro release file, if available.
+    """
+    return _distro.name(pretty)
+
+
+def version(pretty=False, best=False):
+    """
+    Return the version of the current OS distribution, as a human-readable
+    string.
+
+    If *pretty* is false, the version is returned without codename (e.g.
+    "7.0").
+
+    If *pretty* is true, the codename in parenthesis is appended, if the
+    codename is non-empty (e.g. "7.0 (Maipo)").
+
+    Some distributions provide version numbers with different precisions in
+    the different sources of distribution information. Examining the different
+    sources in a fixed priority order does not always yield the most precise
+    version (e.g. for Debian 8.2, or CentOS 7.1).
+
+    The *best* parameter can be used to control the approach for the returned
+    version:
+
+    If *best* is false, the first non-empty version number in priority order of
+    the examined sources is returned.
+
+    If *best* is true, the most precise version number out of all examined
+    sources is returned.
+
+    **Lookup hierarchy:**
+
+    In all cases, the version number is obtained from the following sources.
+    If *best* is false, this order represents the priority order:
+
+    * the value of the "VERSION_ID" attribute of the os-release file,
+    * the value of the "Release" attribute returned by the lsb_release
+      command,
+    * the version number parsed from the "<version_id>" field of the first line
+      of the distro release file,
+    * the version number parsed from the "PRETTY_NAME" attribute of the
+      os-release file, if it follows the format of the distro release files.
+    * the version number parsed from the "Description" attribute returned by
+      the lsb_release command, if it follows the format of the distro release
+      files.
+    """
+    return _distro.version(pretty, best)
+
+
+def version_parts(best=False):
+    """
+    Return the version of the current OS distribution as a tuple
+    ``(major, minor, build_number)`` with items as follows:
+
+    * ``major``:  The result of :func:`distro.major_version`.
+
+    * ``minor``:  The result of :func:`distro.minor_version`.
+
+    * ``build_number``:  The result of :func:`distro.build_number`.
+
+    For a description of the *best* parameter, see the :func:`distro.version`
+    method.
+    """
+    return _distro.version_parts(best)
+
+
+def major_version(best=False):
+    """
+    Return the major version of the current OS distribution, as a string,
+    if provided.
+    Otherwise, the empty string is returned. The major version is the first
+    part of the dot-separated version string.
+
+    For a description of the *best* parameter, see the :func:`distro.version`
+    method.
+    """
+    return _distro.major_version(best)
+
+
+def minor_version(best=False):
+    """
+    Return the minor version of the current OS distribution, as a string,
+    if provided.
+    Otherwise, the empty string is returned. The minor version is the second
+    part of the dot-separated version string.
+
+    For a description of the *best* parameter, see the :func:`distro.version`
+    method.
+    """
+    return _distro.minor_version(best)
+
+
+def build_number(best=False):
+    """
+    Return the build number of the current OS distribution, as a string,
+    if provided.
+    Otherwise, the empty string is returned. The build number is the third part
+    of the dot-separated version string.
+
+    For a description of the *best* parameter, see the :func:`distro.version`
+    method.
+    """
+    return _distro.build_number(best)
+
+
+def like():
+    """
+    Return a space-separated list of distro IDs of distributions that are
+    closely related to the current OS distribution with regard to packaging
+    and programming interfaces, for example distributions the current
+    distribution is a derivative of.
+
+    **Lookup hierarchy:**
+
+    This information item is only provided by the os-release file.
+    For details, see the description of the "ID_LIKE" attribute in the
+    `os-release man page
+    <http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
+    """
+    return _distro.like()
+
+
+def codename():
+    """
+    Return the codename for the release of the current OS distribution,
+    as a string.
+
+    If the distribution does not have a codename, an empty string is returned.
+
+    Note that the returned codename is not always really a codename. For
+    example, openSUSE returns "x86_64". This function does not handle such
+    cases in any special way and just returns the string it finds, if any.
+
+    **Lookup hierarchy:**
+
+    * the codename within the "VERSION" attribute of the os-release file, if
+      provided,
+
+    * the value of the "Codename" attribute returned by the lsb_release
+      command,
+
+    * the value of the "<codename>" field of the distro release file.
+    """
+    return _distro.codename()
+
+
+def info(pretty=False, best=False):
+    """
+    Return certain machine-readable information items about the current OS
+    distribution in a dictionary, as shown in the following example:
+
+    .. sourcecode:: python
+
+        {
+            'id': 'rhel',
+            'version': '7.0',
+            'version_parts': {
+                'major': '7',
+                'minor': '0',
+                'build_number': ''
+            },
+            'like': 'fedora',
+            'codename': 'Maipo'
+        }
+
+    The dictionary structure and keys are always the same, regardless of which
+    information items are available in the underlying data sources. The values
+    for the various keys are as follows:
+
+    * ``id``:  The result of :func:`distro.id`.
+
+    * ``version``:  The result of :func:`distro.version`.
+
+    * ``version_parts -> major``:  The result of :func:`distro.major_version`.
+
+    * ``version_parts -> minor``:  The result of :func:`distro.minor_version`.
+
+    * ``version_parts -> build_number``:  The result of
+      :func:`distro.build_number`.
+
+    * ``like``:  The result of :func:`distro.like`.
+
+    * ``codename``:  The result of :func:`distro.codename`.
+
+    For a description of the *pretty* and *best* parameters, see the
+    :func:`distro.version` method.
+    """
+    return _distro.info(pretty, best)
+
+
+def os_release_info():
+    """
+    Return a dictionary containing key-value pairs for the information items
+    from the os-release file data source of the current OS distribution.
+
+    See `os-release file`_ for details about these information items.
+    """
+    return _distro.os_release_info()
+
+
+def lsb_release_info():
+    """
+    Return a dictionary containing key-value pairs for the information items
+    from the lsb_release command data source of the current OS distribution.
+
+    See `lsb_release command output`_ for details about these information
+    items.
+    """
+    return _distro.lsb_release_info()
+
+
+def distro_release_info():
+    """
+    Return a dictionary containing key-value pairs for the information items
+    from the distro release file data source of the current OS distribution.
+
+    See `distro release file`_ for details about these information items.
+    """
+    return _distro.distro_release_info()
+
+
+def uname_info():
+    """
+    Return a dictionary containing key-value pairs for the information items
+    from the uname command data source of the current OS distribution.
+    """
+    return _distro.uname_info()
+
+
+def os_release_attr(attribute):
+    """
+    Return a single named information item from the os-release file data source
+    of the current OS distribution.
+
+    Parameters:
+
+    * ``attribute`` (string): Key of the information item.
+
+    Returns:
+
+    * (string): Value of the information item, if the item exists.
+      The empty string, if the item does not exist.
+
+    See `os-release file`_ for details about these information items.
+    """
+    return _distro.os_release_attr(attribute)
+
+
+def lsb_release_attr(attribute):
+    """
+    Return a single named information item from the lsb_release command output
+    data source of the current OS distribution.
+
+    Parameters:
+
+    * ``attribute`` (string): Key of the information item.
+
+    Returns:
+
+    * (string): Value of the information item, if the item exists.
+      The empty string, if the item does not exist.
+
+    See `lsb_release command output`_ for details about these information
+    items.
+    """
+    return _distro.lsb_release_attr(attribute)
+
+
+def distro_release_attr(attribute):
+    """
+    Return a single named information item from the distro release file
+    data source of the current OS distribution.
+
+    Parameters:
+
+    * ``attribute`` (string): Key of the information item.
+
+    Returns:
+
+    * (string): Value of the information item, if the item exists.
+      The empty string, if the item does not exist.
+
+    See `distro release file`_ for details about these information items.
+    """
+    return _distro.distro_release_attr(attribute)
+
+
+def uname_attr(attribute):
+    """
+    Return a single named information item from the uname command output
+    data source of the current OS distribution.
+
+    Parameters:
+
+    * ``attribute`` (string): Key of the information item.
+
+    Returns:
+
+    * (string): Value of the information item, if the item exists.
+      The empty string, if the item does not exist.
+    """
+    return _distro.uname_attr(attribute)
+
+
+class cached_property(object):
+    """A version of @property which caches the value.  On access, it calls the
+    underlying function and sets the value in `__dict__` so future accesses
+    will not re-call the property.
+    """
+    def __init__(self, f):
+        self._fname = f.__name__
+        self._f = f
+
+    def __get__(self, obj, owner):
+        assert obj is not None, 'call {} on an instance'.format(self._fname)
+        ret = obj.__dict__[self._fname] = self._f(obj)
+        return ret
+
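+# Minimal usage sketch for cached_property (illustrative; ``expensive`` is
+# a hypothetical stand-in for any costly computation):
+#
+#   class Probe(object):
+#       @cached_property
+#       def data(self):
+#           return expensive()   # runs once; later reads hit __dict__
+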
+
+class LinuxDistribution(object):
+    """
+    Provides information about an OS distribution.
+
+    This package creates a private module-global instance of this class with
+    default initialization arguments, which is used by the
+    `consolidated accessor functions`_ and `single source accessor functions`_.
+    By using default initialization arguments, that module-global instance
+    returns data about the current OS distribution (i.e. the distro this
+    package runs on).
+
+    Normally, it is not necessary to create additional instances of this class.
+    However, in situations where control is needed over the exact data sources
+    that are used, instances of this class can be created with a specific
+    distro release file, or a specific os-release file, or without invoking the
+    lsb_release command.
+    """
+
+    def __init__(self,
+                 include_lsb=True,
+                 os_release_file='',
+                 distro_release_file='',
+                 include_uname=True):
+        """
+        The initialization method of this class gathers information from the
+        available data sources, and stores that in private instance attributes.
+        Subsequent access to the information items uses these private instance
+        attributes, so that the data sources are read only once.
+
+        Parameters:
+
+        * ``include_lsb`` (bool): Controls whether the
+          `lsb_release command output`_ is included as a data source.
+
+          If the lsb_release command is not available in the program execution
+          path, the data source for the lsb_release command will be empty.
+
+        * ``os_release_file`` (string): The path name of the
+          `os-release file`_ that is to be used as a data source.
+
+          An empty string (the default) will cause the default path name to
+          be used (see `os-release file`_ for details).
+
+          If the specified or defaulted os-release file does not exist, the
+          data source for the os-release file will be empty.
+
+        * ``distro_release_file`` (string): The path name of the
+          `distro release file`_ that is to be used as a data source.
+
+          An empty string (the default) will cause a default search algorithm
+          to be used (see `distro release file`_ for details).
+
+          If the specified distro release file does not exist, or if no default
+          distro release file can be found, the data source for the distro
+          release file will be empty.
+
+        * ``include_uname`` (bool): Controls whether uname command output is
+          included as a data source. If the uname command is not available in
+          the program execution path, the data source for the uname command
+          will be empty.
+
+        Public instance attributes:
+
+        * ``os_release_file`` (string): The path name of the
+          `os-release file`_ that is actually used as a data source. The
+          empty string if no os-release file is used as a data source.
+
+        * ``distro_release_file`` (string): The path name of the
+          `distro release file`_ that is actually used as a data source. The
+          empty string if no distro release file is used as a data source.
+
+        * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
+          This controls whether the lsb information will be loaded.
+
+        * ``include_uname`` (bool): The result of the ``include_uname``
+          parameter. This controls whether the uname information will
+          be loaded.
+
+        Raises:
+
+        * :py:exc:`IOError`: Some I/O issue with an os-release file or distro
+          release file.
+
+        * :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
+          some issue (other than not being available in the program execution
+          path).
+
+        * :py:exc:`UnicodeError`: A data source has unexpected characters or
+          uses an unexpected encoding.
+        """
+        self.os_release_file = os_release_file or \
+            os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
+        self.distro_release_file = distro_release_file or ''  # updated later
+        self.include_lsb = include_lsb
+        self.include_uname = include_uname
+
+    def __repr__(self):
+        """Return repr of all info
+        """
+        return \
+            "LinuxDistribution(" \
+            "os_release_file={self.os_release_file!r}, " \
+            "distro_release_file={self.distro_release_file!r}, " \
+            "include_lsb={self.include_lsb!r}, " \
+            "include_uname={self.include_uname!r}, " \
+            "_os_release_info={self._os_release_info!r}, " \
+            "_lsb_release_info={self._lsb_release_info!r}, " \
+            "_distro_release_info={self._distro_release_info!r}, " \
+            "_uname_info={self._uname_info!r})".format(
+                self=self)
+
+    def linux_distribution(self, full_distribution_name=True):
+        """
+        Return information about the OS distribution that is compatible
+        with Python's :func:`platform.linux_distribution`, supporting a subset
+        of its parameters.
+
+        For details, see :func:`distro.linux_distribution`.
+        """
+        return (
+            self.name() if full_distribution_name else self.id(),
+            self.version(),
+            self.codename()
+        )
+
+    def id(self):
+        """Return the distro ID of the OS distribution, as a string.
+
+        For details, see :func:`distro.id`.
+        """
+        def normalize(distro_id, table):
+            distro_id = distro_id.lower().replace(' ', '_')
+            return table.get(distro_id, distro_id)
+
+        distro_id = self.os_release_attr('id')
+        if distro_id:
+            return normalize(distro_id, NORMALIZED_OS_ID)
+
+        distro_id = self.lsb_release_attr('distributor_id')
+        if distro_id:
+            return normalize(distro_id, NORMALIZED_LSB_ID)
+
+        distro_id = self.distro_release_attr('id')
+        if distro_id:
+            return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+        distro_id = self.uname_attr('id')
+        if distro_id:
+            return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+        return ''
+
+    def name(self, pretty=False):
+        """
+        Return the name of the OS distribution, as a string.
+
+        For details, see :func:`distro.name`.
+        """
+        name = self.os_release_attr('name') \
+            or self.lsb_release_attr('distributor_id') \
+            or self.distro_release_attr('name') \
+            or self.uname_attr('name')
+        if pretty:
+            name = self.os_release_attr('pretty_name') \
+                or self.lsb_release_attr('description')
+            if not name:
+                name = self.distro_release_attr('name') \
+                       or self.uname_attr('name')
+                version = self.version(pretty=True)
+                if version:
+                    name = name + ' ' + version
+        return name or ''
+
+    def version(self, pretty=False, best=False):
+        """
+        Return the version of the OS distribution, as a string.
+
+        For details, see :func:`distro.version`.
+        """
+        versions = [
+            self.os_release_attr('version_id'),
+            self.lsb_release_attr('release'),
+            self.distro_release_attr('version_id'),
+            self._parse_distro_release_content(
+                self.os_release_attr('pretty_name')).get('version_id', ''),
+            self._parse_distro_release_content(
+                self.lsb_release_attr('description')).get('version_id', ''),
+            self.uname_attr('release')
+        ]
+        version = ''
+        if best:
+            # This algorithm uses the last version in priority order that has
+            # the best precision. If the versions are not in conflict, that
+            # does not matter; otherwise, using the last one instead of the
+            # first one might be considered a surprise.
+            for v in versions:
+                if v.count(".") > version.count(".") or version == '':
+                    version = v
+        else:
+            for v in versions:
+                if v != '':
+                    version = v
+                    break
+        if pretty and version and self.codename():
+            version = u'{0} ({1})'.format(version, self.codename())
+        return version
+
+    def version_parts(self, best=False):
+        """
+        Return the version of the OS distribution, as a tuple of version
+        numbers.
+
+        For details, see :func:`distro.version_parts`.
+        """
+        version_str = self.version(best=best)
+        if version_str:
+            version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
+            matches = version_regex.match(version_str)
+            if matches:
+                major, minor, build_number = matches.groups()
+                return major, minor or '', build_number or ''
+        return '', '', ''
+
+    def major_version(self, best=False):
+        """
+        Return the major version number of the current distribution.
+
+        For details, see :func:`distro.major_version`.
+        """
+        return self.version_parts(best)[0]
+
+    def minor_version(self, best=False):
+        """
+        Return the minor version number of the current distribution.
+
+        For details, see :func:`distro.minor_version`.
+        """
+        return self.version_parts(best)[1]
+
+    def build_number(self, best=False):
+        """
+        Return the build number of the current distribution.
+
+        For details, see :func:`distro.build_number`.
+        """
+        return self.version_parts(best)[2]
+
+    def like(self):
+        """
+        Return the IDs of distributions that are like the OS distribution.
+
+        For details, see :func:`distro.like`.
+        """
+        return self.os_release_attr('id_like') or ''
+
+    def codename(self):
+        """
+        Return the codename of the OS distribution.
+
+        For details, see :func:`distro.codename`.
+        """
+        try:
+            # Handle os_release specially since distros might purposefully set
+            # this to empty string to have no codename
+            return self._os_release_info['codename']
+        except KeyError:
+            return self.lsb_release_attr('codename') \
+                or self.distro_release_attr('codename') \
+                or ''
+
+    def info(self, pretty=False, best=False):
+        """
+        Return certain machine-readable information about the OS
+        distribution.
+
+        For details, see :func:`distro.info`.
+        """
+        return dict(
+            id=self.id(),
+            version=self.version(pretty, best),
+            version_parts=dict(
+                major=self.major_version(best),
+                minor=self.minor_version(best),
+                build_number=self.build_number(best)
+            ),
+            like=self.like(),
+            codename=self.codename(),
+        )
+
+    def os_release_info(self):
+        """
+        Return a dictionary containing key-value pairs for the information
+        items from the os-release file data source of the OS distribution.
+
+        For details, see :func:`distro.os_release_info`.
+        """
+        return self._os_release_info
+
+    def lsb_release_info(self):
+        """
+        Return a dictionary containing key-value pairs for the information
+        items from the lsb_release command data source of the OS
+        distribution.
+
+        For details, see :func:`distro.lsb_release_info`.
+        """
+        return self._lsb_release_info
+
+    def distro_release_info(self):
+        """
+        Return a dictionary containing key-value pairs for the information
+        items from the distro release file data source of the OS
+        distribution.
+
+        For details, see :func:`distro.distro_release_info`.
+        """
+        return self._distro_release_info
+
+    def uname_info(self):
+        """
+        Return a dictionary containing key-value pairs for the information
+        items from the uname command data source of the OS distribution.
+
+        For details, see :func:`distro.uname_info`.
+        """
+        return self._uname_info
+
+    def os_release_attr(self, attribute):
+        """
+        Return a single named information item from the os-release file data
+        source of the OS distribution.
+
+        For details, see :func:`distro.os_release_attr`.
+        """
+        return self._os_release_info.get(attribute, '')
+
+    def lsb_release_attr(self, attribute):
+        """
+        Return a single named information item from the lsb_release command
+        output data source of the OS distribution.
+
+        For details, see :func:`distro.lsb_release_attr`.
+        """
+        return self._lsb_release_info.get(attribute, '')
+
+    def distro_release_attr(self, attribute):
+        """
+        Return a single named information item from the distro release file
+        data source of the OS distribution.
+
+        For details, see :func:`distro.distro_release_attr`.
+        """
+        return self._distro_release_info.get(attribute, '')
+
+    def uname_attr(self, attribute):
+        """
+        Return a single named information item from the uname command
+        output data source of the OS distribution.
+
+        For details, see :func:`distro.uname_attr`.
+        """
+        return self._uname_info.get(attribute, '')
+
+    @cached_property
+    def _os_release_info(self):
+        """
+        Get the information items from the specified os-release file.
+
+        Returns:
+            A dictionary containing all information items.
+        """
+        if os.path.isfile(self.os_release_file):
+            with open(self.os_release_file) as release_file:
+                return self._parse_os_release_content(release_file)
+        return {}
+
+    @staticmethod
+    def _parse_os_release_content(lines):
+        """
+        Parse the lines of an os-release file.
+
+        Parameters:
+
+        * lines: Iterable through the lines in the os-release file.
+                 Each line must be a unicode string or a UTF-8 encoded byte
+                 string.
+
+        Returns:
+            A dictionary containing all information items.
+        """
+        props = {}
+        lexer = shlex.shlex(lines, posix=True)
+        lexer.whitespace_split = True
+
+        # The shlex module defines its `wordchars` variable using literals,
+        # making it dependent on the encoding of the Python source file.
+        # In Python 2.6 and 2.7, the shlex source file is encoded in
+        # 'iso-8859-1', and the `wordchars` variable is defined as a byte
+        # string. This causes a UnicodeDecodeError to be raised when the
+        # parsed content is a unicode object. The following fix resolves that
+        # (... but it should be fixed in shlex...):
+        if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
+            lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
+
+        tokens = list(lexer)
+        for token in tokens:
+            # At this point, all shell-like parsing has been done (i.e.
+            # comments processed, quotes and backslash escape sequences
+            # processed, multi-line values assembled, trailing newlines
+            # stripped, etc.), so the tokens are now either:
+            # * variable assignments: var=value
+            # * commands or their arguments (not allowed in os-release)
+            if '=' in token:
+                k, v = token.split('=', 1)
+                if isinstance(v, bytes):
+                    v = v.decode('utf-8')
+                props[k.lower()] = v
+            else:
+                # Ignore any tokens that are not variable assignments
+                pass
+
+        if 'version_codename' in props:
+            # os-release added a version_codename field.  Use that in
+            # preference to anything else. Note that some distros purposefully
+            # do not have code names.  They should be setting
+            # version_codename="".
+            props['codename'] = props['version_codename']
+        elif 'ubuntu_codename' in props:
+            # Same as above but a non-standard field name used on older Ubuntus
+            props['codename'] = props['ubuntu_codename']
+        elif 'version' in props:
+            # If there is no version_codename, parse it from the version.
+            # The codename typically appears within parentheses, or after
+            # a comma, in the version string.
+            codename = re.search(r'(\(\D+\))|,(\s+)?\D+', props['version'])
+            if codename:
+                codename = codename.group()
+                codename = codename.strip('()')
+                codename = codename.strip(',')
+                codename = codename.strip()
+                props['codename'] = codename
+
+        return props
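+
+    # Illustrative sketch (assumes typical os-release content; not from
+    # the upstream docs): keys come back lower-cased, with a codename
+    # derived from the version when none is declared:
+    #
+    #     >>> import io
+    #     >>> LinuxDistribution._parse_os_release_content(io.StringIO(
+    #     ...     'NAME="Ubuntu"\nVERSION="18.04.3 LTS (Bionic Beaver)"\n'))
+    #     {'name': 'Ubuntu', 'version': '18.04.3 LTS (Bionic Beaver)', 'codename': 'Bionic Beaver'}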
+
+    @cached_property
+    def _lsb_release_info(self):
+        """
+        Get the information items from the lsb_release command output.
+
+        Returns:
+            A dictionary containing all information items.
+        """
+        if not self.include_lsb:
+            return {}
+        with open(os.devnull, 'w') as devnull:
+            try:
+                cmd = ('lsb_release', '-a')
+                stdout = subprocess.check_output(cmd, stderr=devnull)
+            except OSError:  # Command not found
+                return {}
+        content = stdout.decode(sys.getfilesystemencoding()).splitlines()
+        return self._parse_lsb_release_content(content)
+
+    @staticmethod
+    def _parse_lsb_release_content(lines):
+        """
+        Parse the output of the lsb_release command.
+
+        Parameters:
+
+        * lines: Iterable through the lines of the lsb_release output.
+                 Each line must be a unicode string or a UTF-8 encoded byte
+                 string.
+
+        Returns:
+            A dictionary containing all information items.
+        """
+        props = {}
+        for line in lines:
+            kv = line.strip('\n').split(':', 1)
+            if len(kv) != 2:
+                # Ignore lines without colon.
+                continue
+            k, v = kv
+            props.update({k.replace(' ', '_').lower(): v.strip()})
+        return props
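+
+    # Illustrative sketch (hypothetical `lsb_release -a` lines; not from
+    # the upstream docs): keys are lower-cased with spaces replaced by
+    # underscores:
+    #
+    #     >>> LinuxDistribution._parse_lsb_release_content(
+    #     ...     ['Distributor ID:\tDebian\n', 'Release:\t10\n'])
+    #     {'distributor_id': 'Debian', 'release': '10'}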
+
+    @cached_property
+    def _uname_info(self):
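+        """
+        Get the information items from the uname command output.
+
+        Returns:
+            A dictionary containing all information items.
+        """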
+        with open(os.devnull, 'w') as devnull:
+            try:
+                cmd = ('uname', '-rs')
+                stdout = subprocess.check_output(cmd, stderr=devnull)
+            except OSError:
+                return {}
+        content = stdout.decode(sys.getfilesystemencoding()).splitlines()
+        return self._parse_uname_content(content)
+
+    @staticmethod
+    def _parse_uname_content(lines):
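+        """
+        Parse the first line of the uname command output.
+
+        Parameters:
+
+        * lines: Iterable through the lines of the uname output.
+
+        Returns:
+            A dictionary containing all information items.
+        """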
+        props = {}
+        match = re.search(r'^([^\s]+)\s+([\d\.]+)', lines[0].strip())
+        if match:
+            name, version = match.groups()
+
+            # This is to prevent the Linux kernel version from
+            # appearing as the 'best' version on otherwise
+            # identifiable distributions.
+            if name == 'Linux':
+                return {}
+            props['id'] = name.lower()
+            props['name'] = name
+            props['release'] = version
+        return props
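+
+    # Illustrative sketch (hypothetical uname output; not from the
+    # upstream docs): a bare Linux kernel is filtered out, while e.g.
+    # a BSD line is kept:
+    #
+    #     >>> LinuxDistribution._parse_uname_content(['FreeBSD 12.1'])
+    #     {'id': 'freebsd', 'name': 'FreeBSD', 'release': '12.1'}
+    #     >>> LinuxDistribution._parse_uname_content(['Linux 5.0.0'])
+    #     {}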
+
+    @cached_property
+    def _distro_release_info(self):
+        """
+        Get the information items from the specified distro release file.
+
+        Returns:
+            A dictionary containing all information items.
+        """
+        if self.distro_release_file:
+            # If it was specified, we use it and parse what we can, even if
+            # its file name or content does not match the expected pattern.
+            distro_info = self._parse_distro_release_file(
+                self.distro_release_file)
+            basename = os.path.basename(self.distro_release_file)
+            # The file name pattern for user-specified distro release files
+            # is somewhat more tolerant (compared to when searching for the
+            # file), because we want to use what was specified as best as
+            # possible.
+            match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+            if 'name' in distro_info \
+               and 'cloudlinux' in distro_info['name'].lower():
+                distro_info['id'] = 'cloudlinux'
+            elif match:
+                distro_info['id'] = match.group(1)
+            return distro_info
+        else:
+            try:
+                basenames = os.listdir(_UNIXCONFDIR)
+                # We sort for repeatability in cases where there are multiple
+                # distro specific files; e.g. CentOS, Oracle, Enterprise all
+                # containing `redhat-release` on top of their own.
+                basenames.sort()
+            except OSError:
+                # This may occur when /etc is not readable but we can't be
+                # sure about the *-release files. Check common entries of
+                # /etc for information. If they turn out to not be there the
+                # error is handled in `_parse_distro_release_file()`.
+                basenames = ['SuSE-release',
+                             'arch-release',
+                             'base-release',
+                             'centos-release',
+                             'fedora-release',
+                             'gentoo-release',
+                             'mageia-release',
+                             'mandrake-release',
+                             'mandriva-release',
+                             'mandrivalinux-release',
+                             'manjaro-release',
+                             'oracle-release',
+                             'redhat-release',
+                             'sl-release',
+                             'slackware-version']
+            for basename in basenames:
+                if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
+                    continue
+                match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+                if match:
+                    filepath = os.path.join(_UNIXCONFDIR, basename)
+                    distro_info = self._parse_distro_release_file(filepath)
+                    if 'name' in distro_info:
+                        # The name is always present if the pattern matches
+                        self.distro_release_file = filepath
+                        distro_info['id'] = match.group(1)
+                        if 'cloudlinux' in distro_info['name'].lower():
+                            distro_info['id'] = 'cloudlinux'
+                        return distro_info
+            return {}
+
+    def _parse_distro_release_file(self, filepath):
+        """
+        Parse a distro release file.
+
+        Parameters:
+
+        * filepath: Path name of the distro release file.
+
+        Returns:
+            A dictionary containing all information items.
+        """
+        try:
+            with open(filepath) as fp:
+                # Only parse the first line. For instance, on SLES there
+                # are multiple lines. We don't want them...
+                return self._parse_distro_release_content(fp.readline())
+        except (OSError, IOError):
+            # Ignore not being able to read a specific, seemingly version
+            # related file.
+            # See https://github.com/nir0s/distro/issues/162
+            return {}
+
+    @staticmethod
+    def _parse_distro_release_content(line):
+        """
+        Parse a line from a distro release file.
+
+        Parameters:
+        * line: Line from the distro release file. Must be a unicode string
+                or a UTF-8 encoded byte string.
+
+        Returns:
+            A dictionary containing all information items.
+        """
+        if isinstance(line, bytes):
+            line = line.decode('utf-8')
+        matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
+            line.strip()[::-1])
+        distro_info = {}
+        if matches:
+            # regexp ensures non-None
+            distro_info['name'] = matches.group(3)[::-1]
+            if matches.group(2):
+                distro_info['version_id'] = matches.group(2)[::-1]
+            if matches.group(1):
+                distro_info['codename'] = matches.group(1)[::-1]
+        elif line:
+            distro_info['name'] = line.strip()
+        return distro_info
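+
+    # Illustrative sketch (hypothetical release line; not from the
+    # upstream docs): the reversed-pattern match splits a classic
+    # "<name> release <version> (<codename>)" line into its parts:
+    #
+    #     >>> LinuxDistribution._parse_distro_release_content(
+    #     ...     'CentOS Linux release 7.7.1908 (Core)')
+    #     {'name': 'CentOS Linux', 'version_id': '7.7.1908', 'codename': 'Core'}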
+
+
+_distro = LinuxDistribution()
+
+
+def main():
+    logger = logging.getLogger(__name__)
+    logger.setLevel(logging.DEBUG)
+    logger.addHandler(logging.StreamHandler(sys.stdout))
+
+    parser = argparse.ArgumentParser(description="OS distro info tool")
+    parser.add_argument(
+        '--json',
+        '-j',
+        help="Output in machine readable format",
+        action="store_true")
+    args = parser.parse_args()
+
+    if args.json:
+        logger.info(json.dumps(info(), indent=4, sort_keys=True))
+    else:
+        logger.info('Name: %s', name(pretty=True))
+        distribution_version = version(pretty=True)
+        logger.info('Version: %s', distribution_version)
+        distribution_codename = codename()
+        logger.info('Codename: %s', distribution_codename)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/__init__.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..049123492e28952b521183e058b3a7ea301ac8d0
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/__init__.py
@@ -0,0 +1,35 @@
+"""
+HTML parsing library based on the `WHATWG HTML specification
+<https://whatwg.org/html>`_. The parser is designed to be compatible with
+existing HTML found in the wild and implements well-defined error recovery that
+is largely compatible with modern desktop web browsers.
+
+Example usage::
+
+    from pip._vendor import html5lib
+    with open("my_document.html", "rb") as f:
+        tree = html5lib.parse(f)
+
+For convenience, this module re-exports the following names:
+
+* :func:`~.html5parser.parse`
+* :func:`~.html5parser.parseFragment`
+* :class:`~.html5parser.HTMLParser`
+* :func:`~.treebuilders.getTreeBuilder`
+* :func:`~.treewalkers.getTreeWalker`
+* :func:`~.serializer.serialize`
+"""
+
+from __future__ import absolute_import, division, unicode_literals
+
+from .html5parser import HTMLParser, parse, parseFragment
+from .treebuilders import getTreeBuilder
+from .treewalkers import getTreeWalker
+from .serializer import serialize
+
+__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder",
+           "getTreeWalker", "serialize"]
+
+# this has to be at the top level, see how setup.py parses this
+#: Distribution version number.
+__version__ = "1.0.1"
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74a189a49661a18e14de6287006dc24e3becf443
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..089546b732cc70530b7fc91ea65a446b97b8a417
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/_ihatexml.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/_inputstream.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/_inputstream.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd37f18a2c2f9d66ddcda27c36b02dcbbe3023bd
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/_inputstream.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e358ed011a6f667aa85d6b7f17b3793f7c855e23
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/_tokenizer.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/_utils.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/_utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd2e8569f3636e0546ee0c7f5c5cbbb22126b5b3
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/_utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/constants.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/constants.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..60db7be2a869c0683db887b8e884e3ff8061928a
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/constants.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/html5parser.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/html5parser.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48576a816e783c39622721a4670f2c674ae25e87
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/html5parser.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/serializer.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/serializer.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b2340c6a5a413766732e2faa7abe2eab4a0f75d
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/__pycache__/serializer.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/_ihatexml.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/_ihatexml.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c77717bbc02c96907e1ce8c5bd36cc7bbed09cb
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/_ihatexml.py
@@ -0,0 +1,288 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import re
+import warnings
+
+from .constants import DataLossWarning
+
+baseChar = """
+[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
+[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
+[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
+[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
+[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
+[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
+[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
+[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
+[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
+[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
+[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
+[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
+[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
+[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
+[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
+[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
+[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
+[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
+[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
+[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
+[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
+[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
+[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
+[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
+[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
+[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
+[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
+[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
+[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
+[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
+#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
+#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
+#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
+[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
+[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
+#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
+[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
+[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
+[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
+[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
+[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
+#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
+[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
+[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
+[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
+[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
+
+ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
+
+combiningCharacter = """
+[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
+[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
+[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
+[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
+#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
+[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
+[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
+#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
+[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
+[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
+#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
+[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
+[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
+[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
+[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
+[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
+#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
+[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
+#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
+[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
+[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
+#x3099 | #x309A"""
+
+digit = """
+[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
+[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
+[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
+[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
+
+extender = """
+#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
+[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
+
+letter = " | ".join([baseChar, ideographic])
+
+# NameChar from the XML spec, minus the ":" (presumably because colons
+# are handled separately for namespacing)
+name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
+                   extender])
+nameFirst = " | ".join([letter, "_"])
+
+reChar = re.compile(r"#x([\d|A-F]{4,4})")
+reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
+
+
+def charStringToList(chars):
+    charRanges = [item.strip() for item in chars.split(" | ")]
+    rv = []
+    for item in charRanges:
+        foundMatch = False
+        for regexp in (reChar, reCharRange):
+            match = regexp.match(item)
+            if match is not None:
+                rv.append([hexToInt(item) for item in match.groups()])
+                if len(rv[-1]) == 1:
+                    rv[-1] = rv[-1] * 2
+                foundMatch = True
+                break
+        if not foundMatch:
+            assert len(item) == 1
+
+            rv.append([ord(item)] * 2)
+    rv = normaliseCharList(rv)
+    return rv
+
+
+def normaliseCharList(charList):
+    charList = sorted(charList)
+    for item in charList:
+        assert item[1] >= item[0]
+    rv = []
+    i = 0
+    while i < len(charList):
+        j = 1
+        rv.append(charList[i])
+        while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
+            rv[-1][1] = charList[i + j][1]
+            j += 1
+        i += j
+    return rv
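+
+# Illustrative sketch (hypothetical values): charStringToList turns the
+# spec notation above into inclusive [start, end] pairs, and
+# normaliseCharList merges adjacent or overlapping pairs:
+#
+#     >>> charStringToList("#x0041 | [#x0061-#x007A]")
+#     [[65, 65], [97, 122]]
+#     >>> normaliseCharList([[0, 10], [11, 12], [20, 30]])
+#     [[0, 12], [20, 30]]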
+
+# We don't really support characters above the BMP :(
+max_unicode = int("FFFF", 16)
+
+
+def missingRanges(charList):
+    rv = []
+    if charList[0] != 0:
+        rv.append([0, charList[0][0] - 1])
+    for i, item in enumerate(charList[:-1]):
+        rv.append([item[1] + 1, charList[i + 1][0] - 1])
+    if charList[-1][1] != max_unicode:
+        rv.append([charList[-1][1] + 1, max_unicode])
+    return rv
+
+
+def listToRegexpStr(charList):
+    rv = []
+    for item in charList:
+        if item[0] == item[1]:
+            rv.append(escapeRegexp(chr(item[0])))
+        else:
+            rv.append(escapeRegexp(chr(item[0])) + "-" +
+                      escapeRegexp(chr(item[1])))
+    return "[%s]" % "".join(rv)
+
+
+def hexToInt(hex_str):
+    return int(hex_str, 16)
+
+
+def escapeRegexp(string):
+    specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
+                         "[", "]", "|", "(", ")", "-")
+    for char in specialCharacters:
+        string = string.replace(char, "\\" + char)
+
+    return string
+
+# output from the above
+nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')  # noqa
+
+nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')  # noqa
+
+# Simpler things
+nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\\-'()+,./:=?;!*#@$_%]")
+
+
+class InfosetFilter(object):
+    replacementRegexp = re.compile(r"U[\dA-F]{5,5}")
+
+    def __init__(self,
+                 dropXmlnsLocalName=False,
+                 dropXmlnsAttrNs=False,
+                 preventDoubleDashComments=False,
+                 preventDashAtCommentEnd=False,
+                 replaceFormFeedCharacters=True,
+                 preventSingleQuotePubid=False):
+
+        self.dropXmlnsLocalName = dropXmlnsLocalName
+        self.dropXmlnsAttrNs = dropXmlnsAttrNs
+
+        self.preventDoubleDashComments = preventDoubleDashComments
+        self.preventDashAtCommentEnd = preventDashAtCommentEnd
+
+        self.replaceFormFeedCharacters = replaceFormFeedCharacters
+
+        self.preventSingleQuotePubid = preventSingleQuotePubid
+
+        self.replaceCache = {}
+
+    def coerceAttribute(self, name, namespace=None):
+        if self.dropXmlnsLocalName and name.startswith("xmlns:"):
+            warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
+            return None
+        elif (self.dropXmlnsAttrNs and
+              namespace == "http://www.w3.org/2000/xmlns/"):
+            warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
+            return None
+        else:
+            return self.toXmlName(name)
+
+    def coerceElement(self, name):
+        return self.toXmlName(name)
+
+    def coerceComment(self, data):
+        if self.preventDoubleDashComments:
+            while "--" in data:
+                warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
+                data = data.replace("--", "- -")
+            if data.endswith("-"):
+                warnings.warn("Comments cannot end in a dash", DataLossWarning)
+                data += " "
+        return data
+
+    def coerceCharacters(self, data):
+        if self.replaceFormFeedCharacters:
+            for _ in range(data.count("\x0C")):
+                warnings.warn("Text cannot contain U+000C", DataLossWarning)
+            data = data.replace("\x0C", " ")
+        # Other non-xml characters
+        return data
+
+    def coercePubid(self, data):
+        dataOutput = data
+        for char in nonPubidCharRegexp.findall(data):
+            warnings.warn("Coercing non-XML pubid", DataLossWarning)
+            replacement = self.getReplacementCharacter(char)
+            dataOutput = dataOutput.replace(char, replacement)
+        if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
+            warnings.warn("Pubid cannot contain single quote", DataLossWarning)
+            dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
+        return dataOutput
+
+    def toXmlName(self, name):
+        nameFirst = name[0]
+        nameRest = name[1:]
+        m = nonXmlNameFirstBMPRegexp.match(nameFirst)
+        if m:
+            warnings.warn("Coercing non-XML name", DataLossWarning)
+            nameFirstOutput = self.getReplacementCharacter(nameFirst)
+        else:
+            nameFirstOutput = nameFirst
+
+        nameRestOutput = nameRest
+        replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
+        for char in replaceChars:
+            warnings.warn("Coercing non-XML name", DataLossWarning)
+            replacement = self.getReplacementCharacter(char)
+            nameRestOutput = nameRestOutput.replace(char, replacement)
+        return nameFirstOutput + nameRestOutput
+
+    def getReplacementCharacter(self, char):
+        if char in self.replaceCache:
+            replacement = self.replaceCache[char]
+        else:
+            replacement = self.escapeChar(char)
+        return replacement
+
+    def fromXmlName(self, name):
+        for item in set(self.replacementRegexp.findall(name)):
+            name = name.replace(item, self.unescapeChar(item))
+        return name
+
+    def escapeChar(self, char):
+        replacement = "U%05X" % ord(char)
+        self.replaceCache[char] = replacement
+        return replacement
+
+    def unescapeChar(self, charcode):
+        return chr(int(charcode[1:], 16))
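+
+
+# Illustrative sketch (not upstream documentation): names that are legal
+# in HTML but not in XML round-trip through the UXXXXX escape scheme
+# (a DataLossWarning is emitted on the way in):
+#
+#     >>> f = InfosetFilter()
+#     >>> f.toXmlName("1st")  # a leading digit cannot start an XML name
+#     'U00031st'
+#     >>> f.fromXmlName("U00031st")
+#     '1st'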
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/_inputstream.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/_inputstream.py
new file mode 100644
index 0000000000000000000000000000000000000000..a65e55f64bf433f98eb4ed331af91d84fb760ea9
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/_inputstream.py
@@ -0,0 +1,923 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from pip._vendor.six import text_type, binary_type
+from pip._vendor.six.moves import http_client, urllib
+
+import codecs
+import re
+
+from pip._vendor import webencodings
+
+from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
+from .constants import _ReparseException
+from . import _utils
+
+from io import StringIO
+
+try:
+    from io import BytesIO
+except ImportError:
+    BytesIO = StringIO
+
+# Non-unicode versions of constants for use in the pre-parser
+spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
+asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
+asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
+spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
+
+
+invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]"  # noqa
+
+if _utils.supports_lone_surrogates:
+    # Use one extra step of indirection and create surrogates with
+    # eval. Not using this indirection would introduce an illegal
+    # unicode literal on platforms not supporting such lone
+    # surrogates.
+    assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1
+    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
+                                    eval('"\\uD800-\\uDFFF"') +  # pylint:disable=eval-used
+                                    "]")
+else:
+    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)
+
+non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
+                                  0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
+                                  0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
+                                  0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
+                                  0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
+                                  0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
+                                  0x10FFFE, 0x10FFFF])
+
+ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005C\u005B-\u0060\u007B-\u007E]")
+
+# Cache for charsUntil()
+charsUntilRegEx = {}
+
+
+class BufferedStream(object):
+    """Buffering for streams that do not have buffering of their own
+
+    The buffer is implemented as a list of chunks on the assumption that
+    joining many strings will be slow since it is O(n**2)
+    """
+
+    def __init__(self, stream):
+        self.stream = stream
+        self.buffer = []
+        self.position = [-1, 0]  # chunk number, offset
+
+    def tell(self):
+        pos = 0
+        for chunk in self.buffer[:self.position[0]]:
+            pos += len(chunk)
+        pos += self.position[1]
+        return pos
+
+    def seek(self, pos):
+        assert pos <= self._bufferedBytes()
+        offset = pos
+        i = 0
+        while len(self.buffer[i]) < offset:
+            offset -= len(self.buffer[i])
+            i += 1
+        self.position = [i, offset]
+
+    def read(self, bytes):
+        if not self.buffer:
+            return self._readStream(bytes)
+        elif (self.position[0] == len(self.buffer) and
+              self.position[1] == len(self.buffer[-1])):
+            return self._readStream(bytes)
+        else:
+            return self._readFromBuffer(bytes)
+
+    def _bufferedBytes(self):
+        return sum([len(item) for item in self.buffer])
+
+    def _readStream(self, bytes):
+        data = self.stream.read(bytes)
+        self.buffer.append(data)
+        self.position[0] += 1
+        self.position[1] = len(data)
+        return data
+
+    def _readFromBuffer(self, bytes):
+        remainingBytes = bytes
+        rv = []
+        bufferIndex = self.position[0]
+        bufferOffset = self.position[1]
+        while bufferIndex < len(self.buffer) and remainingBytes != 0:
+            assert remainingBytes > 0
+            bufferedData = self.buffer[bufferIndex]
+
+            if remainingBytes <= len(bufferedData) - bufferOffset:
+                bytesToRead = remainingBytes
+                self.position = [bufferIndex, bufferOffset + bytesToRead]
+            else:
+                bytesToRead = len(bufferedData) - bufferOffset
+                self.position = [bufferIndex, len(bufferedData)]
+                bufferIndex += 1
+            rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
+            remainingBytes -= bytesToRead
+
+            bufferOffset = 0
+
+        if remainingBytes:
+            rv.append(self._readStream(remainingBytes))
+
+        return b"".join(rv)
+
+
+def HTMLInputStream(source, **kwargs):
+    # Work around Python bug #20007: read(0) closes the connection.
+    # http://bugs.python.org/issue20007
+    if (isinstance(source, http_client.HTTPResponse) or
+        # Also check for addinfourl wrapping HTTPResponse
+        (isinstance(source, urllib.response.addbase) and
+         isinstance(source.fp, http_client.HTTPResponse))):
+        isUnicode = False
+    elif hasattr(source, "read"):
+        isUnicode = isinstance(source.read(0), text_type)
+    else:
+        isUnicode = isinstance(source, text_type)
+
+    if isUnicode:
+        encodings = [x for x in kwargs if x.endswith("_encoding")]
+        if encodings:
+            raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings)
+
+        return HTMLUnicodeInputStream(source, **kwargs)
+    else:
+        return HTMLBinaryInputStream(source, **kwargs)
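+
+# Illustrative sketch (not upstream documentation): text input selects
+# the unicode stream, byte input the encoding-sniffing binary stream:
+#
+#     >>> HTMLInputStream("<p>a</p>").__class__.__name__
+#     'HTMLUnicodeInputStream'
+#     >>> HTMLInputStream(b"<p>a</p>").__class__.__name__
+#     'HTMLBinaryInputStream'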
+
+
+class HTMLUnicodeInputStream(object):
+    """Provides a unicode stream of characters to the HTMLTokenizer.
+
+    This class takes care of character encoding and removing or replacing
+    incorrect byte-sequences and also provides column and line tracking.
+
+    """
+
+    _defaultChunkSize = 10240
+
+    def __init__(self, source):
+        """Initialises the HTMLInputStream.
+
+        HTMLInputStream(source, [encoding]) -> Normalized stream from source
+        for use by html5lib.
+
+        source can be either a file-object, local filename or a string.
+
+        The optional encoding parameter must be a string that indicates
+        the encoding.  If specified, that encoding will be used,
+        regardless of any BOM or later declaration (such as in a meta
+        element)
+
+        """
+
+        if not _utils.supports_lone_surrogates:
+            # Such platforms will have already checked for such
+            # surrogate errors, so no need to do this checking.
+            self.reportCharacterErrors = None
+        elif len("\U0010FFFF") == 1:
+            self.reportCharacterErrors = self.characterErrorsUCS4
+        else:
+            self.reportCharacterErrors = self.characterErrorsUCS2
+
+        # List of where new lines occur
+        self.newLines = [0]
+
+        self.charEncoding = (lookupEncoding("utf-8"), "certain")
+        self.dataStream = self.openStream(source)
+
+        self.reset()
+
+    def reset(self):
+        self.chunk = ""
+        self.chunkSize = 0
+        self.chunkOffset = 0
+        self.errors = []
+
+        # number of (complete) lines in previous chunks
+        self.prevNumLines = 0
+        # number of columns in the last line of the previous chunk
+        self.prevNumCols = 0
+
+        # Deal with CR LF and surrogates split over chunk boundaries
+        self._bufferedCharacter = None
+
+    def openStream(self, source):
+        """Produces a file object from source.
+
+        source can be either a file object, local filename or a string.
+
+        """
+        # Already a file object
+        if hasattr(source, 'read'):
+            stream = source
+        else:
+            stream = StringIO(source)
+
+        return stream
+
+    def _position(self, offset):
+        chunk = self.chunk
+        nLines = chunk.count('\n', 0, offset)
+        positionLine = self.prevNumLines + nLines
+        lastLinePos = chunk.rfind('\n', 0, offset)
+        if lastLinePos == -1:
+            positionColumn = self.prevNumCols + offset
+        else:
+            positionColumn = offset - (lastLinePos + 1)
+        return (positionLine, positionColumn)
+
+    def position(self):
+        """Returns (line, col) of the current position in the stream."""
+        line, col = self._position(self.chunkOffset)
+        return (line + 1, col)
+
+    def char(self):
+        """ Read one character from the stream or queue if available. Return
+            EOF when EOF is reached.
+        """
+        # Read a new chunk from the input stream if necessary
+        if self.chunkOffset >= self.chunkSize:
+            if not self.readChunk():
+                return EOF
+
+        chunkOffset = self.chunkOffset
+        char = self.chunk[chunkOffset]
+        self.chunkOffset = chunkOffset + 1
+
+        return char
+
+    def readChunk(self, chunkSize=None):
+        if chunkSize is None:
+            chunkSize = self._defaultChunkSize
+
+        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
+
+        self.chunk = ""
+        self.chunkSize = 0
+        self.chunkOffset = 0
+
+        data = self.dataStream.read(chunkSize)
+
+        # Deal with CR LF and surrogates broken across chunks
+        if self._bufferedCharacter:
+            data = self._bufferedCharacter + data
+            self._bufferedCharacter = None
+        elif not data:
+            # We have no more data, bye-bye stream
+            return False
+
+        if len(data) > 1:
+            lastv = ord(data[-1])
+            if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
+                self._bufferedCharacter = data[-1]
+                data = data[:-1]
+
+        if self.reportCharacterErrors:
+            self.reportCharacterErrors(data)
+
+        # Replace invalid characters
+        data = data.replace("\r\n", "\n")
+        data = data.replace("\r", "\n")
+
+        self.chunk = data
+        self.chunkSize = len(data)
+
+        return True
+
+    def characterErrorsUCS4(self, data):
+        for _ in range(len(invalid_unicode_re.findall(data))):
+            self.errors.append("invalid-codepoint")
+
+    def characterErrorsUCS2(self, data):
+        # Someone picked the wrong compile option
+        # You lose
+        skip = False
+        for match in invalid_unicode_re.finditer(data):
+            if skip:
+                continue
+            codepoint = ord(match.group())
+            pos = match.start()
+            # Pretty sure there should be endianness issues here
+            if _utils.isSurrogatePair(data[pos:pos + 2]):
+                # We have a surrogate pair!
+                char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2])
+                if char_val in non_bmp_invalid_codepoints:
+                    self.errors.append("invalid-codepoint")
+                skip = True
+            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
+                  pos == len(data) - 1):
+                self.errors.append("invalid-codepoint")
+            else:
+                skip = False
+                self.errors.append("invalid-codepoint")
+
+    def charsUntil(self, characters, opposite=False):
+        """ Returns a string of characters from the stream up to but not
+        including any character in 'characters' or EOF. 'characters' must be
+        a container that supports the 'in' method and iteration over its
+        characters.
+        """
+
+        # Use a cache of regexps to find the required characters
+        try:
+            chars = charsUntilRegEx[(characters, opposite)]
+        except KeyError:
+            if __debug__:
+                for c in characters:
+                    assert(ord(c) < 128)
+            regex = "".join(["\\x%02x" % ord(c) for c in characters])
+            if not opposite:
+                regex = "^%s" % regex
+            chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
+
+        rv = []
+
+        while True:
+            # Find the longest matching prefix
+            m = chars.match(self.chunk, self.chunkOffset)
+            if m is None:
+                # If nothing matched, and it wasn't because we ran out of chunk,
+                # then stop
+                if self.chunkOffset != self.chunkSize:
+                    break
+            else:
+                end = m.end()
+                # If not the whole chunk matched, return everything
+                # up to the part that didn't match
+                if end != self.chunkSize:
+                    rv.append(self.chunk[self.chunkOffset:end])
+                    self.chunkOffset = end
+                    break
+            # If the whole remainder of the chunk matched,
+            # use it all and read the next chunk
+            rv.append(self.chunk[self.chunkOffset:])
+            if not self.readChunk():
+                # Reached EOF
+                break
+
+        r = "".join(rv)
+        return r
+
+    def unget(self, char):
+        # Only one character is allowed to be ungotten at once - it must
+        # be consumed again before any further call to unget
+        if char is not None:
+            if self.chunkOffset == 0:
+                # unget is called quite rarely, so it's a good idea to do
+                # more work here if it saves a bit of work in the frequently
+                # called char and charsUntil.
+                # So, just prepend the ungotten character onto the current
+                # chunk:
+                self.chunk = char + self.chunk
+                self.chunkSize += 1
+            else:
+                self.chunkOffset -= 1
+                assert self.chunk[self.chunkOffset] == char
+
+
+class HTMLBinaryInputStream(HTMLUnicodeInputStream):
+    """Provides a unicode stream of characters to the HTMLTokenizer.
+
+    This class takes care of character encoding and removing or replacing
+    incorrect byte-sequences and also provides column and line tracking.
+
+    """
+
+    def __init__(self, source, override_encoding=None, transport_encoding=None,
+                 same_origin_parent_encoding=None, likely_encoding=None,
+                 default_encoding="windows-1252", useChardet=True):
+        """Initialises the HTMLInputStream.
+
+        HTMLInputStream(source, [encoding]) -> Normalized stream from source
+        for use by html5lib.
+
+        source can be either a file-object, local filename or a string.
+
+        The optional encoding parameter must be a string that indicates
+        the encoding.  If specified, that encoding will be used,
+        regardless of any BOM or later declaration (such as in a meta
+        element)
+
+        """
+        # Raw Stream - for unicode objects this will encode to utf-8 and set
+        #              self.charEncoding as appropriate
+        self.rawStream = self.openStream(source)
+
+        HTMLUnicodeInputStream.__init__(self, self.rawStream)
+
+        # Encoding Information
+        # Number of bytes to use when looking for a meta element with
+        # encoding information
+        self.numBytesMeta = 1024
+        # Number of bytes to use when detecting the encoding with chardet
+        self.numBytesChardet = 100
+        # Things from args
+        self.override_encoding = override_encoding
+        self.transport_encoding = transport_encoding
+        self.same_origin_parent_encoding = same_origin_parent_encoding
+        self.likely_encoding = likely_encoding
+        self.default_encoding = default_encoding
+
+        # Determine encoding
+        self.charEncoding = self.determineEncoding(useChardet)
+        assert self.charEncoding[0] is not None
+
+        # Call superclass
+        self.reset()
+
+    def reset(self):
+        self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
+        HTMLUnicodeInputStream.reset(self)
+
+    def openStream(self, source):
+        """Produces a file object from source.
+
+        source can be either a file object, local filename or a string.
+
+        """
+        # Already a file object
+        if hasattr(source, 'read'):
+            stream = source
+        else:
+            stream = BytesIO(source)
+
+        try:
+            stream.seek(stream.tell())
+        except:  # pylint:disable=bare-except
+            stream = BufferedStream(stream)
+
+        return stream
+
+    def determineEncoding(self, chardet=True):
+        # BOMs take precedence over everything
+        # This will also read past the BOM if present
+        charEncoding = self.detectBOM(), "certain"
+        if charEncoding[0] is not None:
+            return charEncoding
+
+        # If we've been overridden, we've been overridden
+        charEncoding = lookupEncoding(self.override_encoding), "certain"
+        if charEncoding[0] is not None:
+            return charEncoding
+
+        # Now check the transport layer
+        charEncoding = lookupEncoding(self.transport_encoding), "certain"
+        if charEncoding[0] is not None:
+            return charEncoding
+
+        # Look for meta elements with encoding information
+        charEncoding = self.detectEncodingMeta(), "tentative"
+        if charEncoding[0] is not None:
+            return charEncoding
+
+        # Parent document encoding
+        charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative"
+        if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"):
+            return charEncoding
+
+        # "likely" encoding
+        charEncoding = lookupEncoding(self.likely_encoding), "tentative"
+        if charEncoding[0] is not None:
+            return charEncoding
+
+        # Guess with chardet, if available
+        if chardet:
+            try:
+                from pip._vendor.chardet.universaldetector import UniversalDetector
+            except ImportError:
+                pass
+            else:
+                buffers = []
+                detector = UniversalDetector()
+                while not detector.done:
+                    buffer = self.rawStream.read(self.numBytesChardet)
+                    assert isinstance(buffer, bytes)
+                    if not buffer:
+                        break
+                    buffers.append(buffer)
+                    detector.feed(buffer)
+                detector.close()
+                encoding = lookupEncoding(detector.result['encoding'])
+                self.rawStream.seek(0)
+                if encoding is not None:
+                    return encoding, "tentative"
+
+        # Try the default encoding
+        charEncoding = lookupEncoding(self.default_encoding), "tentative"
+        if charEncoding[0] is not None:
+            return charEncoding
+
+        # Fallback to html5lib's default if even that hasn't worked
+        return lookupEncoding("windows-1252"), "tentative"
+
+    def changeEncoding(self, newEncoding):
+        assert self.charEncoding[1] != "certain"
+        newEncoding = lookupEncoding(newEncoding)
+        if newEncoding is None:
+            return
+        if newEncoding.name in ("utf-16be", "utf-16le"):
+            newEncoding = lookupEncoding("utf-8")
+            assert newEncoding is not None
+        elif newEncoding == self.charEncoding[0]:
+            self.charEncoding = (self.charEncoding[0], "certain")
+        else:
+            self.rawStream.seek(0)
+            self.charEncoding = (newEncoding, "certain")
+            self.reset()
+            raise _ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
+
+    def detectBOM(self):
+        """Attempts to detect at BOM at the start of the stream. If
+        an encoding can be determined from the BOM return the name of the
+        encoding otherwise return None"""
+        bomDict = {
+            codecs.BOM_UTF8: 'utf-8',
+            codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be',
+            codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'
+        }
+
+        # Go to beginning of file and read in 4 bytes
+        string = self.rawStream.read(4)
+        assert isinstance(string, bytes)
+
+        # Try detecting the BOM using bytes from the string
+        encoding = bomDict.get(string[:3])         # UTF-8
+        seek = 3
+        if not encoding:
+            # Need to detect UTF-32 before UTF-16
+            encoding = bomDict.get(string)         # UTF-32
+            seek = 4
+            if not encoding:
+                encoding = bomDict.get(string[:2])  # UTF-16
+                seek = 2
+
+        # Set the read position past the BOM if one was found, otherwise
+        # set it to the start of the stream
+        if encoding:
+            self.rawStream.seek(seek)
+            return lookupEncoding(encoding)
+        else:
+            self.rawStream.seek(0)
+            return None
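+
+    # Illustrative sketch (assumes the codecs BOM constants imported
+    # above): a UTF-8 BOM pins the encoding as "certain" and the read
+    # position is left just past it:
+    #
+    #     >>> s = HTMLBinaryInputStream(codecs.BOM_UTF8 + b"<p>hi</p>")
+    #     >>> s.charEncoding[0].name, s.charEncoding[1]
+    #     ('utf-8', 'certain')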
+
+    def detectEncodingMeta(self):
+        """Report the encoding declared by the meta element
+        """
+        buffer = self.rawStream.read(self.numBytesMeta)
+        assert isinstance(buffer, bytes)
+        parser = EncodingParser(buffer)
+        self.rawStream.seek(0)
+        encoding = parser.getEncoding()
+
+        if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
+            encoding = lookupEncoding("utf-8")
+
+        return encoding
+
+
+class EncodingBytes(bytes):
+    """String-like object with an associated position and various extra methods
+    If the position is ever greater than the string length then an exception is
+    raised"""
+    def __new__(cls, value):
+        assert isinstance(value, bytes)
+        return bytes.__new__(cls, value.lower())
+
+    def __init__(self, value):
+        # pylint:disable=unused-argument
+        self._position = -1
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        p = self._position = self._position + 1
+        if p >= len(self):
+            raise StopIteration
+        elif p < 0:
+            raise TypeError
+        return self[p:p + 1]
+
+    def next(self):
+        # Py2 compat
+        return self.__next__()
+
+    def previous(self):
+        p = self._position
+        if p >= len(self):
+            raise StopIteration
+        elif p < 0:
+            raise TypeError
+        self._position = p = p - 1
+        return self[p:p + 1]
+
+    def setPosition(self, position):
+        if self._position >= len(self):
+            raise StopIteration
+        self._position = position
+
+    def getPosition(self):
+        if self._position >= len(self):
+            raise StopIteration
+        if self._position >= 0:
+            return self._position
+        else:
+            return None
+
+    position = property(getPosition, setPosition)
+
+    def getCurrentByte(self):
+        return self[self.position:self.position + 1]
+
+    currentByte = property(getCurrentByte)
+
+    def skip(self, chars=spaceCharactersBytes):
+        """Skip past a list of characters"""
+        p = self.position               # use property for the error-checking
+        while p < len(self):
+            c = self[p:p + 1]
+            if c not in chars:
+                self._position = p
+                return c
+            p += 1
+        self._position = p
+        return None
+
+    def skipUntil(self, chars):
+        p = self.position
+        while p < len(self):
+            c = self[p:p + 1]
+            if c in chars:
+                self._position = p
+                return c
+            p += 1
+        self._position = p
+        return None
+
+    def matchBytes(self, bytes):
+        """Look for a sequence of bytes at the start of a string. If the bytes
+        are found return True and advance the position to the byte after the
+        match. Otherwise return False and leave the position alone"""
+        p = self.position
+        data = self[p:p + len(bytes)]
+        rv = data.startswith(bytes)
+        if rv:
+            self.position += len(bytes)
+        return rv
+
+    def jumpTo(self, bytes):
+        """Look for the next sequence of bytes matching a given sequence. If
+        a match is found advance the position to the last byte of the match"""
+        newPosition = self[self.position:].find(bytes)
+        if newPosition > -1:
+            # XXX: This is ugly, but I can't see a nicer way to fix this.
+            if self._position == -1:
+                self._position = 0
+            self._position += (newPosition + len(bytes) - 1)
+            return True
+        else:
+            raise StopIteration
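+
+    # Illustrative sketch (hypothetical input): the input is lower-cased
+    # and a cursor is tracked, so the pre-parser can scan byte-by-byte:
+    #
+    #     >>> data = EncodingBytes(b"<META charset=utf-8>")
+    #     >>> next(data)
+    #     b'<'
+    #     >>> data.matchBytes(b"<meta")
+    #     True
+    #     >>> data.currentByte
+    #     b' '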
+
+
+class EncodingParser(object):
+    """Mini parser for detecting character encoding from meta elements"""
+
+    def __init__(self, data):
+        """string - the data to work on for encoding detection"""
+        self.data = EncodingBytes(data)
+        self.encoding = None
+
+    def getEncoding(self):
+        methodDispatch = (
+            (b"<!--", self.handleComment),
+            (b"<meta", self.handleMeta),
+            (b"</", self.handlePossibleEndTag),
+            (b"<!", self.handleOther),
+            (b"<?", self.handleOther),
+            (b"<", self.handlePossibleStartTag))
+        for _ in self.data:
+            keepParsing = True
+            for key, method in methodDispatch:
+                if self.data.matchBytes(key):
+                    try:
+                        keepParsing = method()
+                        break
+                    except StopIteration:
+                        keepParsing = False
+                        break
+            if not keepParsing:
+                break
+
+        return self.encoding
+
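+    # Illustrative sketch of the prescan in action:
+    #
+    #     parser = EncodingParser(b'<!-- x --><meta charset="utf-8">')
+    #     codec = parser.getEncoding()  # the comment is skipped, then the
+    #                                   # charset attribute is looked up
+    #     codec.name                    # "utf-8"
+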
+    def handleComment(self):
+        """Skip over comments"""
+        return self.data.jumpTo(b"-->")
+
+    def handleMeta(self):
+        if self.data.currentByte not in spaceCharactersBytes:
+            # <meta is not followed by a space, so just keep going
+            return True
+        # We have a valid meta element; search it for attributes
+        hasPragma = False
+        pendingEncoding = None
+        while True:
+            # Try to find the next attribute after the current position
+            attr = self.getAttribute()
+            if attr is None:
+                return True
+            else:
+                if attr[0] == b"http-equiv":
+                    hasPragma = attr[1] == b"content-type"
+                    if hasPragma and pendingEncoding is not None:
+                        self.encoding = pendingEncoding
+                        return False
+                elif attr[0] == b"charset":
+                    tentativeEncoding = attr[1]
+                    codec = lookupEncoding(tentativeEncoding)
+                    if codec is not None:
+                        self.encoding = codec
+                        return False
+                elif attr[0] == b"content":
+                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
+                    tentativeEncoding = contentParser.parse()
+                    if tentativeEncoding is not None:
+                        codec = lookupEncoding(tentativeEncoding)
+                        if codec is not None:
+                            if hasPragma:
+                                self.encoding = codec
+                                return False
+                            else:
+                                pendingEncoding = codec
+
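+    # Illustrative walkthrough of the pragma logic above: given
+    #
+    #     <meta content="text/html; charset=utf-8" http-equiv="content-type">
+    #
+    # the content attribute is seen first, so its codec is only recorded as
+    # pendingEncoding; when http-equiv="content-type" later sets hasPragma,
+    # the pending codec is promoted to self.encoding.
+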
+    def handlePossibleStartTag(self):
+        return self.handlePossibleTag(False)
+
+    def handlePossibleEndTag(self):
+        next(self.data)
+        return self.handlePossibleTag(True)
+
+    def handlePossibleTag(self, endTag):
+        data = self.data
+        if data.currentByte not in asciiLettersBytes:
+            # If the next byte is not an ascii letter either ignore this
+            # fragment (possible start tag case) or treat it according to
+            # handleOther
+            if endTag:
+                data.previous()
+                self.handleOther()
+            return True
+
+        c = data.skipUntil(spacesAngleBrackets)
+        if c == b"<":
+            # return to the first step in the overall "two step" algorithm
+            # reprocessing the < byte
+            data.previous()
+        else:
+            # Read all attributes
+            attr = self.getAttribute()
+            while attr is not None:
+                attr = self.getAttribute()
+        return True
+
+    def handleOther(self):
+        return self.data.jumpTo(b">")
+
+    def getAttribute(self):
+        """Return a name,value pair for the next attribute in the stream,
+        if one is found, or None"""
+        data = self.data
+        # Step 1 (skip chars)
+        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
+        assert c is None or len(c) == 1
+        # Step 2
+        if c in (b">", None):
+            return None
+        # Step 3
+        attrName = []
+        attrValue = []
+        # Step 4 attribute name
+        while True:
+            if c == b"=" and attrName:
+                break
+            elif c in spaceCharactersBytes:
+                # Step 6!
+                c = data.skip()
+                break
+            elif c in (b"/", b">"):
+                return b"".join(attrName), b""
+            elif c in asciiUppercaseBytes:
+                attrName.append(c.lower())
+            elif c is None:
+                return None
+            else:
+                attrName.append(c)
+            # Step 5
+            c = next(data)
+        # Step 7
+        if c != b"=":
+            data.previous()
+            return b"".join(attrName), b""
+        # Step 8
+        next(data)
+        # Step 9
+        c = data.skip()
+        # Step 10
+        if c in (b"'", b'"'):
+            # 10.1
+            quoteChar = c
+            while True:
+                # 10.2
+                c = next(data)
+                # 10.3
+                if c == quoteChar:
+                    next(data)
+                    return b"".join(attrName), b"".join(attrValue)
+                # 10.4
+                elif c in asciiUppercaseBytes:
+                    attrValue.append(c.lower())
+                # 10.5
+                else:
+                    attrValue.append(c)
+        elif c == b">":
+            return b"".join(attrName), b""
+        elif c in asciiUppercaseBytes:
+            attrValue.append(c.lower())
+        elif c is None:
+            return None
+        else:
+            attrValue.append(c)
+        # Step 11
+        while True:
+            c = next(data)
+            if c in spacesAngleBrackets:
+                return b"".join(attrName), b"".join(attrValue)
+            elif c in asciiUppercaseBytes:
+                attrValue.append(c.lower())
+            elif c is None:
+                return None
+            else:
+                attrValue.append(c)
+
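+    # Illustrative example: starting just past "<meta", the input
+    #
+    #     b' http-equiv="Content-Type" charset=UTF-8>'
+    #
+    # yields (b"http-equiv", b"content-type") on the first call and
+    # (b"charset", b"utf-8") on the second; a third call returns None at
+    # the b">".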
+
+class ContentAttrParser(object):
+    def __init__(self, data):
+        assert isinstance(data, bytes)
+        self.data = data
+
+    def parse(self):
+        try:
+            # Check if the attr name is charset
+            # otherwise return
+            self.data.jumpTo(b"charset")
+            self.data.position += 1
+            self.data.skip()
+            if self.data.currentByte != b"=":
+                # If there is no = sign keep looking for attrs
+                return None
+            self.data.position += 1
+            self.data.skip()
+            # Look for an encoding between matching quote marks
+            if self.data.currentByte in (b'"', b"'"):
+                quoteMark = self.data.currentByte
+                self.data.position += 1
+                oldPosition = self.data.position
+                if self.data.jumpTo(quoteMark):
+                    return self.data[oldPosition:self.data.position]
+                else:
+                    return None
+            else:
+                # Unquoted value
+                oldPosition = self.data.position
+                try:
+                    self.data.skipUntil(spaceCharactersBytes)
+                    return self.data[oldPosition:self.data.position]
+                except StopIteration:
+                    # Return the whole remaining value
+                    return self.data[oldPosition:]
+        except StopIteration:
+            return None
+
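+    # Illustrative example:
+    #
+    #     ContentAttrParser(EncodingBytes(b"text/html; charset=utf-8")).parse()
+    #     # -> b"utf-8"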
+
+def lookupEncoding(encoding):
+    """Return the python codec name corresponding to an encoding or None if the
+    string doesn't correspond to a valid encoding."""
+    if isinstance(encoding, binary_type):
+        try:
+            encoding = encoding.decode("ascii")
+        except UnicodeDecodeError:
+            return None
+
+    if encoding is not None:
+        try:
+            return webencodings.lookup(encoding)
+        except AttributeError:
+            return None
+    else:
+        return None
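+
+# Illustrative examples: lookupEncoding(b"UTF8") returns the webencodings
+# codec whose .name is "utf-8", while an unrecognised label such as
+# b"bogus" yields None.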
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/_tokenizer.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/_tokenizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..178f6e7fa8c79abe6d18f0c60adfe0c239eb97e1
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/_tokenizer.py
@@ -0,0 +1,1721 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from pip._vendor.six import unichr as chr
+
+from collections import deque
+
+from .constants import spaceCharacters
+from .constants import entities
+from .constants import asciiLetters, asciiUpper2Lower
+from .constants import digits, hexDigits, EOF
+from .constants import tokenTypes, tagTokenTypes
+from .constants import replacementCharacters
+
+from ._inputstream import HTMLInputStream
+
+from ._trie import Trie
+
+entitiesTrie = Trie(entities)
+
+
+class HTMLTokenizer(object):
+    """ This class takes care of tokenizing HTML.
+
+    * self.currentToken
+      Holds the token that is currently being processed.
+
+    * self.state
+      Holds a reference to the state method to be invoked next.
+
+    * self.stream
+      Points to the HTMLInputStream object.
+    """
+
+    def __init__(self, stream, parser=None, **kwargs):
+
+        self.stream = HTMLInputStream(stream, **kwargs)
+        self.parser = parser
+
+        # Setup the initial tokenizer state
+        self.escapeFlag = False
+        self.lastFourChars = []
+        self.state = self.dataState
+        self.escape = False
+
+        # The current token being created
+        self.currentToken = None
+        super(HTMLTokenizer, self).__init__()
+
+    def __iter__(self):
+        """ This is where the magic happens.
+
+        We do our usual processing through the states and, when we have a
+        token to return, we yield it, which pauses processing until the next
+        token is requested.
+        """
+        self.tokenQueue = deque([])
+        # Start processing. When EOF is reached self.state will return False
+        # instead of True and the loop will terminate.
+        while self.state():
+            while self.stream.errors:
+                yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
+            while self.tokenQueue:
+                yield self.tokenQueue.popleft()
+
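+    # Illustrative use: iterating the tokenizer drives the state machine and
+    # yields token dicts as they are queued, e.g.
+    #
+    #     for token in HTMLTokenizer("<p id=x>hi"):
+    #         ...  # a StartTag token for "p", then a Characters token "hi"
+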
+    def consumeNumberEntity(self, isHex):
+        """This function returns either U+FFFD or the character based on the
+        decimal or hexadecimal representation. It also discards ";" if present.
+        If it is not present, a "numeric-entity-without-semicolon" ParseError
+        token is appended to self.tokenQueue.
+        """
+
+        allowed = digits
+        radix = 10
+        if isHex:
+            allowed = hexDigits
+            radix = 16
+
+        charStack = []
+
+        # Consume all the characters that are in range while making sure we
+        # don't hit an EOF.
+        c = self.stream.char()
+        while c in allowed and c is not EOF:
+            charStack.append(c)
+            c = self.stream.char()
+
+        # Convert the set of characters consumed to an int.
+        charAsInt = int("".join(charStack), radix)
+
+        # Certain characters get replaced with others
+        if charAsInt in replacementCharacters:
+            char = replacementCharacters[charAsInt]
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "illegal-codepoint-for-numeric-entity",
+                                    "datavars": {"charAsInt": charAsInt}})
+        elif ((0xD800 <= charAsInt <= 0xDFFF) or
+              (charAsInt > 0x10FFFF)):
+            char = "\uFFFD"
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "illegal-codepoint-for-numeric-entity",
+                                    "datavars": {"charAsInt": charAsInt}})
+        else:
+            # Should speed up this check somehow (e.g. move the set to a constant)
+            if ((0x0001 <= charAsInt <= 0x0008) or
+                (0x000E <= charAsInt <= 0x001F) or
+                (0x007F <= charAsInt <= 0x009F) or
+                (0xFDD0 <= charAsInt <= 0xFDEF) or
+                charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
+                                        0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
+                                        0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
+                                        0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
+                                        0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
+                                        0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
+                                        0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
+                                        0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
+                                        0xFFFFF, 0x10FFFE, 0x10FFFF])):
+                self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                        "data":
+                                        "illegal-codepoint-for-numeric-entity",
+                                        "datavars": {"charAsInt": charAsInt}})
+            try:
+                # Try/except needed as UCS-2 Python builds' unichr only works
+                # within the BMP.
+                char = chr(charAsInt)
+            except ValueError:
+                v = charAsInt - 0x10000
+                char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
+
+        # Discard the ";" if present. Otherwise, put the character back on
+        # the stream and queue a parse error.
+        if c != ";":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "numeric-entity-without-semicolon"})
+            self.stream.unget(c)
+
+        return char
+
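+    # Illustrative examples: once consumeEntity has consumed the "&#" (and
+    # "x"), "&#x41;" yields "A", while "&#128;" hits the
+    # replacementCharacters table and yields "\u20ac" along with an
+    # "illegal-codepoint-for-numeric-entity" parse error.
+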
+    def consumeEntity(self, allowedChar=None, fromAttribute=False):
+        # Initialise to the default output for when no entity is matched
+        output = "&"
+
+        charStack = [self.stream.char()]
+        if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or
+                (allowedChar is not None and allowedChar == charStack[0])):
+            self.stream.unget(charStack[0])
+
+        elif charStack[0] == "#":
+            # Read the next character to see if it's hex or decimal
+            hex = False
+            charStack.append(self.stream.char())
+            if charStack[-1] in ("x", "X"):
+                hex = True
+                charStack.append(self.stream.char())
+
+            # charStack[-1] should be the first digit
+            if (hex and charStack[-1] in hexDigits) \
+                    or (not hex and charStack[-1] in digits):
+                # At least one digit found, so consume the whole number
+                self.stream.unget(charStack[-1])
+                output = self.consumeNumberEntity(hex)
+            else:
+                # No digits found
+                self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                        "data": "expected-numeric-entity"})
+                self.stream.unget(charStack.pop())
+                output = "&" + "".join(charStack)
+
+        else:
+            # At this point we might have a named entity. Entities are stored
+            # in the global variable "entities".
+            #
+            # Consume characters and compare them to substrings of the entity
+            # names in the list until the substring no longer matches.
+            while (charStack[-1] is not EOF):
+                if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
+                    break
+                charStack.append(self.stream.char())
+
+            # At this point we have a string that starts with some characters
+            # that may match an entity.
+            # Try to find the longest entity the string will match, to take care
+            # of &noti for instance.
+            try:
+                entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
+                entityLength = len(entityName)
+            except KeyError:
+                entityName = None
+
+            if entityName is not None:
+                if entityName[-1] != ";":
+                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                            "named-entity-without-semicolon"})
+                if (entityName[-1] != ";" and fromAttribute and
+                    (charStack[entityLength] in asciiLetters or
+                     charStack[entityLength] in digits or
+                     charStack[entityLength] == "=")):
+                    self.stream.unget(charStack.pop())
+                    output = "&" + "".join(charStack)
+                else:
+                    output = entities[entityName]
+                    self.stream.unget(charStack.pop())
+                    output += "".join(charStack[entityLength:])
+            else:
+                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                        "expected-named-entity"})
+                self.stream.unget(charStack.pop())
+                output = "&" + "".join(charStack)
+
+        if fromAttribute:
+            self.currentToken["data"][-1][1] += output
+        else:
+            if output in spaceCharacters:
+                tokenType = "SpaceCharacters"
+            else:
+                tokenType = "Characters"
+            self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
+
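+    # Illustrative examples: "&notin;" matches a full entity and emits
+    # "\u2209"; "&noti" only matches the longest prefix "not", so the output
+    # is "\u00ac" followed by the unmatched "i", plus a
+    # "named-entity-without-semicolon" parse error.
+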
+    def processEntityInAttribute(self, allowedChar):
+        """This method replaces the need for "entityInAttributeValueState".
+        """
+        self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
+
+    def emitCurrentToken(self):
+        """This method is a generic handler for emitting the tags. It also sets
+        the state to "data" because that's what's needed after a token has been
+        emitted.
+        """
+        token = self.currentToken
+        # Add token to the queue to be yielded
+        if (token["type"] in tagTokenTypes):
+            token["name"] = token["name"].translate(asciiUpper2Lower)
+            if token["type"] == tokenTypes["EndTag"]:
+                if token["data"]:
+                    self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                            "data": "attributes-in-end-tag"})
+                if token["selfClosing"]:
+                    self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                            "data": "self-closing-flag-on-end-tag"})
+        self.tokenQueue.append(token)
+        self.state = self.dataState
+
+    # Below are the various tokenizer states.
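+    # Each state method consumes input, may queue tokens or parse errors,
+    # and returns True to continue tokenizing, or False (at EOF) to end the
+    # __iter__ loop.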
+    def dataState(self):
+        data = self.stream.char()
+        if data == "&":
+            self.state = self.entityDataState
+        elif data == "<":
+            self.state = self.tagOpenState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "\u0000"})
+        elif data is EOF:
+            # Tokenization ends.
+            return False
+        elif data in spaceCharacters:
+            # Directly after emitting a token you switch back to the "data
+            # state". At that point spaceCharacters are important so they are
+            # emitted separately.
+            self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
+                                    data + self.stream.charsUntil(spaceCharacters, True)})
+            # No need to update lastFourChars here, since the first space will
+            # have already been appended to lastFourChars and will have broken
+            # any <!-- or --> sequences
+        else:
+            chars = self.stream.charsUntil(("&", "<", "\u0000"))
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
+                                    data + chars})
+        return True
+
+    def entityDataState(self):
+        self.consumeEntity()
+        self.state = self.dataState
+        return True
+
+    def rcdataState(self):
+        data = self.stream.char()
+        if data == "&":
+            self.state = self.characterReferenceInRcdata
+        elif data == "<":
+            self.state = self.rcdataLessThanSignState
+        elif data == EOF:
+            # Tokenization ends.
+            return False
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "\uFFFD"})
+        elif data in spaceCharacters:
+            # Directly after emitting a token you switch back to the "data
+            # state". At that point spaceCharacters are important so they are
+            # emitted separately.
+            self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
+                                    data + self.stream.charsUntil(spaceCharacters, True)})
+            # No need to update lastFourChars here, since the first space will
+            # have already been appended to lastFourChars and will have broken
+            # any <!-- or --> sequences
+        else:
+            chars = self.stream.charsUntil(("&", "<", "\u0000"))
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
+                                    data + chars})
+        return True
+
+    def characterReferenceInRcdata(self):
+        self.consumeEntity()
+        self.state = self.rcdataState
+        return True
+
+    def rawtextState(self):
+        data = self.stream.char()
+        if data == "<":
+            self.state = self.rawtextLessThanSignState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "\uFFFD"})
+        elif data == EOF:
+            # Tokenization ends.
+            return False
+        else:
+            chars = self.stream.charsUntil(("<", "\u0000"))
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
+                                    data + chars})
+        return True
+
+    def scriptDataState(self):
+        data = self.stream.char()
+        if data == "<":
+            self.state = self.scriptDataLessThanSignState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "\uFFFD"})
+        elif data == EOF:
+            # Tokenization ends.
+            return False
+        else:
+            chars = self.stream.charsUntil(("<", "\u0000"))
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
+                                    data + chars})
+        return True
+
+    def plaintextState(self):
+        data = self.stream.char()
+        if data == EOF:
+            # Tokenization ends.
+            return False
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "\uFFFD"})
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
+                                    data + self.stream.charsUntil("\u0000")})
+        return True
+
+    def tagOpenState(self):
+        data = self.stream.char()
+        if data == "!":
+            self.state = self.markupDeclarationOpenState
+        elif data == "/":
+            self.state = self.closeTagOpenState
+        elif data in asciiLetters:
+            self.currentToken = {"type": tokenTypes["StartTag"],
+                                 "name": data, "data": [],
+                                 "selfClosing": False,
+                                 "selfClosingAcknowledged": False}
+            self.state = self.tagNameState
+        elif data == ">":
+            # XXX In theory it could be something besides a tag name. But
+            # do we really care?
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-tag-name-but-got-right-bracket"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
+            self.state = self.dataState
+        elif data == "?":
+            # XXX In theory it could be something besides a tag name. But
+            # do we really care?
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-tag-name-but-got-question-mark"})
+            self.stream.unget(data)
+            self.state = self.bogusCommentState
+        else:
+            # XXX
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-tag-name"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+            self.stream.unget(data)
+            self.state = self.dataState
+        return True
+
+    def closeTagOpenState(self):
+        data = self.stream.char()
+        if data in asciiLetters:
+            self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
+                                 "data": [], "selfClosing": False}
+            self.state = self.tagNameState
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-closing-tag-but-got-right-bracket"})
+            self.state = self.dataState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-closing-tag-but-got-eof"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
+            self.state = self.dataState
+        else:
+            # XXX data can be _'_...
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-closing-tag-but-got-char",
+                                    "datavars": {"data": data}})
+            self.stream.unget(data)
+            self.state = self.bogusCommentState
+        return True
+
+    def tagNameState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            self.state = self.beforeAttributeNameState
+        elif data == ">":
+            self.emitCurrentToken()
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-tag-name"})
+            self.state = self.dataState
+        elif data == "/":
+            self.state = self.selfClosingStartTagState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["name"] += "\uFFFD"
+        else:
+            self.currentToken["name"] += data
+            # (Don't use charsUntil here, because tag names are
+            # very short and it's faster to not do anything fancy)
+        return True
+
+    def rcdataLessThanSignState(self):
+        data = self.stream.char()
+        if data == "/":
+            self.temporaryBuffer = ""
+            self.state = self.rcdataEndTagOpenState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+            self.stream.unget(data)
+            self.state = self.rcdataState
+        return True
+
+    def rcdataEndTagOpenState(self):
+        data = self.stream.char()
+        if data in asciiLetters:
+            self.temporaryBuffer += data
+            self.state = self.rcdataEndTagNameState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
+            self.stream.unget(data)
+            self.state = self.rcdataState
+        return True
+
+    def rcdataEndTagNameState(self):
+        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
+        data = self.stream.char()
+        if data in spaceCharacters and appropriate:
+            self.currentToken = {"type": tokenTypes["EndTag"],
+                                 "name": self.temporaryBuffer,
+                                 "data": [], "selfClosing": False}
+            self.state = self.beforeAttributeNameState
+        elif data == "/" and appropriate:
+            self.currentToken = {"type": tokenTypes["EndTag"],
+                                 "name": self.temporaryBuffer,
+                                 "data": [], "selfClosing": False}
+            self.state = self.selfClosingStartTagState
+        elif data == ">" and appropriate:
+            self.currentToken = {"type": tokenTypes["EndTag"],
+                                 "name": self.temporaryBuffer,
+                                 "data": [], "selfClosing": False}
+            self.emitCurrentToken()
+            self.state = self.dataState
+        elif data in asciiLetters:
+            self.temporaryBuffer += data
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "</" + self.temporaryBuffer})
+            self.stream.unget(data)
+            self.state = self.rcdataState
+        return True
+
+    def rawtextLessThanSignState(self):
+        data = self.stream.char()
+        if data == "/":
+            self.temporaryBuffer = ""
+            self.state = self.rawtextEndTagOpenState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+            self.stream.unget(data)
+            self.state = self.rawtextState
+        return True
+
+    def rawtextEndTagOpenState(self):
+        data = self.stream.char()
+        if data in asciiLetters:
+            self.temporaryBuffer += data
+            self.state = self.rawtextEndTagNameState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
+            self.stream.unget(data)
+            self.state = self.rawtextState
+        return True
+
+    def rawtextEndTagNameState(self):
+        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
+        data = self.stream.char()
+        if data in spaceCharacters and appropriate:
+            self.currentToken = {"type": tokenTypes["EndTag"],
+                                 "name": self.temporaryBuffer,
+                                 "data": [], "selfClosing": False}
+            self.state = self.beforeAttributeNameState
+        elif data == "/" and appropriate:
+            self.currentToken = {"type": tokenTypes["EndTag"],
+                                 "name": self.temporaryBuffer,
+                                 "data": [], "selfClosing": False}
+            self.state = self.selfClosingStartTagState
+        elif data == ">" and appropriate:
+            self.currentToken = {"type": tokenTypes["EndTag"],
+                                 "name": self.temporaryBuffer,
+                                 "data": [], "selfClosing": False}
+            self.emitCurrentToken()
+            self.state = self.dataState
+        elif data in asciiLetters:
+            self.temporaryBuffer += data
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "</" + self.temporaryBuffer})
+            self.stream.unget(data)
+            self.state = self.rawtextState
+        return True
+
+    def scriptDataLessThanSignState(self):
+        data = self.stream.char()
+        if data == "/":
+            self.temporaryBuffer = ""
+            self.state = self.scriptDataEndTagOpenState
+        elif data == "!":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
+            self.state = self.scriptDataEscapeStartState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+            self.stream.unget(data)
+            self.state = self.scriptDataState
+        return True
+
+    def scriptDataEndTagOpenState(self):
+        data = self.stream.char()
+        if data in asciiLetters:
+            self.temporaryBuffer += data
+            self.state = self.scriptDataEndTagNameState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
+            self.stream.unget(data)
+            self.state = self.scriptDataState
+        return True
+
+    def scriptDataEndTagNameState(self):
+        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
+        data = self.stream.char()
+        if data in spaceCharacters and appropriate:
+            self.currentToken = {"type": tokenTypes["EndTag"],
+                                 "name": self.temporaryBuffer,
+                                 "data": [], "selfClosing": False}
+            self.state = self.beforeAttributeNameState
+        elif data == "/" and appropriate:
+            self.currentToken = {"type": tokenTypes["EndTag"],
+                                 "name": self.temporaryBuffer,
+                                 "data": [], "selfClosing": False}
+            self.state = self.selfClosingStartTagState
+        elif data == ">" and appropriate:
+            self.currentToken = {"type": tokenTypes["EndTag"],
+                                 "name": self.temporaryBuffer,
+                                 "data": [], "selfClosing": False}
+            self.emitCurrentToken()
+            self.state = self.dataState
+        elif data in asciiLetters:
+            self.temporaryBuffer += data
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "</" + self.temporaryBuffer})
+            self.stream.unget(data)
+            self.state = self.scriptDataState
+        return True
+
+    def scriptDataEscapeStartState(self):
+        data = self.stream.char()
+        if data == "-":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+            self.state = self.scriptDataEscapeStartDashState
+        else:
+            self.stream.unget(data)
+            self.state = self.scriptDataState
+        return True
+
+    def scriptDataEscapeStartDashState(self):
+        data = self.stream.char()
+        if data == "-":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+            self.state = self.scriptDataEscapedDashDashState
+        else:
+            self.stream.unget(data)
+            self.state = self.scriptDataState
+        return True
+
+    def scriptDataEscapedState(self):
+        data = self.stream.char()
+        if data == "-":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+            self.state = self.scriptDataEscapedDashState
+        elif data == "<":
+            self.state = self.scriptDataEscapedLessThanSignState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "\uFFFD"})
+        elif data == EOF:
+            self.state = self.dataState
+        else:
+            chars = self.stream.charsUntil(("<", "-", "\u0000"))
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
+                                    data + chars})
+        return True
+
+    def scriptDataEscapedDashState(self):
+        data = self.stream.char()
+        if data == "-":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+            self.state = self.scriptDataEscapedDashDashState
+        elif data == "<":
+            self.state = self.scriptDataEscapedLessThanSignState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "\uFFFD"})
+            self.state = self.scriptDataEscapedState
+        elif data == EOF:
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+            self.state = self.scriptDataEscapedState
+        return True
+
+    def scriptDataEscapedDashDashState(self):
+        data = self.stream.char()
+        if data == "-":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+        elif data == "<":
+            self.state = self.scriptDataEscapedLessThanSignState
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
+            self.state = self.scriptDataState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "\uFFFD"})
+            self.state = self.scriptDataEscapedState
+        elif data == EOF:
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+            self.state = self.scriptDataEscapedState
+        return True
+
+    def scriptDataEscapedLessThanSignState(self):
+        data = self.stream.char()
+        if data == "/":
+            self.temporaryBuffer = ""
+            self.state = self.scriptDataEscapedEndTagOpenState
+        elif data in asciiLetters:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
+            self.temporaryBuffer = data
+            self.state = self.scriptDataDoubleEscapeStartState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+            self.stream.unget(data)
+            self.state = self.scriptDataEscapedState
+        return True
+
+    def scriptDataEscapedEndTagOpenState(self):
+        data = self.stream.char()
+        if data in asciiLetters:
+            self.temporaryBuffer = data
+            self.state = self.scriptDataEscapedEndTagNameState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
+            self.stream.unget(data)
+            self.state = self.scriptDataEscapedState
+        return True
+
+    def scriptDataEscapedEndTagNameState(self):
+        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
+        data = self.stream.char()
+        if data in spaceCharacters and appropriate:
+            self.currentToken = {"type": tokenTypes["EndTag"],
+                                 "name": self.temporaryBuffer,
+                                 "data": [], "selfClosing": False}
+            self.state = self.beforeAttributeNameState
+        elif data == "/" and appropriate:
+            self.currentToken = {"type": tokenTypes["EndTag"],
+                                 "name": self.temporaryBuffer,
+                                 "data": [], "selfClosing": False}
+            self.state = self.selfClosingStartTagState
+        elif data == ">" and appropriate:
+            self.currentToken = {"type": tokenTypes["EndTag"],
+                                 "name": self.temporaryBuffer,
+                                 "data": [], "selfClosing": False}
+            self.emitCurrentToken()
+            self.state = self.dataState
+        elif data in asciiLetters:
+            self.temporaryBuffer += data
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "</" + self.temporaryBuffer})
+            self.stream.unget(data)
+            self.state = self.scriptDataEscapedState
+        return True
+
+    def scriptDataDoubleEscapeStartState(self):
+        data = self.stream.char()
+        if data in (spaceCharacters | frozenset(("/", ">"))):
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+            if self.temporaryBuffer.lower() == "script":
+                self.state = self.scriptDataDoubleEscapedState
+            else:
+                self.state = self.scriptDataEscapedState
+        elif data in asciiLetters:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+            self.temporaryBuffer += data
+        else:
+            self.stream.unget(data)
+            self.state = self.scriptDataEscapedState
+        return True
+
+    def scriptDataDoubleEscapedState(self):
+        data = self.stream.char()
+        if data == "-":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+            self.state = self.scriptDataDoubleEscapedDashState
+        elif data == "<":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+            self.state = self.scriptDataDoubleEscapedLessThanSignState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "\uFFFD"})
+        elif data == EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-script-in-script"})
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+        return True
+
+    def scriptDataDoubleEscapedDashState(self):
+        data = self.stream.char()
+        if data == "-":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+            self.state = self.scriptDataDoubleEscapedDashDashState
+        elif data == "<":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+            self.state = self.scriptDataDoubleEscapedLessThanSignState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "\uFFFD"})
+            self.state = self.scriptDataDoubleEscapedState
+        elif data == EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-script-in-script"})
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+            self.state = self.scriptDataDoubleEscapedState
+        return True
+
+    def scriptDataDoubleEscapedDashDashState(self):
+        data = self.stream.char()
+        if data == "-":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
+        elif data == "<":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
+            self.state = self.scriptDataDoubleEscapedLessThanSignState
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
+            self.state = self.scriptDataState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": "\uFFFD"})
+            self.state = self.scriptDataDoubleEscapedState
+        elif data == EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-script-in-script"})
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+            self.state = self.scriptDataDoubleEscapedState
+        return True
+
+    def scriptDataDoubleEscapedLessThanSignState(self):
+        data = self.stream.char()
+        if data == "/":
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
+            self.temporaryBuffer = ""
+            self.state = self.scriptDataDoubleEscapeEndState
+        else:
+            self.stream.unget(data)
+            self.state = self.scriptDataDoubleEscapedState
+        return True
+
+    def scriptDataDoubleEscapeEndState(self):
+        data = self.stream.char()
+        if data in (spaceCharacters | frozenset(("/", ">"))):
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+            if self.temporaryBuffer.lower() == "script":
+                self.state = self.scriptDataEscapedState
+            else:
+                self.state = self.scriptDataDoubleEscapedState
+        elif data in asciiLetters:
+            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
+            self.temporaryBuffer += data
+        else:
+            self.stream.unget(data)
+            self.state = self.scriptDataDoubleEscapedState
+        return True
+
+    def beforeAttributeNameState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            self.stream.charsUntil(spaceCharacters, True)
+        elif data in asciiLetters:
+            self.currentToken["data"].append([data, ""])
+            self.state = self.attributeNameState
+        elif data == ">":
+            self.emitCurrentToken()
+        elif data == "/":
+            self.state = self.selfClosingStartTagState
+        elif data in ("'", '"', "=", "<"):
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "invalid-character-in-attribute-name"})
+            self.currentToken["data"].append([data, ""])
+            self.state = self.attributeNameState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"].append(["\uFFFD", ""])
+            self.state = self.attributeNameState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-attribute-name-but-got-eof"})
+            self.state = self.dataState
+        else:
+            self.currentToken["data"].append([data, ""])
+            self.state = self.attributeNameState
+        return True
+
+    def attributeNameState(self):
+        data = self.stream.char()
+        leavingThisState = True
+        emitToken = False
+        if data == "=":
+            self.state = self.beforeAttributeValueState
+        elif data in asciiLetters:
+            self.currentToken["data"][-1][0] += data +\
+                self.stream.charsUntil(asciiLetters, True)
+            leavingThisState = False
+        elif data == ">":
+            # XXX If we emit here, the attributes are converted to a dict
+            # without being checked, and when the code below runs we error
+            # because data is a dict, not a list
+            emitToken = True
+        elif data in spaceCharacters:
+            self.state = self.afterAttributeNameState
+        elif data == "/":
+            self.state = self.selfClosingStartTagState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"][-1][0] += "\uFFFD"
+            leavingThisState = False
+        elif data in ("'", '"', "<"):
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data":
+                                    "invalid-character-in-attribute-name"})
+            self.currentToken["data"][-1][0] += data
+            leavingThisState = False
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "eof-in-attribute-name"})
+            self.state = self.dataState
+        else:
+            self.currentToken["data"][-1][0] += data
+            leavingThisState = False
+
+        if leavingThisState:
+            # Attributes are not dropped at this stage. That happens when the
+            # start tag token is emitted, so values can still be safely appended
+            # to attributes, but we do want to report the parse error in time.
+            self.currentToken["data"][-1][0] = (
+                self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
+            for name, _ in self.currentToken["data"][:-1]:
+                if self.currentToken["data"][-1][0] == name:
+                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                            "duplicate-attribute"})
+                    break
+            # XXX Fix for above XXX
+            if emitToken:
+                self.emitCurrentToken()
+        return True
+
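+    # Illustrative example: tokenizing "<p id=a id=b>" reports the
+    # "duplicate-attribute" parse error here, while the duplicate entry is
+    # only dropped later, when the emitted token's attribute list is turned
+    # into a mapping.
+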
+    def afterAttributeNameState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            self.stream.charsUntil(spaceCharacters, True)
+        elif data == "=":
+            self.state = self.beforeAttributeValueState
+        elif data == ">":
+            self.emitCurrentToken()
+        elif data in asciiLetters:
+            self.currentToken["data"].append([data, ""])
+            self.state = self.attributeNameState
+        elif data == "/":
+            self.state = self.selfClosingStartTagState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"].append(["\uFFFD", ""])
+            self.state = self.attributeNameState
+        elif data in ("'", '"', "<"):
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "invalid-character-after-attribute-name"})
+            self.currentToken["data"].append([data, ""])
+            self.state = self.attributeNameState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-end-of-tag-but-got-eof"})
+            self.state = self.dataState
+        else:
+            self.currentToken["data"].append([data, ""])
+            self.state = self.attributeNameState
+        return True
+
+    def beforeAttributeValueState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            self.stream.charsUntil(spaceCharacters, True)
+        elif data == "\"":
+            self.state = self.attributeValueDoubleQuotedState
+        elif data == "&":
+            self.state = self.attributeValueUnQuotedState
+            self.stream.unget(data)
+        elif data == "'":
+            self.state = self.attributeValueSingleQuotedState
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-attribute-value-but-got-right-bracket"})
+            self.emitCurrentToken()
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"][-1][1] += "\uFFFD"
+            self.state = self.attributeValueUnQuotedState
+        elif data in ("=", "<", "`"):
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "equals-in-unquoted-attribute-value"})
+            self.currentToken["data"][-1][1] += data
+            self.state = self.attributeValueUnQuotedState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-attribute-value-but-got-eof"})
+            self.state = self.dataState
+        else:
+            self.currentToken["data"][-1][1] += data
+            self.state = self.attributeValueUnQuotedState
+        return True
+
+    def attributeValueDoubleQuotedState(self):
+        data = self.stream.char()
+        if data == "\"":
+            self.state = self.afterAttributeValueState
+        elif data == "&":
+            self.processEntityInAttribute('"')
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"][-1][1] += "\uFFFD"
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-attribute-value-double-quote"})
+            self.state = self.dataState
+        else:
+            self.currentToken["data"][-1][1] += data +\
+                self.stream.charsUntil(("\"", "&", "\u0000"))
+        return True
+
+    def attributeValueSingleQuotedState(self):
+        data = self.stream.char()
+        if data == "'":
+            self.state = self.afterAttributeValueState
+        elif data == "&":
+            self.processEntityInAttribute("'")
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"][-1][1] += "\uFFFD"
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-attribute-value-single-quote"})
+            self.state = self.dataState
+        else:
+            self.currentToken["data"][-1][1] += data +\
+                self.stream.charsUntil(("'", "&", "\u0000"))
+        return True
+
+    def attributeValueUnQuotedState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            self.state = self.beforeAttributeNameState
+        elif data == "&":
+            self.processEntityInAttribute(">")
+        elif data == ">":
+            self.emitCurrentToken()
+        elif data in ('"', "'", "=", "<", "`"):
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-character-in-unquoted-attribute-value"})
+            self.currentToken["data"][-1][1] += data
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"][-1][1] += "\uFFFD"
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-attribute-value-no-quotes"})
+            self.state = self.dataState
+        else:
+            self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
+                frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
+        return True
+
+    def afterAttributeValueState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            self.state = self.beforeAttributeNameState
+        elif data == ">":
+            self.emitCurrentToken()
+        elif data == "/":
+            self.state = self.selfClosingStartTagState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-EOF-after-attribute-value"})
+            self.stream.unget(data)
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-character-after-attribute-value"})
+            self.stream.unget(data)
+            self.state = self.beforeAttributeNameState
+        return True
+
+    def selfClosingStartTagState(self):
+        data = self.stream.char()
+        if data == ">":
+            self.currentToken["selfClosing"] = True
+            self.emitCurrentToken()
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data":
+                                    "unexpected-EOF-after-solidus-in-tag"})
+            self.stream.unget(data)
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-character-after-solidus-in-tag"})
+            self.stream.unget(data)
+            self.state = self.beforeAttributeNameState
+        return True
+
+    def bogusCommentState(self):
+        # Make a new comment token whose value is all the characters up
+        # to the first > or EOF (charsUntil checks for EOF
+        # automatically), then emit it.
+        data = self.stream.charsUntil(">")
+        data = data.replace("\u0000", "\uFFFD")
+        self.tokenQueue.append(
+            {"type": tokenTypes["Comment"], "data": data})
+
+        # Eat the character directly after the bogus comment which is either a
+        # ">" or an EOF.
+        self.stream.char()
+        self.state = self.dataState
+        return True
+
+    def markupDeclarationOpenState(self):
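+        # Speculatively read characters into charStack; if none of the
+        # known constructs ("--", "DOCTYPE", "[CDATA[") matches, every
+        # buffered character is pushed back via unget() below.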
+        charStack = [self.stream.char()]
+        if charStack[-1] == "-":
+            charStack.append(self.stream.char())
+            if charStack[-1] == "-":
+                self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
+                self.state = self.commentStartState
+                return True
+        elif charStack[-1] in ('d', 'D'):
+            matched = True
+            for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
+                             ('y', 'Y'), ('p', 'P'), ('e', 'E')):
+                charStack.append(self.stream.char())
+                if charStack[-1] not in expected:
+                    matched = False
+                    break
+            if matched:
+                self.currentToken = {"type": tokenTypes["Doctype"],
+                                     "name": "",
+                                     "publicId": None, "systemId": None,
+                                     "correct": True}
+                self.state = self.doctypeState
+                return True
+        elif (charStack[-1] == "[" and
+              self.parser is not None and
+              self.parser.tree.openElements and
+              self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
+            matched = True
+            for expected in ["C", "D", "A", "T", "A", "["]:
+                charStack.append(self.stream.char())
+                if charStack[-1] != expected:
+                    matched = False
+                    break
+            if matched:
+                self.state = self.cdataSectionState
+                return True
+
+        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                "expected-dashes-or-doctype"})
+
+        while charStack:
+            self.stream.unget(charStack.pop())
+        self.state = self.bogusCommentState
+        return True
+
+    def commentStartState(self):
+        data = self.stream.char()
+        if data == "-":
+            self.state = self.commentStartDashState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"] += "\uFFFD"
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "incorrect-comment"})
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-comment"})
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.currentToken["data"] += data
+            self.state = self.commentState
+        return True
+
+    def commentStartDashState(self):
+        data = self.stream.char()
+        if data == "-":
+            self.state = self.commentEndState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"] += "-\uFFFD"
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "incorrect-comment"})
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-comment"})
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.currentToken["data"] += "-" + data
+            self.state = self.commentState
+        return True
+
+    def commentState(self):
+        data = self.stream.char()
+        if data == "-":
+            self.state = self.commentEndDashState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"] += "\uFFFD"
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "eof-in-comment"})
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.currentToken["data"] += data + \
+                self.stream.charsUntil(("-", "\u0000"))
+        return True
+
+    def commentEndDashState(self):
+        data = self.stream.char()
+        if data == "-":
+            self.state = self.commentEndState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"] += "-\uFFFD"
+            self.state = self.commentState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-comment-end-dash"})
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.currentToken["data"] += "-" + data
+            self.state = self.commentState
+        return True
+
+    def commentEndState(self):
+        data = self.stream.char()
+        if data == ">":
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"] += "--\uFFFD"
+            self.state = self.commentState
+        elif data == "!":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-bang-after-double-dash-in-comment"})
+            self.state = self.commentEndBangState
+        elif data == "-":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-dash-after-double-dash-in-comment"})
+            self.currentToken["data"] += data
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-comment-double-dash"})
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            # XXX
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-char-in-comment"})
+            self.currentToken["data"] += "--" + data
+            self.state = self.commentState
+        return True
+
+    def commentEndBangState(self):
+        data = self.stream.char()
+        if data == ">":
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data == "-":
+            self.currentToken["data"] += "--!"
+            self.state = self.commentEndDashState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["data"] += "--!\uFFFD"
+            self.state = self.commentState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-comment-end-bang-state"})
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.currentToken["data"] += "--!" + data
+            self.state = self.commentState
+        return True
+
+    def doctypeState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            self.state = self.beforeDoctypeNameState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-doctype-name-but-got-eof"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "need-space-after-doctype"})
+            self.stream.unget(data)
+            self.state = self.beforeDoctypeNameState
+        return True
+
+    def beforeDoctypeNameState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            pass
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-doctype-name-but-got-right-bracket"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["name"] = "\uFFFD"
+            self.state = self.doctypeNameState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-doctype-name-but-got-eof"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.currentToken["name"] = data
+            self.state = self.doctypeNameState
+        return True
+
+    def doctypeNameState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
+            self.state = self.afterDoctypeNameState
+        elif data == ">":
+            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["name"] += "\uFFFD"
+            self.state = self.doctypeNameState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype-name"})
+            self.currentToken["correct"] = False
+            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.currentToken["name"] += data
+        return True
+
+    def afterDoctypeNameState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            pass
+        elif data == ">":
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data is EOF:
+            self.currentToken["correct"] = False
+            self.stream.unget(data)
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype"})
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
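+            # Match the PUBLIC or SYSTEM keyword case-insensitively, one
+            # character at a time; on a failed match fall through to the
+            # bogus-doctype handling below.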
+            if data in ("p", "P"):
+                matched = True
+                for expected in (("u", "U"), ("b", "B"), ("l", "L"),
+                                 ("i", "I"), ("c", "C")):
+                    data = self.stream.char()
+                    if data not in expected:
+                        matched = False
+                        break
+                if matched:
+                    self.state = self.afterDoctypePublicKeywordState
+                    return True
+            elif data in ("s", "S"):
+                matched = True
+                for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
+                                 ("e", "E"), ("m", "M")):
+                    data = self.stream.char()
+                    if data not in expected:
+                        matched = False
+                        break
+                if matched:
+                    self.state = self.afterDoctypeSystemKeywordState
+                    return True
+
+            # All the characters read before the current 'data' will be
+            # [a-zA-Z], so they're garbage in the bogus doctype and can be
+            # discarded; only the latest character might be '>' or EOF
+            # and needs to be pushed back via unget()
+            self.stream.unget(data)
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "expected-space-or-right-bracket-in-doctype", "datavars":
+                                    {"data": data}})
+            self.currentToken["correct"] = False
+            self.state = self.bogusDoctypeState
+
+        return True
+
+    def afterDoctypePublicKeywordState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            self.state = self.beforeDoctypePublicIdentifierState
+        elif data in ("'", '"'):
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-char-in-doctype"})
+            self.stream.unget(data)
+            self.state = self.beforeDoctypePublicIdentifierState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.stream.unget(data)
+            self.state = self.beforeDoctypePublicIdentifierState
+        return True
+
+    def beforeDoctypePublicIdentifierState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            pass
+        elif data == "\"":
+            self.currentToken["publicId"] = ""
+            self.state = self.doctypePublicIdentifierDoubleQuotedState
+        elif data == "'":
+            self.currentToken["publicId"] = ""
+            self.state = self.doctypePublicIdentifierSingleQuotedState
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-end-of-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-char-in-doctype"})
+            self.currentToken["correct"] = False
+            self.state = self.bogusDoctypeState
+        return True
+
+    def doctypePublicIdentifierDoubleQuotedState(self):
+        data = self.stream.char()
+        if data == "\"":
+            self.state = self.afterDoctypePublicIdentifierState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["publicId"] += "\uFFFD"
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-end-of-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.currentToken["publicId"] += data
+        return True
+
+    def doctypePublicIdentifierSingleQuotedState(self):
+        data = self.stream.char()
+        if data == "'":
+            self.state = self.afterDoctypePublicIdentifierState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["publicId"] += "\uFFFD"
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-end-of-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.currentToken["publicId"] += data
+        return True
+
+    def afterDoctypePublicIdentifierState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            self.state = self.betweenDoctypePublicAndSystemIdentifiersState
+        elif data == ">":
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data == '"':
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-char-in-doctype"})
+            self.currentToken["systemId"] = ""
+            self.state = self.doctypeSystemIdentifierDoubleQuotedState
+        elif data == "'":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-char-in-doctype"})
+            self.currentToken["systemId"] = ""
+            self.state = self.doctypeSystemIdentifierSingleQuotedState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-char-in-doctype"})
+            self.currentToken["correct"] = False
+            self.state = self.bogusDoctypeState
+        return True
+
+    def betweenDoctypePublicAndSystemIdentifiersState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            pass
+        elif data == ">":
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data == '"':
+            self.currentToken["systemId"] = ""
+            self.state = self.doctypeSystemIdentifierDoubleQuotedState
+        elif data == "'":
+            self.currentToken["systemId"] = ""
+            self.state = self.doctypeSystemIdentifierSingleQuotedState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-char-in-doctype"})
+            self.currentToken["correct"] = False
+            self.state = self.bogusDoctypeState
+        return True
+
+    def afterDoctypeSystemKeywordState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            self.state = self.beforeDoctypeSystemIdentifierState
+        elif data in ("'", '"'):
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-char-in-doctype"})
+            self.stream.unget(data)
+            self.state = self.beforeDoctypeSystemIdentifierState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.stream.unget(data)
+            self.state = self.beforeDoctypeSystemIdentifierState
+        return True
+
+    def beforeDoctypeSystemIdentifierState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            pass
+        elif data == "\"":
+            self.currentToken["systemId"] = ""
+            self.state = self.doctypeSystemIdentifierDoubleQuotedState
+        elif data == "'":
+            self.currentToken["systemId"] = ""
+            self.state = self.doctypeSystemIdentifierSingleQuotedState
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-char-in-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-char-in-doctype"})
+            self.currentToken["correct"] = False
+            self.state = self.bogusDoctypeState
+        return True
+
+    def doctypeSystemIdentifierDoubleQuotedState(self):
+        data = self.stream.char()
+        if data == "\"":
+            self.state = self.afterDoctypeSystemIdentifierState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["systemId"] += "\uFFFD"
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-end-of-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.currentToken["systemId"] += data
+        return True
+
+    def doctypeSystemIdentifierSingleQuotedState(self):
+        data = self.stream.char()
+        if data == "'":
+            self.state = self.afterDoctypeSystemIdentifierState
+        elif data == "\u0000":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                    "data": "invalid-codepoint"})
+            self.currentToken["systemId"] += "\uFFFD"
+        elif data == ">":
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-end-of-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.currentToken["systemId"] += data
+        return True
+
+    def afterDoctypeSystemIdentifierState(self):
+        data = self.stream.char()
+        if data in spaceCharacters:
+            pass
+        elif data == ">":
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data is EOF:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "eof-in-doctype"})
+            self.currentToken["correct"] = False
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
+                                    "unexpected-char-in-doctype"})
+            self.state = self.bogusDoctypeState
+        return True
+
+    def bogusDoctypeState(self):
+        data = self.stream.char()
+        if data == ">":
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        elif data is EOF:
+            # XXX EMIT
+            self.stream.unget(data)
+            self.tokenQueue.append(self.currentToken)
+            self.state = self.dataState
+        else:
+            pass
+        return True
+
+    def cdataSectionState(self):
+        data = []
+        while True:
+            data.append(self.stream.charsUntil("]"))
+            data.append(self.stream.charsUntil(">"))
+            char = self.stream.char()
+            if char is EOF:
+                break
+            else:
+                assert char == ">"
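+                # A trailing "]]" in the buffered data means the stream
+                # just hit the "]]>" terminator: strip it and stop.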
+                if data[-1][-2:] == "]]":
+                    data[-1] = data[-1][:-2]
+                    break
+                else:
+                    data.append(char)
+
+        data = "".join(data)  # pylint:disable=redefined-variable-type
+        # Deal with null here rather than in the parser
+        nullCount = data.count("\u0000")
+        if nullCount > 0:
+            for _ in range(nullCount):
+                self.tokenQueue.append({"type": tokenTypes["ParseError"],
+                                        "data": "invalid-codepoint"})
+            data = data.replace("\u0000", "\uFFFD")
+        if data:
+            self.tokenQueue.append({"type": tokenTypes["Characters"],
+                                    "data": data})
+        self.state = self.dataState
+        return True
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__init__.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5ba4bf123aa585ec8a47dce6838b09b4dfa0b1a
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__init__.py
@@ -0,0 +1,14 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from .py import Trie as PyTrie
+
+Trie = PyTrie
+
+# pylint:disable=wrong-import-position
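+# Prefer the optional C-accelerated datrie backend when it is
+# importable; otherwise keep the pure-Python Trie.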
+try:
+    from .datrie import Trie as DATrie
+except ImportError:
+    pass
+else:
+    Trie = DATrie
+# pylint:enable=wrong-import-position
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..63056f5c63560331f8355b64ece4a726565ea961
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__pycache__/_base.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__pycache__/_base.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c83d10d5e14b9ce7b3daa0bf24bc95aa79ee8e2
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__pycache__/_base.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__pycache__/datrie.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__pycache__/datrie.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..893892bc5eb41569fadcf6c390c234c970ea90a5
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__pycache__/datrie.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__pycache__/py.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__pycache__/py.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f73ec56dbb9979cf74821e5c7839cf4d5c138863
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/__pycache__/py.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/_base.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1158bbbfa5b7421b17ba5d3d48d9736b87fd059
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/_base.py
@@ -0,0 +1,37 @@
+from __future__ import absolute_import, division, unicode_literals
+
+try:
+    from collections.abc import Mapping
+except ImportError:  # Python 2.7
+    from collections import Mapping
+
+
+class Trie(Mapping):
+    """Abstract base class for tries"""
+
+    def keys(self, prefix=None):
+        # pylint:disable=arguments-differ
+        keys = super(Trie, self).keys()
+
+        if prefix is None:
+            return set(keys)
+
+        return {x for x in keys if x.startswith(prefix)}
+
+    def has_keys_with_prefix(self, prefix):
+        for key in self.keys():
+            if key.startswith(prefix):
+                return True
+
+        return False
+
+    def longest_prefix(self, prefix):
+        if prefix in self:
+            return prefix
+
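+        # Otherwise try progressively shorter slices of the prefix,
+        # longest first, and return the first one present in the trie.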
+        for i in range(1, len(prefix) + 1):
+            if prefix[:-i] in self:
+                return prefix[:-i]
+
+        raise KeyError(prefix)
+
+    def longest_prefix_item(self, prefix):
+        lprefix = self.longest_prefix(prefix)
+        return (lprefix, self[lprefix])
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/datrie.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/datrie.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2e5f86621c3881d1d812814aec9d153dca34bd1
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/datrie.py
@@ -0,0 +1,44 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from datrie import Trie as DATrie
+from pip._vendor.six import text_type
+
+from ._base import Trie as ABCTrie
+
+
+class Trie(ABCTrie):
+    def __init__(self, data):
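+        # datrie needs its alphabet up front, so collect every character
+        # that occurs in any key before constructing the trie.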
+        chars = set()
+        for key in data.keys():
+            if not isinstance(key, text_type):
+                raise TypeError("All keys must be strings")
+            for char in key:
+                chars.add(char)
+
+        self._data = DATrie("".join(chars))
+        for key, value in data.items():
+            self._data[key] = value
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        raise NotImplementedError()
+
+    def __getitem__(self, key):
+        return self._data[key]
+
+    def keys(self, prefix=None):
+        return self._data.keys(prefix)
+
+    def has_keys_with_prefix(self, prefix):
+        return self._data.has_keys_with_prefix(prefix)
+
+    def longest_prefix(self, prefix):
+        return self._data.longest_prefix(prefix)
+
+    def longest_prefix_item(self, prefix):
+        return self._data.longest_prefix_item(prefix)
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/py.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/py.py
new file mode 100644
index 0000000000000000000000000000000000000000..c178b219def8f7fbfe6d00f7822a8e61293d003a
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/_trie/py.py
@@ -0,0 +1,67 @@
+from __future__ import absolute_import, division, unicode_literals
+from pip._vendor.six import text_type
+
+from bisect import bisect_left
+
+from ._base import Trie as ABCTrie
+
+
+class Trie(ABCTrie):
+    def __init__(self, data):
+        if not all(isinstance(x, text_type) for x in data.keys()):
+            raise TypeError("All keys must be strings")
+
+        self._data = data
+        self._keys = sorted(data.keys())
+        self._cachestr = ""
+        self._cachepoints = (0, len(data))
+
+    def __contains__(self, key):
+        return key in self._data
+
+    def __len__(self):
+        return len(self._data)
+
+    def __iter__(self):
+        return iter(self._data)
+
+    def __getitem__(self, key):
+        return self._data[key]
+
+    def keys(self, prefix=None):
+        if prefix is None or prefix == "" or not self._keys:
+            return set(self._keys)
+
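+        # _cachestr/_cachepoints remember the slice of the sorted key
+        # list matched by the previous query; a query for an extension
+        # of that prefix can restrict the binary search to that window.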
+        if prefix.startswith(self._cachestr):
+            lo, hi = self._cachepoints
+            start = i = bisect_left(self._keys, prefix, lo, hi)
+        else:
+            start = i = bisect_left(self._keys, prefix)
+
+        keys = set()
+        if start == len(self._keys):
+            return keys
+
+        while i < len(self._keys) and self._keys[i].startswith(prefix):
+            keys.add(self._keys[i])
+            i += 1
+
+        self._cachestr = prefix
+        self._cachepoints = (start, i)
+
+        return keys
+
+    def has_keys_with_prefix(self, prefix):
+        if prefix in self._data:
+            return True
+
+        if prefix.startswith(self._cachestr):
+            lo, hi = self._cachepoints
+            i = bisect_left(self._keys, prefix, lo, hi)
+        else:
+            i = bisect_left(self._keys, prefix)
+
+        if i == len(self._keys):
+            return False
+
+        return self._keys[i].startswith(prefix)
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/_utils.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0703afb38b13bf56998d43aa542dcea9839d8132
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/_utils.py
@@ -0,0 +1,124 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from types import ModuleType
+
+from pip._vendor.six import text_type
+
+try:
+    import xml.etree.cElementTree as default_etree
+except ImportError:
+    import xml.etree.ElementTree as default_etree
+
+
+__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair",
+           "surrogatePairToCodepoint", "moduleFactoryFactory",
+           "supports_lone_surrogates"]
+
+
+# Platforms not supporting lone surrogates (\uD800-\uDFFF) should be
+# caught by the below test. In general this would be any platform
+# using UTF-16 as its encoding of unicode strings, such as
+# Jython. This is because UTF-16 itself is based on the use of such
+# surrogates, and there is no mechanism to further escape such
+# escapes.
+try:
+    _x = eval('"\\uD800"')  # pylint:disable=eval-used
+    if not isinstance(_x, text_type):
+        # We need this with u"" because of http://bugs.jython.org/issue2039
+        _x = eval('u"\\uD800"')  # pylint:disable=eval-used
+        assert isinstance(_x, text_type)
+except:  # pylint:disable=bare-except
+    supports_lone_surrogates = False
+else:
+    supports_lone_surrogates = True
+
+
+class MethodDispatcher(dict):
+    """Dict with 2 special properties:
+
+    On initialization, keys that are lists, sets or tuples are converted to
+    multiple keys so accessing any one of the items in the original
+    list-like object returns the matching value
+
+    md = MethodDispatcher({("foo", "bar"):"baz"})
+    md["foo"] == "baz"
+
+    Lookups of missing keys return a default value, which can be set
+    through the default attribute:
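+
+    md.default = "qux"
+    md["unknown"] == "qux"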
+    """
+
+    def __init__(self, items=()):
+        # Using _dictEntries instead of directly assigning to self is about
+        # twice as fast. Please do careful performance testing before changing
+        # anything here.
+        _dictEntries = []
+        for name, value in items:
+            if isinstance(name, (list, tuple, frozenset, set)):
+                for item in name:
+                    _dictEntries.append((item, value))
+            else:
+                _dictEntries.append((name, value))
+        dict.__init__(self, _dictEntries)
+        assert len(self) == len(_dictEntries)
+        self.default = None
+
+    def __getitem__(self, key):
+        return dict.get(self, key, self.default)
+
+
+# Some utility functions to deal with weirdness around UCS2 vs UCS4
+# python builds
+
+def isSurrogatePair(data):
+    return (len(data) == 2 and
+            ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
+            ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)
+
+
+def surrogatePairToCodepoint(data):
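+    # Standard UTF-16 decoding: the high surrogate carries 10 bits, the
+    # low surrogate the remaining 10, offset by 0x10000; for example
+    # ("\uD83D", "\uDE00") maps to U+1F600.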
+    char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
+                (ord(data[1]) - 0xDC00))
+    return char_val
+
+# Module Factory Factory (no, this isn't Java, I know)
+# Here to stop this being duplicated all over the place.
+
+
+def moduleFactoryFactory(factory):
+    moduleCache = {}
+
+    def moduleFactory(baseModule, *args, **kwargs):
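+        # Module names must be native strings: text on Python 3, bytes
+        # on Python 2, hence the check against ModuleType.__name__.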
+        if isinstance(ModuleType.__name__, type("")):
+            name = "_%s_factory" % baseModule.__name__
+        else:
+            name = b"_%s_factory" % baseModule.__name__
+
+        kwargs_tuple = tuple(kwargs.items())
+
+        try:
+            return moduleCache[name][args][kwargs_tuple]
+        except KeyError:
+            mod = ModuleType(name)
+            objs = factory(baseModule, *args, **kwargs)
+            mod.__dict__.update(objs)
+            if "name" not in moduleCache:
+                moduleCache[name] = {}
+            if "args" not in moduleCache[name]:
+                moduleCache[name][args] = {}
+            if "kwargs" not in moduleCache[name][args]:
+                moduleCache[name][args][kwargs_tuple] = {}
+            moduleCache[name][args][kwargs_tuple] = mod
+            return mod
+
+    return moduleFactory
+
+
+def memoize(func):
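+    # Unbounded memoization keyed on the positional and keyword
+    # arguments, all of which must be hashable.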
+    cache = {}
+
+    def wrapped(*args, **kwargs):
+        key = (tuple(args), tuple(kwargs.items()))
+        if key not in cache:
+            cache[key] = func(*args, **kwargs)
+        return cache[key]
+
+    return wrapped
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/constants.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ff804190cd890e68be62fccb1a907ff9122f78a
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/constants.py
@@ -0,0 +1,2947 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import string
+
+EOF = None
+
+E = {
+    "null-character":
+        "Null character in input stream, replaced with U+FFFD.",
+    "invalid-codepoint":
+        "Invalid codepoint in stream.",
+    "incorrectly-placed-solidus":
+        "Solidus (/) incorrectly placed in tag.",
+    "incorrect-cr-newline-entity":
+        "Incorrect CR newline entity, replaced with LF.",
+    "illegal-windows-1252-entity":
+        "Entity used with illegal number (windows-1252 reference).",
+    "cant-convert-numeric-entity":
+        "Numeric entity couldn't be converted to character "
+        "(codepoint U+%(charAsInt)08x).",
+    "illegal-codepoint-for-numeric-entity":
+        "Numeric entity represents an illegal codepoint: "
+        "U+%(charAsInt)08x.",
+    "numeric-entity-without-semicolon":
+        "Numeric entity didn't end with ';'.",
+    "expected-numeric-entity-but-got-eof":
+        "Numeric entity expected. Got end of file instead.",
+    "expected-numeric-entity":
+        "Numeric entity expected but none found.",
+    "named-entity-without-semicolon":
+        "Named entity didn't end with ';'.",
+    "expected-named-entity":
+        "Named entity expected. Got none.",
+    "attributes-in-end-tag":
+        "End tag contains unexpected attributes.",
+    'self-closing-flag-on-end-tag':
+        "End tag contains unexpected self-closing flag.",
+    "expected-tag-name-but-got-right-bracket":
+        "Expected tag name. Got '>' instead.",
+    "expected-tag-name-but-got-question-mark":
+        "Expected tag name. Got '?' instead. (HTML doesn't "
+        "support processing instructions.)",
+    "expected-tag-name":
+        "Expected tag name. Got something else instead",
+    "expected-closing-tag-but-got-right-bracket":
+        "Expected closing tag. Got '>' instead. Ignoring '</>'.",
+    "expected-closing-tag-but-got-eof":
+        "Expected closing tag. Unexpected end of file.",
+    "expected-closing-tag-but-got-char":
+        "Expected closing tag. Unexpected character '%(data)s' found.",
+    "eof-in-tag-name":
+        "Unexpected end of file in the tag name.",
+    "expected-attribute-name-but-got-eof":
+        "Unexpected end of file. Expected attribute name instead.",
+    "eof-in-attribute-name":
+        "Unexpected end of file in attribute name.",
+    "invalid-character-in-attribute-name":
+        "Invalid character in attribute name",
+    "duplicate-attribute":
+        "Dropped duplicate attribute on tag.",
+    "expected-end-of-tag-name-but-got-eof":
+        "Unexpected end of file. Expected = or end of tag.",
+    "expected-attribute-value-but-got-eof":
+        "Unexpected end of file. Expected attribute value.",
+    "expected-attribute-value-but-got-right-bracket":
+        "Expected attribute value. Got '>' instead.",
+    'equals-in-unquoted-attribute-value':
+        "Unexpected = in unquoted attribute",
+    'unexpected-character-in-unquoted-attribute-value':
+        "Unexpected character in unquoted attribute",
+    "invalid-character-after-attribute-name":
+        "Unexpected character after attribute name.",
+    "unexpected-character-after-attribute-value":
+        "Unexpected character after attribute value.",
+    "eof-in-attribute-value-double-quote":
+        "Unexpected end of file in attribute value (\").",
+    "eof-in-attribute-value-single-quote":
+        "Unexpected end of file in attribute value (').",
+    "eof-in-attribute-value-no-quotes":
+        "Unexpected end of file in attribute value.",
+    "unexpected-EOF-after-solidus-in-tag":
+        "Unexpected end of file in tag. Expected >",
+    "unexpected-character-after-solidus-in-tag":
+        "Unexpected character after / in tag. Expected >",
+    "expected-dashes-or-doctype":
+        "Expected '--' or 'DOCTYPE'. Not found.",
+    "unexpected-bang-after-double-dash-in-comment":
+        "Unexpected ! after -- in comment",
+    "unexpected-space-after-double-dash-in-comment":
+        "Unexpected space after -- in comment",
+    "incorrect-comment":
+        "Incorrect comment.",
+    "eof-in-comment":
+        "Unexpected end of file in comment.",
+    "eof-in-comment-end-dash":
+        "Unexpected end of file in comment (-)",
+    "unexpected-dash-after-double-dash-in-comment":
+        "Unexpected '-' after '--' found in comment.",
+    "eof-in-comment-double-dash":
+        "Unexpected end of file in comment (--).",
+    "eof-in-comment-end-space-state":
+        "Unexpected end of file in comment.",
+    "eof-in-comment-end-bang-state":
+        "Unexpected end of file in comment.",
+    "unexpected-char-in-comment":
+        "Unexpected character in comment found.",
+    "need-space-after-doctype":
+        "No space after literal string 'DOCTYPE'.",
+    "expected-doctype-name-but-got-right-bracket":
+        "Unexpected > character. Expected DOCTYPE name.",
+    "expected-doctype-name-but-got-eof":
+        "Unexpected end of file. Expected DOCTYPE name.",
+    "eof-in-doctype-name":
+        "Unexpected end of file in DOCTYPE name.",
+    "eof-in-doctype":
+        "Unexpected end of file in DOCTYPE.",
+    "expected-space-or-right-bracket-in-doctype":
+        "Expected space or '>'. Got '%(data)s'",
+    "unexpected-end-of-doctype":
+        "Unexpected end of DOCTYPE.",
+    "unexpected-char-in-doctype":
+        "Unexpected character in DOCTYPE.",
+    "eof-in-innerhtml":
+        "XXX innerHTML EOF",
+    "unexpected-doctype":
+        "Unexpected DOCTYPE. Ignored.",
+    "non-html-root":
+        "html needs to be the first start tag.",
+    "expected-doctype-but-got-eof":
+        "Unexpected End of file. Expected DOCTYPE.",
+    "unknown-doctype":
+        "Erroneous DOCTYPE.",
+    "expected-doctype-but-got-chars":
+        "Unexpected non-space characters. Expected DOCTYPE.",
+    "expected-doctype-but-got-start-tag":
+        "Unexpected start tag (%(name)s). Expected DOCTYPE.",
+    "expected-doctype-but-got-end-tag":
+        "Unexpected end tag (%(name)s). Expected DOCTYPE.",
+    "end-tag-after-implied-root":
+        "Unexpected end tag (%(name)s) after the (implied) root element.",
+    "expected-named-closing-tag-but-got-eof":
+        "Unexpected end of file. Expected end tag (%(name)s).",
+    "two-heads-are-not-better-than-one":
+        "Unexpected start tag head in existing head. Ignored.",
+    "unexpected-end-tag":
+        "Unexpected end tag (%(name)s). Ignored.",
+    "unexpected-start-tag-out-of-my-head":
+        "Unexpected start tag (%(name)s) that can be in head. Moved.",
+    "unexpected-start-tag":
+        "Unexpected start tag (%(name)s).",
+    "missing-end-tag":
+        "Missing end tag (%(name)s).",
+    "missing-end-tags":
+        "Missing end tags (%(name)s).",
+    "unexpected-start-tag-implies-end-tag":
+        "Unexpected start tag (%(startName)s) "
+        "implies end tag (%(endName)s).",
+    "unexpected-start-tag-treated-as":
+        "Unexpected start tag (%(originalName)s). Treated as %(newName)s.",
+    "deprecated-tag":
+        "Unexpected start tag %(name)s. Don't use it!",
+    "unexpected-start-tag-ignored":
+        "Unexpected start tag %(name)s. Ignored.",
+    "expected-one-end-tag-but-got-another":
+        "Unexpected end tag (%(gotName)s). "
+        "Missing end tag (%(expectedName)s).",
+    "end-tag-too-early":
+        "End tag (%(name)s) seen too early. Expected other end tag.",
+    "end-tag-too-early-named":
+        "Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).",
+    "end-tag-too-early-ignored":
+        "End tag (%(name)s) seen too early. Ignored.",
+    "adoption-agency-1.1":
+        "End tag (%(name)s) violates step 1, "
+        "paragraph 1 of the adoption agency algorithm.",
+    "adoption-agency-1.2":
+        "End tag (%(name)s) violates step 1, "
+        "paragraph 2 of the adoption agency algorithm.",
+    "adoption-agency-1.3":
+        "End tag (%(name)s) violates step 1, "
+        "paragraph 3 of the adoption agency algorithm.",
+    "adoption-agency-4.4":
+        "End tag (%(name)s) violates step 4, "
+        "paragraph 4 of the adoption agency algorithm.",
+    "unexpected-end-tag-treated-as":
+        "Unexpected end tag (%(originalName)s). Treated as %(newName)s.",
+    "no-end-tag":
+        "This element (%(name)s) has no end tag.",
+    "unexpected-implied-end-tag-in-table":
+        "Unexpected implied end tag (%(name)s) in the table phase.",
+    "unexpected-implied-end-tag-in-table-body":
+        "Unexpected implied end tag (%(name)s) in the table body phase.",
+    "unexpected-char-implies-table-voodoo":
+        "Unexpected non-space characters in "
+        "table context caused voodoo mode.",
+    "unexpected-hidden-input-in-table":
+        "Unexpected input with type hidden in table context.",
+    "unexpected-form-in-table":
+        "Unexpected form in table context.",
+    "unexpected-start-tag-implies-table-voodoo":
+        "Unexpected start tag (%(name)s) in "
+        "table context caused voodoo mode.",
+    "unexpected-end-tag-implies-table-voodoo":
+        "Unexpected end tag (%(name)s) in "
+        "table context caused voodoo mode.",
+    "unexpected-cell-in-table-body":
+        "Unexpected table cell start tag (%(name)s) "
+        "in the table body phase.",
+    "unexpected-cell-end-tag":
+        "Got table cell end tag (%(name)s) "
+        "while required end tags are missing.",
+    "unexpected-end-tag-in-table-body":
+        "Unexpected end tag (%(name)s) in the table body phase. Ignored.",
+    "unexpected-implied-end-tag-in-table-row":
+        "Unexpected implied end tag (%(name)s) in the table row phase.",
+    "unexpected-end-tag-in-table-row":
+        "Unexpected end tag (%(name)s) in the table row phase. Ignored.",
+    "unexpected-select-in-select":
+        "Unexpected select start tag in the select phase "
+        "treated as select end tag.",
+    "unexpected-input-in-select":
+        "Unexpected input start tag in the select phase.",
+    "unexpected-start-tag-in-select":
+        "Unexpected start tag token (%(name)s in the select phase. "
+        "Ignored.",
+    "unexpected-end-tag-in-select":
+        "Unexpected end tag (%(name)s) in the select phase. Ignored.",
+    "unexpected-table-element-start-tag-in-select-in-table":
+        "Unexpected table element start tag (%(name)s) in the select in table phase.",
+    "unexpected-table-element-end-tag-in-select-in-table":
+        "Unexpected table element end tag (%(name)s) in the select in table phase.",
+    "unexpected-char-after-body":
+        "Unexpected non-space characters in the after body phase.",
+    "unexpected-start-tag-after-body":
+        "Unexpected start tag token (%(name)s)"
+        " in the after body phase.",
+    "unexpected-end-tag-after-body":
+        "Unexpected end tag token (%(name)s)"
+        " in the after body phase.",
+    "unexpected-char-in-frameset":
+        "Unexpected characters in the frameset phase. Characters ignored.",
+    "unexpected-start-tag-in-frameset":
+        "Unexpected start tag token (%(name)s)"
+        " in the frameset phase. Ignored.",
+    "unexpected-frameset-in-frameset-innerhtml":
+        "Unexpected end tag token (frameset) "
+        "in the frameset phase (innerHTML).",
+    "unexpected-end-tag-in-frameset":
+        "Unexpected end tag token (%(name)s)"
+        " in the frameset phase. Ignored.",
+    "unexpected-char-after-frameset":
+        "Unexpected non-space characters in the "
+        "after frameset phase. Ignored.",
+    "unexpected-start-tag-after-frameset":
+        "Unexpected start tag (%(name)s)"
+        " in the after frameset phase. Ignored.",
+    "unexpected-end-tag-after-frameset":
+        "Unexpected end tag (%(name)s)"
+        " in the after frameset phase. Ignored.",
+    "unexpected-end-tag-after-body-innerhtml":
+        "Unexpected end tag after body(innerHtml)",
+    "expected-eof-but-got-char":
+        "Unexpected non-space characters. Expected end of file.",
+    "expected-eof-but-got-start-tag":
+        "Unexpected start tag (%(name)s)"
+        ". Expected end of file.",
+    "expected-eof-but-got-end-tag":
+        "Unexpected end tag (%(name)s)"
+        ". Expected end of file.",
+    "eof-in-table":
+        "Unexpected end of file. Expected table content.",
+    "eof-in-select":
+        "Unexpected end of file. Expected select content.",
+    "eof-in-frameset":
+        "Unexpected end of file. Expected frameset content.",
+    "eof-in-script-in-script":
+        "Unexpected end of file. Expected script content.",
+    "eof-in-foreign-lands":
+        "Unexpected end of file. Expected foreign content",
+    "non-void-element-with-trailing-solidus":
+        "Trailing solidus not allowed on element %(name)s",
+    "unexpected-html-element-in-foreign-content":
+        "Element %(name)s not allowed in a non-html context",
+    "unexpected-end-tag-before-html":
+        "Unexpected end tag (%(name)s) before html.",
+    "unexpected-inhead-noscript-tag":
+        "Element %(name)s not allowed in a inhead-noscript context",
+    "eof-in-head-noscript":
+        "Unexpected end of file. Expected inhead-noscript content",
+    "char-in-head-noscript":
+        "Unexpected non-space character. Expected inhead-noscript content",
+    "XXX-undefined-error":
+        "Undefined error (this sucks and should be fixed)",
+}
+
+namespaces = {
+    "html": "http://www.w3.org/1999/xhtml",
+    "mathml": "http://www.w3.org/1998/Math/MathML",
+    "svg": "http://www.w3.org/2000/svg",
+    "xlink": "http://www.w3.org/1999/xlink",
+    "xml": "http://www.w3.org/XML/1998/namespace",
+    "xmlns": "http://www.w3.org/2000/xmlns/"
+}
+
+scopingElements = frozenset([
+    (namespaces["html"], "applet"),
+    (namespaces["html"], "caption"),
+    (namespaces["html"], "html"),
+    (namespaces["html"], "marquee"),
+    (namespaces["html"], "object"),
+    (namespaces["html"], "table"),
+    (namespaces["html"], "td"),
+    (namespaces["html"], "th"),
+    (namespaces["mathml"], "mi"),
+    (namespaces["mathml"], "mo"),
+    (namespaces["mathml"], "mn"),
+    (namespaces["mathml"], "ms"),
+    (namespaces["mathml"], "mtext"),
+    (namespaces["mathml"], "annotation-xml"),
+    (namespaces["svg"], "foreignObject"),
+    (namespaces["svg"], "desc"),
+    (namespaces["svg"], "title"),
+])
+
+formattingElements = frozenset([
+    (namespaces["html"], "a"),
+    (namespaces["html"], "b"),
+    (namespaces["html"], "big"),
+    (namespaces["html"], "code"),
+    (namespaces["html"], "em"),
+    (namespaces["html"], "font"),
+    (namespaces["html"], "i"),
+    (namespaces["html"], "nobr"),
+    (namespaces["html"], "s"),
+    (namespaces["html"], "small"),
+    (namespaces["html"], "strike"),
+    (namespaces["html"], "strong"),
+    (namespaces["html"], "tt"),
+    (namespaces["html"], "u")
+])
+
+specialElements = frozenset([
+    (namespaces["html"], "address"),
+    (namespaces["html"], "applet"),
+    (namespaces["html"], "area"),
+    (namespaces["html"], "article"),
+    (namespaces["html"], "aside"),
+    (namespaces["html"], "base"),
+    (namespaces["html"], "basefont"),
+    (namespaces["html"], "bgsound"),
+    (namespaces["html"], "blockquote"),
+    (namespaces["html"], "body"),
+    (namespaces["html"], "br"),
+    (namespaces["html"], "button"),
+    (namespaces["html"], "caption"),
+    (namespaces["html"], "center"),
+    (namespaces["html"], "col"),
+    (namespaces["html"], "colgroup"),
+    (namespaces["html"], "command"),
+    (namespaces["html"], "dd"),
+    (namespaces["html"], "details"),
+    (namespaces["html"], "dir"),
+    (namespaces["html"], "div"),
+    (namespaces["html"], "dl"),
+    (namespaces["html"], "dt"),
+    (namespaces["html"], "embed"),
+    (namespaces["html"], "fieldset"),
+    (namespaces["html"], "figure"),
+    (namespaces["html"], "footer"),
+    (namespaces["html"], "form"),
+    (namespaces["html"], "frame"),
+    (namespaces["html"], "frameset"),
+    (namespaces["html"], "h1"),
+    (namespaces["html"], "h2"),
+    (namespaces["html"], "h3"),
+    (namespaces["html"], "h4"),
+    (namespaces["html"], "h5"),
+    (namespaces["html"], "h6"),
+    (namespaces["html"], "head"),
+    (namespaces["html"], "header"),
+    (namespaces["html"], "hr"),
+    (namespaces["html"], "html"),
+    (namespaces["html"], "iframe"),
+    # Note that image is commented out in the spec as "this isn't an
+    # element that can end up on the stack, so it doesn't matter".
+    (namespaces["html"], "image"),
+    (namespaces["html"], "img"),
+    (namespaces["html"], "input"),
+    (namespaces["html"], "isindex"),
+    (namespaces["html"], "li"),
+    (namespaces["html"], "link"),
+    (namespaces["html"], "listing"),
+    (namespaces["html"], "marquee"),
+    (namespaces["html"], "menu"),
+    (namespaces["html"], "meta"),
+    (namespaces["html"], "nav"),
+    (namespaces["html"], "noembed"),
+    (namespaces["html"], "noframes"),
+    (namespaces["html"], "noscript"),
+    (namespaces["html"], "object"),
+    (namespaces["html"], "ol"),
+    (namespaces["html"], "p"),
+    (namespaces["html"], "param"),
+    (namespaces["html"], "plaintext"),
+    (namespaces["html"], "pre"),
+    (namespaces["html"], "script"),
+    (namespaces["html"], "section"),
+    (namespaces["html"], "select"),
+    (namespaces["html"], "style"),
+    (namespaces["html"], "table"),
+    (namespaces["html"], "tbody"),
+    (namespaces["html"], "td"),
+    (namespaces["html"], "textarea"),
+    (namespaces["html"], "tfoot"),
+    (namespaces["html"], "th"),
+    (namespaces["html"], "thead"),
+    (namespaces["html"], "title"),
+    (namespaces["html"], "tr"),
+    (namespaces["html"], "ul"),
+    (namespaces["html"], "wbr"),
+    (namespaces["html"], "xmp"),
+    (namespaces["svg"], "foreignObject")
+])
+
+htmlIntegrationPointElements = frozenset([
+    (namespaces["mathml"], "annotation-xml"),
+    (namespaces["svg"], "foreignObject"),
+    (namespaces["svg"], "desc"),
+    (namespaces["svg"], "title")
+])
+
+mathmlTextIntegrationPointElements = frozenset([
+    (namespaces["mathml"], "mi"),
+    (namespaces["mathml"], "mo"),
+    (namespaces["mathml"], "mn"),
+    (namespaces["mathml"], "ms"),
+    (namespaces["mathml"], "mtext")
+])
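+
+# Editorial sketch, not html5lib's real API: at an integration point the
+# tree constructor leaves foreign (SVG/MathML) content rules and parses
+# the children as ordinary HTML.  This is a simplification: per the spec,
+# annotation-xml only counts as an HTML integration point when its
+# encoding attribute is "text/html" or "application/xhtml+xml".
+def _example_is_integration_point(namespace, name):
+    return ((namespace, name) in htmlIntegrationPointElements or
+            (namespace, name) in mathmlTextIntegrationPointElements)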
+
+adjustSVGAttributes = {
+    "attributename": "attributeName",
+    "attributetype": "attributeType",
+    "basefrequency": "baseFrequency",
+    "baseprofile": "baseProfile",
+    "calcmode": "calcMode",
+    "clippathunits": "clipPathUnits",
+    "contentscripttype": "contentScriptType",
+    "contentstyletype": "contentStyleType",
+    "diffuseconstant": "diffuseConstant",
+    "edgemode": "edgeMode",
+    "externalresourcesrequired": "externalResourcesRequired",
+    "filterres": "filterRes",
+    "filterunits": "filterUnits",
+    "glyphref": "glyphRef",
+    "gradienttransform": "gradientTransform",
+    "gradientunits": "gradientUnits",
+    "kernelmatrix": "kernelMatrix",
+    "kernelunitlength": "kernelUnitLength",
+    "keypoints": "keyPoints",
+    "keysplines": "keySplines",
+    "keytimes": "keyTimes",
+    "lengthadjust": "lengthAdjust",
+    "limitingconeangle": "limitingConeAngle",
+    "markerheight": "markerHeight",
+    "markerunits": "markerUnits",
+    "markerwidth": "markerWidth",
+    "maskcontentunits": "maskContentUnits",
+    "maskunits": "maskUnits",
+    "numoctaves": "numOctaves",
+    "pathlength": "pathLength",
+    "patterncontentunits": "patternContentUnits",
+    "patterntransform": "patternTransform",
+    "patternunits": "patternUnits",
+    "pointsatx": "pointsAtX",
+    "pointsaty": "pointsAtY",
+    "pointsatz": "pointsAtZ",
+    "preservealpha": "preserveAlpha",
+    "preserveaspectratio": "preserveAspectRatio",
+    "primitiveunits": "primitiveUnits",
+    "refx": "refX",
+    "refy": "refY",
+    "repeatcount": "repeatCount",
+    "repeatdur": "repeatDur",
+    "requiredextensions": "requiredExtensions",
+    "requiredfeatures": "requiredFeatures",
+    "specularconstant": "specularConstant",
+    "specularexponent": "specularExponent",
+    "spreadmethod": "spreadMethod",
+    "startoffset": "startOffset",
+    "stddeviation": "stdDeviation",
+    "stitchtiles": "stitchTiles",
+    "surfacescale": "surfaceScale",
+    "systemlanguage": "systemLanguage",
+    "tablevalues": "tableValues",
+    "targetx": "targetX",
+    "targety": "targetY",
+    "textlength": "textLength",
+    "viewbox": "viewBox",
+    "viewtarget": "viewTarget",
+    "xchannelselector": "xChannelSelector",
+    "ychannelselector": "yChannelSelector",
+    "zoomandpan": "zoomAndPan"
+}
+
+adjustMathMLAttributes = {"definitionurl": "definitionURL"}
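+
+# Editorial sketch, not html5lib's real API: the tokenizer lowercases all
+# attribute names, so inside SVG or MathML content the tree builder maps
+# them back to their mixed-case spellings via the tables above.  Pass
+# adjustSVGAttributes for SVG content and adjustMathMLAttributes for
+# MathML content.
+def _example_adjust_attribute_case(name, adjustments):
+    return adjustments.get(name, name)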
+
+adjustForeignAttributes = {
+    "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
+    "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
+    "xlink:href": ("xlink", "href", namespaces["xlink"]),
+    "xlink:role": ("xlink", "role", namespaces["xlink"]),
+    "xlink:show": ("xlink", "show", namespaces["xlink"]),
+    "xlink:title": ("xlink", "title", namespaces["xlink"]),
+    "xlink:type": ("xlink", "type", namespaces["xlink"]),
+    "xml:base": ("xml", "base", namespaces["xml"]),
+    "xml:lang": ("xml", "lang", namespaces["xml"]),
+    "xml:space": ("xml", "space", namespaces["xml"]),
+    "xmlns": (None, "xmlns", namespaces["xmlns"]),
+    "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
+}
+
+unadjustForeignAttributes = {(ns, local): qname
+                             for qname, (prefix, local, ns) in
+                             adjustForeignAttributes.items()}
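+
+# Editorial note: adjustForeignAttributes maps a lowercased qualified name
+# to a (prefix, local name, namespace) triple so the tree builder can emit
+# properly namespaced attributes; unadjustForeignAttributes is its inverse,
+# useful when serializing, e.g.
+#
+#     unadjustForeignAttributes[(namespaces["xlink"], "href")]
+#     # -> "xlink:href"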
+
+spaceCharacters = frozenset([
+    "\t",
+    "\n",
+    "\u000C",
+    " ",
+    "\r"
+])
+
+tableInsertModeElements = frozenset([
+    "table",
+    "tbody",
+    "tfoot",
+    "thead",
+    "tr"
+])
+
+asciiLowercase = frozenset(string.ascii_lowercase)
+asciiUppercase = frozenset(string.ascii_uppercase)
+asciiLetters = frozenset(string.ascii_letters)
+digits = frozenset(string.digits)
+hexDigits = frozenset(string.hexdigits)
+
+asciiUpper2Lower = {ord(c): ord(c.lower()) for c in string.ascii_uppercase}
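+
+# Editorial sketch: the {ord(upper): ord(lower)} shape is what
+# str.translate expects, giving ASCII-only lowercasing that leaves
+# non-ASCII characters untouched.  HTML tag and attribute names are ASCII
+# case-insensitive, so the Unicode-aware str.lower would be the wrong tool.
+def _example_ascii_lower(text):
+    return text.translate(asciiUpper2Lower)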
+
+# Heading elements need to be ordered
+headingElements = (
+    "h1",
+    "h2",
+    "h3",
+    "h4",
+    "h5",
+    "h6"
+)
+
+voidElements = frozenset([
+    "base",
+    "command",
+    "event-source",
+    "link",
+    "meta",
+    "hr",
+    "br",
+    "img",
+    "embed",
+    "param",
+    "area",
+    "col",
+    "input",
+    "source",
+    "track"
+])
+
+cdataElements = frozenset(["title", "textarea"])
+
+rcdataElements = frozenset([
+    "style",
+    "script",
+    "xmp",
+    "iframe",
+    "noembed",
+    "noframes",
+    "noscript"
+])
+
+booleanAttributes = {
+    "": frozenset(["irrelevant", "itemscope"]),
+    "style": frozenset(["scoped"]),
+    "img": frozenset(["ismap"]),
+    "audio": frozenset(["autoplay", "controls"]),
+    "video": frozenset(["autoplay", "controls"]),
+    "script": frozenset(["defer", "async"]),
+    "details": frozenset(["open"]),
+    "datagrid": frozenset(["multiple", "disabled"]),
+    "command": frozenset(["hidden", "disabled", "checked", "default"]),
+    "hr": frozenset(["noshade"]),
+    "menu": frozenset(["autosubmit"]),
+    "fieldset": frozenset(["disabled", "readonly"]),
+    "option": frozenset(["disabled", "readonly", "selected"]),
+    "optgroup": frozenset(["disabled", "readonly"]),
+    "button": frozenset(["disabled", "autofocus"]),
+    "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]),
+    "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]),
+    "output": frozenset(["disabled", "readonly"]),
+    "iframe": frozenset(["seamless"]),
+}
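+
+# Editorial sketch, not html5lib's real API: a serializer can consult this
+# table to decide whether an attribute's value may be omitted.  The "" key
+# holds attributes treated as boolean on every element.
+def _example_is_boolean_attribute(tag_name, attr_name):
+    return (attr_name in booleanAttributes.get("", frozenset()) or
+            attr_name in booleanAttributes.get(tag_name, frozenset()))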
+
+# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
+# therefore can't be a frozenset.
+entitiesWindows1252 = (
+    8364,   # 0x80  0x20AC  EURO SIGN
+    65533,  # 0x81          UNDEFINED
+    8218,   # 0x82  0x201A  SINGLE LOW-9 QUOTATION MARK
+    402,    # 0x83  0x0192  LATIN SMALL LETTER F WITH HOOK
+    8222,   # 0x84  0x201E  DOUBLE LOW-9 QUOTATION MARK
+    8230,   # 0x85  0x2026  HORIZONTAL ELLIPSIS
+    8224,   # 0x86  0x2020  DAGGER
+    8225,   # 0x87  0x2021  DOUBLE DAGGER
+    710,    # 0x88  0x02C6  MODIFIER LETTER CIRCUMFLEX ACCENT
+    8240,   # 0x89  0x2030  PER MILLE SIGN
+    352,    # 0x8A  0x0160  LATIN CAPITAL LETTER S WITH CARON
+    8249,   # 0x8B  0x2039  SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+    338,    # 0x8C  0x0152  LATIN CAPITAL LIGATURE OE
+    65533,  # 0x8D          UNDEFINED
+    381,    # 0x8E  0x017D  LATIN CAPITAL LETTER Z WITH CARON
+    65533,  # 0x8F          UNDEFINED
+    65533,  # 0x90          UNDEFINED
+    8216,   # 0x91  0x2018  LEFT SINGLE QUOTATION MARK
+    8217,   # 0x92  0x2019  RIGHT SINGLE QUOTATION MARK
+    8220,   # 0x93  0x201C  LEFT DOUBLE QUOTATION MARK
+    8221,   # 0x94  0x201D  RIGHT DOUBLE QUOTATION MARK
+    8226,   # 0x95  0x2022  BULLET
+    8211,   # 0x96  0x2013  EN DASH
+    8212,   # 0x97  0x2014  EM DASH
+    732,    # 0x98  0x02DC  SMALL TILDE
+    8482,   # 0x99  0x2122  TRADE MARK SIGN
+    353,    # 0x9A  0x0161  LATIN SMALL LETTER S WITH CARON
+    8250,   # 0x9B  0x203A  SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+    339,    # 0x9C  0x0153  LATIN SMALL LIGATURE OE
+    65533,  # 0x9D          UNDEFINED
+    382,    # 0x9E  0x017E  LATIN SMALL LETTER Z WITH CARON
+    376     # 0x9F  0x0178  LATIN CAPITAL LETTER Y WITH DIAERESIS
+)
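+
+# Editorial sketch of why the tuple above must stay ordered: the HTML spec
+# reinterprets numeric character references in the 0x80-0x9F range as
+# Windows-1252, so lookups index the tuple by (code_point - 0x80).
+def _example_windows1252_replacement(code_point):
+    if 0x80 <= code_point <= 0x9F:
+        return chr(entitiesWindows1252[code_point - 0x80])
+    return chr(code_point)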
+
+xmlEntities = frozenset(["lt;", "gt;", "amp;", "apos;", "quot;"])
+
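+# Editorial note: the following table maps HTML5 named character
+# references to their replacement text.  Keys appear both with and without
+# a trailing semicolon where the spec grandfathers the legacy
+# semicolon-less form (e.g. "amp" and "amp;"); references new to HTML5
+# exist only with the semicolon.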
+entities = {
+    "AElig": "\xc6",
+    "AElig;": "\xc6",
+    "AMP": "&",
+    "AMP;": "&",
+    "Aacute": "\xc1",
+    "Aacute;": "\xc1",
+    "Abreve;": "\u0102",
+    "Acirc": "\xc2",
+    "Acirc;": "\xc2",
+    "Acy;": "\u0410",
+    "Afr;": "\U0001d504",
+    "Agrave": "\xc0",
+    "Agrave;": "\xc0",
+    "Alpha;": "\u0391",
+    "Amacr;": "\u0100",
+    "And;": "\u2a53",
+    "Aogon;": "\u0104",
+    "Aopf;": "\U0001d538",
+    "ApplyFunction;": "\u2061",
+    "Aring": "\xc5",
+    "Aring;": "\xc5",
+    "Ascr;": "\U0001d49c",
+    "Assign;": "\u2254",
+    "Atilde": "\xc3",
+    "Atilde;": "\xc3",
+    "Auml": "\xc4",
+    "Auml;": "\xc4",
+    "Backslash;": "\u2216",
+    "Barv;": "\u2ae7",
+    "Barwed;": "\u2306",
+    "Bcy;": "\u0411",
+    "Because;": "\u2235",
+    "Bernoullis;": "\u212c",
+    "Beta;": "\u0392",
+    "Bfr;": "\U0001d505",
+    "Bopf;": "\U0001d539",
+    "Breve;": "\u02d8",
+    "Bscr;": "\u212c",
+    "Bumpeq;": "\u224e",
+    "CHcy;": "\u0427",
+    "COPY": "\xa9",
+    "COPY;": "\xa9",
+    "Cacute;": "\u0106",
+    "Cap;": "\u22d2",
+    "CapitalDifferentialD;": "\u2145",
+    "Cayleys;": "\u212d",
+    "Ccaron;": "\u010c",
+    "Ccedil": "\xc7",
+    "Ccedil;": "\xc7",
+    "Ccirc;": "\u0108",
+    "Cconint;": "\u2230",
+    "Cdot;": "\u010a",
+    "Cedilla;": "\xb8",
+    "CenterDot;": "\xb7",
+    "Cfr;": "\u212d",
+    "Chi;": "\u03a7",
+    "CircleDot;": "\u2299",
+    "CircleMinus;": "\u2296",
+    "CirclePlus;": "\u2295",
+    "CircleTimes;": "\u2297",
+    "ClockwiseContourIntegral;": "\u2232",
+    "CloseCurlyDoubleQuote;": "\u201d",
+    "CloseCurlyQuote;": "\u2019",
+    "Colon;": "\u2237",
+    "Colone;": "\u2a74",
+    "Congruent;": "\u2261",
+    "Conint;": "\u222f",
+    "ContourIntegral;": "\u222e",
+    "Copf;": "\u2102",
+    "Coproduct;": "\u2210",
+    "CounterClockwiseContourIntegral;": "\u2233",
+    "Cross;": "\u2a2f",
+    "Cscr;": "\U0001d49e",
+    "Cup;": "\u22d3",
+    "CupCap;": "\u224d",
+    "DD;": "\u2145",
+    "DDotrahd;": "\u2911",
+    "DJcy;": "\u0402",
+    "DScy;": "\u0405",
+    "DZcy;": "\u040f",
+    "Dagger;": "\u2021",
+    "Darr;": "\u21a1",
+    "Dashv;": "\u2ae4",
+    "Dcaron;": "\u010e",
+    "Dcy;": "\u0414",
+    "Del;": "\u2207",
+    "Delta;": "\u0394",
+    "Dfr;": "\U0001d507",
+    "DiacriticalAcute;": "\xb4",
+    "DiacriticalDot;": "\u02d9",
+    "DiacriticalDoubleAcute;": "\u02dd",
+    "DiacriticalGrave;": "`",
+    "DiacriticalTilde;": "\u02dc",
+    "Diamond;": "\u22c4",
+    "DifferentialD;": "\u2146",
+    "Dopf;": "\U0001d53b",
+    "Dot;": "\xa8",
+    "DotDot;": "\u20dc",
+    "DotEqual;": "\u2250",
+    "DoubleContourIntegral;": "\u222f",
+    "DoubleDot;": "\xa8",
+    "DoubleDownArrow;": "\u21d3",
+    "DoubleLeftArrow;": "\u21d0",
+    "DoubleLeftRightArrow;": "\u21d4",
+    "DoubleLeftTee;": "\u2ae4",
+    "DoubleLongLeftArrow;": "\u27f8",
+    "DoubleLongLeftRightArrow;": "\u27fa",
+    "DoubleLongRightArrow;": "\u27f9",
+    "DoubleRightArrow;": "\u21d2",
+    "DoubleRightTee;": "\u22a8",
+    "DoubleUpArrow;": "\u21d1",
+    "DoubleUpDownArrow;": "\u21d5",
+    "DoubleVerticalBar;": "\u2225",
+    "DownArrow;": "\u2193",
+    "DownArrowBar;": "\u2913",
+    "DownArrowUpArrow;": "\u21f5",
+    "DownBreve;": "\u0311",
+    "DownLeftRightVector;": "\u2950",
+    "DownLeftTeeVector;": "\u295e",
+    "DownLeftVector;": "\u21bd",
+    "DownLeftVectorBar;": "\u2956",
+    "DownRightTeeVector;": "\u295f",
+    "DownRightVector;": "\u21c1",
+    "DownRightVectorBar;": "\u2957",
+    "DownTee;": "\u22a4",
+    "DownTeeArrow;": "\u21a7",
+    "Downarrow;": "\u21d3",
+    "Dscr;": "\U0001d49f",
+    "Dstrok;": "\u0110",
+    "ENG;": "\u014a",
+    "ETH": "\xd0",
+    "ETH;": "\xd0",
+    "Eacute": "\xc9",
+    "Eacute;": "\xc9",
+    "Ecaron;": "\u011a",
+    "Ecirc": "\xca",
+    "Ecirc;": "\xca",
+    "Ecy;": "\u042d",
+    "Edot;": "\u0116",
+    "Efr;": "\U0001d508",
+    "Egrave": "\xc8",
+    "Egrave;": "\xc8",
+    "Element;": "\u2208",
+    "Emacr;": "\u0112",
+    "EmptySmallSquare;": "\u25fb",
+    "EmptyVerySmallSquare;": "\u25ab",
+    "Eogon;": "\u0118",
+    "Eopf;": "\U0001d53c",
+    "Epsilon;": "\u0395",
+    "Equal;": "\u2a75",
+    "EqualTilde;": "\u2242",
+    "Equilibrium;": "\u21cc",
+    "Escr;": "\u2130",
+    "Esim;": "\u2a73",
+    "Eta;": "\u0397",
+    "Euml": "\xcb",
+    "Euml;": "\xcb",
+    "Exists;": "\u2203",
+    "ExponentialE;": "\u2147",
+    "Fcy;": "\u0424",
+    "Ffr;": "\U0001d509",
+    "FilledSmallSquare;": "\u25fc",
+    "FilledVerySmallSquare;": "\u25aa",
+    "Fopf;": "\U0001d53d",
+    "ForAll;": "\u2200",
+    "Fouriertrf;": "\u2131",
+    "Fscr;": "\u2131",
+    "GJcy;": "\u0403",
+    "GT": ">",
+    "GT;": ">",
+    "Gamma;": "\u0393",
+    "Gammad;": "\u03dc",
+    "Gbreve;": "\u011e",
+    "Gcedil;": "\u0122",
+    "Gcirc;": "\u011c",
+    "Gcy;": "\u0413",
+    "Gdot;": "\u0120",
+    "Gfr;": "\U0001d50a",
+    "Gg;": "\u22d9",
+    "Gopf;": "\U0001d53e",
+    "GreaterEqual;": "\u2265",
+    "GreaterEqualLess;": "\u22db",
+    "GreaterFullEqual;": "\u2267",
+    "GreaterGreater;": "\u2aa2",
+    "GreaterLess;": "\u2277",
+    "GreaterSlantEqual;": "\u2a7e",
+    "GreaterTilde;": "\u2273",
+    "Gscr;": "\U0001d4a2",
+    "Gt;": "\u226b",
+    "HARDcy;": "\u042a",
+    "Hacek;": "\u02c7",
+    "Hat;": "^",
+    "Hcirc;": "\u0124",
+    "Hfr;": "\u210c",
+    "HilbertSpace;": "\u210b",
+    "Hopf;": "\u210d",
+    "HorizontalLine;": "\u2500",
+    "Hscr;": "\u210b",
+    "Hstrok;": "\u0126",
+    "HumpDownHump;": "\u224e",
+    "HumpEqual;": "\u224f",
+    "IEcy;": "\u0415",
+    "IJlig;": "\u0132",
+    "IOcy;": "\u0401",
+    "Iacute": "\xcd",
+    "Iacute;": "\xcd",
+    "Icirc": "\xce",
+    "Icirc;": "\xce",
+    "Icy;": "\u0418",
+    "Idot;": "\u0130",
+    "Ifr;": "\u2111",
+    "Igrave": "\xcc",
+    "Igrave;": "\xcc",
+    "Im;": "\u2111",
+    "Imacr;": "\u012a",
+    "ImaginaryI;": "\u2148",
+    "Implies;": "\u21d2",
+    "Int;": "\u222c",
+    "Integral;": "\u222b",
+    "Intersection;": "\u22c2",
+    "InvisibleComma;": "\u2063",
+    "InvisibleTimes;": "\u2062",
+    "Iogon;": "\u012e",
+    "Iopf;": "\U0001d540",
+    "Iota;": "\u0399",
+    "Iscr;": "\u2110",
+    "Itilde;": "\u0128",
+    "Iukcy;": "\u0406",
+    "Iuml": "\xcf",
+    "Iuml;": "\xcf",
+    "Jcirc;": "\u0134",
+    "Jcy;": "\u0419",
+    "Jfr;": "\U0001d50d",
+    "Jopf;": "\U0001d541",
+    "Jscr;": "\U0001d4a5",
+    "Jsercy;": "\u0408",
+    "Jukcy;": "\u0404",
+    "KHcy;": "\u0425",
+    "KJcy;": "\u040c",
+    "Kappa;": "\u039a",
+    "Kcedil;": "\u0136",
+    "Kcy;": "\u041a",
+    "Kfr;": "\U0001d50e",
+    "Kopf;": "\U0001d542",
+    "Kscr;": "\U0001d4a6",
+    "LJcy;": "\u0409",
+    "LT": "<",
+    "LT;": "<",
+    "Lacute;": "\u0139",
+    "Lambda;": "\u039b",
+    "Lang;": "\u27ea",
+    "Laplacetrf;": "\u2112",
+    "Larr;": "\u219e",
+    "Lcaron;": "\u013d",
+    "Lcedil;": "\u013b",
+    "Lcy;": "\u041b",
+    "LeftAngleBracket;": "\u27e8",
+    "LeftArrow;": "\u2190",
+    "LeftArrowBar;": "\u21e4",
+    "LeftArrowRightArrow;": "\u21c6",
+    "LeftCeiling;": "\u2308",
+    "LeftDoubleBracket;": "\u27e6",
+    "LeftDownTeeVector;": "\u2961",
+    "LeftDownVector;": "\u21c3",
+    "LeftDownVectorBar;": "\u2959",
+    "LeftFloor;": "\u230a",
+    "LeftRightArrow;": "\u2194",
+    "LeftRightVector;": "\u294e",
+    "LeftTee;": "\u22a3",
+    "LeftTeeArrow;": "\u21a4",
+    "LeftTeeVector;": "\u295a",
+    "LeftTriangle;": "\u22b2",
+    "LeftTriangleBar;": "\u29cf",
+    "LeftTriangleEqual;": "\u22b4",
+    "LeftUpDownVector;": "\u2951",
+    "LeftUpTeeVector;": "\u2960",
+    "LeftUpVector;": "\u21bf",
+    "LeftUpVectorBar;": "\u2958",
+    "LeftVector;": "\u21bc",
+    "LeftVectorBar;": "\u2952",
+    "Leftarrow;": "\u21d0",
+    "Leftrightarrow;": "\u21d4",
+    "LessEqualGreater;": "\u22da",
+    "LessFullEqual;": "\u2266",
+    "LessGreater;": "\u2276",
+    "LessLess;": "\u2aa1",
+    "LessSlantEqual;": "\u2a7d",
+    "LessTilde;": "\u2272",
+    "Lfr;": "\U0001d50f",
+    "Ll;": "\u22d8",
+    "Lleftarrow;": "\u21da",
+    "Lmidot;": "\u013f",
+    "LongLeftArrow;": "\u27f5",
+    "LongLeftRightArrow;": "\u27f7",
+    "LongRightArrow;": "\u27f6",
+    "Longleftarrow;": "\u27f8",
+    "Longleftrightarrow;": "\u27fa",
+    "Longrightarrow;": "\u27f9",
+    "Lopf;": "\U0001d543",
+    "LowerLeftArrow;": "\u2199",
+    "LowerRightArrow;": "\u2198",
+    "Lscr;": "\u2112",
+    "Lsh;": "\u21b0",
+    "Lstrok;": "\u0141",
+    "Lt;": "\u226a",
+    "Map;": "\u2905",
+    "Mcy;": "\u041c",
+    "MediumSpace;": "\u205f",
+    "Mellintrf;": "\u2133",
+    "Mfr;": "\U0001d510",
+    "MinusPlus;": "\u2213",
+    "Mopf;": "\U0001d544",
+    "Mscr;": "\u2133",
+    "Mu;": "\u039c",
+    "NJcy;": "\u040a",
+    "Nacute;": "\u0143",
+    "Ncaron;": "\u0147",
+    "Ncedil;": "\u0145",
+    "Ncy;": "\u041d",
+    "NegativeMediumSpace;": "\u200b",
+    "NegativeThickSpace;": "\u200b",
+    "NegativeThinSpace;": "\u200b",
+    "NegativeVeryThinSpace;": "\u200b",
+    "NestedGreaterGreater;": "\u226b",
+    "NestedLessLess;": "\u226a",
+    "NewLine;": "\n",
+    "Nfr;": "\U0001d511",
+    "NoBreak;": "\u2060",
+    "NonBreakingSpace;": "\xa0",
+    "Nopf;": "\u2115",
+    "Not;": "\u2aec",
+    "NotCongruent;": "\u2262",
+    "NotCupCap;": "\u226d",
+    "NotDoubleVerticalBar;": "\u2226",
+    "NotElement;": "\u2209",
+    "NotEqual;": "\u2260",
+    "NotEqualTilde;": "\u2242\u0338",
+    "NotExists;": "\u2204",
+    "NotGreater;": "\u226f",
+    "NotGreaterEqual;": "\u2271",
+    "NotGreaterFullEqual;": "\u2267\u0338",
+    "NotGreaterGreater;": "\u226b\u0338",
+    "NotGreaterLess;": "\u2279",
+    "NotGreaterSlantEqual;": "\u2a7e\u0338",
+    "NotGreaterTilde;": "\u2275",
+    "NotHumpDownHump;": "\u224e\u0338",
+    "NotHumpEqual;": "\u224f\u0338",
+    "NotLeftTriangle;": "\u22ea",
+    "NotLeftTriangleBar;": "\u29cf\u0338",
+    "NotLeftTriangleEqual;": "\u22ec",
+    "NotLess;": "\u226e",
+    "NotLessEqual;": "\u2270",
+    "NotLessGreater;": "\u2278",
+    "NotLessLess;": "\u226a\u0338",
+    "NotLessSlantEqual;": "\u2a7d\u0338",
+    "NotLessTilde;": "\u2274",
+    "NotNestedGreaterGreater;": "\u2aa2\u0338",
+    "NotNestedLessLess;": "\u2aa1\u0338",
+    "NotPrecedes;": "\u2280",
+    "NotPrecedesEqual;": "\u2aaf\u0338",
+    "NotPrecedesSlantEqual;": "\u22e0",
+    "NotReverseElement;": "\u220c",
+    "NotRightTriangle;": "\u22eb",
+    "NotRightTriangleBar;": "\u29d0\u0338",
+    "NotRightTriangleEqual;": "\u22ed",
+    "NotSquareSubset;": "\u228f\u0338",
+    "NotSquareSubsetEqual;": "\u22e2",
+    "NotSquareSuperset;": "\u2290\u0338",
+    "NotSquareSupersetEqual;": "\u22e3",
+    "NotSubset;": "\u2282\u20d2",
+    "NotSubsetEqual;": "\u2288",
+    "NotSucceeds;": "\u2281",
+    "NotSucceedsEqual;": "\u2ab0\u0338",
+    "NotSucceedsSlantEqual;": "\u22e1",
+    "NotSucceedsTilde;": "\u227f\u0338",
+    "NotSuperset;": "\u2283\u20d2",
+    "NotSupersetEqual;": "\u2289",
+    "NotTilde;": "\u2241",
+    "NotTildeEqual;": "\u2244",
+    "NotTildeFullEqual;": "\u2247",
+    "NotTildeTilde;": "\u2249",
+    "NotVerticalBar;": "\u2224",
+    "Nscr;": "\U0001d4a9",
+    "Ntilde": "\xd1",
+    "Ntilde;": "\xd1",
+    "Nu;": "\u039d",
+    "OElig;": "\u0152",
+    "Oacute": "\xd3",
+    "Oacute;": "\xd3",
+    "Ocirc": "\xd4",
+    "Ocirc;": "\xd4",
+    "Ocy;": "\u041e",
+    "Odblac;": "\u0150",
+    "Ofr;": "\U0001d512",
+    "Ograve": "\xd2",
+    "Ograve;": "\xd2",
+    "Omacr;": "\u014c",
+    "Omega;": "\u03a9",
+    "Omicron;": "\u039f",
+    "Oopf;": "\U0001d546",
+    "OpenCurlyDoubleQuote;": "\u201c",
+    "OpenCurlyQuote;": "\u2018",
+    "Or;": "\u2a54",
+    "Oscr;": "\U0001d4aa",
+    "Oslash": "\xd8",
+    "Oslash;": "\xd8",
+    "Otilde": "\xd5",
+    "Otilde;": "\xd5",
+    "Otimes;": "\u2a37",
+    "Ouml": "\xd6",
+    "Ouml;": "\xd6",
+    "OverBar;": "\u203e",
+    "OverBrace;": "\u23de",
+    "OverBracket;": "\u23b4",
+    "OverParenthesis;": "\u23dc",
+    "PartialD;": "\u2202",
+    "Pcy;": "\u041f",
+    "Pfr;": "\U0001d513",
+    "Phi;": "\u03a6",
+    "Pi;": "\u03a0",
+    "PlusMinus;": "\xb1",
+    "Poincareplane;": "\u210c",
+    "Popf;": "\u2119",
+    "Pr;": "\u2abb",
+    "Precedes;": "\u227a",
+    "PrecedesEqual;": "\u2aaf",
+    "PrecedesSlantEqual;": "\u227c",
+    "PrecedesTilde;": "\u227e",
+    "Prime;": "\u2033",
+    "Product;": "\u220f",
+    "Proportion;": "\u2237",
+    "Proportional;": "\u221d",
+    "Pscr;": "\U0001d4ab",
+    "Psi;": "\u03a8",
+    "QUOT": "\"",
+    "QUOT;": "\"",
+    "Qfr;": "\U0001d514",
+    "Qopf;": "\u211a",
+    "Qscr;": "\U0001d4ac",
+    "RBarr;": "\u2910",
+    "REG": "\xae",
+    "REG;": "\xae",
+    "Racute;": "\u0154",
+    "Rang;": "\u27eb",
+    "Rarr;": "\u21a0",
+    "Rarrtl;": "\u2916",
+    "Rcaron;": "\u0158",
+    "Rcedil;": "\u0156",
+    "Rcy;": "\u0420",
+    "Re;": "\u211c",
+    "ReverseElement;": "\u220b",
+    "ReverseEquilibrium;": "\u21cb",
+    "ReverseUpEquilibrium;": "\u296f",
+    "Rfr;": "\u211c",
+    "Rho;": "\u03a1",
+    "RightAngleBracket;": "\u27e9",
+    "RightArrow;": "\u2192",
+    "RightArrowBar;": "\u21e5",
+    "RightArrowLeftArrow;": "\u21c4",
+    "RightCeiling;": "\u2309",
+    "RightDoubleBracket;": "\u27e7",
+    "RightDownTeeVector;": "\u295d",
+    "RightDownVector;": "\u21c2",
+    "RightDownVectorBar;": "\u2955",
+    "RightFloor;": "\u230b",
+    "RightTee;": "\u22a2",
+    "RightTeeArrow;": "\u21a6",
+    "RightTeeVector;": "\u295b",
+    "RightTriangle;": "\u22b3",
+    "RightTriangleBar;": "\u29d0",
+    "RightTriangleEqual;": "\u22b5",
+    "RightUpDownVector;": "\u294f",
+    "RightUpTeeVector;": "\u295c",
+    "RightUpVector;": "\u21be",
+    "RightUpVectorBar;": "\u2954",
+    "RightVector;": "\u21c0",
+    "RightVectorBar;": "\u2953",
+    "Rightarrow;": "\u21d2",
+    "Ropf;": "\u211d",
+    "RoundImplies;": "\u2970",
+    "Rrightarrow;": "\u21db",
+    "Rscr;": "\u211b",
+    "Rsh;": "\u21b1",
+    "RuleDelayed;": "\u29f4",
+    "SHCHcy;": "\u0429",
+    "SHcy;": "\u0428",
+    "SOFTcy;": "\u042c",
+    "Sacute;": "\u015a",
+    "Sc;": "\u2abc",
+    "Scaron;": "\u0160",
+    "Scedil;": "\u015e",
+    "Scirc;": "\u015c",
+    "Scy;": "\u0421",
+    "Sfr;": "\U0001d516",
+    "ShortDownArrow;": "\u2193",
+    "ShortLeftArrow;": "\u2190",
+    "ShortRightArrow;": "\u2192",
+    "ShortUpArrow;": "\u2191",
+    "Sigma;": "\u03a3",
+    "SmallCircle;": "\u2218",
+    "Sopf;": "\U0001d54a",
+    "Sqrt;": "\u221a",
+    "Square;": "\u25a1",
+    "SquareIntersection;": "\u2293",
+    "SquareSubset;": "\u228f",
+    "SquareSubsetEqual;": "\u2291",
+    "SquareSuperset;": "\u2290",
+    "SquareSupersetEqual;": "\u2292",
+    "SquareUnion;": "\u2294",
+    "Sscr;": "\U0001d4ae",
+    "Star;": "\u22c6",
+    "Sub;": "\u22d0",
+    "Subset;": "\u22d0",
+    "SubsetEqual;": "\u2286",
+    "Succeeds;": "\u227b",
+    "SucceedsEqual;": "\u2ab0",
+    "SucceedsSlantEqual;": "\u227d",
+    "SucceedsTilde;": "\u227f",
+    "SuchThat;": "\u220b",
+    "Sum;": "\u2211",
+    "Sup;": "\u22d1",
+    "Superset;": "\u2283",
+    "SupersetEqual;": "\u2287",
+    "Supset;": "\u22d1",
+    "THORN": "\xde",
+    "THORN;": "\xde",
+    "TRADE;": "\u2122",
+    "TSHcy;": "\u040b",
+    "TScy;": "\u0426",
+    "Tab;": "\t",
+    "Tau;": "\u03a4",
+    "Tcaron;": "\u0164",
+    "Tcedil;": "\u0162",
+    "Tcy;": "\u0422",
+    "Tfr;": "\U0001d517",
+    "Therefore;": "\u2234",
+    "Theta;": "\u0398",
+    "ThickSpace;": "\u205f\u200a",
+    "ThinSpace;": "\u2009",
+    "Tilde;": "\u223c",
+    "TildeEqual;": "\u2243",
+    "TildeFullEqual;": "\u2245",
+    "TildeTilde;": "\u2248",
+    "Topf;": "\U0001d54b",
+    "TripleDot;": "\u20db",
+    "Tscr;": "\U0001d4af",
+    "Tstrok;": "\u0166",
+    "Uacute": "\xda",
+    "Uacute;": "\xda",
+    "Uarr;": "\u219f",
+    "Uarrocir;": "\u2949",
+    "Ubrcy;": "\u040e",
+    "Ubreve;": "\u016c",
+    "Ucirc": "\xdb",
+    "Ucirc;": "\xdb",
+    "Ucy;": "\u0423",
+    "Udblac;": "\u0170",
+    "Ufr;": "\U0001d518",
+    "Ugrave": "\xd9",
+    "Ugrave;": "\xd9",
+    "Umacr;": "\u016a",
+    "UnderBar;": "_",
+    "UnderBrace;": "\u23df",
+    "UnderBracket;": "\u23b5",
+    "UnderParenthesis;": "\u23dd",
+    "Union;": "\u22c3",
+    "UnionPlus;": "\u228e",
+    "Uogon;": "\u0172",
+    "Uopf;": "\U0001d54c",
+    "UpArrow;": "\u2191",
+    "UpArrowBar;": "\u2912",
+    "UpArrowDownArrow;": "\u21c5",
+    "UpDownArrow;": "\u2195",
+    "UpEquilibrium;": "\u296e",
+    "UpTee;": "\u22a5",
+    "UpTeeArrow;": "\u21a5",
+    "Uparrow;": "\u21d1",
+    "Updownarrow;": "\u21d5",
+    "UpperLeftArrow;": "\u2196",
+    "UpperRightArrow;": "\u2197",
+    "Upsi;": "\u03d2",
+    "Upsilon;": "\u03a5",
+    "Uring;": "\u016e",
+    "Uscr;": "\U0001d4b0",
+    "Utilde;": "\u0168",
+    "Uuml": "\xdc",
+    "Uuml;": "\xdc",
+    "VDash;": "\u22ab",
+    "Vbar;": "\u2aeb",
+    "Vcy;": "\u0412",
+    "Vdash;": "\u22a9",
+    "Vdashl;": "\u2ae6",
+    "Vee;": "\u22c1",
+    "Verbar;": "\u2016",
+    "Vert;": "\u2016",
+    "VerticalBar;": "\u2223",
+    "VerticalLine;": "|",
+    "VerticalSeparator;": "\u2758",
+    "VerticalTilde;": "\u2240",
+    "VeryThinSpace;": "\u200a",
+    "Vfr;": "\U0001d519",
+    "Vopf;": "\U0001d54d",
+    "Vscr;": "\U0001d4b1",
+    "Vvdash;": "\u22aa",
+    "Wcirc;": "\u0174",
+    "Wedge;": "\u22c0",
+    "Wfr;": "\U0001d51a",
+    "Wopf;": "\U0001d54e",
+    "Wscr;": "\U0001d4b2",
+    "Xfr;": "\U0001d51b",
+    "Xi;": "\u039e",
+    "Xopf;": "\U0001d54f",
+    "Xscr;": "\U0001d4b3",
+    "YAcy;": "\u042f",
+    "YIcy;": "\u0407",
+    "YUcy;": "\u042e",
+    "Yacute": "\xdd",
+    "Yacute;": "\xdd",
+    "Ycirc;": "\u0176",
+    "Ycy;": "\u042b",
+    "Yfr;": "\U0001d51c",
+    "Yopf;": "\U0001d550",
+    "Yscr;": "\U0001d4b4",
+    "Yuml;": "\u0178",
+    "ZHcy;": "\u0416",
+    "Zacute;": "\u0179",
+    "Zcaron;": "\u017d",
+    "Zcy;": "\u0417",
+    "Zdot;": "\u017b",
+    "ZeroWidthSpace;": "\u200b",
+    "Zeta;": "\u0396",
+    "Zfr;": "\u2128",
+    "Zopf;": "\u2124",
+    "Zscr;": "\U0001d4b5",
+    "aacute": "\xe1",
+    "aacute;": "\xe1",
+    "abreve;": "\u0103",
+    "ac;": "\u223e",
+    "acE;": "\u223e\u0333",
+    "acd;": "\u223f",
+    "acirc": "\xe2",
+    "acirc;": "\xe2",
+    "acute": "\xb4",
+    "acute;": "\xb4",
+    "acy;": "\u0430",
+    "aelig": "\xe6",
+    "aelig;": "\xe6",
+    "af;": "\u2061",
+    "afr;": "\U0001d51e",
+    "agrave": "\xe0",
+    "agrave;": "\xe0",
+    "alefsym;": "\u2135",
+    "aleph;": "\u2135",
+    "alpha;": "\u03b1",
+    "amacr;": "\u0101",
+    "amalg;": "\u2a3f",
+    "amp": "&",
+    "amp;": "&",
+    "and;": "\u2227",
+    "andand;": "\u2a55",
+    "andd;": "\u2a5c",
+    "andslope;": "\u2a58",
+    "andv;": "\u2a5a",
+    "ang;": "\u2220",
+    "ange;": "\u29a4",
+    "angle;": "\u2220",
+    "angmsd;": "\u2221",
+    "angmsdaa;": "\u29a8",
+    "angmsdab;": "\u29a9",
+    "angmsdac;": "\u29aa",
+    "angmsdad;": "\u29ab",
+    "angmsdae;": "\u29ac",
+    "angmsdaf;": "\u29ad",
+    "angmsdag;": "\u29ae",
+    "angmsdah;": "\u29af",
+    "angrt;": "\u221f",
+    "angrtvb;": "\u22be",
+    "angrtvbd;": "\u299d",
+    "angsph;": "\u2222",
+    "angst;": "\xc5",
+    "angzarr;": "\u237c",
+    "aogon;": "\u0105",
+    "aopf;": "\U0001d552",
+    "ap;": "\u2248",
+    "apE;": "\u2a70",
+    "apacir;": "\u2a6f",
+    "ape;": "\u224a",
+    "apid;": "\u224b",
+    "apos;": "'",
+    "approx;": "\u2248",
+    "approxeq;": "\u224a",
+    "aring": "\xe5",
+    "aring;": "\xe5",
+    "ascr;": "\U0001d4b6",
+    "ast;": "*",
+    "asymp;": "\u2248",
+    "asympeq;": "\u224d",
+    "atilde": "\xe3",
+    "atilde;": "\xe3",
+    "auml": "\xe4",
+    "auml;": "\xe4",
+    "awconint;": "\u2233",
+    "awint;": "\u2a11",
+    "bNot;": "\u2aed",
+    "backcong;": "\u224c",
+    "backepsilon;": "\u03f6",
+    "backprime;": "\u2035",
+    "backsim;": "\u223d",
+    "backsimeq;": "\u22cd",
+    "barvee;": "\u22bd",
+    "barwed;": "\u2305",
+    "barwedge;": "\u2305",
+    "bbrk;": "\u23b5",
+    "bbrktbrk;": "\u23b6",
+    "bcong;": "\u224c",
+    "bcy;": "\u0431",
+    "bdquo;": "\u201e",
+    "becaus;": "\u2235",
+    "because;": "\u2235",
+    "bemptyv;": "\u29b0",
+    "bepsi;": "\u03f6",
+    "bernou;": "\u212c",
+    "beta;": "\u03b2",
+    "beth;": "\u2136",
+    "between;": "\u226c",
+    "bfr;": "\U0001d51f",
+    "bigcap;": "\u22c2",
+    "bigcirc;": "\u25ef",
+    "bigcup;": "\u22c3",
+    "bigodot;": "\u2a00",
+    "bigoplus;": "\u2a01",
+    "bigotimes;": "\u2a02",
+    "bigsqcup;": "\u2a06",
+    "bigstar;": "\u2605",
+    "bigtriangledown;": "\u25bd",
+    "bigtriangleup;": "\u25b3",
+    "biguplus;": "\u2a04",
+    "bigvee;": "\u22c1",
+    "bigwedge;": "\u22c0",
+    "bkarow;": "\u290d",
+    "blacklozenge;": "\u29eb",
+    "blacksquare;": "\u25aa",
+    "blacktriangle;": "\u25b4",
+    "blacktriangledown;": "\u25be",
+    "blacktriangleleft;": "\u25c2",
+    "blacktriangleright;": "\u25b8",
+    "blank;": "\u2423",
+    "blk12;": "\u2592",
+    "blk14;": "\u2591",
+    "blk34;": "\u2593",
+    "block;": "\u2588",
+    "bne;": "=\u20e5",
+    "bnequiv;": "\u2261\u20e5",
+    "bnot;": "\u2310",
+    "bopf;": "\U0001d553",
+    "bot;": "\u22a5",
+    "bottom;": "\u22a5",
+    "bowtie;": "\u22c8",
+    "boxDL;": "\u2557",
+    "boxDR;": "\u2554",
+    "boxDl;": "\u2556",
+    "boxDr;": "\u2553",
+    "boxH;": "\u2550",
+    "boxHD;": "\u2566",
+    "boxHU;": "\u2569",
+    "boxHd;": "\u2564",
+    "boxHu;": "\u2567",
+    "boxUL;": "\u255d",
+    "boxUR;": "\u255a",
+    "boxUl;": "\u255c",
+    "boxUr;": "\u2559",
+    "boxV;": "\u2551",
+    "boxVH;": "\u256c",
+    "boxVL;": "\u2563",
+    "boxVR;": "\u2560",
+    "boxVh;": "\u256b",
+    "boxVl;": "\u2562",
+    "boxVr;": "\u255f",
+    "boxbox;": "\u29c9",
+    "boxdL;": "\u2555",
+    "boxdR;": "\u2552",
+    "boxdl;": "\u2510",
+    "boxdr;": "\u250c",
+    "boxh;": "\u2500",
+    "boxhD;": "\u2565",
+    "boxhU;": "\u2568",
+    "boxhd;": "\u252c",
+    "boxhu;": "\u2534",
+    "boxminus;": "\u229f",
+    "boxplus;": "\u229e",
+    "boxtimes;": "\u22a0",
+    "boxuL;": "\u255b",
+    "boxuR;": "\u2558",
+    "boxul;": "\u2518",
+    "boxur;": "\u2514",
+    "boxv;": "\u2502",
+    "boxvH;": "\u256a",
+    "boxvL;": "\u2561",
+    "boxvR;": "\u255e",
+    "boxvh;": "\u253c",
+    "boxvl;": "\u2524",
+    "boxvr;": "\u251c",
+    "bprime;": "\u2035",
+    "breve;": "\u02d8",
+    "brvbar": "\xa6",
+    "brvbar;": "\xa6",
+    "bscr;": "\U0001d4b7",
+    "bsemi;": "\u204f",
+    "bsim;": "\u223d",
+    "bsime;": "\u22cd",
+    "bsol;": "\\",
+    "bsolb;": "\u29c5",
+    "bsolhsub;": "\u27c8",
+    "bull;": "\u2022",
+    "bullet;": "\u2022",
+    "bump;": "\u224e",
+    "bumpE;": "\u2aae",
+    "bumpe;": "\u224f",
+    "bumpeq;": "\u224f",
+    "cacute;": "\u0107",
+    "cap;": "\u2229",
+    "capand;": "\u2a44",
+    "capbrcup;": "\u2a49",
+    "capcap;": "\u2a4b",
+    "capcup;": "\u2a47",
+    "capdot;": "\u2a40",
+    "caps;": "\u2229\ufe00",
+    "caret;": "\u2041",
+    "caron;": "\u02c7",
+    "ccaps;": "\u2a4d",
+    "ccaron;": "\u010d",
+    "ccedil": "\xe7",
+    "ccedil;": "\xe7",
+    "ccirc;": "\u0109",
+    "ccups;": "\u2a4c",
+    "ccupssm;": "\u2a50",
+    "cdot;": "\u010b",
+    "cedil": "\xb8",
+    "cedil;": "\xb8",
+    "cemptyv;": "\u29b2",
+    "cent": "\xa2",
+    "cent;": "\xa2",
+    "centerdot;": "\xb7",
+    "cfr;": "\U0001d520",
+    "chcy;": "\u0447",
+    "check;": "\u2713",
+    "checkmark;": "\u2713",
+    "chi;": "\u03c7",
+    "cir;": "\u25cb",
+    "cirE;": "\u29c3",
+    "circ;": "\u02c6",
+    "circeq;": "\u2257",
+    "circlearrowleft;": "\u21ba",
+    "circlearrowright;": "\u21bb",
+    "circledR;": "\xae",
+    "circledS;": "\u24c8",
+    "circledast;": "\u229b",
+    "circledcirc;": "\u229a",
+    "circleddash;": "\u229d",
+    "cire;": "\u2257",
+    "cirfnint;": "\u2a10",
+    "cirmid;": "\u2aef",
+    "cirscir;": "\u29c2",
+    "clubs;": "\u2663",
+    "clubsuit;": "\u2663",
+    "colon;": ":",
+    "colone;": "\u2254",
+    "coloneq;": "\u2254",
+    "comma;": ",",
+    "commat;": "@",
+    "comp;": "\u2201",
+    "compfn;": "\u2218",
+    "complement;": "\u2201",
+    "complexes;": "\u2102",
+    "cong;": "\u2245",
+    "congdot;": "\u2a6d",
+    "conint;": "\u222e",
+    "copf;": "\U0001d554",
+    "coprod;": "\u2210",
+    "copy": "\xa9",
+    "copy;": "\xa9",
+    "copysr;": "\u2117",
+    "crarr;": "\u21b5",
+    "cross;": "\u2717",
+    "cscr;": "\U0001d4b8",
+    "csub;": "\u2acf",
+    "csube;": "\u2ad1",
+    "csup;": "\u2ad0",
+    "csupe;": "\u2ad2",
+    "ctdot;": "\u22ef",
+    "cudarrl;": "\u2938",
+    "cudarrr;": "\u2935",
+    "cuepr;": "\u22de",
+    "cuesc;": "\u22df",
+    "cularr;": "\u21b6",
+    "cularrp;": "\u293d",
+    "cup;": "\u222a",
+    "cupbrcap;": "\u2a48",
+    "cupcap;": "\u2a46",
+    "cupcup;": "\u2a4a",
+    "cupdot;": "\u228d",
+    "cupor;": "\u2a45",
+    "cups;": "\u222a\ufe00",
+    "curarr;": "\u21b7",
+    "curarrm;": "\u293c",
+    "curlyeqprec;": "\u22de",
+    "curlyeqsucc;": "\u22df",
+    "curlyvee;": "\u22ce",
+    "curlywedge;": "\u22cf",
+    "curren": "\xa4",
+    "curren;": "\xa4",
+    "curvearrowleft;": "\u21b6",
+    "curvearrowright;": "\u21b7",
+    "cuvee;": "\u22ce",
+    "cuwed;": "\u22cf",
+    "cwconint;": "\u2232",
+    "cwint;": "\u2231",
+    "cylcty;": "\u232d",
+    "dArr;": "\u21d3",
+    "dHar;": "\u2965",
+    "dagger;": "\u2020",
+    "daleth;": "\u2138",
+    "darr;": "\u2193",
+    "dash;": "\u2010",
+    "dashv;": "\u22a3",
+    "dbkarow;": "\u290f",
+    "dblac;": "\u02dd",
+    "dcaron;": "\u010f",
+    "dcy;": "\u0434",
+    "dd;": "\u2146",
+    "ddagger;": "\u2021",
+    "ddarr;": "\u21ca",
+    "ddotseq;": "\u2a77",
+    "deg": "\xb0",
+    "deg;": "\xb0",
+    "delta;": "\u03b4",
+    "demptyv;": "\u29b1",
+    "dfisht;": "\u297f",
+    "dfr;": "\U0001d521",
+    "dharl;": "\u21c3",
+    "dharr;": "\u21c2",
+    "diam;": "\u22c4",
+    "diamond;": "\u22c4",
+    "diamondsuit;": "\u2666",
+    "diams;": "\u2666",
+    "die;": "\xa8",
+    "digamma;": "\u03dd",
+    "disin;": "\u22f2",
+    "div;": "\xf7",
+    "divide": "\xf7",
+    "divide;": "\xf7",
+    "divideontimes;": "\u22c7",
+    "divonx;": "\u22c7",
+    "djcy;": "\u0452",
+    "dlcorn;": "\u231e",
+    "dlcrop;": "\u230d",
+    "dollar;": "$",
+    "dopf;": "\U0001d555",
+    "dot;": "\u02d9",
+    "doteq;": "\u2250",
+    "doteqdot;": "\u2251",
+    "dotminus;": "\u2238",
+    "dotplus;": "\u2214",
+    "dotsquare;": "\u22a1",
+    "doublebarwedge;": "\u2306",
+    "downarrow;": "\u2193",
+    "downdownarrows;": "\u21ca",
+    "downharpoonleft;": "\u21c3",
+    "downharpoonright;": "\u21c2",
+    "drbkarow;": "\u2910",
+    "drcorn;": "\u231f",
+    "drcrop;": "\u230c",
+    "dscr;": "\U0001d4b9",
+    "dscy;": "\u0455",
+    "dsol;": "\u29f6",
+    "dstrok;": "\u0111",
+    "dtdot;": "\u22f1",
+    "dtri;": "\u25bf",
+    "dtrif;": "\u25be",
+    "duarr;": "\u21f5",
+    "duhar;": "\u296f",
+    "dwangle;": "\u29a6",
+    "dzcy;": "\u045f",
+    "dzigrarr;": "\u27ff",
+    "eDDot;": "\u2a77",
+    "eDot;": "\u2251",
+    "eacute": "\xe9",
+    "eacute;": "\xe9",
+    "easter;": "\u2a6e",
+    "ecaron;": "\u011b",
+    "ecir;": "\u2256",
+    "ecirc": "\xea",
+    "ecirc;": "\xea",
+    "ecolon;": "\u2255",
+    "ecy;": "\u044d",
+    "edot;": "\u0117",
+    "ee;": "\u2147",
+    "efDot;": "\u2252",
+    "efr;": "\U0001d522",
+    "eg;": "\u2a9a",
+    "egrave": "\xe8",
+    "egrave;": "\xe8",
+    "egs;": "\u2a96",
+    "egsdot;": "\u2a98",
+    "el;": "\u2a99",
+    "elinters;": "\u23e7",
+    "ell;": "\u2113",
+    "els;": "\u2a95",
+    "elsdot;": "\u2a97",
+    "emacr;": "\u0113",
+    "empty;": "\u2205",
+    "emptyset;": "\u2205",
+    "emptyv;": "\u2205",
+    "emsp13;": "\u2004",
+    "emsp14;": "\u2005",
+    "emsp;": "\u2003",
+    "eng;": "\u014b",
+    "ensp;": "\u2002",
+    "eogon;": "\u0119",
+    "eopf;": "\U0001d556",
+    "epar;": "\u22d5",
+    "eparsl;": "\u29e3",
+    "eplus;": "\u2a71",
+    "epsi;": "\u03b5",
+    "epsilon;": "\u03b5",
+    "epsiv;": "\u03f5",
+    "eqcirc;": "\u2256",
+    "eqcolon;": "\u2255",
+    "eqsim;": "\u2242",
+    "eqslantgtr;": "\u2a96",
+    "eqslantless;": "\u2a95",
+    "equals;": "=",
+    "equest;": "\u225f",
+    "equiv;": "\u2261",
+    "equivDD;": "\u2a78",
+    "eqvparsl;": "\u29e5",
+    "erDot;": "\u2253",
+    "erarr;": "\u2971",
+    "escr;": "\u212f",
+    "esdot;": "\u2250",
+    "esim;": "\u2242",
+    "eta;": "\u03b7",
+    "eth": "\xf0",
+    "eth;": "\xf0",
+    "euml": "\xeb",
+    "euml;": "\xeb",
+    "euro;": "\u20ac",
+    "excl;": "!",
+    "exist;": "\u2203",
+    "expectation;": "\u2130",
+    "exponentiale;": "\u2147",
+    "fallingdotseq;": "\u2252",
+    "fcy;": "\u0444",
+    "female;": "\u2640",
+    "ffilig;": "\ufb03",
+    "fflig;": "\ufb00",
+    "ffllig;": "\ufb04",
+    "ffr;": "\U0001d523",
+    "filig;": "\ufb01",
+    "fjlig;": "fj",
+    "flat;": "\u266d",
+    "fllig;": "\ufb02",
+    "fltns;": "\u25b1",
+    "fnof;": "\u0192",
+    "fopf;": "\U0001d557",
+    "forall;": "\u2200",
+    "fork;": "\u22d4",
+    "forkv;": "\u2ad9",
+    "fpartint;": "\u2a0d",
+    "frac12": "\xbd",
+    "frac12;": "\xbd",
+    "frac13;": "\u2153",
+    "frac14": "\xbc",
+    "frac14;": "\xbc",
+    "frac15;": "\u2155",
+    "frac16;": "\u2159",
+    "frac18;": "\u215b",
+    "frac23;": "\u2154",
+    "frac25;": "\u2156",
+    "frac34": "\xbe",
+    "frac34;": "\xbe",
+    "frac35;": "\u2157",
+    "frac38;": "\u215c",
+    "frac45;": "\u2158",
+    "frac56;": "\u215a",
+    "frac58;": "\u215d",
+    "frac78;": "\u215e",
+    "frasl;": "\u2044",
+    "frown;": "\u2322",
+    "fscr;": "\U0001d4bb",
+    "gE;": "\u2267",
+    "gEl;": "\u2a8c",
+    "gacute;": "\u01f5",
+    "gamma;": "\u03b3",
+    "gammad;": "\u03dd",
+    "gap;": "\u2a86",
+    "gbreve;": "\u011f",
+    "gcirc;": "\u011d",
+    "gcy;": "\u0433",
+    "gdot;": "\u0121",
+    "ge;": "\u2265",
+    "gel;": "\u22db",
+    "geq;": "\u2265",
+    "geqq;": "\u2267",
+    "geqslant;": "\u2a7e",
+    "ges;": "\u2a7e",
+    "gescc;": "\u2aa9",
+    "gesdot;": "\u2a80",
+    "gesdoto;": "\u2a82",
+    "gesdotol;": "\u2a84",
+    "gesl;": "\u22db\ufe00",
+    "gesles;": "\u2a94",
+    "gfr;": "\U0001d524",
+    "gg;": "\u226b",
+    "ggg;": "\u22d9",
+    "gimel;": "\u2137",
+    "gjcy;": "\u0453",
+    "gl;": "\u2277",
+    "glE;": "\u2a92",
+    "gla;": "\u2aa5",
+    "glj;": "\u2aa4",
+    "gnE;": "\u2269",
+    "gnap;": "\u2a8a",
+    "gnapprox;": "\u2a8a",
+    "gne;": "\u2a88",
+    "gneq;": "\u2a88",
+    "gneqq;": "\u2269",
+    "gnsim;": "\u22e7",
+    "gopf;": "\U0001d558",
+    "grave;": "`",
+    "gscr;": "\u210a",
+    "gsim;": "\u2273",
+    "gsime;": "\u2a8e",
+    "gsiml;": "\u2a90",
+    "gt": ">",
+    "gt;": ">",
+    "gtcc;": "\u2aa7",
+    "gtcir;": "\u2a7a",
+    "gtdot;": "\u22d7",
+    "gtlPar;": "\u2995",
+    "gtquest;": "\u2a7c",
+    "gtrapprox;": "\u2a86",
+    "gtrarr;": "\u2978",
+    "gtrdot;": "\u22d7",
+    "gtreqless;": "\u22db",
+    "gtreqqless;": "\u2a8c",
+    "gtrless;": "\u2277",
+    "gtrsim;": "\u2273",
+    "gvertneqq;": "\u2269\ufe00",
+    "gvnE;": "\u2269\ufe00",
+    "hArr;": "\u21d4",
+    "hairsp;": "\u200a",
+    "half;": "\xbd",
+    "hamilt;": "\u210b",
+    "hardcy;": "\u044a",
+    "harr;": "\u2194",
+    "harrcir;": "\u2948",
+    "harrw;": "\u21ad",
+    "hbar;": "\u210f",
+    "hcirc;": "\u0125",
+    "hearts;": "\u2665",
+    "heartsuit;": "\u2665",
+    "hellip;": "\u2026",
+    "hercon;": "\u22b9",
+    "hfr;": "\U0001d525",
+    "hksearow;": "\u2925",
+    "hkswarow;": "\u2926",
+    "hoarr;": "\u21ff",
+    "homtht;": "\u223b",
+    "hookleftarrow;": "\u21a9",
+    "hookrightarrow;": "\u21aa",
+    "hopf;": "\U0001d559",
+    "horbar;": "\u2015",
+    "hscr;": "\U0001d4bd",
+    "hslash;": "\u210f",
+    "hstrok;": "\u0127",
+    "hybull;": "\u2043",
+    "hyphen;": "\u2010",
+    "iacute": "\xed",
+    "iacute;": "\xed",
+    "ic;": "\u2063",
+    "icirc": "\xee",
+    "icirc;": "\xee",
+    "icy;": "\u0438",
+    "iecy;": "\u0435",
+    "iexcl": "\xa1",
+    "iexcl;": "\xa1",
+    "iff;": "\u21d4",
+    "ifr;": "\U0001d526",
+    "igrave": "\xec",
+    "igrave;": "\xec",
+    "ii;": "\u2148",
+    "iiiint;": "\u2a0c",
+    "iiint;": "\u222d",
+    "iinfin;": "\u29dc",
+    "iiota;": "\u2129",
+    "ijlig;": "\u0133",
+    "imacr;": "\u012b",
+    "image;": "\u2111",
+    "imagline;": "\u2110",
+    "imagpart;": "\u2111",
+    "imath;": "\u0131",
+    "imof;": "\u22b7",
+    "imped;": "\u01b5",
+    "in;": "\u2208",
+    "incare;": "\u2105",
+    "infin;": "\u221e",
+    "infintie;": "\u29dd",
+    "inodot;": "\u0131",
+    "int;": "\u222b",
+    "intcal;": "\u22ba",
+    "integers;": "\u2124",
+    "intercal;": "\u22ba",
+    "intlarhk;": "\u2a17",
+    "intprod;": "\u2a3c",
+    "iocy;": "\u0451",
+    "iogon;": "\u012f",
+    "iopf;": "\U0001d55a",
+    "iota;": "\u03b9",
+    "iprod;": "\u2a3c",
+    "iquest": "\xbf",
+    "iquest;": "\xbf",
+    "iscr;": "\U0001d4be",
+    "isin;": "\u2208",
+    "isinE;": "\u22f9",
+    "isindot;": "\u22f5",
+    "isins;": "\u22f4",
+    "isinsv;": "\u22f3",
+    "isinv;": "\u2208",
+    "it;": "\u2062",
+    "itilde;": "\u0129",
+    "iukcy;": "\u0456",
+    "iuml": "\xef",
+    "iuml;": "\xef",
+    "jcirc;": "\u0135",
+    "jcy;": "\u0439",
+    "jfr;": "\U0001d527",
+    "jmath;": "\u0237",
+    "jopf;": "\U0001d55b",
+    "jscr;": "\U0001d4bf",
+    "jsercy;": "\u0458",
+    "jukcy;": "\u0454",
+    "kappa;": "\u03ba",
+    "kappav;": "\u03f0",
+    "kcedil;": "\u0137",
+    "kcy;": "\u043a",
+    "kfr;": "\U0001d528",
+    "kgreen;": "\u0138",
+    "khcy;": "\u0445",
+    "kjcy;": "\u045c",
+    "kopf;": "\U0001d55c",
+    "kscr;": "\U0001d4c0",
+    "lAarr;": "\u21da",
+    "lArr;": "\u21d0",
+    "lAtail;": "\u291b",
+    "lBarr;": "\u290e",
+    "lE;": "\u2266",
+    "lEg;": "\u2a8b",
+    "lHar;": "\u2962",
+    "lacute;": "\u013a",
+    "laemptyv;": "\u29b4",
+    "lagran;": "\u2112",
+    "lambda;": "\u03bb",
+    "lang;": "\u27e8",
+    "langd;": "\u2991",
+    "langle;": "\u27e8",
+    "lap;": "\u2a85",
+    "laquo": "\xab",
+    "laquo;": "\xab",
+    "larr;": "\u2190",
+    "larrb;": "\u21e4",
+    "larrbfs;": "\u291f",
+    "larrfs;": "\u291d",
+    "larrhk;": "\u21a9",
+    "larrlp;": "\u21ab",
+    "larrpl;": "\u2939",
+    "larrsim;": "\u2973",
+    "larrtl;": "\u21a2",
+    "lat;": "\u2aab",
+    "latail;": "\u2919",
+    "late;": "\u2aad",
+    "lates;": "\u2aad\ufe00",
+    "lbarr;": "\u290c",
+    "lbbrk;": "\u2772",
+    "lbrace;": "{",
+    "lbrack;": "[",
+    "lbrke;": "\u298b",
+    "lbrksld;": "\u298f",
+    "lbrkslu;": "\u298d",
+    "lcaron;": "\u013e",
+    "lcedil;": "\u013c",
+    "lceil;": "\u2308",
+    "lcub;": "{",
+    "lcy;": "\u043b",
+    "ldca;": "\u2936",
+    "ldquo;": "\u201c",
+    "ldquor;": "\u201e",
+    "ldrdhar;": "\u2967",
+    "ldrushar;": "\u294b",
+    "ldsh;": "\u21b2",
+    "le;": "\u2264",
+    "leftarrow;": "\u2190",
+    "leftarrowtail;": "\u21a2",
+    "leftharpoondown;": "\u21bd",
+    "leftharpoonup;": "\u21bc",
+    "leftleftarrows;": "\u21c7",
+    "leftrightarrow;": "\u2194",
+    "leftrightarrows;": "\u21c6",
+    "leftrightharpoons;": "\u21cb",
+    "leftrightsquigarrow;": "\u21ad",
+    "leftthreetimes;": "\u22cb",
+    "leg;": "\u22da",
+    "leq;": "\u2264",
+    "leqq;": "\u2266",
+    "leqslant;": "\u2a7d",
+    "les;": "\u2a7d",
+    "lescc;": "\u2aa8",
+    "lesdot;": "\u2a7f",
+    "lesdoto;": "\u2a81",
+    "lesdotor;": "\u2a83",
+    "lesg;": "\u22da\ufe00",
+    "lesges;": "\u2a93",
+    "lessapprox;": "\u2a85",
+    "lessdot;": "\u22d6",
+    "lesseqgtr;": "\u22da",
+    "lesseqqgtr;": "\u2a8b",
+    "lessgtr;": "\u2276",
+    "lesssim;": "\u2272",
+    "lfisht;": "\u297c",
+    "lfloor;": "\u230a",
+    "lfr;": "\U0001d529",
+    "lg;": "\u2276",
+    "lgE;": "\u2a91",
+    "lhard;": "\u21bd",
+    "lharu;": "\u21bc",
+    "lharul;": "\u296a",
+    "lhblk;": "\u2584",
+    "ljcy;": "\u0459",
+    "ll;": "\u226a",
+    "llarr;": "\u21c7",
+    "llcorner;": "\u231e",
+    "llhard;": "\u296b",
+    "lltri;": "\u25fa",
+    "lmidot;": "\u0140",
+    "lmoust;": "\u23b0",
+    "lmoustache;": "\u23b0",
+    "lnE;": "\u2268",
+    "lnap;": "\u2a89",
+    "lnapprox;": "\u2a89",
+    "lne;": "\u2a87",
+    "lneq;": "\u2a87",
+    "lneqq;": "\u2268",
+    "lnsim;": "\u22e6",
+    "loang;": "\u27ec",
+    "loarr;": "\u21fd",
+    "lobrk;": "\u27e6",
+    "longleftarrow;": "\u27f5",
+    "longleftrightarrow;": "\u27f7",
+    "longmapsto;": "\u27fc",
+    "longrightarrow;": "\u27f6",
+    "looparrowleft;": "\u21ab",
+    "looparrowright;": "\u21ac",
+    "lopar;": "\u2985",
+    "lopf;": "\U0001d55d",
+    "loplus;": "\u2a2d",
+    "lotimes;": "\u2a34",
+    "lowast;": "\u2217",
+    "lowbar;": "_",
+    "loz;": "\u25ca",
+    "lozenge;": "\u25ca",
+    "lozf;": "\u29eb",
+    "lpar;": "(",
+    "lparlt;": "\u2993",
+    "lrarr;": "\u21c6",
+    "lrcorner;": "\u231f",
+    "lrhar;": "\u21cb",
+    "lrhard;": "\u296d",
+    "lrm;": "\u200e",
+    "lrtri;": "\u22bf",
+    "lsaquo;": "\u2039",
+    "lscr;": "\U0001d4c1",
+    "lsh;": "\u21b0",
+    "lsim;": "\u2272",
+    "lsime;": "\u2a8d",
+    "lsimg;": "\u2a8f",
+    "lsqb;": "[",
+    "lsquo;": "\u2018",
+    "lsquor;": "\u201a",
+    "lstrok;": "\u0142",
+    "lt": "<",
+    "lt;": "<",
+    "ltcc;": "\u2aa6",
+    "ltcir;": "\u2a79",
+    "ltdot;": "\u22d6",
+    "lthree;": "\u22cb",
+    "ltimes;": "\u22c9",
+    "ltlarr;": "\u2976",
+    "ltquest;": "\u2a7b",
+    "ltrPar;": "\u2996",
+    "ltri;": "\u25c3",
+    "ltrie;": "\u22b4",
+    "ltrif;": "\u25c2",
+    "lurdshar;": "\u294a",
+    "luruhar;": "\u2966",
+    "lvertneqq;": "\u2268\ufe00",
+    "lvnE;": "\u2268\ufe00",
+    "mDDot;": "\u223a",
+    "macr": "\xaf",
+    "macr;": "\xaf",
+    "male;": "\u2642",
+    "malt;": "\u2720",
+    "maltese;": "\u2720",
+    "map;": "\u21a6",
+    "mapsto;": "\u21a6",
+    "mapstodown;": "\u21a7",
+    "mapstoleft;": "\u21a4",
+    "mapstoup;": "\u21a5",
+    "marker;": "\u25ae",
+    "mcomma;": "\u2a29",
+    "mcy;": "\u043c",
+    "mdash;": "\u2014",
+    "measuredangle;": "\u2221",
+    "mfr;": "\U0001d52a",
+    "mho;": "\u2127",
+    "micro": "\xb5",
+    "micro;": "\xb5",
+    "mid;": "\u2223",
+    "midast;": "*",
+    "midcir;": "\u2af0",
+    "middot": "\xb7",
+    "middot;": "\xb7",
+    "minus;": "\u2212",
+    "minusb;": "\u229f",
+    "minusd;": "\u2238",
+    "minusdu;": "\u2a2a",
+    "mlcp;": "\u2adb",
+    "mldr;": "\u2026",
+    "mnplus;": "\u2213",
+    "models;": "\u22a7",
+    "mopf;": "\U0001d55e",
+    "mp;": "\u2213",
+    "mscr;": "\U0001d4c2",
+    "mstpos;": "\u223e",
+    "mu;": "\u03bc",
+    "multimap;": "\u22b8",
+    "mumap;": "\u22b8",
+    "nGg;": "\u22d9\u0338",
+    "nGt;": "\u226b\u20d2",
+    "nGtv;": "\u226b\u0338",
+    "nLeftarrow;": "\u21cd",
+    "nLeftrightarrow;": "\u21ce",
+    "nLl;": "\u22d8\u0338",
+    "nLt;": "\u226a\u20d2",
+    "nLtv;": "\u226a\u0338",
+    "nRightarrow;": "\u21cf",
+    "nVDash;": "\u22af",
+    "nVdash;": "\u22ae",
+    "nabla;": "\u2207",
+    "nacute;": "\u0144",
+    "nang;": "\u2220\u20d2",
+    "nap;": "\u2249",
+    "napE;": "\u2a70\u0338",
+    "napid;": "\u224b\u0338",
+    "napos;": "\u0149",
+    "napprox;": "\u2249",
+    "natur;": "\u266e",
+    "natural;": "\u266e",
+    "naturals;": "\u2115",
+    "nbsp": "\xa0",
+    "nbsp;": "\xa0",
+    "nbump;": "\u224e\u0338",
+    "nbumpe;": "\u224f\u0338",
+    "ncap;": "\u2a43",
+    "ncaron;": "\u0148",
+    "ncedil;": "\u0146",
+    "ncong;": "\u2247",
+    "ncongdot;": "\u2a6d\u0338",
+    "ncup;": "\u2a42",
+    "ncy;": "\u043d",
+    "ndash;": "\u2013",
+    "ne;": "\u2260",
+    "neArr;": "\u21d7",
+    "nearhk;": "\u2924",
+    "nearr;": "\u2197",
+    "nearrow;": "\u2197",
+    "nedot;": "\u2250\u0338",
+    "nequiv;": "\u2262",
+    "nesear;": "\u2928",
+    "nesim;": "\u2242\u0338",
+    "nexist;": "\u2204",
+    "nexists;": "\u2204",
+    "nfr;": "\U0001d52b",
+    "ngE;": "\u2267\u0338",
+    "nge;": "\u2271",
+    "ngeq;": "\u2271",
+    "ngeqq;": "\u2267\u0338",
+    "ngeqslant;": "\u2a7e\u0338",
+    "nges;": "\u2a7e\u0338",
+    "ngsim;": "\u2275",
+    "ngt;": "\u226f",
+    "ngtr;": "\u226f",
+    "nhArr;": "\u21ce",
+    "nharr;": "\u21ae",
+    "nhpar;": "\u2af2",
+    "ni;": "\u220b",
+    "nis;": "\u22fc",
+    "nisd;": "\u22fa",
+    "niv;": "\u220b",
+    "njcy;": "\u045a",
+    "nlArr;": "\u21cd",
+    "nlE;": "\u2266\u0338",
+    "nlarr;": "\u219a",
+    "nldr;": "\u2025",
+    "nle;": "\u2270",
+    "nleftarrow;": "\u219a",
+    "nleftrightarrow;": "\u21ae",
+    "nleq;": "\u2270",
+    "nleqq;": "\u2266\u0338",
+    "nleqslant;": "\u2a7d\u0338",
+    "nles;": "\u2a7d\u0338",
+    "nless;": "\u226e",
+    "nlsim;": "\u2274",
+    "nlt;": "\u226e",
+    "nltri;": "\u22ea",
+    "nltrie;": "\u22ec",
+    "nmid;": "\u2224",
+    "nopf;": "\U0001d55f",
+    "not": "\xac",
+    "not;": "\xac",
+    "notin;": "\u2209",
+    "notinE;": "\u22f9\u0338",
+    "notindot;": "\u22f5\u0338",
+    "notinva;": "\u2209",
+    "notinvb;": "\u22f7",
+    "notinvc;": "\u22f6",
+    "notni;": "\u220c",
+    "notniva;": "\u220c",
+    "notnivb;": "\u22fe",
+    "notnivc;": "\u22fd",
+    "npar;": "\u2226",
+    "nparallel;": "\u2226",
+    "nparsl;": "\u2afd\u20e5",
+    "npart;": "\u2202\u0338",
+    "npolint;": "\u2a14",
+    "npr;": "\u2280",
+    "nprcue;": "\u22e0",
+    "npre;": "\u2aaf\u0338",
+    "nprec;": "\u2280",
+    "npreceq;": "\u2aaf\u0338",
+    "nrArr;": "\u21cf",
+    "nrarr;": "\u219b",
+    "nrarrc;": "\u2933\u0338",
+    "nrarrw;": "\u219d\u0338",
+    "nrightarrow;": "\u219b",
+    "nrtri;": "\u22eb",
+    "nrtrie;": "\u22ed",
+    "nsc;": "\u2281",
+    "nsccue;": "\u22e1",
+    "nsce;": "\u2ab0\u0338",
+    "nscr;": "\U0001d4c3",
+    "nshortmid;": "\u2224",
+    "nshortparallel;": "\u2226",
+    "nsim;": "\u2241",
+    "nsime;": "\u2244",
+    "nsimeq;": "\u2244",
+    "nsmid;": "\u2224",
+    "nspar;": "\u2226",
+    "nsqsube;": "\u22e2",
+    "nsqsupe;": "\u22e3",
+    "nsub;": "\u2284",
+    "nsubE;": "\u2ac5\u0338",
+    "nsube;": "\u2288",
+    "nsubset;": "\u2282\u20d2",
+    "nsubseteq;": "\u2288",
+    "nsubseteqq;": "\u2ac5\u0338",
+    "nsucc;": "\u2281",
+    "nsucceq;": "\u2ab0\u0338",
+    "nsup;": "\u2285",
+    "nsupE;": "\u2ac6\u0338",
+    "nsupe;": "\u2289",
+    "nsupset;": "\u2283\u20d2",
+    "nsupseteq;": "\u2289",
+    "nsupseteqq;": "\u2ac6\u0338",
+    "ntgl;": "\u2279",
+    "ntilde": "\xf1",
+    "ntilde;": "\xf1",
+    "ntlg;": "\u2278",
+    "ntriangleleft;": "\u22ea",
+    "ntrianglelefteq;": "\u22ec",
+    "ntriangleright;": "\u22eb",
+    "ntrianglerighteq;": "\u22ed",
+    "nu;": "\u03bd",
+    "num;": "#",
+    "numero;": "\u2116",
+    "numsp;": "\u2007",
+    "nvDash;": "\u22ad",
+    "nvHarr;": "\u2904",
+    "nvap;": "\u224d\u20d2",
+    "nvdash;": "\u22ac",
+    "nvge;": "\u2265\u20d2",
+    "nvgt;": ">\u20d2",
+    "nvinfin;": "\u29de",
+    "nvlArr;": "\u2902",
+    "nvle;": "\u2264\u20d2",
+    "nvlt;": "<\u20d2",
+    "nvltrie;": "\u22b4\u20d2",
+    "nvrArr;": "\u2903",
+    "nvrtrie;": "\u22b5\u20d2",
+    "nvsim;": "\u223c\u20d2",
+    "nwArr;": "\u21d6",
+    "nwarhk;": "\u2923",
+    "nwarr;": "\u2196",
+    "nwarrow;": "\u2196",
+    "nwnear;": "\u2927",
+    "oS;": "\u24c8",
+    "oacute": "\xf3",
+    "oacute;": "\xf3",
+    "oast;": "\u229b",
+    "ocir;": "\u229a",
+    "ocirc": "\xf4",
+    "ocirc;": "\xf4",
+    "ocy;": "\u043e",
+    "odash;": "\u229d",
+    "odblac;": "\u0151",
+    "odiv;": "\u2a38",
+    "odot;": "\u2299",
+    "odsold;": "\u29bc",
+    "oelig;": "\u0153",
+    "ofcir;": "\u29bf",
+    "ofr;": "\U0001d52c",
+    "ogon;": "\u02db",
+    "ograve": "\xf2",
+    "ograve;": "\xf2",
+    "ogt;": "\u29c1",
+    "ohbar;": "\u29b5",
+    "ohm;": "\u03a9",
+    "oint;": "\u222e",
+    "olarr;": "\u21ba",
+    "olcir;": "\u29be",
+    "olcross;": "\u29bb",
+    "oline;": "\u203e",
+    "olt;": "\u29c0",
+    "omacr;": "\u014d",
+    "omega;": "\u03c9",
+    "omicron;": "\u03bf",
+    "omid;": "\u29b6",
+    "ominus;": "\u2296",
+    "oopf;": "\U0001d560",
+    "opar;": "\u29b7",
+    "operp;": "\u29b9",
+    "oplus;": "\u2295",
+    "or;": "\u2228",
+    "orarr;": "\u21bb",
+    "ord;": "\u2a5d",
+    "order;": "\u2134",
+    "orderof;": "\u2134",
+    "ordf": "\xaa",
+    "ordf;": "\xaa",
+    "ordm": "\xba",
+    "ordm;": "\xba",
+    "origof;": "\u22b6",
+    "oror;": "\u2a56",
+    "orslope;": "\u2a57",
+    "orv;": "\u2a5b",
+    "oscr;": "\u2134",
+    "oslash": "\xf8",
+    "oslash;": "\xf8",
+    "osol;": "\u2298",
+    "otilde": "\xf5",
+    "otilde;": "\xf5",
+    "otimes;": "\u2297",
+    "otimesas;": "\u2a36",
+    "ouml": "\xf6",
+    "ouml;": "\xf6",
+    "ovbar;": "\u233d",
+    "par;": "\u2225",
+    "para": "\xb6",
+    "para;": "\xb6",
+    "parallel;": "\u2225",
+    "parsim;": "\u2af3",
+    "parsl;": "\u2afd",
+    "part;": "\u2202",
+    "pcy;": "\u043f",
+    "percnt;": "%",
+    "period;": ".",
+    "permil;": "\u2030",
+    "perp;": "\u22a5",
+    "pertenk;": "\u2031",
+    "pfr;": "\U0001d52d",
+    "phi;": "\u03c6",
+    "phiv;": "\u03d5",
+    "phmmat;": "\u2133",
+    "phone;": "\u260e",
+    "pi;": "\u03c0",
+    "pitchfork;": "\u22d4",
+    "piv;": "\u03d6",
+    "planck;": "\u210f",
+    "planckh;": "\u210e",
+    "plankv;": "\u210f",
+    "plus;": "+",
+    "plusacir;": "\u2a23",
+    "plusb;": "\u229e",
+    "pluscir;": "\u2a22",
+    "plusdo;": "\u2214",
+    "plusdu;": "\u2a25",
+    "pluse;": "\u2a72",
+    "plusmn": "\xb1",
+    "plusmn;": "\xb1",
+    "plussim;": "\u2a26",
+    "plustwo;": "\u2a27",
+    "pm;": "\xb1",
+    "pointint;": "\u2a15",
+    "popf;": "\U0001d561",
+    "pound": "\xa3",
+    "pound;": "\xa3",
+    "pr;": "\u227a",
+    "prE;": "\u2ab3",
+    "prap;": "\u2ab7",
+    "prcue;": "\u227c",
+    "pre;": "\u2aaf",
+    "prec;": "\u227a",
+    "precapprox;": "\u2ab7",
+    "preccurlyeq;": "\u227c",
+    "preceq;": "\u2aaf",
+    "precnapprox;": "\u2ab9",
+    "precneqq;": "\u2ab5",
+    "precnsim;": "\u22e8",
+    "precsim;": "\u227e",
+    "prime;": "\u2032",
+    "primes;": "\u2119",
+    "prnE;": "\u2ab5",
+    "prnap;": "\u2ab9",
+    "prnsim;": "\u22e8",
+    "prod;": "\u220f",
+    "profalar;": "\u232e",
+    "profline;": "\u2312",
+    "profsurf;": "\u2313",
+    "prop;": "\u221d",
+    "propto;": "\u221d",
+    "prsim;": "\u227e",
+    "prurel;": "\u22b0",
+    "pscr;": "\U0001d4c5",
+    "psi;": "\u03c8",
+    "puncsp;": "\u2008",
+    "qfr;": "\U0001d52e",
+    "qint;": "\u2a0c",
+    "qopf;": "\U0001d562",
+    "qprime;": "\u2057",
+    "qscr;": "\U0001d4c6",
+    "quaternions;": "\u210d",
+    "quatint;": "\u2a16",
+    "quest;": "?",
+    "questeq;": "\u225f",
+    "quot": "\"",
+    "quot;": "\"",
+    "rAarr;": "\u21db",
+    "rArr;": "\u21d2",
+    "rAtail;": "\u291c",
+    "rBarr;": "\u290f",
+    "rHar;": "\u2964",
+    "race;": "\u223d\u0331",
+    "racute;": "\u0155",
+    "radic;": "\u221a",
+    "raemptyv;": "\u29b3",
+    "rang;": "\u27e9",
+    "rangd;": "\u2992",
+    "range;": "\u29a5",
+    "rangle;": "\u27e9",
+    "raquo": "\xbb",
+    "raquo;": "\xbb",
+    "rarr;": "\u2192",
+    "rarrap;": "\u2975",
+    "rarrb;": "\u21e5",
+    "rarrbfs;": "\u2920",
+    "rarrc;": "\u2933",
+    "rarrfs;": "\u291e",
+    "rarrhk;": "\u21aa",
+    "rarrlp;": "\u21ac",
+    "rarrpl;": "\u2945",
+    "rarrsim;": "\u2974",
+    "rarrtl;": "\u21a3",
+    "rarrw;": "\u219d",
+    "ratail;": "\u291a",
+    "ratio;": "\u2236",
+    "rationals;": "\u211a",
+    "rbarr;": "\u290d",
+    "rbbrk;": "\u2773",
+    "rbrace;": "}",
+    "rbrack;": "]",
+    "rbrke;": "\u298c",
+    "rbrksld;": "\u298e",
+    "rbrkslu;": "\u2990",
+    "rcaron;": "\u0159",
+    "rcedil;": "\u0157",
+    "rceil;": "\u2309",
+    "rcub;": "}",
+    "rcy;": "\u0440",
+    "rdca;": "\u2937",
+    "rdldhar;": "\u2969",
+    "rdquo;": "\u201d",
+    "rdquor;": "\u201d",
+    "rdsh;": "\u21b3",
+    "real;": "\u211c",
+    "realine;": "\u211b",
+    "realpart;": "\u211c",
+    "reals;": "\u211d",
+    "rect;": "\u25ad",
+    "reg": "\xae",
+    "reg;": "\xae",
+    "rfisht;": "\u297d",
+    "rfloor;": "\u230b",
+    "rfr;": "\U0001d52f",
+    "rhard;": "\u21c1",
+    "rharu;": "\u21c0",
+    "rharul;": "\u296c",
+    "rho;": "\u03c1",
+    "rhov;": "\u03f1",
+    "rightarrow;": "\u2192",
+    "rightarrowtail;": "\u21a3",
+    "rightharpoondown;": "\u21c1",
+    "rightharpoonup;": "\u21c0",
+    "rightleftarrows;": "\u21c4",
+    "rightleftharpoons;": "\u21cc",
+    "rightrightarrows;": "\u21c9",
+    "rightsquigarrow;": "\u219d",
+    "rightthreetimes;": "\u22cc",
+    "ring;": "\u02da",
+    "risingdotseq;": "\u2253",
+    "rlarr;": "\u21c4",
+    "rlhar;": "\u21cc",
+    "rlm;": "\u200f",
+    "rmoust;": "\u23b1",
+    "rmoustache;": "\u23b1",
+    "rnmid;": "\u2aee",
+    "roang;": "\u27ed",
+    "roarr;": "\u21fe",
+    "robrk;": "\u27e7",
+    "ropar;": "\u2986",
+    "ropf;": "\U0001d563",
+    "roplus;": "\u2a2e",
+    "rotimes;": "\u2a35",
+    "rpar;": ")",
+    "rpargt;": "\u2994",
+    "rppolint;": "\u2a12",
+    "rrarr;": "\u21c9",
+    "rsaquo;": "\u203a",
+    "rscr;": "\U0001d4c7",
+    "rsh;": "\u21b1",
+    "rsqb;": "]",
+    "rsquo;": "\u2019",
+    "rsquor;": "\u2019",
+    "rthree;": "\u22cc",
+    "rtimes;": "\u22ca",
+    "rtri;": "\u25b9",
+    "rtrie;": "\u22b5",
+    "rtrif;": "\u25b8",
+    "rtriltri;": "\u29ce",
+    "ruluhar;": "\u2968",
+    "rx;": "\u211e",
+    "sacute;": "\u015b",
+    "sbquo;": "\u201a",
+    "sc;": "\u227b",
+    "scE;": "\u2ab4",
+    "scap;": "\u2ab8",
+    "scaron;": "\u0161",
+    "sccue;": "\u227d",
+    "sce;": "\u2ab0",
+    "scedil;": "\u015f",
+    "scirc;": "\u015d",
+    "scnE;": "\u2ab6",
+    "scnap;": "\u2aba",
+    "scnsim;": "\u22e9",
+    "scpolint;": "\u2a13",
+    "scsim;": "\u227f",
+    "scy;": "\u0441",
+    "sdot;": "\u22c5",
+    "sdotb;": "\u22a1",
+    "sdote;": "\u2a66",
+    "seArr;": "\u21d8",
+    "searhk;": "\u2925",
+    "searr;": "\u2198",
+    "searrow;": "\u2198",
+    "sect": "\xa7",
+    "sect;": "\xa7",
+    "semi;": ";",
+    "seswar;": "\u2929",
+    "setminus;": "\u2216",
+    "setmn;": "\u2216",
+    "sext;": "\u2736",
+    "sfr;": "\U0001d530",
+    "sfrown;": "\u2322",
+    "sharp;": "\u266f",
+    "shchcy;": "\u0449",
+    "shcy;": "\u0448",
+    "shortmid;": "\u2223",
+    "shortparallel;": "\u2225",
+    "shy": "\xad",
+    "shy;": "\xad",
+    "sigma;": "\u03c3",
+    "sigmaf;": "\u03c2",
+    "sigmav;": "\u03c2",
+    "sim;": "\u223c",
+    "simdot;": "\u2a6a",
+    "sime;": "\u2243",
+    "simeq;": "\u2243",
+    "simg;": "\u2a9e",
+    "simgE;": "\u2aa0",
+    "siml;": "\u2a9d",
+    "simlE;": "\u2a9f",
+    "simne;": "\u2246",
+    "simplus;": "\u2a24",
+    "simrarr;": "\u2972",
+    "slarr;": "\u2190",
+    "smallsetminus;": "\u2216",
+    "smashp;": "\u2a33",
+    "smeparsl;": "\u29e4",
+    "smid;": "\u2223",
+    "smile;": "\u2323",
+    "smt;": "\u2aaa",
+    "smte;": "\u2aac",
+    "smtes;": "\u2aac\ufe00",
+    "softcy;": "\u044c",
+    "sol;": "/",
+    "solb;": "\u29c4",
+    "solbar;": "\u233f",
+    "sopf;": "\U0001d564",
+    "spades;": "\u2660",
+    "spadesuit;": "\u2660",
+    "spar;": "\u2225",
+    "sqcap;": "\u2293",
+    "sqcaps;": "\u2293\ufe00",
+    "sqcup;": "\u2294",
+    "sqcups;": "\u2294\ufe00",
+    "sqsub;": "\u228f",
+    "sqsube;": "\u2291",
+    "sqsubset;": "\u228f",
+    "sqsubseteq;": "\u2291",
+    "sqsup;": "\u2290",
+    "sqsupe;": "\u2292",
+    "sqsupset;": "\u2290",
+    "sqsupseteq;": "\u2292",
+    "squ;": "\u25a1",
+    "square;": "\u25a1",
+    "squarf;": "\u25aa",
+    "squf;": "\u25aa",
+    "srarr;": "\u2192",
+    "sscr;": "\U0001d4c8",
+    "ssetmn;": "\u2216",
+    "ssmile;": "\u2323",
+    "sstarf;": "\u22c6",
+    "star;": "\u2606",
+    "starf;": "\u2605",
+    "straightepsilon;": "\u03f5",
+    "straightphi;": "\u03d5",
+    "strns;": "\xaf",
+    "sub;": "\u2282",
+    "subE;": "\u2ac5",
+    "subdot;": "\u2abd",
+    "sube;": "\u2286",
+    "subedot;": "\u2ac3",
+    "submult;": "\u2ac1",
+    "subnE;": "\u2acb",
+    "subne;": "\u228a",
+    "subplus;": "\u2abf",
+    "subrarr;": "\u2979",
+    "subset;": "\u2282",
+    "subseteq;": "\u2286",
+    "subseteqq;": "\u2ac5",
+    "subsetneq;": "\u228a",
+    "subsetneqq;": "\u2acb",
+    "subsim;": "\u2ac7",
+    "subsub;": "\u2ad5",
+    "subsup;": "\u2ad3",
+    "succ;": "\u227b",
+    "succapprox;": "\u2ab8",
+    "succcurlyeq;": "\u227d",
+    "succeq;": "\u2ab0",
+    "succnapprox;": "\u2aba",
+    "succneqq;": "\u2ab6",
+    "succnsim;": "\u22e9",
+    "succsim;": "\u227f",
+    "sum;": "\u2211",
+    "sung;": "\u266a",
+    "sup1": "\xb9",
+    "sup1;": "\xb9",
+    "sup2": "\xb2",
+    "sup2;": "\xb2",
+    "sup3": "\xb3",
+    "sup3;": "\xb3",
+    "sup;": "\u2283",
+    "supE;": "\u2ac6",
+    "supdot;": "\u2abe",
+    "supdsub;": "\u2ad8",
+    "supe;": "\u2287",
+    "supedot;": "\u2ac4",
+    "suphsol;": "\u27c9",
+    "suphsub;": "\u2ad7",
+    "suplarr;": "\u297b",
+    "supmult;": "\u2ac2",
+    "supnE;": "\u2acc",
+    "supne;": "\u228b",
+    "supplus;": "\u2ac0",
+    "supset;": "\u2283",
+    "supseteq;": "\u2287",
+    "supseteqq;": "\u2ac6",
+    "supsetneq;": "\u228b",
+    "supsetneqq;": "\u2acc",
+    "supsim;": "\u2ac8",
+    "supsub;": "\u2ad4",
+    "supsup;": "\u2ad6",
+    "swArr;": "\u21d9",
+    "swarhk;": "\u2926",
+    "swarr;": "\u2199",
+    "swarrow;": "\u2199",
+    "swnwar;": "\u292a",
+    "szlig": "\xdf",
+    "szlig;": "\xdf",
+    "target;": "\u2316",
+    "tau;": "\u03c4",
+    "tbrk;": "\u23b4",
+    "tcaron;": "\u0165",
+    "tcedil;": "\u0163",
+    "tcy;": "\u0442",
+    "tdot;": "\u20db",
+    "telrec;": "\u2315",
+    "tfr;": "\U0001d531",
+    "there4;": "\u2234",
+    "therefore;": "\u2234",
+    "theta;": "\u03b8",
+    "thetasym;": "\u03d1",
+    "thetav;": "\u03d1",
+    "thickapprox;": "\u2248",
+    "thicksim;": "\u223c",
+    "thinsp;": "\u2009",
+    "thkap;": "\u2248",
+    "thksim;": "\u223c",
+    "thorn": "\xfe",
+    "thorn;": "\xfe",
+    "tilde;": "\u02dc",
+    "times": "\xd7",
+    "times;": "\xd7",
+    "timesb;": "\u22a0",
+    "timesbar;": "\u2a31",
+    "timesd;": "\u2a30",
+    "tint;": "\u222d",
+    "toea;": "\u2928",
+    "top;": "\u22a4",
+    "topbot;": "\u2336",
+    "topcir;": "\u2af1",
+    "topf;": "\U0001d565",
+    "topfork;": "\u2ada",
+    "tosa;": "\u2929",
+    "tprime;": "\u2034",
+    "trade;": "\u2122",
+    "triangle;": "\u25b5",
+    "triangledown;": "\u25bf",
+    "triangleleft;": "\u25c3",
+    "trianglelefteq;": "\u22b4",
+    "triangleq;": "\u225c",
+    "triangleright;": "\u25b9",
+    "trianglerighteq;": "\u22b5",
+    "tridot;": "\u25ec",
+    "trie;": "\u225c",
+    "triminus;": "\u2a3a",
+    "triplus;": "\u2a39",
+    "trisb;": "\u29cd",
+    "tritime;": "\u2a3b",
+    "trpezium;": "\u23e2",
+    "tscr;": "\U0001d4c9",
+    "tscy;": "\u0446",
+    "tshcy;": "\u045b",
+    "tstrok;": "\u0167",
+    "twixt;": "\u226c",
+    "twoheadleftarrow;": "\u219e",
+    "twoheadrightarrow;": "\u21a0",
+    "uArr;": "\u21d1",
+    "uHar;": "\u2963",
+    "uacute": "\xfa",
+    "uacute;": "\xfa",
+    "uarr;": "\u2191",
+    "ubrcy;": "\u045e",
+    "ubreve;": "\u016d",
+    "ucirc": "\xfb",
+    "ucirc;": "\xfb",
+    "ucy;": "\u0443",
+    "udarr;": "\u21c5",
+    "udblac;": "\u0171",
+    "udhar;": "\u296e",
+    "ufisht;": "\u297e",
+    "ufr;": "\U0001d532",
+    "ugrave": "\xf9",
+    "ugrave;": "\xf9",
+    "uharl;": "\u21bf",
+    "uharr;": "\u21be",
+    "uhblk;": "\u2580",
+    "ulcorn;": "\u231c",
+    "ulcorner;": "\u231c",
+    "ulcrop;": "\u230f",
+    "ultri;": "\u25f8",
+    "umacr;": "\u016b",
+    "uml": "\xa8",
+    "uml;": "\xa8",
+    "uogon;": "\u0173",
+    "uopf;": "\U0001d566",
+    "uparrow;": "\u2191",
+    "updownarrow;": "\u2195",
+    "upharpoonleft;": "\u21bf",
+    "upharpoonright;": "\u21be",
+    "uplus;": "\u228e",
+    "upsi;": "\u03c5",
+    "upsih;": "\u03d2",
+    "upsilon;": "\u03c5",
+    "upuparrows;": "\u21c8",
+    "urcorn;": "\u231d",
+    "urcorner;": "\u231d",
+    "urcrop;": "\u230e",
+    "uring;": "\u016f",
+    "urtri;": "\u25f9",
+    "uscr;": "\U0001d4ca",
+    "utdot;": "\u22f0",
+    "utilde;": "\u0169",
+    "utri;": "\u25b5",
+    "utrif;": "\u25b4",
+    "uuarr;": "\u21c8",
+    "uuml": "\xfc",
+    "uuml;": "\xfc",
+    "uwangle;": "\u29a7",
+    "vArr;": "\u21d5",
+    "vBar;": "\u2ae8",
+    "vBarv;": "\u2ae9",
+    "vDash;": "\u22a8",
+    "vangrt;": "\u299c",
+    "varepsilon;": "\u03f5",
+    "varkappa;": "\u03f0",
+    "varnothing;": "\u2205",
+    "varphi;": "\u03d5",
+    "varpi;": "\u03d6",
+    "varpropto;": "\u221d",
+    "varr;": "\u2195",
+    "varrho;": "\u03f1",
+    "varsigma;": "\u03c2",
+    "varsubsetneq;": "\u228a\ufe00",
+    "varsubsetneqq;": "\u2acb\ufe00",
+    "varsupsetneq;": "\u228b\ufe00",
+    "varsupsetneqq;": "\u2acc\ufe00",
+    "vartheta;": "\u03d1",
+    "vartriangleleft;": "\u22b2",
+    "vartriangleright;": "\u22b3",
+    "vcy;": "\u0432",
+    "vdash;": "\u22a2",
+    "vee;": "\u2228",
+    "veebar;": "\u22bb",
+    "veeeq;": "\u225a",
+    "vellip;": "\u22ee",
+    "verbar;": "|",
+    "vert;": "|",
+    "vfr;": "\U0001d533",
+    "vltri;": "\u22b2",
+    "vnsub;": "\u2282\u20d2",
+    "vnsup;": "\u2283\u20d2",
+    "vopf;": "\U0001d567",
+    "vprop;": "\u221d",
+    "vrtri;": "\u22b3",
+    "vscr;": "\U0001d4cb",
+    "vsubnE;": "\u2acb\ufe00",
+    "vsubne;": "\u228a\ufe00",
+    "vsupnE;": "\u2acc\ufe00",
+    "vsupne;": "\u228b\ufe00",
+    "vzigzag;": "\u299a",
+    "wcirc;": "\u0175",
+    "wedbar;": "\u2a5f",
+    "wedge;": "\u2227",
+    "wedgeq;": "\u2259",
+    "weierp;": "\u2118",
+    "wfr;": "\U0001d534",
+    "wopf;": "\U0001d568",
+    "wp;": "\u2118",
+    "wr;": "\u2240",
+    "wreath;": "\u2240",
+    "wscr;": "\U0001d4cc",
+    "xcap;": "\u22c2",
+    "xcirc;": "\u25ef",
+    "xcup;": "\u22c3",
+    "xdtri;": "\u25bd",
+    "xfr;": "\U0001d535",
+    "xhArr;": "\u27fa",
+    "xharr;": "\u27f7",
+    "xi;": "\u03be",
+    "xlArr;": "\u27f8",
+    "xlarr;": "\u27f5",
+    "xmap;": "\u27fc",
+    "xnis;": "\u22fb",
+    "xodot;": "\u2a00",
+    "xopf;": "\U0001d569",
+    "xoplus;": "\u2a01",
+    "xotime;": "\u2a02",
+    "xrArr;": "\u27f9",
+    "xrarr;": "\u27f6",
+    "xscr;": "\U0001d4cd",
+    "xsqcup;": "\u2a06",
+    "xuplus;": "\u2a04",
+    "xutri;": "\u25b3",
+    "xvee;": "\u22c1",
+    "xwedge;": "\u22c0",
+    "yacute": "\xfd",
+    "yacute;": "\xfd",
+    "yacy;": "\u044f",
+    "ycirc;": "\u0177",
+    "ycy;": "\u044b",
+    "yen": "\xa5",
+    "yen;": "\xa5",
+    "yfr;": "\U0001d536",
+    "yicy;": "\u0457",
+    "yopf;": "\U0001d56a",
+    "yscr;": "\U0001d4ce",
+    "yucy;": "\u044e",
+    "yuml": "\xff",
+    "yuml;": "\xff",
+    "zacute;": "\u017a",
+    "zcaron;": "\u017e",
+    "zcy;": "\u0437",
+    "zdot;": "\u017c",
+    "zeetrf;": "\u2128",
+    "zeta;": "\u03b6",
+    "zfr;": "\U0001d537",
+    "zhcy;": "\u0436",
+    "zigrarr;": "\u21dd",
+    "zopf;": "\U0001d56b",
+    "zscr;": "\U0001d4cf",
+    "zwj;": "\u200d",
+    "zwnj;": "\u200c",
+}
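+# Note: many names in the table above appear twice because HTML defines
+# legacy semicolon-less aliases for a subset of entities -- e.g. both
+# "ntilde;" and "ntilde" map to "\xf1".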
+
+replacementCharacters = {
+    0x0: "\uFFFD",
+    0x0d: "\u000D",
+    0x80: "\u20AC",
+    0x81: "\u0081",
+    0x82: "\u201A",
+    0x83: "\u0192",
+    0x84: "\u201E",
+    0x85: "\u2026",
+    0x86: "\u2020",
+    0x87: "\u2021",
+    0x88: "\u02C6",
+    0x89: "\u2030",
+    0x8A: "\u0160",
+    0x8B: "\u2039",
+    0x8C: "\u0152",
+    0x8D: "\u008D",
+    0x8E: "\u017D",
+    0x8F: "\u008F",
+    0x90: "\u0090",
+    0x91: "\u2018",
+    0x92: "\u2019",
+    0x93: "\u201C",
+    0x94: "\u201D",
+    0x95: "\u2022",
+    0x96: "\u2013",
+    0x97: "\u2014",
+    0x98: "\u02DC",
+    0x99: "\u2122",
+    0x9A: "\u0161",
+    0x9B: "\u203A",
+    0x9C: "\u0153",
+    0x9D: "\u009D",
+    0x9E: "\u017E",
+    0x9F: "\u0178",
+}
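+# Per the HTML5 character-reference rules: NULL maps to U+FFFD, and numeric
+# references in the C1 range 0x80-0x9F are reinterpreted as their
+# Windows-1252 equivalents (e.g. 0x80 -> U+20AC, the euro sign).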
+
+tokenTypes = {
+    "Doctype": 0,
+    "Characters": 1,
+    "SpaceCharacters": 2,
+    "StartTag": 3,
+    "EndTag": 4,
+    "EmptyTag": 5,
+    "Comment": 6,
+    "ParseError": 7
+}
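+# The tokenizer emits tokens whose ``type`` is one of these integers; the
+# tree-walker tokens consumed by the filter modules below use the string
+# names (e.g. "StartTag") directly.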
+
+tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"],
+                           tokenTypes["EmptyTag"]])
+
+
+# Invert the namespaces mapping; the MathML namespace maps back to the
+# conventional "math" prefix rather than its "mathml" dictionary key.
+prefixes = {v: k for k, v in namespaces.items()}
+prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
+
+
+class DataLossWarning(UserWarning):
+    """Raised when the current tree is unable to represent the input data"""
+    pass
+
+
+class _ReparseException(Exception):
+    pass
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__init__.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3385a8014eab1169842f28d95ba0fe9897ef2f61
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/alphabeticalattributes.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/alphabeticalattributes.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..01e886f92bb30f4cd579e6beac46f2cede0e0923
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/alphabeticalattributes.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/base.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/base.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d247a6fb97c013bd3dac27ac92411f8a801a4973
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/base.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/inject_meta_charset.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/inject_meta_charset.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c1589a9fe3d3da570a5a19dfd33d2244245fe3ad
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/inject_meta_charset.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/lint.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/lint.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50aae5c80b1d3d0ee09bd5b45dfa97ace40d6f13
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/lint.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/optionaltags.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/optionaltags.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c4a28c9903d1a8e2ab378fa1326723b1d7eea47
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/optionaltags.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/sanitizer.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/sanitizer.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6cfbfa78ef751d3d7c9a9b326ff3283776819927
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/sanitizer.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/whitespace.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/whitespace.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..821895d35284423be3d4f4e433139c73dcc18bdf
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/__pycache__/whitespace.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ba926e3b09a71121d3c10b962c26137221d5a34
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/alphabeticalattributes.py
@@ -0,0 +1,29 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from . import base
+
+from collections import OrderedDict
+
+
+def _attr_key(attr):
+    """Return an appropriate key for an attribute for sorting
+
+    Attributes have a namespace that can be either ``None`` or a string. We
+    can't compare the two because they're different types, so we convert
+    ``None`` to an empty string first.
+
+    """
+    return (attr[0][0] or ''), attr[0][1]
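+# e.g. (illustrative): _attr_key(((None, 'href'), '/')) == ('', 'href'),
+# while _attr_key((('ns', 'width'), '10')) == ('ns', 'width').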
+
+
+class Filter(base.Filter):
+    """Alphabetizes attributes for elements"""
+    def __iter__(self):
+        for token in base.Filter.__iter__(self):
+            if token["type"] in ("StartTag", "EmptyTag"):
+                attrs = OrderedDict()
+                for name, value in sorted(token["data"].items(),
+                                          key=_attr_key):
+                    attrs[name] = value
+                token["data"] = attrs
+            yield token
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/base.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7dbaed0fab5afb4f8947b24dd175d2239d35d6b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/base.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import, division, unicode_literals
+
+
+class Filter(object):
+    def __init__(self, source):
+        self.source = source
+
+    def __iter__(self):
+        return iter(self.source)
+
+    def __getattr__(self, name):
+        return getattr(self.source, name)
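+
+
+# Filters wrap a token iterable and can be chained, e.g. (illustrative):
+#   chained = Filter(Filter(tree_walker_tokens))
+# ``__getattr__`` transparently delegates any other attribute access to the
+# wrapped source, so wrappers keep the walker's public interface.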
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py
new file mode 100644
index 0000000000000000000000000000000000000000..aefb5c842c2f55075546065cfba3c66d137e8184
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py
@@ -0,0 +1,73 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from . import base
+
+
+class Filter(base.Filter):
+    """Injects ``<meta charset=ENCODING>`` tag into head of document"""
+    def __init__(self, source, encoding):
+        """Creates a Filter
+
+        :arg source: the source token stream
+
+        :arg encoding: the encoding to set
+
+        """
+        base.Filter.__init__(self, source)
+        self.encoding = encoding
+
+    def __iter__(self):
+        state = "pre_head"
+        meta_found = (self.encoding is None)
+        pending = []
+
+        for token in base.Filter.__iter__(self):
+            type = token["type"]
+            if type == "StartTag":
+                if token["name"].lower() == "head":
+                    state = "in_head"
+
+            elif type == "EmptyTag":
+                if token["name"].lower() == "meta":
+                    # replace charset with actual encoding
+                    has_http_equiv_content_type = False
+                    for (namespace, name), value in token["data"].items():
+                        if namespace is not None:
+                            continue
+                        elif name.lower() == 'charset':
+                            token["data"][(namespace, name)] = self.encoding
+                            meta_found = True
+                            break
+                        elif name == 'http-equiv' and value.lower() == 'content-type':
+                            has_http_equiv_content_type = True
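+                    # NB: for/else -- the branch below runs only when the loop
+                    # found no ``charset`` attribute to break on.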
+                    else:
+                        if has_http_equiv_content_type and (None, "content") in token["data"]:
+                            token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding
+                            meta_found = True
+
+                elif token["name"].lower() == "head" and not meta_found:
+                    # insert meta into empty head
+                    yield {"type": "StartTag", "name": "head",
+                           "data": token["data"]}
+                    yield {"type": "EmptyTag", "name": "meta",
+                           "data": {(None, "charset"): self.encoding}}
+                    yield {"type": "EndTag", "name": "head"}
+                    meta_found = True
+                    continue
+
+            elif type == "EndTag":
+                if token["name"].lower() == "head" and pending:
+                    # insert meta into head (if necessary) and flush pending queue
+                    yield pending.pop(0)
+                    if not meta_found:
+                        yield {"type": "EmptyTag", "name": "meta",
+                               "data": {(None, "charset"): self.encoding}}
+                    while pending:
+                        yield pending.pop(0)
+                    meta_found = True
+                    state = "post_head"
+
+            if state == "in_head":
+                pending.append(token)
+            else:
+                yield token
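+
+
+# Usage sketch (hedged; uses html5lib's documented tree-walker API):
+#   walker = html5lib.getTreeWalker("etree")
+#   tokens = Filter(walker(tree), "utf-8")  # ensures <meta charset=utf-8>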
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/lint.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/lint.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcc07eec5b2e5ea926fa8b2af199e14c9cac50dd
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/lint.py
@@ -0,0 +1,93 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from pip._vendor.six import text_type
+
+from . import base
+from ..constants import namespaces, spaceCharacters, voidElements
+
+# Collapse the frozenset of space characters into a plain string for str.strip().
+spaceCharacters = "".join(spaceCharacters)
+
+
+class Filter(base.Filter):
+    """Lints the token stream for errors
+
+    If it finds any errors, it'll raise an ``AssertionError``.
+
+    """
+    def __init__(self, source, require_matching_tags=True):
+        """Creates a Filter
+
+        :arg source: the source token stream
+
+        :arg require_matching_tags: whether or not to require matching tags
+
+        """
+        super(Filter, self).__init__(source)
+        self.require_matching_tags = require_matching_tags
+
+    def __iter__(self):
+        open_elements = []
+        for token in base.Filter.__iter__(self):
+            type = token["type"]
+            if type in ("StartTag", "EmptyTag"):
+                namespace = token["namespace"]
+                name = token["name"]
+                assert namespace is None or isinstance(namespace, text_type)
+                assert namespace != ""
+                assert isinstance(name, text_type)
+                assert name != ""
+                assert isinstance(token["data"], dict)
+                if (not namespace or namespace == namespaces["html"]) and name in voidElements:
+                    assert type == "EmptyTag"
+                else:
+                    assert type == "StartTag"
+                if type == "StartTag" and self.require_matching_tags:
+                    open_elements.append((namespace, name))
+                for (namespace, name), value in token["data"].items():
+                    assert namespace is None or isinstance(namespace, text_type)
+                    assert namespace != ""
+                    assert isinstance(name, text_type)
+                    assert name != ""
+                    assert isinstance(value, text_type)
+
+            elif type == "EndTag":
+                namespace = token["namespace"]
+                name = token["name"]
+                assert namespace is None or isinstance(namespace, text_type)
+                assert namespace != ""
+                assert isinstance(name, text_type)
+                assert name != ""
+                if (not namespace or namespace == namespaces["html"]) and name in voidElements:
+                    assert False, "Void element reported as EndTag token: %(tag)s" % {"tag": name}
+                elif self.require_matching_tags:
+                    start = open_elements.pop()
+                    assert start == (namespace, name)
+
+            elif type == "Comment":
+                data = token["data"]
+                assert isinstance(data, text_type)
+
+            elif type in ("Characters", "SpaceCharacters"):
+                data = token["data"]
+                assert isinstance(data, text_type)
+                assert data != ""
+                if type == "SpaceCharacters":
+                    assert data.strip(spaceCharacters) == ""
+
+            elif type == "Doctype":
+                name = token["name"]
+                assert name is None or isinstance(name, text_type)
+                assert token["publicId"] is None or isinstance(name, text_type)
+                assert token["systemId"] is None or isinstance(name, text_type)
+
+            elif type == "Entity":
+                assert isinstance(token["name"], text_type)
+
+            elif type == "SerializerError":
+                assert isinstance(token["data"], text_type)
+
+            else:
+                assert False, "Unknown token type: %(type)s" % {"type": type}
+
+            yield token
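+
+
+# Usage sketch (illustrative): insert anywhere in a filter chain to verify
+# well-formedness as tokens stream through:
+#   for token in Filter(walker(tree)):
+#       ...  # raises AssertionError on the first malformed token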
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a865012c16237c3c4dd3404af7ed767b98d235a
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py
@@ -0,0 +1,207 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from . import base
+
+
+class Filter(base.Filter):
+    """Removes optional tags from the token stream"""
+    def slider(self):
+        previous1 = previous2 = None
+        for token in self.source:
+            if previous1 is not None:
+                yield previous2, previous1, token
+            previous2 = previous1
+            previous1 = token
+        if previous1 is not None:
+            yield previous2, previous1, None
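+    # e.g. (illustrative) a stream [a, b, c] yields the triples
+    # (None, a, b), (a, b, c), (b, c, None): one token of lookbehind
+    # and lookahead around each token.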
+
+    def __iter__(self):
+        for previous, token, next in self.slider():
+            type = token["type"]
+            if type == "StartTag":
+                if (token["data"] or
+                        not self.is_optional_start(token["name"], previous, next)):
+                    yield token
+            elif type == "EndTag":
+                if not self.is_optional_end(token["name"], next):
+                    yield token
+            else:
+                yield token
+
+    def is_optional_start(self, tagname, previous, next):
+        type = next and next["type"] or None
+        if tagname == 'html':
+            # An html element's start tag may be omitted if the first thing
+            # inside the html element is not a space character or a comment.
+            return type not in ("Comment", "SpaceCharacters")
+        elif tagname == 'head':
+            # A head element's start tag may be omitted if the first thing
+            # inside the head element is an element.
+            # XXX: we also omit the start tag if the head element is empty
+            if type in ("StartTag", "EmptyTag"):
+                return True
+            elif type == "EndTag":
+                return next["name"] == "head"
+        elif tagname == 'body':
+            # A body element's start tag may be omitted if the first thing
+            # inside the body element is not a space character or a comment,
+            # except if the first thing inside the body element is a script
+            # or style element and the node immediately preceding the body
+            # element is a head element whose end tag has been omitted.
+            if type in ("Comment", "SpaceCharacters"):
+                return False
+            elif type == "StartTag":
+                # XXX: we do not look at the preceding event, so we never omit
+                # the body element's start tag if it's followed by a script or
+                # a style element.
+                return next["name"] not in ('script', 'style')
+            else:
+                return True
+        elif tagname == 'colgroup':
+            # A colgroup element's start tag may be omitted if the first thing
+            # inside the colgroup element is a col element, and if the element
+            # is not immediately preceded by another colgroup element whose
+            # end tag has been omitted.
+            if type in ("StartTag", "EmptyTag"):
+                # XXX: we do not look at the preceding event, so instead we never
+                # omit the colgroup element's end tag when it is immediately
+                # followed by another colgroup element. See is_optional_end.
+                return next["name"] == "col"
+            else:
+                return False
+        elif tagname == 'tbody':
+            # A tbody element's start tag may be omitted if the first thing
+            # inside the tbody element is a tr element, and if the element is
+            # not immediately preceded by a tbody, thead, or tfoot element
+            # whose end tag has been omitted.
+            if type == "StartTag":
+                # omit the thead and tfoot elements' end tag when they are
+                # immediately followed by a tbody element. See is_optional_end.
+                if previous and previous['type'] == 'EndTag' and \
+                        previous['name'] in ('tbody', 'thead', 'tfoot'):
+                    return False
+                return next["name"] == 'tr'
+            else:
+                return False
+        return False
+
+    def is_optional_end(self, tagname, next):
+        type = next and next["type"] or None
+        if tagname in ('html', 'head', 'body'):
+            # An html element's end tag may be omitted if the html element
+            # is not immediately followed by a space character or a comment.
+            return type not in ("Comment", "SpaceCharacters")
+        elif tagname in ('li', 'optgroup', 'tr'):
+            # A li element's end tag may be omitted if the li element is
+            # immediately followed by another li element or if there is
+            # no more content in the parent element.
+            # An optgroup element's end tag may be omitted if the optgroup
+            # element is immediately followed by another optgroup element,
+            # or if there is no more content in the parent element.
+            # A tr element's end tag may be omitted if the tr element is
+            # immediately followed by another tr element, or if there is
+            # no more content in the parent element.
+            if type == "StartTag":
+                return next["name"] == tagname
+            else:
+                return type == "EndTag" or type is None
+        elif tagname in ('dt', 'dd'):
+            # A dt element's end tag may be omitted if the dt element is
+            # immediately followed by another dt element or a dd element.
+            # A dd element's end tag may be omitted if the dd element is
+            # immediately followed by another dd element or a dt element,
+            # or if there is no more content in the parent element.
+            if type == "StartTag":
+                return next["name"] in ('dt', 'dd')
+            elif tagname == 'dd':
+                return type == "EndTag" or type is None
+            else:
+                return False
+        elif tagname == 'p':
+            # A p element's end tag may be omitted if the p element is
+            # immediately followed by an address, article, aside,
+            # blockquote, datagrid, dialog, dir, div, dl, fieldset,
+            # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
+            # nav, ol, p, pre, section, table, or ul element, or if
+            # there is no more content in the parent element.
+            if type in ("StartTag", "EmptyTag"):
+                return next["name"] in ('address', 'article', 'aside',
+                                        'blockquote', 'datagrid', 'dialog',
+                                        'dir', 'div', 'dl', 'fieldset', 'footer',
+                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
+                                        'header', 'hr', 'menu', 'nav', 'ol',
+                                        'p', 'pre', 'section', 'table', 'ul')
+            else:
+                return type == "EndTag" or type is None
+        elif tagname == 'option':
+            # An option element's end tag may be omitted if the option
+            # element is immediately followed by another option element,
+            # or if it is immediately followed by an optgroup
+            # element, or if there is no more content in the parent
+            # element.
+            if type == "StartTag":
+                return next["name"] in ('option', 'optgroup')
+            else:
+                return type == "EndTag" or type is None
+        elif tagname in ('rt', 'rp'):
+            # An rt element's end tag may be omitted if the rt element is
+            # immediately followed by an rt or rp element, or if there is
+            # no more content in the parent element.
+            # An rp element's end tag may be omitted if the rp element is
+            # immediately followed by an rt or rp element, or if there is
+            # no more content in the parent element.
+            if type == "StartTag":
+                return next["name"] in ('rt', 'rp')
+            else:
+                return type == "EndTag" or type is None
+        elif tagname == 'colgroup':
+            # A colgroup element's end tag may be omitted if the colgroup
+            # element is not immediately followed by a space character or
+            # a comment.
+            if type in ("Comment", "SpaceCharacters"):
+                return False
+            elif type == "StartTag":
+                # XXX: we also look for an immediately following colgroup
+                # element. See is_optional_start.
+                return next["name"] != 'colgroup'
+            else:
+                return True
+        elif tagname in ('thead', 'tbody'):
+            # A thead element's end tag may be omitted if the thead element
+            # is immediately followed by a tbody or tfoot element.
+            # A tbody element's end tag may be omitted if the tbody element
+            # is immediately followed by a tbody or tfoot element, or if
+            # there is no more content in the parent element.
+            # XXX: we never omit the end tag when the following element is
+            # a tbody. See is_optional_start.
+            if type == "StartTag":
+                return next["name"] in ['tbody', 'tfoot']
+            elif tagname == 'tbody':
+                return type == "EndTag" or type is None
+            else:
+                return False
+        elif tagname == 'tfoot':
+            # A tfoot element's end tag may be omitted if the tfoot element
+            # is immediately followed by a tbody element, or if there is no
+            # more content in the parent element.
+            # XXX: we never omit the end tag when the following element is
+            # a tbody. See is_optional_start.
+            if type == "StartTag":
+                return next["name"] == 'tbody'
+            else:
+                return type == "EndTag" or type is None
+        elif tagname in ('td', 'th'):
+            # A td element's end tag may be omitted if the td element is
+            # immediately followed by a td or th element, or if there is
+            # no more content in the parent element.
+            # A th element's end tag may be omitted if the th element is
+            # immediately followed by a td or th element, or if there is
+            # no more content in the parent element.
+            if type == "StartTag":
+                return next["name"] in ('td', 'th')
+            else:
+                return type == "EndTag" or type is None
+        return False
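+
+
+# This filter backs the serializer's ``omit_optional_tags`` option (hedged:
+# per html5lib's documented serializer behaviour), e.g.
+#   html5lib.serialize(tree, omit_optional_tags=True)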
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..af8e77b81e373c4639114f764daa6e182dce1422
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.py
@@ -0,0 +1,896 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import re
+from xml.sax.saxutils import escape, unescape
+
+from pip._vendor.six.moves import urllib_parse as urlparse
+
+from . import base
+from ..constants import namespaces, prefixes
+
+__all__ = ["Filter"]
+
+
+allowed_elements = frozenset((
+    (namespaces['html'], 'a'),
+    (namespaces['html'], 'abbr'),
+    (namespaces['html'], 'acronym'),
+    (namespaces['html'], 'address'),
+    (namespaces['html'], 'area'),
+    (namespaces['html'], 'article'),
+    (namespaces['html'], 'aside'),
+    (namespaces['html'], 'audio'),
+    (namespaces['html'], 'b'),
+    (namespaces['html'], 'big'),
+    (namespaces['html'], 'blockquote'),
+    (namespaces['html'], 'br'),
+    (namespaces['html'], 'button'),
+    (namespaces['html'], 'canvas'),
+    (namespaces['html'], 'caption'),
+    (namespaces['html'], 'center'),
+    (namespaces['html'], 'cite'),
+    (namespaces['html'], 'code'),
+    (namespaces['html'], 'col'),
+    (namespaces['html'], 'colgroup'),
+    (namespaces['html'], 'command'),
+    (namespaces['html'], 'datagrid'),
+    (namespaces['html'], 'datalist'),
+    (namespaces['html'], 'dd'),
+    (namespaces['html'], 'del'),
+    (namespaces['html'], 'details'),
+    (namespaces['html'], 'dfn'),
+    (namespaces['html'], 'dialog'),
+    (namespaces['html'], 'dir'),
+    (namespaces['html'], 'div'),
+    (namespaces['html'], 'dl'),
+    (namespaces['html'], 'dt'),
+    (namespaces['html'], 'em'),
+    (namespaces['html'], 'event-source'),
+    (namespaces['html'], 'fieldset'),
+    (namespaces['html'], 'figcaption'),
+    (namespaces['html'], 'figure'),
+    (namespaces['html'], 'footer'),
+    (namespaces['html'], 'font'),
+    (namespaces['html'], 'form'),
+    (namespaces['html'], 'header'),
+    (namespaces['html'], 'h1'),
+    (namespaces['html'], 'h2'),
+    (namespaces['html'], 'h3'),
+    (namespaces['html'], 'h4'),
+    (namespaces['html'], 'h5'),
+    (namespaces['html'], 'h6'),
+    (namespaces['html'], 'hr'),
+    (namespaces['html'], 'i'),
+    (namespaces['html'], 'img'),
+    (namespaces['html'], 'input'),
+    (namespaces['html'], 'ins'),
+    (namespaces['html'], 'keygen'),
+    (namespaces['html'], 'kbd'),
+    (namespaces['html'], 'label'),
+    (namespaces['html'], 'legend'),
+    (namespaces['html'], 'li'),
+    (namespaces['html'], 'm'),
+    (namespaces['html'], 'map'),
+    (namespaces['html'], 'menu'),
+    (namespaces['html'], 'meter'),
+    (namespaces['html'], 'multicol'),
+    (namespaces['html'], 'nav'),
+    (namespaces['html'], 'nextid'),
+    (namespaces['html'], 'ol'),
+    (namespaces['html'], 'output'),
+    (namespaces['html'], 'optgroup'),
+    (namespaces['html'], 'option'),
+    (namespaces['html'], 'p'),
+    (namespaces['html'], 'pre'),
+    (namespaces['html'], 'progress'),
+    (namespaces['html'], 'q'),
+    (namespaces['html'], 's'),
+    (namespaces['html'], 'samp'),
+    (namespaces['html'], 'section'),
+    (namespaces['html'], 'select'),
+    (namespaces['html'], 'small'),
+    (namespaces['html'], 'sound'),
+    (namespaces['html'], 'source'),
+    (namespaces['html'], 'spacer'),
+    (namespaces['html'], 'span'),
+    (namespaces['html'], 'strike'),
+    (namespaces['html'], 'strong'),
+    (namespaces['html'], 'sub'),
+    (namespaces['html'], 'sup'),
+    (namespaces['html'], 'table'),
+    (namespaces['html'], 'tbody'),
+    (namespaces['html'], 'td'),
+    (namespaces['html'], 'textarea'),
+    (namespaces['html'], 'time'),
+    (namespaces['html'], 'tfoot'),
+    (namespaces['html'], 'th'),
+    (namespaces['html'], 'thead'),
+    (namespaces['html'], 'tr'),
+    (namespaces['html'], 'tt'),
+    (namespaces['html'], 'u'),
+    (namespaces['html'], 'ul'),
+    (namespaces['html'], 'var'),
+    (namespaces['html'], 'video'),
+    (namespaces['mathml'], 'maction'),
+    (namespaces['mathml'], 'math'),
+    (namespaces['mathml'], 'merror'),
+    (namespaces['mathml'], 'mfrac'),
+    (namespaces['mathml'], 'mi'),
+    (namespaces['mathml'], 'mmultiscripts'),
+    (namespaces['mathml'], 'mn'),
+    (namespaces['mathml'], 'mo'),
+    (namespaces['mathml'], 'mover'),
+    (namespaces['mathml'], 'mpadded'),
+    (namespaces['mathml'], 'mphantom'),
+    (namespaces['mathml'], 'mprescripts'),
+    (namespaces['mathml'], 'mroot'),
+    (namespaces['mathml'], 'mrow'),
+    (namespaces['mathml'], 'mspace'),
+    (namespaces['mathml'], 'msqrt'),
+    (namespaces['mathml'], 'mstyle'),
+    (namespaces['mathml'], 'msub'),
+    (namespaces['mathml'], 'msubsup'),
+    (namespaces['mathml'], 'msup'),
+    (namespaces['mathml'], 'mtable'),
+    (namespaces['mathml'], 'mtd'),
+    (namespaces['mathml'], 'mtext'),
+    (namespaces['mathml'], 'mtr'),
+    (namespaces['mathml'], 'munder'),
+    (namespaces['mathml'], 'munderover'),
+    (namespaces['mathml'], 'none'),
+    (namespaces['svg'], 'a'),
+    (namespaces['svg'], 'animate'),
+    (namespaces['svg'], 'animateColor'),
+    (namespaces['svg'], 'animateMotion'),
+    (namespaces['svg'], 'animateTransform'),
+    (namespaces['svg'], 'clipPath'),
+    (namespaces['svg'], 'circle'),
+    (namespaces['svg'], 'defs'),
+    (namespaces['svg'], 'desc'),
+    (namespaces['svg'], 'ellipse'),
+    (namespaces['svg'], 'font-face'),
+    (namespaces['svg'], 'font-face-name'),
+    (namespaces['svg'], 'font-face-src'),
+    (namespaces['svg'], 'g'),
+    (namespaces['svg'], 'glyph'),
+    (namespaces['svg'], 'hkern'),
+    (namespaces['svg'], 'linearGradient'),
+    (namespaces['svg'], 'line'),
+    (namespaces['svg'], 'marker'),
+    (namespaces['svg'], 'metadata'),
+    (namespaces['svg'], 'missing-glyph'),
+    (namespaces['svg'], 'mpath'),
+    (namespaces['svg'], 'path'),
+    (namespaces['svg'], 'polygon'),
+    (namespaces['svg'], 'polyline'),
+    (namespaces['svg'], 'radialGradient'),
+    (namespaces['svg'], 'rect'),
+    (namespaces['svg'], 'set'),
+    (namespaces['svg'], 'stop'),
+    (namespaces['svg'], 'svg'),
+    (namespaces['svg'], 'switch'),
+    (namespaces['svg'], 'text'),
+    (namespaces['svg'], 'title'),
+    (namespaces['svg'], 'tspan'),
+    (namespaces['svg'], 'use'),
+))
+
+allowed_attributes = frozenset((
+    # HTML attributes
+    (None, 'abbr'),
+    (None, 'accept'),
+    (None, 'accept-charset'),
+    (None, 'accesskey'),
+    (None, 'action'),
+    (None, 'align'),
+    (None, 'alt'),
+    (None, 'autocomplete'),
+    (None, 'autofocus'),
+    (None, 'axis'),
+    (None, 'background'),
+    (None, 'balance'),
+    (None, 'bgcolor'),
+    (None, 'bgproperties'),
+    (None, 'border'),
+    (None, 'bordercolor'),
+    (None, 'bordercolordark'),
+    (None, 'bordercolorlight'),
+    (None, 'bottompadding'),
+    (None, 'cellpadding'),
+    (None, 'cellspacing'),
+    (None, 'ch'),
+    (None, 'challenge'),
+    (None, 'char'),
+    (None, 'charoff'),
+    (None, 'choff'),
+    (None, 'charset'),
+    (None, 'checked'),
+    (None, 'cite'),
+    (None, 'class'),
+    (None, 'clear'),
+    (None, 'color'),
+    (None, 'cols'),
+    (None, 'colspan'),
+    (None, 'compact'),
+    (None, 'contenteditable'),
+    (None, 'controls'),
+    (None, 'coords'),
+    (None, 'data'),
+    (None, 'datafld'),
+    (None, 'datapagesize'),
+    (None, 'datasrc'),
+    (None, 'datetime'),
+    (None, 'default'),
+    (None, 'delay'),
+    (None, 'dir'),
+    (None, 'disabled'),
+    (None, 'draggable'),
+    (None, 'dynsrc'),
+    (None, 'enctype'),
+    (None, 'end'),
+    (None, 'face'),
+    (None, 'for'),
+    (None, 'form'),
+    (None, 'frame'),
+    (None, 'galleryimg'),
+    (None, 'gutter'),
+    (None, 'headers'),
+    (None, 'height'),
+    (None, 'hidefocus'),
+    (None, 'hidden'),
+    (None, 'high'),
+    (None, 'href'),
+    (None, 'hreflang'),
+    (None, 'hspace'),
+    (None, 'icon'),
+    (None, 'id'),
+    (None, 'inputmode'),
+    (None, 'ismap'),
+    (None, 'keytype'),
+    (None, 'label'),
+    (None, 'leftspacing'),
+    (None, 'lang'),
+    (None, 'list'),
+    (None, 'longdesc'),
+    (None, 'loop'),
+    (None, 'loopcount'),
+    (None, 'loopend'),
+    (None, 'loopstart'),
+    (None, 'low'),
+    (None, 'lowsrc'),
+    (None, 'max'),
+    (None, 'maxlength'),
+    (None, 'media'),
+    (None, 'method'),
+    (None, 'min'),
+    (None, 'multiple'),
+    (None, 'name'),
+    (None, 'nohref'),
+    (None, 'noshade'),
+    (None, 'nowrap'),
+    (None, 'open'),
+    (None, 'optimum'),
+    (None, 'pattern'),
+    (None, 'ping'),
+    (None, 'point-size'),
+    (None, 'poster'),
+    (None, 'pqg'),
+    (None, 'preload'),
+    (None, 'prompt'),
+    (None, 'radiogroup'),
+    (None, 'readonly'),
+    (None, 'rel'),
+    (None, 'repeat-max'),
+    (None, 'repeat-min'),
+    (None, 'replace'),
+    (None, 'required'),
+    (None, 'rev'),
+    (None, 'rightspacing'),
+    (None, 'rows'),
+    (None, 'rowspan'),
+    (None, 'rules'),
+    (None, 'scope'),
+    (None, 'selected'),
+    (None, 'shape'),
+    (None, 'size'),
+    (None, 'span'),
+    (None, 'src'),
+    (None, 'start'),
+    (None, 'step'),
+    (None, 'style'),
+    (None, 'summary'),
+    (None, 'suppress'),
+    (None, 'tabindex'),
+    (None, 'target'),
+    (None, 'template'),
+    (None, 'title'),
+    (None, 'toppadding'),
+    (None, 'type'),
+    (None, 'unselectable'),
+    (None, 'usemap'),
+    (None, 'urn'),
+    (None, 'valign'),
+    (None, 'value'),
+    (None, 'variable'),
+    (None, 'volume'),
+    (None, 'vspace'),
+    (None, 'vrml'),
+    (None, 'width'),
+    (None, 'wrap'),
+    (namespaces['xml'], 'lang'),
+    # MathML attributes
+    (None, 'actiontype'),
+    (None, 'align'),
+    (None, 'columnalign'),
+    (None, 'columnlines'),
+    (None, 'columnspacing'),
+    (None, 'columnspan'),
+    (None, 'depth'),
+    (None, 'display'),
+    (None, 'displaystyle'),
+    (None, 'equalcolumns'),
+    (None, 'equalrows'),
+    (None, 'fence'),
+    (None, 'fontstyle'),
+    (None, 'fontweight'),
+    (None, 'frame'),
+    (None, 'height'),
+    (None, 'linethickness'),
+    (None, 'lspace'),
+    (None, 'mathbackground'),
+    (None, 'mathcolor'),
+    (None, 'mathvariant'),
+    (None, 'maxsize'),
+    (None, 'minsize'),
+    (None, 'other'),
+    (None, 'rowalign'),
+    (None, 'rowlines'),
+    (None, 'rowspacing'),
+    (None, 'rowspan'),
+    (None, 'rspace'),
+    (None, 'scriptlevel'),
+    (None, 'selection'),
+    (None, 'separator'),
+    (None, 'stretchy'),
+    (None, 'width'),
+    (namespaces['xlink'], 'href'),
+    (namespaces['xlink'], 'show'),
+    (namespaces['xlink'], 'type'),
+    # SVG attributes
+    (None, 'accent-height'),
+    (None, 'accumulate'),
+    (None, 'additive'),
+    (None, 'alphabetic'),
+    (None, 'arabic-form'),
+    (None, 'ascent'),
+    (None, 'attributeName'),
+    (None, 'attributeType'),
+    (None, 'baseProfile'),
+    (None, 'bbox'),
+    (None, 'begin'),
+    (None, 'by'),
+    (None, 'calcMode'),
+    (None, 'cap-height'),
+    (None, 'class'),
+    (None, 'clip-path'),
+    (None, 'color'),
+    (None, 'color-rendering'),
+    (None, 'content'),
+    (None, 'cx'),
+    (None, 'cy'),
+    (None, 'd'),
+    (None, 'dx'),
+    (None, 'dy'),
+    (None, 'descent'),
+    (None, 'display'),
+    (None, 'dur'),
+    (None, 'end'),
+    (None, 'fill'),
+    (None, 'fill-opacity'),
+    (None, 'fill-rule'),
+    (None, 'font-family'),
+    (None, 'font-size'),
+    (None, 'font-stretch'),
+    (None, 'font-style'),
+    (None, 'font-variant'),
+    (None, 'font-weight'),
+    (None, 'from'),
+    (None, 'fx'),
+    (None, 'fy'),
+    (None, 'g1'),
+    (None, 'g2'),
+    (None, 'glyph-name'),
+    (None, 'gradientUnits'),
+    (None, 'hanging'),
+    (None, 'height'),
+    (None, 'horiz-adv-x'),
+    (None, 'horiz-origin-x'),
+    (None, 'id'),
+    (None, 'ideographic'),
+    (None, 'k'),
+    (None, 'keyPoints'),
+    (None, 'keySplines'),
+    (None, 'keyTimes'),
+    (None, 'lang'),
+    (None, 'marker-end'),
+    (None, 'marker-mid'),
+    (None, 'marker-start'),
+    (None, 'markerHeight'),
+    (None, 'markerUnits'),
+    (None, 'markerWidth'),
+    (None, 'mathematical'),
+    (None, 'max'),
+    (None, 'min'),
+    (None, 'name'),
+    (None, 'offset'),
+    (None, 'opacity'),
+    (None, 'orient'),
+    (None, 'origin'),
+    (None, 'overline-position'),
+    (None, 'overline-thickness'),
+    (None, 'panose-1'),
+    (None, 'path'),
+    (None, 'pathLength'),
+    (None, 'points'),
+    (None, 'preserveAspectRatio'),
+    (None, 'r'),
+    (None, 'refX'),
+    (None, 'refY'),
+    (None, 'repeatCount'),
+    (None, 'repeatDur'),
+    (None, 'requiredExtensions'),
+    (None, 'requiredFeatures'),
+    (None, 'restart'),
+    (None, 'rotate'),
+    (None, 'rx'),
+    (None, 'ry'),
+    (None, 'slope'),
+    (None, 'stemh'),
+    (None, 'stemv'),
+    (None, 'stop-color'),
+    (None, 'stop-opacity'),
+    (None, 'strikethrough-position'),
+    (None, 'strikethrough-thickness'),
+    (None, 'stroke'),
+    (None, 'stroke-dasharray'),
+    (None, 'stroke-dashoffset'),
+    (None, 'stroke-linecap'),
+    (None, 'stroke-linejoin'),
+    (None, 'stroke-miterlimit'),
+    (None, 'stroke-opacity'),
+    (None, 'stroke-width'),
+    (None, 'systemLanguage'),
+    (None, 'target'),
+    (None, 'text-anchor'),
+    (None, 'to'),
+    (None, 'transform'),
+    (None, 'type'),
+    (None, 'u1'),
+    (None, 'u2'),
+    (None, 'underline-position'),
+    (None, 'underline-thickness'),
+    (None, 'unicode'),
+    (None, 'unicode-range'),
+    (None, 'units-per-em'),
+    (None, 'values'),
+    (None, 'version'),
+    (None, 'viewBox'),
+    (None, 'visibility'),
+    (None, 'width'),
+    (None, 'widths'),
+    (None, 'x'),
+    (None, 'x-height'),
+    (None, 'x1'),
+    (None, 'x2'),
+    (namespaces['xlink'], 'actuate'),
+    (namespaces['xlink'], 'arcrole'),
+    (namespaces['xlink'], 'href'),
+    (namespaces['xlink'], 'role'),
+    (namespaces['xlink'], 'show'),
+    (namespaces['xlink'], 'title'),
+    (namespaces['xlink'], 'type'),
+    (namespaces['xml'], 'base'),
+    (namespaces['xml'], 'lang'),
+    (namespaces['xml'], 'space'),
+    (None, 'y'),
+    (None, 'y1'),
+    (None, 'y2'),
+    (None, 'zoomAndPan'),
+))
+
+attr_val_is_uri = frozenset((
+    (None, 'href'),
+    (None, 'src'),
+    (None, 'cite'),
+    (None, 'action'),
+    (None, 'longdesc'),
+    (None, 'poster'),
+    (None, 'background'),
+    (None, 'datasrc'),
+    (None, 'dynsrc'),
+    (None, 'lowsrc'),
+    (None, 'ping'),
+    (namespaces['xlink'], 'href'),
+    (namespaces['xml'], 'base'),
+))
+
+svg_attr_val_allows_ref = frozenset((
+    (None, 'clip-path'),
+    (None, 'color-profile'),
+    (None, 'cursor'),
+    (None, 'fill'),
+    (None, 'filter'),
+    (None, 'marker'),
+    (None, 'marker-start'),
+    (None, 'marker-mid'),
+    (None, 'marker-end'),
+    (None, 'mask'),
+    (None, 'stroke'),
+))
+
+svg_allow_local_href = frozenset((
+    (None, 'altGlyph'),
+    (None, 'animate'),
+    (None, 'animateColor'),
+    (None, 'animateMotion'),
+    (None, 'animateTransform'),
+    (None, 'cursor'),
+    (None, 'feImage'),
+    (None, 'filter'),
+    (None, 'linearGradient'),
+    (None, 'pattern'),
+    (None, 'radialGradient'),
+    (None, 'textpath'),
+    (None, 'tref'),
+    (None, 'set'),
+    (None, 'use')
+))
+
+allowed_css_properties = frozenset((
+    'azimuth',
+    'background-color',
+    'border-bottom-color',
+    'border-collapse',
+    'border-color',
+    'border-left-color',
+    'border-right-color',
+    'border-top-color',
+    'clear',
+    'color',
+    'cursor',
+    'direction',
+    'display',
+    'elevation',
+    'float',
+    'font',
+    'font-family',
+    'font-size',
+    'font-style',
+    'font-variant',
+    'font-weight',
+    'height',
+    'letter-spacing',
+    'line-height',
+    'overflow',
+    'pause',
+    'pause-after',
+    'pause-before',
+    'pitch',
+    'pitch-range',
+    'richness',
+    'speak',
+    'speak-header',
+    'speak-numeral',
+    'speak-punctuation',
+    'speech-rate',
+    'stress',
+    'text-align',
+    'text-decoration',
+    'text-indent',
+    'unicode-bidi',
+    'vertical-align',
+    'voice-family',
+    'volume',
+    'white-space',
+    'width',
+))
+
+allowed_css_keywords = frozenset((
+    'auto',
+    'aqua',
+    'black',
+    'block',
+    'blue',
+    'bold',
+    'both',
+    'bottom',
+    'brown',
+    'center',
+    'collapse',
+    'dashed',
+    'dotted',
+    'fuchsia',
+    'gray',
+    'green',
+    '!important',
+    'italic',
+    'left',
+    'lime',
+    'maroon',
+    'medium',
+    'none',
+    'navy',
+    'normal',
+    'nowrap',
+    'olive',
+    'pointer',
+    'purple',
+    'red',
+    'right',
+    'solid',
+    'silver',
+    'teal',
+    'top',
+    'transparent',
+    'underline',
+    'white',
+    'yellow',
+))
+
+allowed_svg_properties = frozenset((
+    'fill',
+    'fill-opacity',
+    'fill-rule',
+    'stroke',
+    'stroke-width',
+    'stroke-linecap',
+    'stroke-linejoin',
+    'stroke-opacity',
+))
+
+allowed_protocols = frozenset((
+    'ed2k',
+    'ftp',
+    'http',
+    'https',
+    'irc',
+    'mailto',
+    'news',
+    'gopher',
+    'nntp',
+    'telnet',
+    'webcal',
+    'xmpp',
+    'callto',
+    'feed',
+    'urn',
+    'aim',
+    'rsync',
+    'tag',
+    'ssh',
+    'sftp',
+    'rtsp',
+    'afs',
+    'data',
+))
+
+allowed_content_types = frozenset((
+    'image/png',
+    'image/jpeg',
+    'image/gif',
+    'image/webp',
+    'image/bmp',
+    'text/plain',
+))
+
+
+data_content_type = re.compile(r'''
+                                ^
+                                # Match a content type <application>/<type>
+                                (?P<content_type>[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+)
+                                # Match any character set and encoding
+                                (?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?)
+                                  |(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?)
+                                # Assume the rest is data
+                                ,.*
+                                $
+                                ''',
+                               re.VERBOSE)
+
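+# For example, for the URI 'data:image/png;base64,iVBORw0=' the path
+# component 'image/png;base64,iVBORw0=' matches, and the media type is
+# available via the named group:
+#
+#     >>> data_content_type.match('image/png;base64,iVBORw0=').group('content_type')
+#     'image/png'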
+
+class Filter(base.Filter):
+    """Sanitizes token stream of XHTML+MathML+SVG and of inline style attributes"""
+    def __init__(self,
+                 source,
+                 allowed_elements=allowed_elements,
+                 allowed_attributes=allowed_attributes,
+                 allowed_css_properties=allowed_css_properties,
+                 allowed_css_keywords=allowed_css_keywords,
+                 allowed_svg_properties=allowed_svg_properties,
+                 allowed_protocols=allowed_protocols,
+                 allowed_content_types=allowed_content_types,
+                 attr_val_is_uri=attr_val_is_uri,
+                 svg_attr_val_allows_ref=svg_attr_val_allows_ref,
+                 svg_allow_local_href=svg_allow_local_href):
+        """Creates a Filter
+
+        :arg allowed_elements: set of elements to allow--everything else will
+            be escaped
+
+        :arg allowed_attributes: set of attributes to allow in
+            elements--everything else will be stripped
+
+        :arg allowed_css_properties: set of CSS properties to allow--everything
+            else will be stripped
+
+        :arg allowed_css_keywords: set of CSS keywords to allow--everything
+            else will be stripped
+
+        :arg allowed_svg_properties: set of SVG properties to allow--everything
+            else will be removed
+
+        :arg allowed_protocols: set of allowed protocols for URIs
+
+        :arg allowed_content_types: set of allowed content types for ``data`` URIs.
+
+        :arg attr_val_is_uri: set of attributes that have URI values--values
+            that have a scheme not listed in ``allowed_protocols`` are removed
+
+        :arg svg_attr_val_allows_ref: set of SVG attributes that can have
+            references
+
+        :arg svg_allow_local_href: set of SVG elements that may keep local
+            (fragment) hrefs--non-local hrefs on these elements are removed
+
+        """
+        super(Filter, self).__init__(source)
+        self.allowed_elements = allowed_elements
+        self.allowed_attributes = allowed_attributes
+        self.allowed_css_properties = allowed_css_properties
+        self.allowed_css_keywords = allowed_css_keywords
+        self.allowed_svg_properties = allowed_svg_properties
+        self.allowed_protocols = allowed_protocols
+        self.allowed_content_types = allowed_content_types
+        self.attr_val_is_uri = attr_val_is_uri
+        self.svg_attr_val_allows_ref = svg_attr_val_allows_ref
+        self.svg_allow_local_href = svg_allow_local_href
+
+    def __iter__(self):
+        for token in base.Filter.__iter__(self):
+            token = self.sanitize_token(token)
+            if token:
+                yield token
+
+    # Sanitize the html, escaping all elements not in ALLOWED_ELEMENTS, and
+    # stripping out all attributes not in ALLOWED_ATTRIBUTES. Style attributes
+    # are parsed, and a restricted set, specified by ALLOWED_CSS_PROPERTIES and
+    # ALLOWED_CSS_KEYWORDS, is allowed through. Attributes in ATTR_VAL_IS_URI
+    # are scanned, and only URI schemes specified in ALLOWED_PROTOCOLS are
+    # allowed.
+    #
+    #   sanitize_html('<script> do_nasty_stuff() </script>')
+    #    => &lt;script> do_nasty_stuff() &lt;/script>
+    #   sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
+    #    => <a>Click here for $100</a>
+    def sanitize_token(self, token):
+
+        # accommodate filters which use token_type differently
+        token_type = token["type"]
+        if token_type in ("StartTag", "EndTag", "EmptyTag"):
+            name = token["name"]
+            namespace = token["namespace"]
+            if ((namespace, name) in self.allowed_elements or
+                (namespace is None and
+                 (namespaces["html"], name) in self.allowed_elements)):
+                return self.allowed_token(token)
+            else:
+                return self.disallowed_token(token)
+        elif token_type == "Comment":
+            pass
+        else:
+            return token
+
+    def allowed_token(self, token):
+        if "data" in token:
+            attrs = token["data"]
+            attr_names = set(attrs.keys())
+
+            # Remove forbidden attributes
+            for to_remove in (attr_names - self.allowed_attributes):
+                del token["data"][to_remove]
+                attr_names.remove(to_remove)
+
+            # Remove attributes with disallowed URL values
+            for attr in (attr_names & self.attr_val_is_uri):
+                assert attr in attrs
+                # I don't have a clue where this regexp comes from or why it matches those
+                # characters, nor why we call unescape. I just know it's always been here.
+                # Should you be worried by this comment in a sanitizer? Yes. On the other hand, all
+                # this will do is remove *more* than it otherwise would.
+                val_unescaped = re.sub("[`\x00-\x20\x7f-\xa0\\s]+", '',
+                                       unescape(attrs[attr])).lower()
+                # remove replacement characters from unescaped characters
+                val_unescaped = val_unescaped.replace("\ufffd", "")
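+                # e.g. 'java\tscript:alert(1)' collapses to
+                # 'javascript:alert(1)' here, so the scheme check below
+                # cannot be dodged with embedded control characters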
+                try:
+                    uri = urlparse.urlparse(val_unescaped)
+                except ValueError:
+                    uri = None
+                    del attrs[attr]
+                if uri and uri.scheme:
+                    if uri.scheme not in self.allowed_protocols:
+                        del attrs[attr]
+                    # elif, not a second if: the attribute is already gone
+                    # when a disallowed scheme happens to be 'data'
+                    elif uri.scheme == 'data':
+                        m = data_content_type.match(uri.path)
+                        if not m:
+                            del attrs[attr]
+                        elif m.group('content_type') not in self.allowed_content_types:
+                            del attrs[attr]
+
+            for attr in self.svg_attr_val_allows_ref:
+                if attr in attrs:
+                    attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
+                                         ' ',
+                                         unescape(attrs[attr]))
+            if (token["name"] in self.svg_allow_local_href and
+                (namespaces['xlink'], 'href') in attrs and re.search(r'^\s*[^#\s].*',
+                                                                     attrs[(namespaces['xlink'], 'href')])):
+                del attrs[(namespaces['xlink'], 'href')]
+            if (None, 'style') in attrs:
+                attrs[(None, 'style')] = self.sanitize_css(attrs[(None, 'style')])
+            token["data"] = attrs
+        return token
+
+    def disallowed_token(self, token):
+        token_type = token["type"]
+        if token_type == "EndTag":
+            token["data"] = "</%s>" % token["name"]
+        elif token["data"]:
+            assert token_type in ("StartTag", "EmptyTag")
+            attrs = []
+            for (ns, name), v in token["data"].items():
+                attrs.append(' %s="%s"' % (name if ns is None else "%s:%s" % (prefixes[ns], name), escape(v)))
+            token["data"] = "<%s%s>" % (token["name"], ''.join(attrs))
+        else:
+            token["data"] = "<%s>" % token["name"]
+        if token.get("selfClosing"):
+            token["data"] = token["data"][:-1] + "/>"
+
+        token["type"] = "Characters"
+
+        del token["name"]
+        return token
+
+    def sanitize_css(self, style):
+        # disallow urls
+        style = re.compile(r'url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
+
+        # gauntlet
+        if not re.match(r"""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
+            return ''
+        if not re.match(r"^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
+            return ''
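+        # e.g. 'width: expression(alert(1))' fails the first gauntlet check,
+        # since parentheses may only contain digits, commas and whitespace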
+
+        clean = []
+        for prop, value in re.findall(r"([-\w]+)\s*:\s*([^:;]*)", style):
+            if not value:
+                continue
+            if prop.lower() in self.allowed_css_properties:
+                clean.append(prop + ': ' + value + ';')
+            elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
+                                                'padding']:
+                for keyword in value.split():
+                    if keyword not in self.allowed_css_keywords and \
+                            not re.match(r"^(#[0-9a-fA-F]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):  # noqa
+                        break
+                else:
+                    clean.append(prop + ': ' + value + ';')
+            elif prop.lower() in self.allowed_svg_properties:
+                clean.append(prop + ': ' + value + ';')
+
+        return ' '.join(clean)
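+
+# Rough sketch of the net effect with the default allow lists:
+# sanitize_css('color: red; behavior: url(evil.htc)') keeps 'color: red;'
+# and drops the rest -- 'behavior' is not an allowed property, and its
+# url(...) value is blanked out before the declarations are parsed.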
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d12584b45995d35110f75af00193fdad0fa10f4
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/filters/whitespace.py
@@ -0,0 +1,38 @@
+from __future__ import absolute_import, division, unicode_literals
+
+import re
+
+from . import base
+from ..constants import rcdataElements, spaceCharacters
+spaceCharacters = "".join(spaceCharacters)
+
+SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
+
+
+class Filter(base.Filter):
+    """Collapses whitespace except in pre, textarea, and script elements"""
+    spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
+
+    def __iter__(self):
+        preserve = 0
+        for token in base.Filter.__iter__(self):
+            type = token["type"]
+            if type == "StartTag" \
+                    and (preserve or token["name"] in self.spacePreserveElements):
+                preserve += 1
+
+            elif type == "EndTag" and preserve:
+                preserve -= 1
+
+            elif not preserve and type == "SpaceCharacters" and token["data"]:
+                # Test on token["data"] above to not introduce spaces where there were not
+                token["data"] = " "
+
+            elif not preserve and type == "Characters":
+                token["data"] = collapse_spaces(token["data"])
+
+            yield token
+
+
+def collapse_spaces(text):
+    return SPACES_REGEX.sub(' ', text)
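+
+# For example, collapse_spaces('a \t\n b') returns 'a b': any run of the
+# characters in spaceCharacters is folded into a single ASCII space.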
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/html5parser.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/html5parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae41a133761c02adcec54d500a66da0a49dd7009
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/html5parser.py
@@ -0,0 +1,2791 @@
+from __future__ import absolute_import, division, unicode_literals
+from pip._vendor.six import with_metaclass, viewkeys
+
+import types
+from collections import OrderedDict
+
+from . import _inputstream
+from . import _tokenizer
+
+from . import treebuilders
+from .treebuilders.base import Marker
+
+from . import _utils
+from .constants import (
+    spaceCharacters, asciiUpper2Lower,
+    specialElements, headingElements, cdataElements, rcdataElements,
+    tokenTypes, tagTokenTypes,
+    namespaces,
+    htmlIntegrationPointElements, mathmlTextIntegrationPointElements,
+    adjustForeignAttributes as adjustForeignAttributesMap,
+    adjustMathMLAttributes, adjustSVGAttributes,
+    E,
+    _ReparseException
+)
+
+
+def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs):
+    """Parse an HTML document as a string or file-like object into a tree
+
+    :arg doc: the document to parse as a string or file-like object
+
+    :arg treebuilder: the treebuilder to use when parsing
+
+    :arg namespaceHTMLElements: whether or not to namespace HTML elements
+
+    :returns: parsed tree
+
+    Example:
+
+    >>> from html5lib.html5parser import parse
+    >>> parse('<html><body><p>This is a doc</p></body></html>')
+    <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0>
+
+    """
+    tb = treebuilders.getTreeBuilder(treebuilder)
+    p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
+    return p.parse(doc, **kwargs)
+
+
+def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs):
+    """Parse an HTML fragment as a string or file-like object into a tree
+
+    :arg doc: the fragment to parse as a string or file-like object
+
+    :arg container: the container context to parse the fragment in
+
+    :arg treebuilder: the treebuilder to use when parsing
+
+    :arg namespaceHTMLElements: whether or not to namespace HTML elements
+
+    :returns: parsed tree
+
+    Example:
+
+    >>> from html5lib.html5parser import parseFragment
+    >>> parseFragment('<b>this is a fragment</b>')
+    <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090>
+
+    """
+    tb = treebuilders.getTreeBuilder(treebuilder)
+    p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
+    return p.parseFragment(doc, container=container, **kwargs)
+
+
+def method_decorator_metaclass(function):
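+    # Builds a metaclass that wraps every plain function in the class body
+    # with ``function``; getPhases() below uses this to wrap Phase methods
+    # with the debug logger when debug is enabled.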
+    class Decorated(type):
+        def __new__(meta, classname, bases, classDict):
+            for attributeName, attribute in classDict.items():
+                if isinstance(attribute, types.FunctionType):
+                    attribute = function(attribute)
+
+                classDict[attributeName] = attribute
+            return type.__new__(meta, classname, bases, classDict)
+    return Decorated
+
+
+class HTMLParser(object):
+    """HTML parser
+
+    Generates a tree structure from a stream of (possibly malformed) HTML.
+
+    """
+
+    def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False):
+        """
+        :arg tree: a treebuilder class controlling the type of tree that will be
+            returned. Built in treebuilders can be accessed through
+            html5lib.treebuilders.getTreeBuilder(treeType)
+
+        :arg strict: raise an exception when a parse error is encountered
+
+        :arg namespaceHTMLElements: whether or not to namespace HTML elements
+
+        :arg debug: whether or not to enable debug mode, which logs the
+            phase and handler used for each token
+
+        Example:
+
+        >>> from html5lib.html5parser import HTMLParser
+        >>> parser = HTMLParser()                     # generates parser with etree builder
+        >>> parser = HTMLParser(treebuilders.getTreeBuilder('lxml'), strict=True)  # lxml builder, strict mode
+
+        """
+
+        # Raise an exception on the first error encountered
+        self.strict = strict
+
+        if tree is None:
+            tree = treebuilders.getTreeBuilder("etree")
+        self.tree = tree(namespaceHTMLElements)
+        self.errors = []
+
+        self.phases = dict([(name, cls(self, self.tree)) for name, cls in
+                            getPhases(debug).items()])
+
+    def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs):
+
+        self.innerHTMLMode = innerHTML
+        self.container = container
+        self.scripting = scripting
+        self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs)
+        self.reset()
+
+        try:
+            self.mainLoop()
+        except _ReparseException:
+            self.reset()
+            self.mainLoop()
+
+    def reset(self):
+        self.tree.reset()
+        self.firstStartTag = False
+        self.errors = []
+        self.log = []  # only used with debug mode
+        # "quirks" / "limited quirks" / "no quirks"
+        self.compatMode = "no quirks"
+
+        if self.innerHTMLMode:
+            self.innerHTML = self.container.lower()
+
+            if self.innerHTML in cdataElements:
+                self.tokenizer.state = self.tokenizer.rcdataState
+            elif self.innerHTML in rcdataElements:
+                self.tokenizer.state = self.tokenizer.rawtextState
+            elif self.innerHTML == 'plaintext':
+                self.tokenizer.state = self.tokenizer.plaintextState
+            else:
+                # state already is data state
+                # self.tokenizer.state = self.tokenizer.dataState
+                pass
+            self.phase = self.phases["beforeHtml"]
+            self.phase.insertHtmlElement()
+            self.resetInsertionMode()
+        else:
+            self.innerHTML = False  # pylint:disable=redefined-variable-type
+            self.phase = self.phases["initial"]
+
+        self.lastPhase = None
+
+        self.beforeRCDataPhase = None
+
+        self.framesetOK = True
+
+    @property
+    def documentEncoding(self):
+        """Name of the character encoding that was used to decode the input stream, or
+        :obj:`None` if that is not determined yet
+
+        """
+        if not hasattr(self, 'tokenizer'):
+            return None
+        return self.tokenizer.stream.charEncoding[0].name
+
+    def isHTMLIntegrationPoint(self, element):
+        if (element.name == "annotation-xml" and
+                element.namespace == namespaces["mathml"]):
+            return ("encoding" in element.attributes and
+                    element.attributes["encoding"].translate(
+                        asciiUpper2Lower) in
+                    ("text/html", "application/xhtml+xml"))
+        else:
+            return (element.namespace, element.name) in htmlIntegrationPointElements
+
+    def isMathMLTextIntegrationPoint(self, element):
+        return (element.namespace, element.name) in mathmlTextIntegrationPointElements
+
+    def mainLoop(self):
+        CharactersToken = tokenTypes["Characters"]
+        SpaceCharactersToken = tokenTypes["SpaceCharacters"]
+        StartTagToken = tokenTypes["StartTag"]
+        EndTagToken = tokenTypes["EndTag"]
+        CommentToken = tokenTypes["Comment"]
+        DoctypeToken = tokenTypes["Doctype"]
+        ParseErrorToken = tokenTypes["ParseError"]
+
+        for token in self.normalizedTokens():
+            prev_token = None
+            new_token = token
+            while new_token is not None:
+                prev_token = new_token
+                currentNode = self.tree.openElements[-1] if self.tree.openElements else None
+                currentNodeNamespace = currentNode.namespace if currentNode else None
+                currentNodeName = currentNode.name if currentNode else None
+
+                type = new_token["type"]
+
+                if type == ParseErrorToken:
+                    self.parseError(new_token["data"], new_token.get("datavars", {}))
+                    new_token = None
+                else:
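+                    # Tree-construction dispatcher: use the current phase
+                    # unless the current node puts us in foreign (MathML/SVG)
+                    # content, per the HTML spec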
+                    if (len(self.tree.openElements) == 0 or
+                        currentNodeNamespace == self.tree.defaultNamespace or
+                        (self.isMathMLTextIntegrationPoint(currentNode) and
+                         ((type == StartTagToken and
+                           token["name"] not in frozenset(["mglyph", "malignmark"])) or
+                          type in (CharactersToken, SpaceCharactersToken))) or
+                        (currentNodeNamespace == namespaces["mathml"] and
+                         currentNodeName == "annotation-xml" and
+                         type == StartTagToken and
+                         token["name"] == "svg") or
+                        (self.isHTMLIntegrationPoint(currentNode) and
+                         type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
+                        phase = self.phase
+                    else:
+                        phase = self.phases["inForeignContent"]
+
+                    if type == CharactersToken:
+                        new_token = phase.processCharacters(new_token)
+                    elif type == SpaceCharactersToken:
+                        new_token = phase.processSpaceCharacters(new_token)
+                    elif type == StartTagToken:
+                        new_token = phase.processStartTag(new_token)
+                    elif type == EndTagToken:
+                        new_token = phase.processEndTag(new_token)
+                    elif type == CommentToken:
+                        new_token = phase.processComment(new_token)
+                    elif type == DoctypeToken:
+                        new_token = phase.processDoctype(new_token)
+
+            if (type == StartTagToken and prev_token["selfClosing"] and
+                    not prev_token["selfClosingAcknowledged"]):
+                self.parseError("non-void-element-with-trailing-solidus",
+                                {"name": prev_token["name"]})
+
+        # When the loop finishes it's EOF
+        reprocess = True
+        phases = []
+        while reprocess:
+            phases.append(self.phase)
+            reprocess = self.phase.processEOF()
+            if reprocess:
+                assert self.phase not in phases
+
+    def normalizedTokens(self):
+        for token in self.tokenizer:
+            yield self.normalizeToken(token)
+
+    def parse(self, stream, *args, **kwargs):
+        """Parse a HTML document into a well-formed tree
+
+        :arg stream: a file-like object or string containing the HTML to be parsed
+
+            The optional encoding parameter must be a string that indicates
+            the encoding.  If specified, that encoding will be used,
+            regardless of any BOM or later declaration (such as in a meta
+            element).
+
+        :arg scripting: treat noscript elements as if JavaScript was turned on
+
+        :returns: parsed tree
+
+        Example:
+
+        >>> from html5lib.html5parser import HTMLParser
+        >>> parser = HTMLParser()
+        >>> parser.parse('<html><body><p>This is a doc</p></body></html>')
+        <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0>
+
+        """
+        self._parse(stream, False, None, *args, **kwargs)
+        return self.tree.getDocument()
+
+    def parseFragment(self, stream, *args, **kwargs):
+        """Parse a HTML fragment into a well-formed tree fragment
+
+        :arg container: name of the element we're setting the innerHTML
+            property if set to None, default to 'div'
+
+        :arg stream: a file-like object or string containing the HTML to be parsed
+
+            The optional encoding parameter must be a string that indicates
+            the encoding.  If specified, that encoding will be used,
+            regardless of any BOM or later declaration (such as in a meta
+            element)
+
+        :arg scripting: treat noscript elements as if JavaScript was turned on
+
+        :returns: parsed tree
+
+        Example:
+
+        >>> from html5lib.html5parser import HTMLParser
+        >>> parser = HTMLParser()
+        >>> parser.parseFragment('<b>this is a fragment</b>')
+        <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090>
+
+        """
+        self._parse(stream, True, *args, **kwargs)
+        return self.tree.getFragment()
+
+    def parseError(self, errorcode="XXX-undefined-error", datavars=None):
+        # XXX The idea is to make errorcode mandatory.
+        if datavars is None:
+            datavars = {}
+        self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
+        if self.strict:
+            raise ParseError(E[errorcode] % datavars)
+
+    def normalizeToken(self, token):
+        # HTML5 specific normalizations to the token stream
+        if token["type"] == tokenTypes["StartTag"]:
+            raw = token["data"]
+            token["data"] = OrderedDict(raw)
+            if len(raw) > len(token["data"]):
+                # we had some duplicated attribute, fix so first wins
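+                # e.g. [('id', 'a'), ('id', 'b')] must resolve to id="a";
+                # OrderedDict(raw) kept the last value, so re-apply in reverse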
+                token["data"].update(raw[::-1])
+
+        return token
+
+    def adjustMathMLAttributes(self, token):
+        adjust_attributes(token, adjustMathMLAttributes)
+
+    def adjustSVGAttributes(self, token):
+        adjust_attributes(token, adjustSVGAttributes)
+
+    def adjustForeignAttributes(self, token):
+        adjust_attributes(token, adjustForeignAttributesMap)
+
+    def reparseTokenNormal(self, token):
+        # pylint:disable=unused-argument
+        self.parser.phase()
+
+    def resetInsertionMode(self):
+        # The name of this method is mostly historical. (It's also used in the
+        # specification.)
+        last = False
+        newModes = {
+            "select": "inSelect",
+            "td": "inCell",
+            "th": "inCell",
+            "tr": "inRow",
+            "tbody": "inTableBody",
+            "thead": "inTableBody",
+            "tfoot": "inTableBody",
+            "caption": "inCaption",
+            "colgroup": "inColumnGroup",
+            "table": "inTable",
+            "head": "inBody",
+            "body": "inBody",
+            "frameset": "inFrameset",
+            "html": "beforeHead"
+        }
+        for node in self.tree.openElements[::-1]:
+            nodeName = node.name
+            new_phase = None
+            if node == self.tree.openElements[0]:
+                assert self.innerHTML
+                last = True
+                nodeName = self.innerHTML
+            # Check for conditions that should only happen in the innerHTML
+            # case
+            if nodeName in ("select", "colgroup", "head", "html"):
+                assert self.innerHTML
+
+            if not last and node.namespace != self.tree.defaultNamespace:
+                continue
+
+            if nodeName in newModes:
+                new_phase = self.phases[newModes[nodeName]]
+                break
+            elif last:
+                new_phase = self.phases["inBody"]
+                break
+
+        self.phase = new_phase
+
+    def parseRCDataRawtext(self, token, contentType):
+        # Generic RCDATA/RAWTEXT Parsing algorithm
+        assert contentType in ("RAWTEXT", "RCDATA")
+
+        self.tree.insertElement(token)
+
+        if contentType == "RAWTEXT":
+            self.tokenizer.state = self.tokenizer.rawtextState
+        else:
+            self.tokenizer.state = self.tokenizer.rcdataState
+
+        self.originalPhase = self.phase
+
+        self.phase = self.phases["text"]
+
+
+@_utils.memoize
+def getPhases(debug):
+    def log(function):
+        """Logger that records which phase processes each token"""
+        type_names = dict((value, key) for key, value in
+                          tokenTypes.items())
+
+        def wrapped(self, *args, **kwargs):
+            if function.__name__.startswith("process") and len(args) > 0:
+                token = args[0]
+                info = {"type": type_names[token['type']]}
+                if token['type'] in tagTokenTypes:
+                    info["name"] = token['name']
+
+                self.parser.log.append((self.parser.tokenizer.state.__name__,
+                                        self.parser.phase.__class__.__name__,
+                                        self.__class__.__name__,
+                                        function.__name__,
+                                        info))
+                return function(self, *args, **kwargs)
+            else:
+                return function(self, *args, **kwargs)
+        return wrapped
+
+    def getMetaclass(use_metaclass, metaclass_func):
+        if use_metaclass:
+            return method_decorator_metaclass(metaclass_func)
+        else:
+            return type
+
+    # pylint:disable=unused-argument
+    class Phase(with_metaclass(getMetaclass(debug, log))):
+        """Base class for helper object that implements each phase of processing
+        """
+
+        def __init__(self, parser, tree):
+            self.parser = parser
+            self.tree = tree
+
+        def processEOF(self):
+            raise NotImplementedError
+
+        def processComment(self, token):
+            # For most phases the following is correct. Where it's not it will be
+            # overridden.
+            self.tree.insertComment(token, self.tree.openElements[-1])
+
+        def processDoctype(self, token):
+            self.parser.parseError("unexpected-doctype")
+
+        def processCharacters(self, token):
+            self.tree.insertText(token["data"])
+
+        def processSpaceCharacters(self, token):
+            self.tree.insertText(token["data"])
+
+        def processStartTag(self, token):
+            return self.startTagHandler[token["name"]](token)
+
+        def startTagHtml(self, token):
+            if not self.parser.firstStartTag and token["name"] == "html":
+                self.parser.parseError("non-html-root")
+            # XXX Need a check here to see if the first start tag token emitted is
+            # this token... If it's not, invoke self.parser.parseError().
+            for attr, value in token["data"].items():
+                if attr not in self.tree.openElements[0].attributes:
+                    self.tree.openElements[0].attributes[attr] = value
+            self.parser.firstStartTag = False
+
+        def processEndTag(self, token):
+            return self.endTagHandler[token["name"]](token)
+
+    class InitialPhase(Phase):
+        def processSpaceCharacters(self, token):
+            pass
+
+        def processComment(self, token):
+            self.tree.insertComment(token, self.tree.document)
+
+        def processDoctype(self, token):
+            name = token["name"]
+            publicId = token["publicId"]
+            systemId = token["systemId"]
+            correct = token["correct"]
+
+            if (name != "html" or publicId is not None or
+                    systemId is not None and systemId != "about:legacy-compat"):
+                self.parser.parseError("unknown-doctype")
+
+            if publicId is None:
+                publicId = ""
+
+            self.tree.insertDoctype(token)
+
+            if publicId != "":
+                publicId = publicId.translate(asciiUpper2Lower)
+
+            if (not correct or token["name"] != "html" or
+                    publicId.startswith(
+                        ("+//silmaril//dtd html pro v0r11 19970101//",
+                         "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
+                         "-//as//dtd html 3.0 aswedit + extensions//",
+                         "-//ietf//dtd html 2.0 level 1//",
+                         "-//ietf//dtd html 2.0 level 2//",
+                         "-//ietf//dtd html 2.0 strict level 1//",
+                         "-//ietf//dtd html 2.0 strict level 2//",
+                         "-//ietf//dtd html 2.0 strict//",
+                         "-//ietf//dtd html 2.0//",
+                         "-//ietf//dtd html 2.1e//",
+                         "-//ietf//dtd html 3.0//",
+                         "-//ietf//dtd html 3.2 final//",
+                         "-//ietf//dtd html 3.2//",
+                         "-//ietf//dtd html 3//",
+                         "-//ietf//dtd html level 0//",
+                         "-//ietf//dtd html level 1//",
+                         "-//ietf//dtd html level 2//",
+                         "-//ietf//dtd html level 3//",
+                         "-//ietf//dtd html strict level 0//",
+                         "-//ietf//dtd html strict level 1//",
+                         "-//ietf//dtd html strict level 2//",
+                         "-//ietf//dtd html strict level 3//",
+                         "-//ietf//dtd html strict//",
+                         "-//ietf//dtd html//",
+                         "-//metrius//dtd metrius presentational//",
+                         "-//microsoft//dtd internet explorer 2.0 html strict//",
+                         "-//microsoft//dtd internet explorer 2.0 html//",
+                         "-//microsoft//dtd internet explorer 2.0 tables//",
+                         "-//microsoft//dtd internet explorer 3.0 html strict//",
+                         "-//microsoft//dtd internet explorer 3.0 html//",
+                         "-//microsoft//dtd internet explorer 3.0 tables//",
+                         "-//netscape comm. corp.//dtd html//",
+                         "-//netscape comm. corp.//dtd strict html//",
+                         "-//o'reilly and associates//dtd html 2.0//",
+                         "-//o'reilly and associates//dtd html extended 1.0//",
+                         "-//o'reilly and associates//dtd html extended relaxed 1.0//",
+                         "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
+                         "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
+                         "-//spyglass//dtd html 2.0 extended//",
+                         "-//sq//dtd html 2.0 hotmetal + extensions//",
+                         "-//sun microsystems corp.//dtd hotjava html//",
+                         "-//sun microsystems corp.//dtd hotjava strict html//",
+                         "-//w3c//dtd html 3 1995-03-24//",
+                         "-//w3c//dtd html 3.2 draft//",
+                         "-//w3c//dtd html 3.2 final//",
+                         "-//w3c//dtd html 3.2//",
+                         "-//w3c//dtd html 3.2s draft//",
+                         "-//w3c//dtd html 4.0 frameset//",
+                         "-//w3c//dtd html 4.0 transitional//",
+                         "-//w3c//dtd html experimental 19960712//",
+                         "-//w3c//dtd html experimental 970421//",
+                         "-//w3c//dtd w3 html//",
+                         "-//w3o//dtd w3 html 3.0//",
+                         "-//webtechs//dtd mozilla html 2.0//",
+                         "-//webtechs//dtd mozilla html//")) or
+                    publicId in ("-//w3o//dtd w3 html strict 3.0//en//",
+                                 "-/w3c/dtd html 4.0 transitional/en",
+                                 "html") or
+                    publicId.startswith(
+                        ("-//w3c//dtd html 4.01 frameset//",
+                         "-//w3c//dtd html 4.01 transitional//")) and
+                    systemId is None or
+                    systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
+                self.parser.compatMode = "quirks"
+            elif (publicId.startswith(
+                    ("-//w3c//dtd xhtml 1.0 frameset//",
+                     "-//w3c//dtd xhtml 1.0 transitional//")) or
+                  publicId.startswith(
+                      ("-//w3c//dtd html 4.01 frameset//",
+                       "-//w3c//dtd html 4.01 transitional//")) and
+                  systemId is not None):
+                self.parser.compatMode = "limited quirks"
+
+            self.parser.phase = self.parser.phases["beforeHtml"]
+
+        def anythingElse(self):
+            self.parser.compatMode = "quirks"
+            self.parser.phase = self.parser.phases["beforeHtml"]
+
+        def processCharacters(self, token):
+            self.parser.parseError("expected-doctype-but-got-chars")
+            self.anythingElse()
+            return token
+
+        def processStartTag(self, token):
+            self.parser.parseError("expected-doctype-but-got-start-tag",
+                                   {"name": token["name"]})
+            self.anythingElse()
+            return token
+
+        def processEndTag(self, token):
+            self.parser.parseError("expected-doctype-but-got-end-tag",
+                                   {"name": token["name"]})
+            self.anythingElse()
+            return token
+
+        def processEOF(self):
+            self.parser.parseError("expected-doctype-but-got-eof")
+            self.anythingElse()
+            return True
+
+    class BeforeHtmlPhase(Phase):
+        # helper methods
+        def insertHtmlElement(self):
+            self.tree.insertRoot(impliedTagToken("html", "StartTag"))
+            self.parser.phase = self.parser.phases["beforeHead"]
+
+        # other
+        def processEOF(self):
+            self.insertHtmlElement()
+            return True
+
+        def processComment(self, token):
+            self.tree.insertComment(token, self.tree.document)
+
+        def processSpaceCharacters(self, token):
+            pass
+
+        def processCharacters(self, token):
+            self.insertHtmlElement()
+            return token
+
+        def processStartTag(self, token):
+            if token["name"] == "html":
+                self.parser.firstStartTag = True
+            self.insertHtmlElement()
+            return token
+
+        def processEndTag(self, token):
+            if token["name"] not in ("head", "body", "html", "br"):
+                self.parser.parseError("unexpected-end-tag-before-html",
+                                       {"name": token["name"]})
+            else:
+                self.insertHtmlElement()
+                return token
+
+    class BeforeHeadPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("head", self.startTagHead)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                (("head", "body", "html", "br"), self.endTagImplyHead)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def processEOF(self):
+            self.startTagHead(impliedTagToken("head", "StartTag"))
+            return True
+
+        def processSpaceCharacters(self, token):
+            pass
+
+        def processCharacters(self, token):
+            self.startTagHead(impliedTagToken("head", "StartTag"))
+            return token
+
+        def startTagHtml(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def startTagHead(self, token):
+            self.tree.insertElement(token)
+            self.tree.headPointer = self.tree.openElements[-1]
+            self.parser.phase = self.parser.phases["inHead"]
+
+        def startTagOther(self, token):
+            self.startTagHead(impliedTagToken("head", "StartTag"))
+            return token
+
+        def endTagImplyHead(self, token):
+            self.startTagHead(impliedTagToken("head", "StartTag"))
+            return token
+
+        def endTagOther(self, token):
+            self.parser.parseError("end-tag-after-implied-root",
+                                   {"name": token["name"]})
+
+    class InHeadPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("title", self.startTagTitle),
+                (("noframes", "style"), self.startTagNoFramesStyle),
+                ("noscript", self.startTagNoscript),
+                ("script", self.startTagScript),
+                (("base", "basefont", "bgsound", "command", "link"),
+                 self.startTagBaseLinkCommand),
+                ("meta", self.startTagMeta),
+                ("head", self.startTagHead)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("head", self.endTagHead),
+                (("br", "html", "body"), self.endTagHtmlBodyBr)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        # the real thing
+        def processEOF(self):
+            self.anythingElse()
+            return True
+
+        def processCharacters(self, token):
+            self.anythingElse()
+            return token
+
+        def startTagHtml(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def startTagHead(self, token):
+            self.parser.parseError("two-heads-are-not-better-than-one")
+
+        def startTagBaseLinkCommand(self, token):
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
+
+        def startTagMeta(self, token):
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
+
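+            # e.g. <meta charset="utf-8"> (or the http-equiv Content-Type
+            # form below) may change the stream's decoding while the sniffed
+            # encoding is still tentative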
+            attributes = token["data"]
+            if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
+                if "charset" in attributes:
+                    self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
+                elif ("content" in attributes and
+                      "http-equiv" in attributes and
+                      attributes["http-equiv"].lower() == "content-type"):
+                    # Encoding it as UTF-8 here is a hack, as really we should pass
+                    # the abstract Unicode string, and just use the
+                    # ContentAttrParser on that, but using UTF-8 allows all chars
+                    # to be encoded and, as an ASCII superset, works.
+                    data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
+                    parser = _inputstream.ContentAttrParser(data)
+                    codec = parser.parse()
+                    self.parser.tokenizer.stream.changeEncoding(codec)
+
+        def startTagTitle(self, token):
+            self.parser.parseRCDataRawtext(token, "RCDATA")
+
+        def startTagNoFramesStyle(self, token):
+            # Need to decide whether to implement the scripting-disabled case
+            self.parser.parseRCDataRawtext(token, "RAWTEXT")
+
+        def startTagNoscript(self, token):
+            if self.parser.scripting:
+                self.parser.parseRCDataRawtext(token, "RAWTEXT")
+            else:
+                self.tree.insertElement(token)
+                self.parser.phase = self.parser.phases["inHeadNoscript"]
+
+        def startTagScript(self, token):
+            self.tree.insertElement(token)
+            self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
+            self.parser.originalPhase = self.parser.phase
+            self.parser.phase = self.parser.phases["text"]
+
+        def startTagOther(self, token):
+            self.anythingElse()
+            return token
+
+        def endTagHead(self, token):
+            node = self.parser.tree.openElements.pop()
+            assert node.name == "head", "Expected head got %s" % node.name
+            self.parser.phase = self.parser.phases["afterHead"]
+
+        def endTagHtmlBodyBr(self, token):
+            self.anythingElse()
+            return token
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def anythingElse(self):
+            self.endTagHead(impliedTagToken("head"))
+
+    class InHeadNoscriptPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                (("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand),
+                (("head", "noscript"), self.startTagHeadNoscript),
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("noscript", self.endTagNoscript),
+                ("br", self.endTagBr),
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def processEOF(self):
+            self.parser.parseError("eof-in-head-noscript")
+            self.anythingElse()
+            return True
+
+        def processComment(self, token):
+            return self.parser.phases["inHead"].processComment(token)
+
+        def processCharacters(self, token):
+            self.parser.parseError("char-in-head-noscript")
+            self.anythingElse()
+            return token
+
+        def processSpaceCharacters(self, token):
+            return self.parser.phases["inHead"].processSpaceCharacters(token)
+
+        def startTagHtml(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def startTagBaseLinkCommand(self, token):
+            return self.parser.phases["inHead"].processStartTag(token)
+
+        def startTagHeadNoscript(self, token):
+            self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
+
+        def startTagOther(self, token):
+            self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
+            self.anythingElse()
+            return token
+
+        def endTagNoscript(self, token):
+            node = self.parser.tree.openElements.pop()
+            assert node.name == "noscript", "Expected noscript got %s" % node.name
+            self.parser.phase = self.parser.phases["inHead"]
+
+        def endTagBr(self, token):
+            self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
+            self.anythingElse()
+            return token
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def anythingElse(self):
+            # Caller must raise parse error first!
+            self.endTagNoscript(impliedTagToken("noscript"))
+
+    class AfterHeadPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("body", self.startTagBody),
+                ("frameset", self.startTagFrameset),
+                (("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
+                  "style", "title"),
+                 self.startTagFromHead),
+                ("head", self.startTagHead)
+            ])
+            self.startTagHandler.default = self.startTagOther
+            self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"),
+                                                           self.endTagHtmlBodyBr)])
+            self.endTagHandler.default = self.endTagOther
+
+        def processEOF(self):
+            self.anythingElse()
+            return True
+
+        def processCharacters(self, token):
+            self.anythingElse()
+            return token
+
+        def startTagHtml(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def startTagBody(self, token):
+            self.parser.framesetOK = False
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inBody"]
+
+        def startTagFrameset(self, token):
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inFrameset"]
+
+        def startTagFromHead(self, token):
+            self.parser.parseError("unexpected-start-tag-out-of-my-head",
+                                   {"name": token["name"]})
+            self.tree.openElements.append(self.tree.headPointer)
+            self.parser.phases["inHead"].processStartTag(token)
+            for node in self.tree.openElements[::-1]:
+                if node.name == "head":
+                    self.tree.openElements.remove(node)
+                    break
+
+        def startTagHead(self, token):
+            self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
+
+        def startTagOther(self, token):
+            self.anythingElse()
+            return token
+
+        def endTagHtmlBodyBr(self, token):
+            self.anythingElse()
+            return token
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def anythingElse(self):
+            self.tree.insertElement(impliedTagToken("body", "StartTag"))
+            self.parser.phase = self.parser.phases["inBody"]
+            self.parser.framesetOK = True
+
+    class InBodyPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
+        # the really-really-really-very crazy mode
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            # Set this to the default handler
+            self.processSpaceCharacters = self.processSpaceCharactersNonPre
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                (("base", "basefont", "bgsound", "command", "link", "meta",
+                  "script", "style", "title"),
+                 self.startTagProcessInHead),
+                ("body", self.startTagBody),
+                ("frameset", self.startTagFrameset),
+                (("address", "article", "aside", "blockquote", "center", "details",
+                  "dir", "div", "dl", "fieldset", "figcaption", "figure",
+                  "footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
+                  "section", "summary", "ul"),
+                 self.startTagCloseP),
+                (headingElements, self.startTagHeading),
+                (("pre", "listing"), self.startTagPreListing),
+                ("form", self.startTagForm),
+                (("li", "dd", "dt"), self.startTagListItem),
+                ("plaintext", self.startTagPlaintext),
+                ("a", self.startTagA),
+                (("b", "big", "code", "em", "font", "i", "s", "small", "strike",
+                  "strong", "tt", "u"), self.startTagFormatting),
+                ("nobr", self.startTagNobr),
+                ("button", self.startTagButton),
+                (("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
+                ("xmp", self.startTagXmp),
+                ("table", self.startTagTable),
+                (("area", "br", "embed", "img", "keygen", "wbr"),
+                 self.startTagVoidFormatting),
+                (("param", "source", "track"), self.startTagParamSource),
+                ("input", self.startTagInput),
+                ("hr", self.startTagHr),
+                ("image", self.startTagImage),
+                ("isindex", self.startTagIsIndex),
+                ("textarea", self.startTagTextarea),
+                ("iframe", self.startTagIFrame),
+                ("noscript", self.startTagNoscript),
+                (("noembed", "noframes"), self.startTagRawtext),
+                ("select", self.startTagSelect),
+                (("rp", "rt"), self.startTagRpRt),
+                (("option", "optgroup"), self.startTagOpt),
+                (("math"), self.startTagMath),
+                (("svg"), self.startTagSvg),
+                (("caption", "col", "colgroup", "frame", "head",
+                  "tbody", "td", "tfoot", "th", "thead",
+                  "tr"), self.startTagMisplaced)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("body", self.endTagBody),
+                ("html", self.endTagHtml),
+                (("address", "article", "aside", "blockquote", "button", "center",
+                  "details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
+                  "footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
+                  "section", "summary", "ul"), self.endTagBlock),
+                ("form", self.endTagForm),
+                ("p", self.endTagP),
+                (("dd", "dt", "li"), self.endTagListItem),
+                (headingElements, self.endTagHeading),
+                (("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
+                  "strike", "strong", "tt", "u"), self.endTagFormatting),
+                (("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
+                ("br", self.endTagBr),
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def isMatchingFormattingElement(self, node1, node2):
+            return (node1.name == node2.name and
+                    node1.namespace == node2.namespace and
+                    node1.attributes == node2.attributes)
+
+        # helper
+        def addFormattingElement(self, token):
+            self.tree.insertElement(token)
+            element = self.tree.openElements[-1]
+
+            matchingElements = []
+            for node in self.tree.activeFormattingElements[::-1]:
+                if node is Marker:
+                    break
+                elif self.isMatchingFormattingElement(node, element):
+                    matchingElements.append(node)
+
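+            # the spec's "Noah's Ark" clause: at most three matching
+            # formatting elements may sit between markers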
+            assert len(matchingElements) <= 3
+            if len(matchingElements) == 3:
+                self.tree.activeFormattingElements.remove(matchingElements[-1])
+            self.tree.activeFormattingElements.append(element)
+
+        # the real deal
+        def processEOF(self):
+            allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
+                                          "tfoot", "th", "thead", "tr", "body",
+                                          "html"))
+            for node in self.tree.openElements[::-1]:
+                if node.name not in allowed_elements:
+                    self.parser.parseError("expected-closing-tag-but-got-eof")
+                    break
+            # Stop parsing
+
+        def processSpaceCharactersDropNewline(self, token):
+            # Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
+            # want to drop leading newlines
+            data = token["data"]
+            self.processSpaceCharacters = self.processSpaceCharactersNonPre
+            if (data.startswith("\n") and
+                self.tree.openElements[-1].name in ("pre", "listing", "textarea") and
+                    not self.tree.openElements[-1].hasContent()):
+                data = data[1:]
+            if data:
+                self.tree.reconstructActiveFormattingElements()
+                self.tree.insertText(data)
+
+        def processCharacters(self, token):
+            if token["data"] == "\u0000":
+                # The tokenizer should always emit null on its own
+                return
+            self.tree.reconstructActiveFormattingElements()
+            self.tree.insertText(token["data"])
+            # This must be bad for performance
+            if (self.parser.framesetOK and
+                any([char not in spaceCharacters
+                     for char in token["data"]])):
+                self.parser.framesetOK = False
+
+        def processSpaceCharactersNonPre(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            self.tree.insertText(token["data"])
+
+        def startTagProcessInHead(self, token):
+            return self.parser.phases["inHead"].processStartTag(token)
+
+        def startTagBody(self, token):
+            self.parser.parseError("unexpected-start-tag", {"name": "body"})
+            if (len(self.tree.openElements) == 1 or
+                    self.tree.openElements[1].name != "body"):
+                assert self.parser.innerHTML
+            else:
+                self.parser.framesetOK = False
+                for attr, value in token["data"].items():
+                    if attr not in self.tree.openElements[1].attributes:
+                        self.tree.openElements[1].attributes[attr] = value
+
+        def startTagFrameset(self, token):
+            self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
+            if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
+                assert self.parser.innerHTML
+            elif not self.parser.framesetOK:
+                pass
+            else:
+                if self.tree.openElements[1].parent:
+                    self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
+                while self.tree.openElements[-1].name != "html":
+                    self.tree.openElements.pop()
+                self.tree.insertElement(token)
+                self.parser.phase = self.parser.phases["inFrameset"]
+
+        def startTagCloseP(self, token):
+            if self.tree.elementInScope("p", variant="button"):
+                self.endTagP(impliedTagToken("p"))
+            self.tree.insertElement(token)
+
+        def startTagPreListing(self, token):
+            if self.tree.elementInScope("p", variant="button"):
+                self.endTagP(impliedTagToken("p"))
+            self.tree.insertElement(token)
+            self.parser.framesetOK = False
+            self.processSpaceCharacters = self.processSpaceCharactersDropNewline
+
+        def startTagForm(self, token):
+            if self.tree.formPointer:
+                self.parser.parseError("unexpected-start-tag", {"name": "form"})
+            else:
+                if self.tree.elementInScope("p", variant="button"):
+                    self.endTagP(impliedTagToken("p"))
+                self.tree.insertElement(token)
+                self.tree.formPointer = self.tree.openElements[-1]
+
+        def startTagListItem(self, token):
+            self.parser.framesetOK = False
+
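+            # Walk up the stack: close any open li (or dt/dd) first, but
+            # stop at special elements other than address, div, and p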
+            stopNamesMap = {"li": ["li"],
+                            "dt": ["dt", "dd"],
+                            "dd": ["dt", "dd"]}
+            stopNames = stopNamesMap[token["name"]]
+            for node in reversed(self.tree.openElements):
+                if node.name in stopNames:
+                    self.parser.phase.processEndTag(
+                        impliedTagToken(node.name, "EndTag"))
+                    break
+                if (node.nameTuple in specialElements and
+                        node.name not in ("address", "div", "p")):
+                    break
+
+            if self.tree.elementInScope("p", variant="button"):
+                self.parser.phase.processEndTag(
+                    impliedTagToken("p", "EndTag"))
+
+            self.tree.insertElement(token)
+
+        def startTagPlaintext(self, token):
+            if self.tree.elementInScope("p", variant="button"):
+                self.endTagP(impliedTagToken("p"))
+            self.tree.insertElement(token)
+            self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
+
+        def startTagHeading(self, token):
+            if self.tree.elementInScope("p", variant="button"):
+                self.endTagP(impliedTagToken("p"))
+            if self.tree.openElements[-1].name in headingElements:
+                self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
+                self.tree.openElements.pop()
+            self.tree.insertElement(token)
+
+        def startTagA(self, token):
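+            # Per the spec, an <a> while another <a> is still in the list
+            # of active formatting elements implies </a> first, handled via
+            # the adoption agency algorithm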
+            afeAElement = self.tree.elementInActiveFormattingElements("a")
+            if afeAElement:
+                self.parser.parseError("unexpected-start-tag-implies-end-tag",
+                                       {"startName": "a", "endName": "a"})
+                self.endTagFormatting(impliedTagToken("a"))
+                if afeAElement in self.tree.openElements:
+                    self.tree.openElements.remove(afeAElement)
+                if afeAElement in self.tree.activeFormattingElements:
+                    self.tree.activeFormattingElements.remove(afeAElement)
+            self.tree.reconstructActiveFormattingElements()
+            self.addFormattingElement(token)
+
+        def startTagFormatting(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            self.addFormattingElement(token)
+
+        def startTagNobr(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            if self.tree.elementInScope("nobr"):
+                self.parser.parseError("unexpected-start-tag-implies-end-tag",
+                                       {"startName": "nobr", "endName": "nobr"})
+                self.processEndTag(impliedTagToken("nobr"))
+                # XXX Need tests that trigger the following
+                self.tree.reconstructActiveFormattingElements()
+            self.addFormattingElement(token)
+
+        def startTagButton(self, token):
+            if self.tree.elementInScope("button"):
+                self.parser.parseError("unexpected-start-tag-implies-end-tag",
+                                       {"startName": "button", "endName": "button"})
+                self.processEndTag(impliedTagToken("button"))
+                return token
+            else:
+                self.tree.reconstructActiveFormattingElements()
+                self.tree.insertElement(token)
+                self.parser.framesetOK = False
+
+        def startTagAppletMarqueeObject(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            self.tree.insertElement(token)
+            self.tree.activeFormattingElements.append(Marker)
+            self.parser.framesetOK = False
+
+        def startTagXmp(self, token):
+            if self.tree.elementInScope("p", variant="button"):
+                self.endTagP(impliedTagToken("p"))
+            self.tree.reconstructActiveFormattingElements()
+            self.parser.framesetOK = False
+            self.parser.parseRCDataRawtext(token, "RAWTEXT")
+
+        def startTagTable(self, token):
+            if self.parser.compatMode != "quirks":
+                if self.tree.elementInScope("p", variant="button"):
+                    self.processEndTag(impliedTagToken("p"))
+            self.tree.insertElement(token)
+            self.parser.framesetOK = False
+            self.parser.phase = self.parser.phases["inTable"]
+
+        def startTagVoidFormatting(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
+            self.parser.framesetOK = False
+
+        def startTagInput(self, token):
+            framesetOK = self.parser.framesetOK
+            self.startTagVoidFormatting(token)
+            if ("type" in token["data"] and
+                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
+                # input type=hidden doesn't change framesetOK
+                self.parser.framesetOK = framesetOK
+
+        def startTagParamSource(self, token):
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
+
+        def startTagHr(self, token):
+            if self.tree.elementInScope("p", variant="button"):
+                self.endTagP(impliedTagToken("p"))
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
+            self.parser.framesetOK = False
+
+        def startTagImage(self, token):
+            # The spec really does require treating <image> as <img>
+            self.parser.parseError("unexpected-start-tag-treated-as",
+                                   {"originalName": "image", "newName": "img"})
+            self.processStartTag(impliedTagToken("img", "StartTag",
+                                                 attributes=token["data"],
+                                                 selfClosing=token["selfClosing"]))
+
+        def startTagIsIndex(self, token):
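+            # <isindex> is rewritten as an equivalent form:
+            # <form><hr><label>prompt<input name=isindex></label><hr></form>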
+            self.parser.parseError("deprecated-tag", {"name": "isindex"})
+            if self.tree.formPointer:
+                return
+            form_attrs = {}
+            if "action" in token["data"]:
+                form_attrs["action"] = token["data"]["action"]
+            self.processStartTag(impliedTagToken("form", "StartTag",
+                                                 attributes=form_attrs))
+            self.processStartTag(impliedTagToken("hr", "StartTag"))
+            self.processStartTag(impliedTagToken("label", "StartTag"))
+            # XXX Localization ...
+            if "prompt" in token["data"]:
+                prompt = token["data"]["prompt"]
+            else:
+                prompt = "This is a searchable index. Enter search keywords: "
+            self.processCharacters(
+                {"type": tokenTypes["Characters"], "data": prompt})
+            attributes = token["data"].copy()
+            if "action" in attributes:
+                del attributes["action"]
+            if "prompt" in attributes:
+                del attributes["prompt"]
+            attributes["name"] = "isindex"
+            self.processStartTag(impliedTagToken("input", "StartTag",
+                                                 attributes=attributes,
+                                                 selfClosing=token["selfClosing"]))
+            self.processEndTag(impliedTagToken("label"))
+            self.processStartTag(impliedTagToken("hr", "StartTag"))
+            self.processEndTag(impliedTagToken("form"))
+
+        def startTagTextarea(self, token):
+            self.tree.insertElement(token)
+            self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
+            self.processSpaceCharacters = self.processSpaceCharactersDropNewline
+            self.parser.framesetOK = False
+
+        def startTagIFrame(self, token):
+            self.parser.framesetOK = False
+            self.startTagRawtext(token)
+
+        def startTagNoscript(self, token):
+            if self.parser.scripting:
+                self.startTagRawtext(token)
+            else:
+                self.startTagOther(token)
+
+        def startTagRawtext(self, token):
+            """iframe, noembed noframes, noscript(if scripting enabled)"""
+            self.parser.parseRCDataRawtext(token, "RAWTEXT")
+
+        def startTagOpt(self, token):
+            if self.tree.openElements[-1].name == "option":
+                self.parser.phase.processEndTag(impliedTagToken("option"))
+            self.tree.reconstructActiveFormattingElements()
+            self.tree.insertElement(token)
+
+        def startTagSelect(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            self.tree.insertElement(token)
+            self.parser.framesetOK = False
+            if self.parser.phase in (self.parser.phases["inTable"],
+                                     self.parser.phases["inCaption"],
+                                     self.parser.phases["inColumnGroup"],
+                                     self.parser.phases["inTableBody"],
+                                     self.parser.phases["inRow"],
+                                     self.parser.phases["inCell"]):
+                self.parser.phase = self.parser.phases["inSelectInTable"]
+            else:
+                self.parser.phase = self.parser.phases["inSelect"]
+
+        def startTagRpRt(self, token):
+            if self.tree.elementInScope("ruby"):
+                self.tree.generateImpliedEndTags()
+                if self.tree.openElements[-1].name != "ruby":
+                    self.parser.parseError()
+            self.tree.insertElement(token)
+
+        def startTagMath(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            self.parser.adjustMathMLAttributes(token)
+            self.parser.adjustForeignAttributes(token)
+            token["namespace"] = namespaces["mathml"]
+            self.tree.insertElement(token)
+            # Need to get the parse error right for the case where the token
+            # has a namespace not equal to the xmlns attribute
+            if token["selfClosing"]:
+                self.tree.openElements.pop()
+                token["selfClosingAcknowledged"] = True
+
+        def startTagSvg(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            self.parser.adjustSVGAttributes(token)
+            self.parser.adjustForeignAttributes(token)
+            token["namespace"] = namespaces["svg"]
+            self.tree.insertElement(token)
+            # Need to get the parse error right for the case where the token
+            # has a namespace not equal to the xmlns attribute
+            if token["selfClosing"]:
+                self.tree.openElements.pop()
+                token["selfClosingAcknowledged"] = True
+
+        def startTagMisplaced(self, token):
+            """ Elements that should be children of other elements that have a
+            different insertion mode; here they are ignored
+            "caption", "col", "colgroup", "frame", "frameset", "head",
+            "option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
+            "tr", "noscript"
+            """
+            self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
+
+        def startTagOther(self, token):
+            self.tree.reconstructActiveFormattingElements()
+            self.tree.insertElement(token)
+
+        def endTagP(self, token):
+            if not self.tree.elementInScope("p", variant="button"):
+                self.startTagCloseP(impliedTagToken("p", "StartTag"))
+                self.parser.parseError("unexpected-end-tag", {"name": "p"})
+                self.endTagP(impliedTagToken("p", "EndTag"))
+            else:
+                self.tree.generateImpliedEndTags("p")
+                if self.tree.openElements[-1].name != "p":
+                    self.parser.parseError("unexpected-end-tag", {"name": "p"})
+                node = self.tree.openElements.pop()
+                while node.name != "p":
+                    node = self.tree.openElements.pop()
+
+        def endTagBody(self, token):
+            if not self.tree.elementInScope("body"):
+                self.parser.parseError()
+                return
+            elif self.tree.openElements[-1].name != "body":
+                for node in self.tree.openElements[2:]:
+                    if node.name not in frozenset(("dd", "dt", "li", "optgroup",
+                                                   "option", "p", "rp", "rt",
+                                                   "tbody", "td", "tfoot",
+                                                   "th", "thead", "tr", "body",
+                                                   "html")):
+                        # Not sure this is the correct name for the parse error
+                        self.parser.parseError(
+                            "expected-one-end-tag-but-got-another",
+                            {"gotName": "body", "expectedName": node.name})
+                        break
+            self.parser.phase = self.parser.phases["afterBody"]
+
+        def endTagHtml(self, token):
+            # We repeat the test for the body end tag token being ignored here
+            if self.tree.elementInScope("body"):
+                self.endTagBody(impliedTagToken("body"))
+                return token
+
+        def endTagBlock(self, token):
+            # Put us back in the right whitespace handling mode
+            if token["name"] == "pre":
+                self.processSpaceCharacters = self.processSpaceCharactersNonPre
+            inScope = self.tree.elementInScope(token["name"])
+            if inScope:
+                self.tree.generateImpliedEndTags()
+            if self.tree.openElements[-1].name != token["name"]:
+                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
+            if inScope:
+                node = self.tree.openElements.pop()
+                while node.name != token["name"]:
+                    node = self.tree.openElements.pop()
+
+        def endTagForm(self, token):
+            node = self.tree.formPointer
+            self.tree.formPointer = None
+            if node is None or not self.tree.elementInScope(node):
+                self.parser.parseError("unexpected-end-tag",
+                                       {"name": "form"})
+            else:
+                self.tree.generateImpliedEndTags()
+                if self.tree.openElements[-1] != node:
+                    self.parser.parseError("end-tag-too-early-ignored",
+                                           {"name": "form"})
+                self.tree.openElements.remove(node)
+
+        def endTagListItem(self, token):
+            if token["name"] == "li":
+                variant = "list"
+            else:
+                variant = None
+            if not self.tree.elementInScope(token["name"], variant=variant):
+                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+            else:
+                self.tree.generateImpliedEndTags(exclude=token["name"])
+                if self.tree.openElements[-1].name != token["name"]:
+                    self.parser.parseError(
+                        "end-tag-too-early",
+                        {"name": token["name"]})
+                node = self.tree.openElements.pop()
+                while node.name != token["name"]:
+                    node = self.tree.openElements.pop()
+
+        def endTagHeading(self, token):
+            for item in headingElements:
+                if self.tree.elementInScope(item):
+                    self.tree.generateImpliedEndTags()
+                    break
+            if self.tree.openElements[-1].name != token["name"]:
+                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
+
+            for item in headingElements:
+                if self.tree.elementInScope(item):
+                    item = self.tree.openElements.pop()
+                    while item.name not in headingElements:
+                        item = self.tree.openElements.pop()
+                    break
+
+        def endTagFormatting(self, token):
+            """The much-feared adoption agency algorithm"""
+            # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
+            # XXX Better parseError messages appreciated.
+
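+            # Outline: up to eight attempts are made to locate the misnested
+            # formatting element and its "furthest block"; the formatting
+            # element is cloned around the block's contents and the two
+            # bookkeeping lists are patched up to match
+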
+            # Step 1
+            outerLoopCounter = 0
+
+            # Step 2
+            while outerLoopCounter < 8:
+
+                # Step 3
+                outerLoopCounter += 1
+
+                # Step 4:
+
+                # Let the formatting element be the last element in
+                # the list of active formatting elements that:
+                # - is between the end of the list and the last scope
+                # marker in the list, if any, or the start of the list
+                # otherwise, and
+                # - has the same tag name as the token.
+                formattingElement = self.tree.elementInActiveFormattingElements(
+                    token["name"])
+                if (not formattingElement or
+                    (formattingElement in self.tree.openElements and
+                     not self.tree.elementInScope(formattingElement.name))):
+                    # If there is no such node, then abort these steps
+                    # and instead act as described in the "any other
+                    # end tag" entry below.
+                    self.endTagOther(token)
+                    return
+
+                # Otherwise, if there is such a node, but that node is
+                # not in the stack of open elements, then this is a
+                # parse error; remove the element from the list, and
+                # abort these steps.
+                elif formattingElement not in self.tree.openElements:
+                    self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
+                    self.tree.activeFormattingElements.remove(formattingElement)
+                    return
+
+                # Otherwise, if there is such a node, and that node is
+                # also in the stack of open elements, but the element
+                # is not in scope, then this is a parse error; ignore
+                # the token, and abort these steps.
+                elif not self.tree.elementInScope(formattingElement.name):
+                    self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
+                    return
+
+                # Otherwise, there is a formatting element and that
+                # element is in the stack and is in scope. If the
+                # element is not the current node, this is a parse
+                # error. In any case, proceed with the algorithm as
+                # written in the following steps.
+                else:
+                    if formattingElement != self.tree.openElements[-1]:
+                        self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
+
+                # Step 5:
+
+                # Let the furthest block be the topmost node in the
+                # stack of open elements that is lower in the stack
+                # than the formatting element, and is an element in
+                # the special category. There might not be one.
+                afeIndex = self.tree.openElements.index(formattingElement)
+                furthestBlock = None
+                for element in self.tree.openElements[afeIndex:]:
+                    if element.nameTuple in specialElements:
+                        furthestBlock = element
+                        break
+
+                # Step 6:
+
+                # If there is no furthest block, then the UA must
+                # first pop all the nodes from the bottom of the stack
+                # of open elements, from the current node up to and
+                # including the formatting element, then remove the
+                # formatting element from the list of active
+                # formatting elements, and finally abort these steps.
+                if furthestBlock is None:
+                    element = self.tree.openElements.pop()
+                    while element != formattingElement:
+                        element = self.tree.openElements.pop()
+                    self.tree.activeFormattingElements.remove(element)
+                    return
+
+                # Step 7
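+                # commonAncestor is the element immediately above the
+                # formatting element in the stack of open elements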
+                commonAncestor = self.tree.openElements[afeIndex - 1]
+
+                # Step 8:
+                # The bookmark marks where in the list of active formatting
+                # elements the clone is reinserted in step 14.  Note the
+                # bookmark can move in step 9.7
+                bookmark = self.tree.activeFormattingElements.index(formattingElement)
+
+                # Step 9
+                lastNode = node = furthestBlock
+                innerLoopCounter = 0
+
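+                # The inner loop (steps 9.1-9.10) runs at most three times,
+                # cloning each intervening formatting element and hanging
+                # lastNode beneath the clone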
+                index = self.tree.openElements.index(node)
+                while innerLoopCounter < 3:
+                    innerLoopCounter += 1
+                    # node becomes the element immediately above it in the
+                    # stack of open elements
+                    index -= 1
+                    node = self.tree.openElements[index]
+                    if node not in self.tree.activeFormattingElements:
+                        self.tree.openElements.remove(node)
+                        continue
+                    # Step 9.6
+                    if node == formattingElement:
+                        break
+                    # Step 9.7
+                    if lastNode == furthestBlock:
+                        bookmark = self.tree.activeFormattingElements.index(node) + 1
+                    # Step 9.8
+                    clone = node.cloneNode()
+                    # Replace node with clone
+                    self.tree.activeFormattingElements[
+                        self.tree.activeFormattingElements.index(node)] = clone
+                    self.tree.openElements[
+                        self.tree.openElements.index(node)] = clone
+                    node = clone
+                    # Step 9.9
+                    # Remove lastNode from its parents, if any
+                    if lastNode.parent:
+                        lastNode.parent.removeChild(lastNode)
+                    node.appendChild(lastNode)
+                    # Step 9.10
+                    lastNode = node
+
+                # Step 10
+                # If commonAncestor is a table, tbody, tfoot, thead, or tr,
+                # lastNode must be foster-parented; otherwise it is simply
+                # appended to commonAncestor
+                if lastNode.parent:
+                    lastNode.parent.removeChild(lastNode)
+
+                if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
+                    parent, insertBefore = self.tree.getTableMisnestedNodePosition()
+                    parent.insertBefore(lastNode, insertBefore)
+                else:
+                    commonAncestor.appendChild(lastNode)
+
+                # Step 11
+                clone = formattingElement.cloneNode()
+
+                # Step 12
+                furthestBlock.reparentChildren(clone)
+
+                # Step 13
+                furthestBlock.appendChild(clone)
+
+                # Step 14
+                self.tree.activeFormattingElements.remove(formattingElement)
+                self.tree.activeFormattingElements.insert(bookmark, clone)
+
+                # Step 15
+                self.tree.openElements.remove(formattingElement)
+                self.tree.openElements.insert(
+                    self.tree.openElements.index(furthestBlock) + 1, clone)
+
+        def endTagAppletMarqueeObject(self, token):
+            if self.tree.elementInScope(token["name"]):
+                self.tree.generateImpliedEndTags()
+            if self.tree.openElements[-1].name != token["name"]:
+                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
+
+            if self.tree.elementInScope(token["name"]):
+                element = self.tree.openElements.pop()
+                while element.name != token["name"]:
+                    element = self.tree.openElements.pop()
+                self.tree.clearActiveFormattingElements()
+
+        def endTagBr(self, token):
+            self.parser.parseError("unexpected-end-tag-treated-as",
+                                   {"originalName": "br", "newName": "br element"})
+            self.tree.reconstructActiveFormattingElements()
+            self.tree.insertElement(impliedTagToken("br", "StartTag"))
+            self.tree.openElements.pop()
+
+        def endTagOther(self, token):
+            for node in self.tree.openElements[::-1]:
+                if node.name == token["name"]:
+                    self.tree.generateImpliedEndTags(exclude=token["name"])
+                    if self.tree.openElements[-1].name != token["name"]:
+                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+                    while self.tree.openElements.pop() != node:
+                        pass
+                    break
+                else:
+                    if node.nameTuple in specialElements:
+                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+                        break
+
+    class TextPhase(Phase):
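+        # Handles RCDATA and RAWTEXT content (script, style, textarea,
+        # title, ...): only character data and the matching end tag are
+        # expected here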
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+            self.startTagHandler = _utils.MethodDispatcher([])
+            self.startTagHandler.default = self.startTagOther
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("script", self.endTagScript)])
+            self.endTagHandler.default = self.endTagOther
+
+        def processCharacters(self, token):
+            self.tree.insertText(token["data"])
+
+        def processEOF(self):
+            self.parser.parseError("expected-named-closing-tag-but-got-eof",
+                                   {"name": self.tree.openElements[-1].name})
+            self.tree.openElements.pop()
+            self.parser.phase = self.parser.originalPhase
+            return True
+
+        def startTagOther(self, token):
+            assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
+
+        def endTagScript(self, token):
+            node = self.tree.openElements.pop()
+            assert node.name == "script"
+            self.parser.phase = self.parser.originalPhase
+            # The rest of this method is all stuff that only happens if
+            # document.write works
+
+        def endTagOther(self, token):
+            self.tree.openElements.pop()
+            self.parser.phase = self.parser.originalPhase
+
+    class InTablePhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-table
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("caption", self.startTagCaption),
+                ("colgroup", self.startTagColgroup),
+                ("col", self.startTagCol),
+                (("tbody", "tfoot", "thead"), self.startTagRowGroup),
+                (("td", "th", "tr"), self.startTagImplyTbody),
+                ("table", self.startTagTable),
+                (("style", "script"), self.startTagStyleScript),
+                ("input", self.startTagInput),
+                ("form", self.startTagForm)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("table", self.endTagTable),
+                (("body", "caption", "col", "colgroup", "html", "tbody", "td",
+                  "tfoot", "th", "thead", "tr"), self.endTagIgnore)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        # helper methods
+        def clearStackToTableContext(self):
+            # "clear the stack back to a table context"
+            while self.tree.openElements[-1].name not in ("table", "html"):
+                # self.parser.parseError("unexpected-implied-end-tag-in-table",
+                #  {"name":  self.tree.openElements[-1].name})
+                self.tree.openElements.pop()
+            # When the current node is <html> it's an innerHTML case
+
+        # processing methods
+        def processEOF(self):
+            if self.tree.openElements[-1].name != "html":
+                self.parser.parseError("eof-in-table")
+            else:
+                assert self.parser.innerHTML
+            # Stop parsing
+
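+        # Character tokens inside a table are redirected to the
+        # "inTableText" phase, which buffers whole runs so they can be
+        # checked for non-whitespace before (possibly) foster-parenting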
+        def processSpaceCharacters(self, token):
+            originalPhase = self.parser.phase
+            self.parser.phase = self.parser.phases["inTableText"]
+            self.parser.phase.originalPhase = originalPhase
+            self.parser.phase.processSpaceCharacters(token)
+
+        def processCharacters(self, token):
+            originalPhase = self.parser.phase
+            self.parser.phase = self.parser.phases["inTableText"]
+            self.parser.phase.originalPhase = originalPhase
+            self.parser.phase.processCharacters(token)
+
+        def insertText(self, token):
+            # If we get here there must be at least one non-whitespace
+            # character.  Setting insertFromTable makes the tree builder
+            # foster-parent content that appears directly inside a table
+            self.tree.insertFromTable = True
+            self.parser.phases["inBody"].processCharacters(token)
+            self.tree.insertFromTable = False
+
+        def startTagCaption(self, token):
+            self.clearStackToTableContext()
+            self.tree.activeFormattingElements.append(Marker)
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inCaption"]
+
+        def startTagColgroup(self, token):
+            self.clearStackToTableContext()
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inColumnGroup"]
+
+        def startTagCol(self, token):
+            self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
+            return token
+
+        def startTagRowGroup(self, token):
+            self.clearStackToTableContext()
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inTableBody"]
+
+        def startTagImplyTbody(self, token):
+            self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
+            return token
+
+        def startTagTable(self, token):
+            self.parser.parseError("unexpected-start-tag-implies-end-tag",
+                                   {"startName": "table", "endName": "table"})
+            self.parser.phase.processEndTag(impliedTagToken("table"))
+            if not self.parser.innerHTML:
+                return token
+
+        def startTagStyleScript(self, token):
+            return self.parser.phases["inHead"].processStartTag(token)
+
+        def startTagInput(self, token):
+            if ("type" in token["data"] and
+                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
+                self.parser.parseError("unexpected-hidden-input-in-table")
+                self.tree.insertElement(token)
+                # XXX associate with form
+                self.tree.openElements.pop()
+            else:
+                self.startTagOther(token)
+
+        def startTagForm(self, token):
+            self.parser.parseError("unexpected-form-in-table")
+            if self.tree.formPointer is None:
+                self.tree.insertElement(token)
+                self.tree.formPointer = self.tree.openElements[-1]
+                self.tree.openElements.pop()
+
+        def startTagOther(self, token):
+            self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
+            # Do the table magic!
+            self.tree.insertFromTable = True
+            self.parser.phases["inBody"].processStartTag(token)
+            self.tree.insertFromTable = False
+
+        def endTagTable(self, token):
+            if self.tree.elementInScope("table", variant="table"):
+                self.tree.generateImpliedEndTags()
+                if self.tree.openElements[-1].name != "table":
+                    self.parser.parseError("end-tag-too-early-named",
+                                           {"gotName": "table",
+                                            "expectedName": self.tree.openElements[-1].name})
+                while self.tree.openElements[-1].name != "table":
+                    self.tree.openElements.pop()
+                self.tree.openElements.pop()
+                self.parser.resetInsertionMode()
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def endTagIgnore(self, token):
+            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
+            # Do the table magic!
+            self.tree.insertFromTable = True
+            self.parser.phases["inBody"].processEndTag(token)
+            self.tree.insertFromTable = False
+
+    class InTableTextPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+            self.originalPhase = None
+            self.characterTokens = []
+
+        def flushCharacters(self):
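+            # If the buffered run contains any non-whitespace, the whole
+            # run is inserted through the inTable phase (and thus
+            # foster-parented); pure whitespace is inserted normally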
+            data = "".join([item["data"] for item in self.characterTokens])
+            if any([item not in spaceCharacters for item in data]):
+                token = {"type": tokenTypes["Characters"], "data": data}
+                self.parser.phases["inTable"].insertText(token)
+            elif data:
+                self.tree.insertText(data)
+            self.characterTokens = []
+
+        def processComment(self, token):
+            self.flushCharacters()
+            self.parser.phase = self.originalPhase
+            return token
+
+        def processEOF(self):
+            self.flushCharacters()
+            self.parser.phase = self.originalPhase
+            return True
+
+        def processCharacters(self, token):
+            if token["data"] == "\u0000":
+                return
+            self.characterTokens.append(token)
+
+        def processSpaceCharacters(self, token):
+            # pretty sure we should never reach here
+            self.characterTokens.append(token)
+            # assert False
+
+        def processStartTag(self, token):
+            self.flushCharacters()
+            self.parser.phase = self.originalPhase
+            return token
+
+        def processEndTag(self, token):
+            self.flushCharacters()
+            self.parser.phase = self.originalPhase
+            return token
+
+    class InCaptionPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-caption
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
+                  "thead", "tr"), self.startTagTableElement)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("caption", self.endTagCaption),
+                ("table", self.endTagTable),
+                (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
+                  "thead", "tr"), self.endTagIgnore)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def ignoreEndTagCaption(self):
+            return not self.tree.elementInScope("caption", variant="table")
+
+        def processEOF(self):
+            self.parser.phases["inBody"].processEOF()
+
+        def processCharacters(self, token):
+            return self.parser.phases["inBody"].processCharacters(token)
+
+        def startTagTableElement(self, token):
+            self.parser.parseError()
+            # XXX Have to duplicate logic here to find out if the tag is ignored
+            ignoreEndTag = self.ignoreEndTagCaption()
+            self.parser.phase.processEndTag(impliedTagToken("caption"))
+            if not ignoreEndTag:
+                return token
+
+        def startTagOther(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def endTagCaption(self, token):
+            if not self.ignoreEndTagCaption():
+                # Note: this code is quite similar to endTagTable in the
+                # "inTable" phase
+                self.tree.generateImpliedEndTags()
+                if self.tree.openElements[-1].name != "caption":
+                    self.parser.parseError("expected-one-end-tag-but-got-another",
+                                           {"gotName": "caption",
+                                            "expectedName": self.tree.openElements[-1].name})
+                while self.tree.openElements[-1].name != "caption":
+                    self.tree.openElements.pop()
+                self.tree.openElements.pop()
+                self.tree.clearActiveFormattingElements()
+                self.parser.phase = self.parser.phases["inTable"]
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def endTagTable(self, token):
+            self.parser.parseError()
+            ignoreEndTag = self.ignoreEndTagCaption()
+            self.parser.phase.processEndTag(impliedTagToken("caption"))
+            if not ignoreEndTag:
+                return token
+
+        def endTagIgnore(self, token):
+            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def endTagOther(self, token):
+            return self.parser.phases["inBody"].processEndTag(token)
+
+    class InColumnGroupPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-column
+
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("col", self.startTagCol)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("colgroup", self.endTagColgroup),
+                ("col", self.endTagCol)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def ignoreEndTagColgroup(self):
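+            # The current node can only be "html" here in the fragment
+            # (innerHTML) case, where </colgroup> must be ignored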
+            return self.tree.openElements[-1].name == "html"
+
+        def processEOF(self):
+            if self.tree.openElements[-1].name == "html":
+                assert self.parser.innerHTML
+                return
+            else:
+                ignoreEndTag = self.ignoreEndTagColgroup()
+                self.endTagColgroup(impliedTagToken("colgroup"))
+                if not ignoreEndTag:
+                    return True
+
+        def processCharacters(self, token):
+            ignoreEndTag = self.ignoreEndTagColgroup()
+            self.endTagColgroup(impliedTagToken("colgroup"))
+            if not ignoreEndTag:
+                return token
+
+        def startTagCol(self, token):
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+            token["selfClosingAcknowledged"] = True
+
+        def startTagOther(self, token):
+            ignoreEndTag = self.ignoreEndTagColgroup()
+            self.endTagColgroup(impliedTagToken("colgroup"))
+            if not ignoreEndTag:
+                return token
+
+        def endTagColgroup(self, token):
+            if self.ignoreEndTagColgroup():
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+            else:
+                self.tree.openElements.pop()
+                self.parser.phase = self.parser.phases["inTable"]
+
+        def endTagCol(self, token):
+            self.parser.parseError("no-end-tag", {"name": "col"})
+
+        def endTagOther(self, token):
+            ignoreEndTag = self.ignoreEndTagColgroup()
+            self.endTagColgroup(impliedTagToken("colgroup"))
+            if not ignoreEndTag:
+                return token
+
+    class InTableBodyPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-table0
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("tr", self.startTagTr),
+                (("td", "th"), self.startTagTableCell),
+                (("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
+                 self.startTagTableOther)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
+                ("table", self.endTagTable),
+                (("body", "caption", "col", "colgroup", "html", "td", "th",
+                  "tr"), self.endTagIgnore)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        # helper methods
+        def clearStackToTableBodyContext(self):
+            while self.tree.openElements[-1].name not in ("tbody", "tfoot",
+                                                          "thead", "html"):
+                # self.parser.parseError("unexpected-implied-end-tag-in-table",
+                #  {"name": self.tree.openElements[-1].name})
+                self.tree.openElements.pop()
+            if self.tree.openElements[-1].name == "html":
+                assert self.parser.innerHTML
+
+        # the rest
+        def processEOF(self):
+            self.parser.phases["inTable"].processEOF()
+
+        def processSpaceCharacters(self, token):
+            return self.parser.phases["inTable"].processSpaceCharacters(token)
+
+        def processCharacters(self, token):
+            return self.parser.phases["inTable"].processCharacters(token)
+
+        def startTagTr(self, token):
+            self.clearStackToTableBodyContext()
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inRow"]
+
+        def startTagTableCell(self, token):
+            self.parser.parseError("unexpected-cell-in-table-body",
+                                   {"name": token["name"]})
+            self.startTagTr(impliedTagToken("tr", "StartTag"))
+            return token
+
+        def startTagTableOther(self, token):
+            # XXX Any ideas on how to share this with endTagTable?
+            if (self.tree.elementInScope("tbody", variant="table") or
+                self.tree.elementInScope("thead", variant="table") or
+                    self.tree.elementInScope("tfoot", variant="table")):
+                self.clearStackToTableBodyContext()
+                self.endTagTableRowGroup(
+                    impliedTagToken(self.tree.openElements[-1].name))
+                return token
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def startTagOther(self, token):
+            return self.parser.phases["inTable"].processStartTag(token)
+
+        def endTagTableRowGroup(self, token):
+            if self.tree.elementInScope(token["name"], variant="table"):
+                self.clearStackToTableBodyContext()
+                self.tree.openElements.pop()
+                self.parser.phase = self.parser.phases["inTable"]
+            else:
+                self.parser.parseError("unexpected-end-tag-in-table-body",
+                                       {"name": token["name"]})
+
+        def endTagTable(self, token):
+            if (self.tree.elementInScope("tbody", variant="table") or
+                self.tree.elementInScope("thead", variant="table") or
+                    self.tree.elementInScope("tfoot", variant="table")):
+                self.clearStackToTableBodyContext()
+                self.endTagTableRowGroup(
+                    impliedTagToken(self.tree.openElements[-1].name))
+                return token
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def endTagIgnore(self, token):
+            self.parser.parseError("unexpected-end-tag-in-table-body",
+                                   {"name": token["name"]})
+
+        def endTagOther(self, token):
+            return self.parser.phases["inTable"].processEndTag(token)
+
+    class InRowPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-row
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                (("td", "th"), self.startTagTableCell),
+                (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
+                  "tr"), self.startTagTableOther)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("tr", self.endTagTr),
+                ("table", self.endTagTable),
+                (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
+                (("body", "caption", "col", "colgroup", "html", "td", "th"),
+                 self.endTagIgnore)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        # helper methods (XXX unify this with other table helper methods)
+        def clearStackToTableRowContext(self):
+            while self.tree.openElements[-1].name not in ("tr", "html"):
+                self.parser.parseError("unexpected-implied-end-tag-in-table-row",
+                                       {"name": self.tree.openElements[-1].name})
+                self.tree.openElements.pop()
+
+        def ignoreEndTagTr(self):
+            return not self.tree.elementInScope("tr", variant="table")
+
+        # the rest
+        def processEOF(self):
+            self.parser.phases["inTable"].processEOF()
+
+        def processSpaceCharacters(self, token):
+            return self.parser.phases["inTable"].processSpaceCharacters(token)
+
+        def processCharacters(self, token):
+            return self.parser.phases["inTable"].processCharacters(token)
+
+        def startTagTableCell(self, token):
+            self.clearStackToTableRowContext()
+            self.tree.insertElement(token)
+            self.parser.phase = self.parser.phases["inCell"]
+            self.tree.activeFormattingElements.append(Marker)
+
+        def startTagTableOther(self, token):
+            ignoreEndTag = self.ignoreEndTagTr()
+            self.endTagTr(impliedTagToken("tr"))
+            # XXX how are we sure it's always ignored in the innerHTML case?
+            if not ignoreEndTag:
+                return token
+
+        def startTagOther(self, token):
+            return self.parser.phases["inTable"].processStartTag(token)
+
+        def endTagTr(self, token):
+            if not self.ignoreEndTagTr():
+                self.clearStackToTableRowContext()
+                self.tree.openElements.pop()
+                self.parser.phase = self.parser.phases["inTableBody"]
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def endTagTable(self, token):
+            ignoreEndTag = self.ignoreEndTagTr()
+            self.endTagTr(impliedTagToken("tr"))
+            # Reprocess the current tag if the tr end tag was not ignored
+            # XXX how are we sure it's always ignored in the innerHTML case?
+            if not ignoreEndTag:
+                return token
+
+        def endTagTableRowGroup(self, token):
+            if self.tree.elementInScope(token["name"], variant="table"):
+                self.endTagTr(impliedTagToken("tr"))
+                return token
+            else:
+                self.parser.parseError()
+
+        def endTagIgnore(self, token):
+            self.parser.parseError("unexpected-end-tag-in-table-row",
+                                   {"name": token["name"]})
+
+        def endTagOther(self, token):
+            return self.parser.phases["inTable"].processEndTag(token)
+
+    class InCellPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-cell
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
+                  "thead", "tr"), self.startTagTableOther)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                (("td", "th"), self.endTagTableCell),
+                (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
+                (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        # helper
+        def closeCell(self):
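+            # Implements the spec's "close the cell" steps by emitting an
+            # implied </td> or </th>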
+            if self.tree.elementInScope("td", variant="table"):
+                self.endTagTableCell(impliedTagToken("td"))
+            elif self.tree.elementInScope("th", variant="table"):
+                self.endTagTableCell(impliedTagToken("th"))
+
+        # the rest
+        def processEOF(self):
+            self.parser.phases["inBody"].processEOF()
+
+        def processCharacters(self, token):
+            return self.parser.phases["inBody"].processCharacters(token)
+
+        def startTagTableOther(self, token):
+            if (self.tree.elementInScope("td", variant="table") or
+                    self.tree.elementInScope("th", variant="table")):
+                self.closeCell()
+                return token
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def startTagOther(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def endTagTableCell(self, token):
+            if self.tree.elementInScope(token["name"], variant="table"):
+                self.tree.generateImpliedEndTags(token["name"])
+                if self.tree.openElements[-1].name != token["name"]:
+                    self.parser.parseError("unexpected-cell-end-tag",
+                                           {"name": token["name"]})
+                    while True:
+                        node = self.tree.openElements.pop()
+                        if node.name == token["name"]:
+                            break
+                else:
+                    self.tree.openElements.pop()
+                self.tree.clearActiveFormattingElements()
+                self.parser.phase = self.parser.phases["inRow"]
+            else:
+                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def endTagIgnore(self, token):
+            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+        def endTagImply(self, token):
+            if self.tree.elementInScope(token["name"], variant="table"):
+                self.closeCell()
+                return token
+            else:
+                # sometimes innerHTML case
+                self.parser.parseError()
+
+        def endTagOther(self, token):
+            return self.parser.phases["inBody"].processEndTag(token)
+
+    class InSelectPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("option", self.startTagOption),
+                ("optgroup", self.startTagOptgroup),
+                ("select", self.startTagSelect),
+                (("input", "keygen", "textarea"), self.startTagInput),
+                ("script", self.startTagScript)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("option", self.endTagOption),
+                ("optgroup", self.endTagOptgroup),
+                ("select", self.endTagSelect)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-select
+        def processEOF(self):
+            if self.tree.openElements[-1].name != "html":
+                self.parser.parseError("eof-in-select")
+            else:
+                assert self.parser.innerHTML
+
+        def processCharacters(self, token):
+            if token["data"] == "\u0000":
+                return
+            self.tree.insertText(token["data"])
+
+        def startTagOption(self, token):
+            # We need to imply </option> if <option> is the current node.
+            if self.tree.openElements[-1].name == "option":
+                self.tree.openElements.pop()
+            self.tree.insertElement(token)
+
+        def startTagOptgroup(self, token):
+            if self.tree.openElements[-1].name == "option":
+                self.tree.openElements.pop()
+            if self.tree.openElements[-1].name == "optgroup":
+                self.tree.openElements.pop()
+            self.tree.insertElement(token)
+
+        def startTagSelect(self, token):
+            self.parser.parseError("unexpected-select-in-select")
+            self.endTagSelect(impliedTagToken("select"))
+
+        def startTagInput(self, token):
+            self.parser.parseError("unexpected-input-in-select")
+            if self.tree.elementInScope("select", variant="select"):
+                self.endTagSelect(impliedTagToken("select"))
+                return token
+            else:
+                assert self.parser.innerHTML
+
+        def startTagScript(self, token):
+            return self.parser.phases["inHead"].processStartTag(token)
+
+        def startTagOther(self, token):
+            self.parser.parseError("unexpected-start-tag-in-select",
+                                   {"name": token["name"]})
+
+        def endTagOption(self, token):
+            if self.tree.openElements[-1].name == "option":
+                self.tree.openElements.pop()
+            else:
+                self.parser.parseError("unexpected-end-tag-in-select",
+                                       {"name": "option"})
+
+        def endTagOptgroup(self, token):
+            # </optgroup> implicitly closes <option>
+            if (self.tree.openElements[-1].name == "option" and
+                    self.tree.openElements[-2].name == "optgroup"):
+                self.tree.openElements.pop()
+            # It also closes </optgroup>
+            if self.tree.openElements[-1].name == "optgroup":
+                self.tree.openElements.pop()
+            # But nothing else
+            else:
+                self.parser.parseError("unexpected-end-tag-in-select",
+                                       {"name": "optgroup"})
+
+        def endTagSelect(self, token):
+            if self.tree.elementInScope("select", variant="select"):
+                node = self.tree.openElements.pop()
+                while node.name != "select":
+                    node = self.tree.openElements.pop()
+                self.parser.resetInsertionMode()
+            else:
+                # innerHTML case
+                assert self.parser.innerHTML
+                self.parser.parseError()
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag-in-select",
+                                   {"name": token["name"]})
+
+    class InSelectInTablePhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
+                 self.startTagTable)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
+                 self.endTagTable)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def processEOF(self):
+            self.parser.phases["inSelect"].processEOF()
+
+        def processCharacters(self, token):
+            return self.parser.phases["inSelect"].processCharacters(token)
+
+        def startTagTable(self, token):
+            self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
+            self.endTagOther(impliedTagToken("select"))
+            return token
+
+        def startTagOther(self, token):
+            return self.parser.phases["inSelect"].processStartTag(token)
+
+        def endTagTable(self, token):
+            self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
+            if self.tree.elementInScope(token["name"], variant="table"):
+                self.endTagOther(impliedTagToken("select"))
+                return token
+
+        def endTagOther(self, token):
+            return self.parser.phases["inSelect"].processEndTag(token)
+
+    class InForeignContentPhase(Phase):
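+        # Start tags in this set "break out" of foreign (SVG/MathML) content
+        # and hand parsing back to the normal HTML rules; see processStartTag
+        # below.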
+        breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
+                                      "center", "code", "dd", "div", "dl", "dt",
+                                      "em", "embed", "h1", "h2", "h3",
+                                      "h4", "h5", "h6", "head", "hr", "i", "img",
+                                      "li", "listing", "menu", "meta", "nobr",
+                                      "ol", "p", "pre", "ruby", "s", "small",
+                                      "span", "strong", "strike", "sub", "sup",
+                                      "table", "tt", "u", "ul", "var"])
+
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+        def adjustSVGTagNames(self, token):
+            replacements = {"altglyph": "altGlyph",
+                            "altglyphdef": "altGlyphDef",
+                            "altglyphitem": "altGlyphItem",
+                            "animatecolor": "animateColor",
+                            "animatemotion": "animateMotion",
+                            "animatetransform": "animateTransform",
+                            "clippath": "clipPath",
+                            "feblend": "feBlend",
+                            "fecolormatrix": "feColorMatrix",
+                            "fecomponenttransfer": "feComponentTransfer",
+                            "fecomposite": "feComposite",
+                            "feconvolvematrix": "feConvolveMatrix",
+                            "fediffuselighting": "feDiffuseLighting",
+                            "fedisplacementmap": "feDisplacementMap",
+                            "fedistantlight": "feDistantLight",
+                            "feflood": "feFlood",
+                            "fefunca": "feFuncA",
+                            "fefuncb": "feFuncB",
+                            "fefuncg": "feFuncG",
+                            "fefuncr": "feFuncR",
+                            "fegaussianblur": "feGaussianBlur",
+                            "feimage": "feImage",
+                            "femerge": "feMerge",
+                            "femergenode": "feMergeNode",
+                            "femorphology": "feMorphology",
+                            "feoffset": "feOffset",
+                            "fepointlight": "fePointLight",
+                            "fespecularlighting": "feSpecularLighting",
+                            "fespotlight": "feSpotLight",
+                            "fetile": "feTile",
+                            "feturbulence": "feTurbulence",
+                            "foreignobject": "foreignObject",
+                            "glyphref": "glyphRef",
+                            "lineargradient": "linearGradient",
+                            "radialgradient": "radialGradient",
+                            "textpath": "textPath"}
+
+            if token["name"] in replacements:
+                token["name"] = replacements[token["name"]]
+
+        def processCharacters(self, token):
+            if token["data"] == "\u0000":
+                token["data"] = "\uFFFD"
+            elif (self.parser.framesetOK and
+                  any(char not in spaceCharacters for char in token["data"])):
+                self.parser.framesetOK = False
+            Phase.processCharacters(self, token)
+
+        def processStartTag(self, token):
+            currentNode = self.tree.openElements[-1]
+            if (token["name"] in self.breakoutElements or
+                (token["name"] == "font" and
+                 set(token["data"].keys()) & set(["color", "face", "size"]))):
+                self.parser.parseError("unexpected-html-element-in-foreign-content",
+                                       {"name": token["name"]})
+                while (self.tree.openElements[-1].namespace !=
+                       self.tree.defaultNamespace and
+                       not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
+                       not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
+                    self.tree.openElements.pop()
+                return token
+
+            else:
+                if currentNode.namespace == namespaces["mathml"]:
+                    self.parser.adjustMathMLAttributes(token)
+                elif currentNode.namespace == namespaces["svg"]:
+                    self.adjustSVGTagNames(token)
+                    self.parser.adjustSVGAttributes(token)
+                self.parser.adjustForeignAttributes(token)
+                token["namespace"] = currentNode.namespace
+                self.tree.insertElement(token)
+                if token["selfClosing"]:
+                    self.tree.openElements.pop()
+                    token["selfClosingAcknowledged"] = True
+
+        def processEndTag(self, token):
+            nodeIndex = len(self.tree.openElements) - 1
+            node = self.tree.openElements[-1]
+            if node.name.translate(asciiUpper2Lower) != token["name"]:
+                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
+
+            while True:
+                if node.name.translate(asciiUpper2Lower) == token["name"]:
+                    # XXX this isn't in the spec but it seems necessary
+                    if self.parser.phase == self.parser.phases["inTableText"]:
+                        self.parser.phase.flushCharacters()
+                        self.parser.phase = self.parser.phase.originalPhase
+                    while self.tree.openElements.pop() != node:
+                        assert self.tree.openElements
+                    new_token = None
+                    break
+                nodeIndex -= 1
+
+                node = self.tree.openElements[nodeIndex]
+                if node.namespace != self.tree.defaultNamespace:
+                    continue
+                else:
+                    new_token = self.parser.phase.processEndTag(token)
+                    break
+            return new_token
+
+    class AfterBodyPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([("html", self.endTagHtml)])
+            self.endTagHandler.default = self.endTagOther
+
+        def processEOF(self):
+            # Stop parsing
+            pass
+
+        def processComment(self, token):
+            # This is needed because data is to be appended to the <html> element
+            # here and not to whatever is currently open.
+            self.tree.insertComment(token, self.tree.openElements[0])
+
+        def processCharacters(self, token):
+            self.parser.parseError("unexpected-char-after-body")
+            self.parser.phase = self.parser.phases["inBody"]
+            return token
+
+        def startTagHtml(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def startTagOther(self, token):
+            self.parser.parseError("unexpected-start-tag-after-body",
+                                   {"name": token["name"]})
+            self.parser.phase = self.parser.phases["inBody"]
+            return token
+
+        def endTagHtml(self, name):
+            if self.parser.innerHTML:
+                self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
+            else:
+                self.parser.phase = self.parser.phases["afterAfterBody"]
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag-after-body",
+                                   {"name": token["name"]})
+            self.parser.phase = self.parser.phases["inBody"]
+            return token
+
+    class InFramesetPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("frameset", self.startTagFrameset),
+                ("frame", self.startTagFrame),
+                ("noframes", self.startTagNoframes)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("frameset", self.endTagFrameset)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def processEOF(self):
+            if self.tree.openElements[-1].name != "html":
+                self.parser.parseError("eof-in-frameset")
+            else:
+                assert self.parser.innerHTML
+
+        def processCharacters(self, token):
+            self.parser.parseError("unexpected-char-in-frameset")
+
+        def startTagFrameset(self, token):
+            self.tree.insertElement(token)
+
+        def startTagFrame(self, token):
+            self.tree.insertElement(token)
+            self.tree.openElements.pop()
+
+        def startTagNoframes(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def startTagOther(self, token):
+            self.parser.parseError("unexpected-start-tag-in-frameset",
+                                   {"name": token["name"]})
+
+        def endTagFrameset(self, token):
+            if self.tree.openElements[-1].name == "html":
+                # innerHTML case
+                self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
+            else:
+                self.tree.openElements.pop()
+            if (not self.parser.innerHTML and
+                    self.tree.openElements[-1].name != "frameset"):
+                # If we're not in innerHTML mode and the current node is not a
+                # "frameset" element (anymore) then switch.
+                self.parser.phase = self.parser.phases["afterFrameset"]
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag-in-frameset",
+                                   {"name": token["name"]})
+
+    class AfterFramesetPhase(Phase):
+        # http://www.whatwg.org/specs/web-apps/current-work/#after3
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("noframes", self.startTagNoframes)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+            self.endTagHandler = _utils.MethodDispatcher([
+                ("html", self.endTagHtml)
+            ])
+            self.endTagHandler.default = self.endTagOther
+
+        def processEOF(self):
+            # Stop parsing
+            pass
+
+        def processCharacters(self, token):
+            self.parser.parseError("unexpected-char-after-frameset")
+
+        def startTagNoframes(self, token):
+            return self.parser.phases["inHead"].processStartTag(token)
+
+        def startTagOther(self, token):
+            self.parser.parseError("unexpected-start-tag-after-frameset",
+                                   {"name": token["name"]})
+
+        def endTagHtml(self, token):
+            self.parser.phase = self.parser.phases["afterAfterFrameset"]
+
+        def endTagOther(self, token):
+            self.parser.parseError("unexpected-end-tag-after-frameset",
+                                   {"name": token["name"]})
+
+    class AfterAfterBodyPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+        def processEOF(self):
+            pass
+
+        def processComment(self, token):
+            self.tree.insertComment(token, self.tree.document)
+
+        def processSpaceCharacters(self, token):
+            return self.parser.phases["inBody"].processSpaceCharacters(token)
+
+        def processCharacters(self, token):
+            self.parser.parseError("expected-eof-but-got-char")
+            self.parser.phase = self.parser.phases["inBody"]
+            return token
+
+        def startTagHtml(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def startTagOther(self, token):
+            self.parser.parseError("expected-eof-but-got-start-tag",
+                                   {"name": token["name"]})
+            self.parser.phase = self.parser.phases["inBody"]
+            return token
+
+        def processEndTag(self, token):
+            self.parser.parseError("expected-eof-but-got-end-tag",
+                                   {"name": token["name"]})
+            self.parser.phase = self.parser.phases["inBody"]
+            return token
+
+    class AfterAfterFramesetPhase(Phase):
+        def __init__(self, parser, tree):
+            Phase.__init__(self, parser, tree)
+
+            self.startTagHandler = _utils.MethodDispatcher([
+                ("html", self.startTagHtml),
+                ("noframes", self.startTagNoFrames)
+            ])
+            self.startTagHandler.default = self.startTagOther
+
+        def processEOF(self):
+            pass
+
+        def processComment(self, token):
+            self.tree.insertComment(token, self.tree.document)
+
+        def processSpaceCharacters(self, token):
+            return self.parser.phases["inBody"].processSpaceCharacters(token)
+
+        def processCharacters(self, token):
+            self.parser.parseError("expected-eof-but-got-char")
+
+        def startTagHtml(self, token):
+            return self.parser.phases["inBody"].processStartTag(token)
+
+        def startTagNoFrames(self, token):
+            return self.parser.phases["inHead"].processStartTag(token)
+
+        def startTagOther(self, token):
+            self.parser.parseError("expected-eof-but-got-start-tag",
+                                   {"name": token["name"]})
+
+        def processEndTag(self, token):
+            self.parser.parseError("expected-eof-but-got-end-tag",
+                                   {"name": token["name"]})
+    # pylint:enable=unused-argument
+
+    return {
+        "initial": InitialPhase,
+        "beforeHtml": BeforeHtmlPhase,
+        "beforeHead": BeforeHeadPhase,
+        "inHead": InHeadPhase,
+        "inHeadNoscript": InHeadNoscriptPhase,
+        "afterHead": AfterHeadPhase,
+        "inBody": InBodyPhase,
+        "text": TextPhase,
+        "inTable": InTablePhase,
+        "inTableText": InTableTextPhase,
+        "inCaption": InCaptionPhase,
+        "inColumnGroup": InColumnGroupPhase,
+        "inTableBody": InTableBodyPhase,
+        "inRow": InRowPhase,
+        "inCell": InCellPhase,
+        "inSelect": InSelectPhase,
+        "inSelectInTable": InSelectInTablePhase,
+        "inForeignContent": InForeignContentPhase,
+        "afterBody": AfterBodyPhase,
+        "inFrameset": InFramesetPhase,
+        "afterFrameset": AfterFramesetPhase,
+        "afterAfterBody": AfterAfterBodyPhase,
+        "afterAfterFrameset": AfterAfterFramesetPhase,
+    }
+
+
+def adjust_attributes(token, replacements):
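+    # Rename attribute keys according to ``replacements`` (e.g. the
+    # foreign-attribute adjustments), rebuilding the ordered dict so the
+    # original attribute order is preserved.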
+    needs_adjustment = viewkeys(token['data']) & viewkeys(replacements)
+    if needs_adjustment:
+        token['data'] = OrderedDict((replacements.get(k, k), v)
+                                    for k, v in token['data'].items())
+
+
+def impliedTagToken(name, type="EndTag", attributes=None,
+                    selfClosing=False):
+    if attributes is None:
+        attributes = {}
+    return {"type": tokenTypes[type], "name": name, "data": attributes,
+            "selfClosing": selfClosing}
+
+
+class ParseError(Exception):
+    """Error in parsed document"""
+    pass
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/serializer.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/serializer.py
new file mode 100644
index 0000000000000000000000000000000000000000..53f4d44c39742a80ab170507e22347cc59219c72
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/serializer.py
@@ -0,0 +1,409 @@
+from __future__ import absolute_import, division, unicode_literals
+from pip._vendor.six import text_type
+
+import re
+
+from codecs import register_error, xmlcharrefreplace_errors
+
+from .constants import voidElements, booleanAttributes, spaceCharacters
+from .constants import rcdataElements, entities, xmlEntities
+from . import treewalkers, _utils
+from xml.sax.saxutils import escape
+
+_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
+_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
+_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
+                                   "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
+                                   "\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
+                                   "\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
+                                   "\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
+                                   "\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
+                                   "\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
+                                   "\u3000]")
+
+
+_encode_entity_map = {}
+_is_ucs4 = len("\U0010FFFF") == 1
+for k, v in list(entities.items()):
+    # skip multi-character entities
+    if ((_is_ucs4 and len(v) > 1) or
+            (not _is_ucs4 and len(v) > 2)):
+        continue
+    if v != "&":
+        if len(v) == 2:
+            v = _utils.surrogatePairToCodepoint(v)
+        else:
+            v = ord(v)
+        if v not in _encode_entity_map or k.islower():
+            # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
+            _encode_entity_map[v] = k
+
+
+def htmlentityreplace_errors(exc):
+    if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
+        res = []
+        codepoints = []
+        skip = False
+        for i, c in enumerate(exc.object[exc.start:exc.end]):
+            if skip:
+                skip = False
+                continue
+            index = i + exc.start
+            if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
+                codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
+                skip = True
+            else:
+                codepoint = ord(c)
+            codepoints.append(codepoint)
+        for cp in codepoints:
+            e = _encode_entity_map.get(cp)
+            if e:
+                res.append("&")
+                res.append(e)
+                if not e.endswith(";"):
+                    res.append(";")
+            else:
+                res.append("&#x%s;" % (hex(cp)[2:]))
+        return ("".join(res), exc.end)
+    else:
+        return xmlcharrefreplace_errors(exc)
+
+
+register_error("htmlentityreplace", htmlentityreplace_errors)
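+
+# Illustrative sketch (not part of the vendored source): once registered, the
+# handler behaves like any codecs error mode; characters the target encoding
+# cannot represent become named entities where one exists, otherwise numeric
+# character references:
+#
+#     "caf\xe9 \u2603".encode("ascii", "htmlentityreplace")
+#     # -> roughly b"caf&eacute; &#x2603;"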
+
+
+def serialize(input, tree="etree", encoding=None, **serializer_opts):
+    """Serializes the input token stream using the specified treewalker
+
+    :arg input: the token stream to serialize
+
+    :arg tree: the treewalker to use
+
+    :arg encoding: the encoding to use
+
+    :arg serializer_opts: any options to pass to the
+        :py:class:`html5lib.serializer.HTMLSerializer` that gets created
+
+    :returns: the tree serialized as a string
+
+    Example:
+
+    >>> from html5lib.html5parser import parse
+    >>> from html5lib.serializer import serialize
+    >>> token_stream = parse('<html><body><p>Hi!</p></body></html>')
+    >>> serialize(token_stream, omit_optional_tags=False)
+    '<html><head></head><body><p>Hi!</p></body></html>'
+
+    """
+    # XXX: Should we cache this?
+    walker = treewalkers.getTreeWalker(tree)
+    s = HTMLSerializer(**serializer_opts)
+    return s.render(walker(input), encoding)
+
+
+class HTMLSerializer(object):
+
+    # attribute quoting options
+    quote_attr_values = "legacy"  # be secure by default
+    quote_char = '"'
+    use_best_quote_char = True
+
+    # tag syntax options
+    omit_optional_tags = True
+    minimize_boolean_attributes = True
+    use_trailing_solidus = False
+    space_before_trailing_solidus = True
+
+    # escaping options
+    escape_lt_in_attrs = False
+    escape_rcdata = False
+    resolve_entities = True
+
+    # miscellaneous options
+    alphabetical_attributes = False
+    inject_meta_charset = True
+    strip_whitespace = False
+    sanitize = False
+
+    options = ("quote_attr_values", "quote_char", "use_best_quote_char",
+               "omit_optional_tags", "minimize_boolean_attributes",
+               "use_trailing_solidus", "space_before_trailing_solidus",
+               "escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
+               "alphabetical_attributes", "inject_meta_charset",
+               "strip_whitespace", "sanitize")
+
+    def __init__(self, **kwargs):
+        """Initialize HTMLSerializer
+
+        :arg inject_meta_charset: Whether or not to inject the meta charset.
+
+            Defaults to ``True``.
+
+        :arg quote_attr_values: Whether to quote attribute values that don't
+            require quoting per legacy browser behavior (``"legacy"``), when
+            required by the standard (``"spec"``), or always (``"always"``).
+
+            Defaults to ``"legacy"``.
+
+        :arg quote_char: Use given quote character for attribute quoting.
+
+            Defaults to ``"`` which will use double quotes unless attribute
+            value contains a double quote, in which case single quotes are
+            used.
+
+        :arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute
+            values.
+
+            Defaults to ``False``.
+
+        :arg escape_rcdata: Whether to escape, inside rcdata elements such as
+            ``style``, the characters that would need escaping within normal
+            elements.
+
+            Defaults to ``False``.
+
+        :arg resolve_entities: Whether to resolve named character entities that
+            appear in the source tree. The XML predefined entities &lt; &gt;
+            &amp; &quot; &apos; are unaffected by this setting.
+
+            Defaults to ``True``.
+
+        :arg strip_whitespace: Whether to remove semantically meaningless
+            whitespace. (This compresses all whitespace to a single space
+            except within ``pre``.)
+
+            Defaults to ``False``.
+
+        :arg minimize_boolean_attributes: Shortens boolean attributes to give
+            just the attribute value, for example::
+
+              <input disabled="disabled">
+
+            becomes::
+
+              <input disabled>
+
+            Defaults to ``True``.
+
+        :arg use_trailing_solidus: Includes a close-tag slash at the end of the
+            start tag of void elements (empty elements whose end tag is
+            forbidden). E.g. ``<hr/>``.
+
+            Defaults to ``False``.
+
+        :arg space_before_trailing_solidus: Places a space immediately before
+            the closing slash in a tag using a trailing solidus. E.g.
+            ``<hr />``. Requires ``use_trailing_solidus=True``.
+
+            Defaults to ``True``.
+
+        :arg sanitize: Strip all unsafe or unknown constructs from output.
+            See :py:class:`html5lib.filters.sanitizer.Filter`.
+
+            Defaults to ``False``.
+
+        :arg omit_optional_tags: Omit start/end tags that are optional.
+
+            Defaults to ``True``.
+
+        :arg alphabetical_attributes: Reorder attributes to be in alphabetical order.
+
+            Defaults to ``False``.
+
+        """
+        unexpected_args = frozenset(kwargs) - frozenset(self.options)
+        if len(unexpected_args) > 0:
+            raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
+        if 'quote_char' in kwargs:
+            self.use_best_quote_char = False
+        for attr in self.options:
+            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
+        self.errors = []
+        self.strict = False
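+
+    # Illustrative sketch (not part of the vendored source): typical option
+    # combinations, e.g. a serializer that always quotes and alphabetizes
+    # attributes and keeps optional tags:
+    #
+    #     s = HTMLSerializer(quote_attr_values="always",
+    #                        alphabetical_attributes=True,
+    #                        omit_optional_tags=False)
+    #
+    # Passing an option not listed in ``options`` raises TypeError, mirroring
+    # the check in __init__ above.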
+
+    def encode(self, string):
+        assert(isinstance(string, text_type))
+        if self.encoding:
+            return string.encode(self.encoding, "htmlentityreplace")
+        else:
+            return string
+
+    def encodeStrict(self, string):
+        assert(isinstance(string, text_type))
+        if self.encoding:
+            return string.encode(self.encoding, "strict")
+        else:
+            return string
+
+    def serialize(self, treewalker, encoding=None):
+        # pylint:disable=too-many-nested-blocks
+        self.encoding = encoding
+        in_cdata = False
+        self.errors = []
+
+        if encoding and self.inject_meta_charset:
+            from .filters.inject_meta_charset import Filter
+            treewalker = Filter(treewalker, encoding)
+        # The alphabetical-attributes filter is applied here under the
+        # assumption that none of the later filters adds or changes the order
+        # of attributes; it needs to be before the sanitizer so escaped
+        # elements come out correctly
+        if self.alphabetical_attributes:
+            from .filters.alphabeticalattributes import Filter
+            treewalker = Filter(treewalker)
+        # WhitespaceFilter should be used before OptionalTagFilter
+        # for maximum efficiency of the latter filter
+        if self.strip_whitespace:
+            from .filters.whitespace import Filter
+            treewalker = Filter(treewalker)
+        if self.sanitize:
+            from .filters.sanitizer import Filter
+            treewalker = Filter(treewalker)
+        if self.omit_optional_tags:
+            from .filters.optionaltags import Filter
+            treewalker = Filter(treewalker)
+
+        for token in treewalker:
+            type = token["type"]
+            if type == "Doctype":
+                doctype = "<!DOCTYPE %s" % token["name"]
+
+                if token["publicId"]:
+                    doctype += ' PUBLIC "%s"' % token["publicId"]
+                elif token["systemId"]:
+                    doctype += " SYSTEM"
+                if token["systemId"]:
+                    if token["systemId"].find('"') >= 0:
+                        if token["systemId"].find("'") >= 0:
+                            self.serializeError("System identifer contains both single and double quote characters")
+                        quote_char = "'"
+                    else:
+                        quote_char = '"'
+                    doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
+
+                doctype += ">"
+                yield self.encodeStrict(doctype)
+
+            elif type in ("Characters", "SpaceCharacters"):
+                if type == "SpaceCharacters" or in_cdata:
+                    if in_cdata and token["data"].find("</") >= 0:
+                        self.serializeError("Unexpected </ in CDATA")
+                    yield self.encode(token["data"])
+                else:
+                    yield self.encode(escape(token["data"]))
+
+            elif type in ("StartTag", "EmptyTag"):
+                name = token["name"]
+                yield self.encodeStrict("<%s" % name)
+                if name in rcdataElements and not self.escape_rcdata:
+                    in_cdata = True
+                elif in_cdata:
+                    self.serializeError("Unexpected child element of a CDATA element")
+                for (_, attr_name), attr_value in token["data"].items():
+                    # TODO: Add namespace support here
+                    k = attr_name
+                    v = attr_value
+                    yield self.encodeStrict(' ')
+
+                    yield self.encodeStrict(k)
+                    if not self.minimize_boolean_attributes or \
+                        (k not in booleanAttributes.get(name, tuple()) and
+                         k not in booleanAttributes.get("", tuple())):
+                        yield self.encodeStrict("=")
+                        if self.quote_attr_values == "always" or len(v) == 0:
+                            quote_attr = True
+                        elif self.quote_attr_values == "spec":
+                            quote_attr = _quoteAttributeSpec.search(v) is not None
+                        elif self.quote_attr_values == "legacy":
+                            quote_attr = _quoteAttributeLegacy.search(v) is not None
+                        else:
+                            raise ValueError("quote_attr_values must be one of: "
+                                             "'always', 'spec', or 'legacy'")
+                        v = v.replace("&", "&amp;")
+                        if self.escape_lt_in_attrs:
+                            v = v.replace("<", "&lt;")
+                        if quote_attr:
+                            quote_char = self.quote_char
+                            if self.use_best_quote_char:
+                                if "'" in v and '"' not in v:
+                                    quote_char = '"'
+                                elif '"' in v and "'" not in v:
+                                    quote_char = "'"
+                            if quote_char == "'":
+                                v = v.replace("'", "&#39;")
+                            else:
+                                v = v.replace('"', "&quot;")
+                            yield self.encodeStrict(quote_char)
+                            yield self.encode(v)
+                            yield self.encodeStrict(quote_char)
+                        else:
+                            yield self.encode(v)
+                if name in voidElements and self.use_trailing_solidus:
+                    if self.space_before_trailing_solidus:
+                        yield self.encodeStrict(" /")
+                    else:
+                        yield self.encodeStrict("/")
+                yield self.encode(">")
+
+            elif type == "EndTag":
+                name = token["name"]
+                if name in rcdataElements:
+                    in_cdata = False
+                elif in_cdata:
+                    self.serializeError("Unexpected child element of a CDATA element")
+                yield self.encodeStrict("</%s>" % name)
+
+            elif type == "Comment":
+                data = token["data"]
+                if data.find("--") >= 0:
+                    self.serializeError("Comment contains --")
+                yield self.encodeStrict("<!--%s-->" % token["data"])
+
+            elif type == "Entity":
+                name = token["name"]
+                key = name + ";"
+                if key not in entities:
+                    self.serializeError("Entity %s not recognized" % name)
+                if self.resolve_entities and key not in xmlEntities:
+                    data = entities[key]
+                else:
+                    data = "&%s;" % name
+                yield self.encodeStrict(data)
+
+            else:
+                self.serializeError(token["data"])
+
+    def render(self, treewalker, encoding=None):
+        """Serializes the stream from the treewalker into a string
+
+        :arg treewalker: the treewalker to serialize
+
+        :arg encoding: the string encoding to use
+
+        :returns: the serialized tree
+
+        Example:
+
+        >>> from html5lib import parse, getTreeWalker
+        >>> from html5lib.serializer import HTMLSerializer
+        >>> token_stream = parse('<html><body>Hi!</body></html>')
+        >>> walker = getTreeWalker('etree')
+        >>> serializer = HTMLSerializer(omit_optional_tags=False)
+        >>> serializer.render(walker(token_stream))
+        '<html><head></head><body>Hi!</body></html>'
+
+        """
+        if encoding:
+            return b"".join(list(self.serialize(treewalker, encoding)))
+        else:
+            return "".join(list(self.serialize(treewalker)))
+
+    def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
+        # XXX The idea is to make data mandatory.
+        self.errors.append(data)
+        if self.strict:
+            raise SerializeError
+
+
+class SerializeError(Exception):
+    """Error in serialized tree"""
+    pass
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ef59590c76ee75733d78b061d4108d49f209ee5
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/__init__.py
@@ -0,0 +1,30 @@
+"""Tree adapters let you convert from one tree structure to another
+
+Example:
+
+.. code-block:: python
+
+   from pip._vendor import html5lib
+   from pip._vendor.html5lib.treeadapters import genshi
+
+   doc = '<html><body>Hi!</body></html>'
+   treebuilder = html5lib.getTreeBuilder('etree')
+   parser = html5lib.HTMLParser(tree=treebuilder)
+   tree = parser.parse(doc)
+   TreeWalker = html5lib.getTreeWalker('etree')
+
+   genshi_tree = genshi.to_genshi(TreeWalker(tree))
+
+"""
+from __future__ import absolute_import, division, unicode_literals
+
+from . import sax
+
+__all__ = ["sax"]
+
+try:
+    from . import genshi  # noqa
+except ImportError:
+    pass
+else:
+    __all__.append("genshi")
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..968e454d3aca50f9677086b7a170fe8fb36d4d7c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5050ed675faa66d2475f329d12bf307cde2c4bf3
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/genshi.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b87d864d8ed64da185ab740812f0cfe3353cf34
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/__pycache__/sax.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py
new file mode 100644
index 0000000000000000000000000000000000000000..61d5fb6ac42ca4152f056d996af0cb0b0d2ddc35
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/genshi.py
@@ -0,0 +1,54 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from genshi.core import QName, Attrs
+from genshi.core import START, END, TEXT, COMMENT, DOCTYPE
+
+
+def to_genshi(walker):
+    """Convert a tree to a genshi tree
+
+    :arg walker: the treewalker to use to walk the tree to convert it
+
+    :returns: generator of genshi nodes
+
+    """
+    text = []
+    for token in walker:
+        type = token["type"]
+        if type in ("Characters", "SpaceCharacters"):
+            text.append(token["data"])
+        elif text:
+            yield TEXT, "".join(text), (None, -1, -1)
+            text = []
+
+        if type in ("StartTag", "EmptyTag"):
+            if token["namespace"]:
+                name = "{%s}%s" % (token["namespace"], token["name"])
+            else:
+                name = token["name"]
+            attrs = Attrs([(QName("{%s}%s" % attr if attr[0] is not None else attr[1]), value)
+                           for attr, value in token["data"].items()])
+            yield (START, (QName(name), attrs), (None, -1, -1))
+            if type == "EmptyTag":
+                type = "EndTag"
+
+        if type == "EndTag":
+            if token["namespace"]:
+                name = "{%s}%s" % (token["namespace"], token["name"])
+            else:
+                name = token["name"]
+
+            yield END, QName(name), (None, -1, -1)
+
+        elif type == "Comment":
+            yield COMMENT, token["data"], (None, -1, -1)
+
+        elif type == "Doctype":
+            yield DOCTYPE, (token["name"], token["publicId"],
+                            token["systemId"]), (None, -1, -1)
+
+        else:
+            pass  # FIXME: What to do?
+
+    if text:
+        yield TEXT, "".join(text), (None, -1, -1)
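+
+
+# Illustrative usage sketch (not part of the vendored source), assuming the
+# optional genshi dependency is installed:
+#
+#     from pip._vendor import html5lib
+#     from pip._vendor.html5lib.treeadapters.genshi import to_genshi
+#
+#     tree = html5lib.parse("<p>Hi!</p>")
+#     walker = html5lib.getTreeWalker("etree")
+#     events = list(to_genshi(walker(tree)))  # genshi (kind, data, pos) tuples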
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/sax.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/sax.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4ccea5a25653dd9e4c1bcf1047f407184562a1b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treeadapters/sax.py
@@ -0,0 +1,50 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from xml.sax.xmlreader import AttributesNSImpl
+
+from ..constants import adjustForeignAttributes, unadjustForeignAttributes
+
+prefix_mapping = {}
+for prefix, localName, namespace in adjustForeignAttributes.values():
+    if prefix is not None:
+        prefix_mapping[prefix] = namespace
+
+
+def to_sax(walker, handler):
+    """Call SAX-like content handler based on treewalker walker
+
+    :arg walker: the treewalker to use to walk the tree to convert it
+
+    :arg handler: SAX handler to use
+
+    """
+    handler.startDocument()
+    for prefix, namespace in prefix_mapping.items():
+        handler.startPrefixMapping(prefix, namespace)
+
+    for token in walker:
+        type = token["type"]
+        if type == "Doctype":
+            continue
+        elif type in ("StartTag", "EmptyTag"):
+            attrs = AttributesNSImpl(token["data"],
+                                     unadjustForeignAttributes)
+            handler.startElementNS((token["namespace"], token["name"]),
+                                   token["name"],
+                                   attrs)
+            if type == "EmptyTag":
+                handler.endElementNS((token["namespace"], token["name"]),
+                                     token["name"])
+        elif type == "EndTag":
+            handler.endElementNS((token["namespace"], token["name"]),
+                                 token["name"])
+        elif type in ("Characters", "SpaceCharacters"):
+            handler.characters(token["data"])
+        elif type == "Comment":
+            pass
+        else:
+            assert False, "Unknown token type"
+
+    for prefix, namespace in prefix_mapping.items():
+        handler.endPrefixMapping(prefix)
+    handler.endDocument()
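+
+
+# Illustrative usage sketch (not part of the vendored source): the emitted
+# events can drive any SAX content handler, e.g. the stdlib XMLGenerator:
+#
+#     from io import StringIO
+#     from xml.sax.saxutils import XMLGenerator
+#     from pip._vendor import html5lib
+#
+#     tree = html5lib.parse("<p>Hi!</p>")
+#     walker = html5lib.getTreeWalker("etree")
+#     out = StringIO()
+#     to_sax(walker(tree), XMLGenerator(out))
+#     markup = out.getvalue()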
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d44447eaf5a3912ea699e6d895d51f9b0782cfba
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py
@@ -0,0 +1,88 @@
+"""A collection of modules for building different kinds of trees from HTML
+documents.
+
+To create a treebuilder for a new type of tree, you need to do
+implement several things:
+
+1. A set of classes for various types of elements: Document, Doctype, Comment,
+   Element. These must implement the interface of ``treebuilders.base.Node``
+   (although comment nodes have a different signature for their constructor,
+   see ``treebuilders.etree.Comment``). Textual content may also be implemented
+   as another node type, or not, as your tree implementation requires.
+
+2. A treebuilder object (called ``TreeBuilder`` by convention) that inherits
+   from ``treebuilders.base.TreeBuilder``. This has 4 required attributes:
+
+   * ``documentClass`` - the class to use for the bottommost node of a document
+   * ``elementClass`` - the class to use for HTML Elements
+   * ``commentClass`` - the class to use for comments
+   * ``doctypeClass`` - the class to use for doctypes
+
+   It also has one required method:
+
+   * ``getDocument`` - Returns the root node of the complete document tree
+
+3. If you wish to run the unit tests, you must also create a ``testSerializer``
+   method on your treebuilder which accepts a node and returns a string
+   containing the node and its children serialized according to the format
+   used in the unit tests.
+
+"""
+
+from __future__ import absolute_import, division, unicode_literals
+
+from .._utils import default_etree
+
+treeBuilderCache = {}
+
+
+def getTreeBuilder(treeType, implementation=None, **kwargs):
+    """Get a TreeBuilder class for various types of trees with built-in support
+
+    :arg treeType: the name of the tree type required (case-insensitive). Supported
+        values are:
+
+        * "dom" - A generic builder for DOM implementations, defaulting to a
+          xml.dom.minidom based implementation.
+        * "etree" - A generic builder for tree implementations exposing an
+          ElementTree-like interface, defaulting to xml.etree.cElementTree if
+          available and xml.etree.ElementTree if not.
+        * "lxml" - A etree-based builder for lxml.etree, handling limitations
+          of lxml's implementation.
+
+    :arg implementation: (Currently applies to the "etree" and "dom" tree
+        types). A module implementing the tree type e.g. xml.etree.ElementTree
+        or xml.etree.cElementTree.
+
+    :arg kwargs: Any additional options to pass to the TreeBuilder when
+        creating it.
+
+    Example:
+
+    >>> from html5lib.treebuilders import getTreeBuilder
+    >>> builder = getTreeBuilder('etree')
+
+    """
+
+    treeType = treeType.lower()
+    if treeType not in treeBuilderCache:
+        if treeType == "dom":
+            from . import dom
+            # Come up with a sane default (pref. from the stdlib)
+            if implementation is None:
+                from xml.dom import minidom
+                implementation = minidom
+            # NEVER cache here, caching is done in the dom submodule
+            return dom.getDomModule(implementation, **kwargs).TreeBuilder
+        elif treeType == "lxml":
+            from . import etree_lxml
+            treeBuilderCache[treeType] = etree_lxml.TreeBuilder
+        elif treeType == "etree":
+            from . import etree
+            if implementation is None:
+                implementation = default_etree
+            # NEVER cache here, caching is done in the etree submodule
+            return etree.getETreeModule(implementation, **kwargs).TreeBuilder
+        else:
+            raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
+    return treeBuilderCache.get(treeType)
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..501ce4506024cf3126f891e4f2d0aedd58f1de68
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/base.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/base.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ccd68b8874194da55a2fc8322e19a5897ae7e73d
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/base.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1d89f7b323973eb64d85828703b209b35d937d0
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/dom.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0031af5fe1226b1b42f49121b536827c2c72eabe
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree_lxml.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree_lxml.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ebd939c950bb240328d97acd7eb96d30212551c3
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/__pycache__/etree_lxml.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/base.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..73973db51b87cedc5a76db5850700c929c9b254f
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/base.py
@@ -0,0 +1,417 @@
+from __future__ import absolute_import, division, unicode_literals
+from pip._vendor.six import text_type
+
+from ..constants import scopingElements, tableInsertModeElements, namespaces
+
+# The scope markers are inserted when entering object elements,
+# marquees, table cells, and table captions, and are used to prevent formatting
+# from "leaking" into tables, object elements, and marquees.
+Marker = None
+
+listElementsMap = {
+    None: (frozenset(scopingElements), False),
+    "button": (frozenset(scopingElements | set([(namespaces["html"], "button")])), False),
+    "list": (frozenset(scopingElements | set([(namespaces["html"], "ol"),
+                                              (namespaces["html"], "ul")])), False),
+    "table": (frozenset([(namespaces["html"], "html"),
+                         (namespaces["html"], "table")]), False),
+    "select": (frozenset([(namespaces["html"], "optgroup"),
+                          (namespaces["html"], "option")]), True)
+}
+
+
+class Node(object):
+    """Represents an item in the tree"""
+    def __init__(self, name):
+        """Creates a Node
+
+        :arg name: The tag name associated with the node
+
+        """
+        # The tag name associated with the node
+        self.name = name
+        # The parent of the current node (or None for the document node)
+        self.parent = None
+        # The value of the current node (applies to text nodes and comments)
+        self.value = None
+        # A dict holding name -> value pairs for attributes of the node
+        self.attributes = {}
+        # A list of child nodes of the current node. This must include all
+        # elements but not necessarily other node types.
+        self.childNodes = []
+        # A list of miscellaneous flags that can be set on the node.
+        self._flags = []
+
+    def __str__(self):
+        attributesStr = " ".join(["%s=\"%s\"" % (name, value)
+                                  for name, value in
+                                  self.attributes.items()])
+        if attributesStr:
+            return "<%s %s>" % (self.name, attributesStr)
+        else:
+            return "<%s>" % (self.name)
+
+    def __repr__(self):
+        return "<%s>" % (self.name)
+
+    def appendChild(self, node):
+        """Insert node as a child of the current node
+
+        :arg node: the node to insert
+
+        """
+        raise NotImplementedError
+
+    def insertText(self, data, insertBefore=None):
+        """Insert data as text in the current node, positioned before the
+        start of node insertBefore or to the end of the node's text.
+
+        :arg data: the data to insert
+
+        :arg insertBefore: True if you want to insert the text before the node
+            and False if you want to insert it after the node
+
+        """
+        raise NotImplementedError
+
+    def insertBefore(self, node, refNode):
+        """Insert node as a child of the current node, before refNode in the
+        list of child nodes. Raises ValueError if refNode is not a child of
+        the current node
+
+        :arg node: the node to insert
+
+        :arg refNode: the child node to insert the node before
+
+        """
+        raise NotImplementedError
+
+    def removeChild(self, node):
+        """Remove node from the children of the current node
+
+        :arg node: the child node to remove
+
+        """
+        raise NotImplementedError
+
+    def reparentChildren(self, newParent):
+        """Move all the children of the current node to newParent.
+        This is needed so that trees that don't store text as nodes move the
+        text in the correct way
+
+        :arg newParent: the node to move all this node's children to
+
+        """
+        # XXX - should this method be made more general?
+        for child in self.childNodes:
+            newParent.appendChild(child)
+        self.childNodes = []
+
+    def cloneNode(self):
+        """Return a shallow copy of the current node i.e. a node with the same
+        name and attributes but with no parent or child nodes
+        """
+        raise NotImplementedError
+
+    def hasContent(self):
+        """Return true if the node has children or text, false otherwise
+        """
+        raise NotImplementedError
+
+
+class ActiveFormattingElements(list):
+    def append(self, node):
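+        # HTML's "Noah's Ark" clause: if three matching entries (same name,
+        # namespace and attributes) already sit between the end of the list
+        # and the last marker, drop the earliest of them before appending.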
+        equalCount = 0
+        if node != Marker:
+            for element in self[::-1]:
+                if element == Marker:
+                    break
+                if self.nodesEqual(element, node):
+                    equalCount += 1
+                if equalCount == 3:
+                    self.remove(element)
+                    break
+        list.append(self, node)
+
+    def nodesEqual(self, node1, node2):
+        if not node1.nameTuple == node2.nameTuple:
+            return False
+
+        if not node1.attributes == node2.attributes:
+            return False
+
+        return True
+
+
+class TreeBuilder(object):
+    """Base treebuilder implementation
+
+    * documentClass - the class to use for the bottommost node of a document
+    * elementClass - the class to use for HTML Elements
+    * commentClass - the class to use for comments
+    * doctypeClass - the class to use for doctypes
+
+    """
+    # pylint:disable=not-callable
+
+    # Document class
+    documentClass = None
+
+    # The class to use for creating a node
+    elementClass = None
+
+    # The class to use for creating comments
+    commentClass = None
+
+    # The class to use for creating doctypes
+    doctypeClass = None
+
+    # Fragment class
+    fragmentClass = None
+
+    def __init__(self, namespaceHTMLElements):
+        """Create a TreeBuilder
+
+        :arg namespaceHTMLElements: whether or not to namespace HTML elements
+
+        """
+        if namespaceHTMLElements:
+            self.defaultNamespace = "http://www.w3.org/1999/xhtml"
+        else:
+            self.defaultNamespace = None
+        self.reset()
+
+    def reset(self):
+        self.openElements = []
+        self.activeFormattingElements = ActiveFormattingElements()
+
+        # XXX - rename these to headElement, formElement
+        self.headPointer = None
+        self.formPointer = None
+
+        self.insertFromTable = False
+
+        self.document = self.documentClass()
+
+    def elementInScope(self, target, variant=None):
+
+        # If we pass a node in, we match that. If we pass a string, we match
+        # any node with that name.
+        exactNode = hasattr(target, "nameTuple")
+        if not exactNode:
+            if isinstance(target, text_type):
+                target = (namespaces["html"], target)
+            assert isinstance(target, tuple)
+
+        listElements, invert = listElementsMap[variant]
+
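+        # ``invert`` flips the stop condition: for most variants the listed
+        # elements terminate the search (scope boundaries), while for the
+        # "select" variant only the listed elements (optgroup/option) may
+        # intervene without ending the scope.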
+        for node in reversed(self.openElements):
+            if exactNode and node == target:
+                return True
+            elif not exactNode and node.nameTuple == target:
+                return True
+            elif (invert ^ (node.nameTuple in listElements)):
+                return False
+
+        assert False  # We should never reach this point
+
+    def reconstructActiveFormattingElements(self):
+        # Within this algorithm the order of steps described in the
+        # specification is not quite the same as the order of steps in the
+        # code. It should still do the same though.
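+        # In spec terms: formatting elements (e.g. <b>, <i>) that are still
+        # in the list of active formatting elements but no longer on the
+        # stack of open elements are reopened here by inserting fresh clones.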
+
+        # Step 1: stop the algorithm when there's nothing to do.
+        if not self.activeFormattingElements:
+            return
+
+        # Step 2 and step 3: we start with the last element. So i is -1.
+        i = len(self.activeFormattingElements) - 1
+        entry = self.activeFormattingElements[i]
+        if entry == Marker or entry in self.openElements:
+            return
+
+        # Step 6
+        while entry != Marker and entry not in self.openElements:
+            if i == 0:
+                # This will be reset to 0 below
+                i = -1
+                break
+            i -= 1
+            # Step 5: let entry be one earlier in the list.
+            entry = self.activeFormattingElements[i]
+
+        while True:
+            # Step 7
+            i += 1
+
+            # Step 8
+            entry = self.activeFormattingElements[i]
+            clone = entry.cloneNode()  # Mainly to get a new copy of the attributes
+
+            # Step 9
+            element = self.insertElement({"type": "StartTag",
+                                          "name": clone.name,
+                                          "namespace": clone.namespace,
+                                          "data": clone.attributes})
+
+            # Step 10
+            self.activeFormattingElements[i] = element
+
+            # Step 11
+            if element == self.activeFormattingElements[-1]:
+                break
+
+    def clearActiveFormattingElements(self):
+        entry = self.activeFormattingElements.pop()
+        while self.activeFormattingElements and entry != Marker:
+            entry = self.activeFormattingElements.pop()
+
+    def elementInActiveFormattingElements(self, name):
+        """Check if an element exists between the end of the active
+        formatting elements and the last marker. If it does, return it, else
+        return false"""
+
+        for item in self.activeFormattingElements[::-1]:
+            # Check for Marker first because if it's a Marker it doesn't have a
+            # name attribute.
+            if item == Marker:
+                break
+            elif item.name == name:
+                return item
+        return False
+
+    def insertRoot(self, token):
+        element = self.createElement(token)
+        self.openElements.append(element)
+        self.document.appendChild(element)
+
+    def insertDoctype(self, token):
+        name = token["name"]
+        publicId = token["publicId"]
+        systemId = token["systemId"]
+
+        doctype = self.doctypeClass(name, publicId, systemId)
+        self.document.appendChild(doctype)
+
+    def insertComment(self, token, parent=None):
+        if parent is None:
+            parent = self.openElements[-1]
+        parent.appendChild(self.commentClass(token["data"]))
+
+    def createElement(self, token):
+        """Create an element but don't insert it anywhere"""
+        name = token["name"]
+        namespace = token.get("namespace", self.defaultNamespace)
+        element = self.elementClass(name, namespace)
+        element.attributes = token["data"]
+        return element
+
+    def _getInsertFromTable(self):
+        return self._insertFromTable
+
+    def _setInsertFromTable(self, value):
+        """Switch the function used to insert an element from the
+        normal one to the misnested table one and back again"""
+        self._insertFromTable = value
+        if value:
+            self.insertElement = self.insertElementTable
+        else:
+            self.insertElement = self.insertElementNormal
+
+    insertFromTable = property(_getInsertFromTable, _setInsertFromTable)
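+    # The parser flips this flag (tree.insertFromTable = True) while in the
+    # "in table" insertion mode so that misplaced content gets
+    # foster-parented instead of appended to the current open element.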
+
+    def insertElementNormal(self, token):
+        name = token["name"]
+        assert isinstance(name, text_type), "Element %s not unicode" % name
+        namespace = token.get("namespace", self.defaultNamespace)
+        element = self.elementClass(name, namespace)
+        element.attributes = token["data"]
+        self.openElements[-1].appendChild(element)
+        self.openElements.append(element)
+        return element
+
+    def insertElementTable(self, token):
+        """Create an element and insert it into the tree"""
+        if self.openElements[-1].name not in tableInsertModeElements:
+            return self.insertElementNormal(token)
+        else:
+            # We should be in the InTable insertion mode, which means the
+            # element must be foster-parented rather than appended normally
+            element = self.createElement(token)
+            parent, insertBefore = self.getTableMisnestedNodePosition()
+            if insertBefore is None:
+                parent.appendChild(element)
+            else:
+                parent.insertBefore(element, insertBefore)
+            self.openElements.append(element)
+        return element
+
+    def insertText(self, data, parent=None):
+        """Insert text data."""
+        if parent is None:
+            parent = self.openElements[-1]
+
+        if (not self.insertFromTable or
+                self.openElements[-1].name not in tableInsertModeElements):
+            parent.insertText(data)
+        else:
+            # We should be in the InTable insertion mode, which means the
+            # text must be foster-parented rather than inserted normally
+            parent, insertBefore = self.getTableMisnestedNodePosition()
+            parent.insertText(data, insertBefore)
+
+    def getTableMisnestedNodePosition(self):
+        """Get the foster parent element, and sibling to insert before
+        (or None) when inserting a misnested table node"""
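+        # For example, in "<table>foo<tr>" the text "foo" may not sit inside
+        # <table>, so it is foster-parented: inserted into the table's
+        # parent, immediately before the <table> element itself.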
+        # The foster parent element is the one which comes before the most
+        # recently opened table element
+        # XXX - this is really inelegant
+        lastTable = None
+        fosterParent = None
+        insertBefore = None
+        for elm in self.openElements[::-1]:
+            if elm.name == "table":
+                lastTable = elm
+                break
+        if lastTable:
+            # XXX - we should really check that this parent is actually a
+            # node here
+            if lastTable.parent:
+                fosterParent = lastTable.parent
+                insertBefore = lastTable
+            else:
+                fosterParent = self.openElements[
+                    self.openElements.index(lastTable) - 1]
+        else:
+            fosterParent = self.openElements[0]
+        return fosterParent, insertBefore
+
+    def generateImpliedEndTags(self, exclude=None):
+        name = self.openElements[-1].name
+        # XXX td, th and tr are not actually needed
+        if (name in frozenset(("dd", "dt", "li", "option", "optgroup", "p", "rp", "rt")) and
+                name != exclude):
+            self.openElements.pop()
+            # XXX This is not entirely what the specification says. We should
+            # investigate it more closely.
+            self.generateImpliedEndTags(exclude)
+
+    def getDocument(self):
+        """Return the final tree"""
+        return self.document
+
+    def getFragment(self):
+        """Return the final fragment"""
+        # assert self.innerHTML
+        fragment = self.fragmentClass()
+        self.openElements[0].reparentChildren(fragment)
+        return fragment
+
+    def testSerializer(self, node):
+        """Serialize the subtree of node in the format required by unit tests
+
+        :arg node: the node from which to start serializing
+
+        """
+        raise NotImplementedError
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcfac220bfaf9906b1f57ee747baff2a627cfa58
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.py
@@ -0,0 +1,236 @@
+from __future__ import absolute_import, division, unicode_literals
+
+
+try:
+    from collections.abc import MutableMapping  # Python 3.3+
+except ImportError:
+    from collections import MutableMapping  # Python 2.7
+from xml.dom import minidom, Node
+import weakref
+
+from . import base
+from .. import constants
+from ..constants import namespaces
+from .._utils import moduleFactoryFactory
+
+
+def getDomBuilder(DomImplementation):
+    Dom = DomImplementation
+
+    class AttrList(MutableMapping):
+        def __init__(self, element):
+            self.element = element
+
+        def __iter__(self):
+            return iter(self.element.attributes.keys())
+
+        def __setitem__(self, name, value):
+            if isinstance(name, tuple):
+                raise NotImplementedError
+            else:
+                attr = self.element.ownerDocument.createAttribute(name)
+                attr.value = value
+                self.element.attributes[name] = attr
+
+        def __len__(self):
+            return len(self.element.attributes)
+
+        def items(self):
+            return list(self.element.attributes.items())
+
+        def values(self):
+            return list(self.element.attributes.values())
+
+        def __getitem__(self, name):
+            if isinstance(name, tuple):
+                raise NotImplementedError
+            else:
+                return self.element.attributes[name].value
+
+        def __delitem__(self, name):
+            if isinstance(name, tuple):
+                raise NotImplementedError
+            else:
+                del self.element.attributes[name]
+
+    class NodeBuilder(base.Node):
+        def __init__(self, element):
+            base.Node.__init__(self, element.nodeName)
+            self.element = element
+
+        namespace = property(lambda self: hasattr(self.element, "namespaceURI") and
+                             self.element.namespaceURI or None)
+
+        def appendChild(self, node):
+            node.parent = self
+            self.element.appendChild(node.element)
+
+        def insertText(self, data, insertBefore=None):
+            text = self.element.ownerDocument.createTextNode(data)
+            if insertBefore:
+                self.element.insertBefore(text, insertBefore.element)
+            else:
+                self.element.appendChild(text)
+
+        def insertBefore(self, node, refNode):
+            self.element.insertBefore(node.element, refNode.element)
+            node.parent = self
+
+        def removeChild(self, node):
+            if node.element.parentNode == self.element:
+                self.element.removeChild(node.element)
+            node.parent = None
+
+        def reparentChildren(self, newParent):
+            while self.element.hasChildNodes():
+                child = self.element.firstChild
+                self.element.removeChild(child)
+                newParent.element.appendChild(child)
+            self.childNodes = []
+
+        def getAttributes(self):
+            return AttrList(self.element)
+
+        def setAttributes(self, attributes):
+            if attributes:
+                for name, value in list(attributes.items()):
+                    if isinstance(name, tuple):
+                        if name[0] is not None:
+                            qualifiedName = (name[0] + ":" + name[1])
+                        else:
+                            qualifiedName = name[1]
+                        self.element.setAttributeNS(name[2], qualifiedName,
+                                                    value)
+                    else:
+                        self.element.setAttribute(
+                            name, value)
+        attributes = property(getAttributes, setAttributes)
+
+        def cloneNode(self):
+            return NodeBuilder(self.element.cloneNode(False))
+
+        def hasContent(self):
+            return self.element.hasChildNodes()
+
+        def getNameTuple(self):
+            if self.namespace is None:
+                return namespaces["html"], self.name
+            else:
+                return self.namespace, self.name
+
+        nameTuple = property(getNameTuple)
+
+    class TreeBuilder(base.TreeBuilder):  # pylint:disable=unused-variable
+        def documentClass(self):
+            self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
+            return weakref.proxy(self)
+
+        def insertDoctype(self, token):
+            name = token["name"]
+            publicId = token["publicId"]
+            systemId = token["systemId"]
+
+            domimpl = Dom.getDOMImplementation()
+            doctype = domimpl.createDocumentType(name, publicId, systemId)
+            self.document.appendChild(NodeBuilder(doctype))
+            if Dom == minidom:
+                doctype.ownerDocument = self.dom
+
+        def elementClass(self, name, namespace=None):
+            if namespace is None and self.defaultNamespace is None:
+                node = self.dom.createElement(name)
+            else:
+                node = self.dom.createElementNS(namespace, name)
+
+            return NodeBuilder(node)
+
+        def commentClass(self, data):
+            return NodeBuilder(self.dom.createComment(data))
+
+        def fragmentClass(self):
+            return NodeBuilder(self.dom.createDocumentFragment())
+
+        def appendChild(self, node):
+            self.dom.appendChild(node.element)
+
+        def testSerializer(self, element):
+            return testSerializer(element)
+
+        def getDocument(self):
+            return self.dom
+
+        def getFragment(self):
+            return base.TreeBuilder.getFragment(self).element
+
+        def insertText(self, data, parent=None):
+            if parent != self:
+                base.TreeBuilder.insertText(self, data, parent)
+            else:
+                # HACK: allow text nodes as children of the document node
+                if hasattr(self.dom, '_child_node_types'):
+                    # pylint:disable=protected-access
+                    if Node.TEXT_NODE not in self.dom._child_node_types:
+                        self.dom._child_node_types = list(self.dom._child_node_types)
+                        self.dom._child_node_types.append(Node.TEXT_NODE)
+                self.dom.appendChild(self.dom.createTextNode(data))
+
+        implementation = DomImplementation
+        name = None
+
+    def testSerializer(element):
+        element.normalize()
+        rv = []
+
+        def serializeElement(element, indent=0):
+            if element.nodeType == Node.DOCUMENT_TYPE_NODE:
+                if element.name:
+                    if element.publicId or element.systemId:
+                        publicId = element.publicId or ""
+                        systemId = element.systemId or ""
+                        rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
+                                  (' ' * indent, element.name, publicId, systemId))
+                    else:
+                        rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
+                else:
+                    rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
+            elif element.nodeType == Node.DOCUMENT_NODE:
+                rv.append("#document")
+            elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
+                rv.append("#document-fragment")
+            elif element.nodeType == Node.COMMENT_NODE:
+                rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
+            elif element.nodeType == Node.TEXT_NODE:
+                rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
+            else:
+                if (hasattr(element, "namespaceURI") and
+                        element.namespaceURI is not None):
+                    name = "%s %s" % (constants.prefixes[element.namespaceURI],
+                                      element.nodeName)
+                else:
+                    name = element.nodeName
+                rv.append("|%s<%s>" % (' ' * indent, name))
+                if element.hasAttributes():
+                    attributes = []
+                    for i in range(len(element.attributes)):
+                        attr = element.attributes.item(i)
+                        name = attr.nodeName
+                        value = attr.value
+                        ns = attr.namespaceURI
+                        if ns:
+                            name = "%s %s" % (constants.prefixes[ns], attr.localName)
+                        else:
+                            name = attr.nodeName
+                        attributes.append((name, value))
+
+                    for name, value in sorted(attributes):
+                        rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
+            indent += 2
+            for child in element.childNodes:
+                serializeElement(child, indent)
+        serializeElement(element, 0)
+
+        return "\n".join(rv)
+
+    return locals()
+
+
+# The actual means to get a module: getDomModule(DomImplementation) builds
+# (and caches) a module-like object from getDomBuilder's locals().
+getDomModule = moduleFactoryFactory(getDomBuilder)
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dedf441643902d5258b4aef64f001f9282e7220
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/etree.py
@@ -0,0 +1,340 @@
+from __future__ import absolute_import, division, unicode_literals
+# pylint:disable=protected-access
+
+from pip._vendor.six import text_type
+
+import re
+
+from . import base
+from .. import _ihatexml
+from .. import constants
+from ..constants import namespaces
+from .._utils import moduleFactoryFactory
+
+tag_regexp = re.compile("{([^}]*)}(.*)")
+
+
+def getETreeBuilder(ElementTreeImplementation, fullTree=False):
+    ElementTree = ElementTreeImplementation
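+    # Comment("asd") returns an element whose tag is the Comment factory
+    # itself rather than a string; capturing that sentinel lets comment
+    # nodes be recognised later by comparing tags.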
+    ElementTreeCommentType = ElementTree.Comment("asd").tag
+
+    class Element(base.Node):
+        def __init__(self, name, namespace=None):
+            self._name = name
+            self._namespace = namespace
+            self._element = ElementTree.Element(self._getETreeTag(name,
+                                                                  namespace))
+            if namespace is None:
+                self.nameTuple = namespaces["html"], self._name
+            else:
+                self.nameTuple = self._namespace, self._name
+            self.parent = None
+            self._childNodes = []
+            self._flags = []
+
+        def _getETreeTag(self, name, namespace):
+            if namespace is None:
+                etree_tag = name
+            else:
+                etree_tag = "{%s}%s" % (namespace, name)
+            return etree_tag
+
+        def _setName(self, name):
+            self._name = name
+            self._element.tag = self._getETreeTag(self._name, self._namespace)
+
+        def _getName(self):
+            return self._name
+
+        name = property(_getName, _setName)
+
+        def _setNamespace(self, namespace):
+            self._namespace = namespace
+            self._element.tag = self._getETreeTag(self._name, self._namespace)
+
+        def _getNamespace(self):
+            return self._namespace
+
+        namespace = property(_getNamespace, _setNamespace)
+
+        def _getAttributes(self):
+            return self._element.attrib
+
+        def _setAttributes(self, attributes):
+            # Delete existing attributes first
+            # XXX - there may be a better way to do this...
+            for key in list(self._element.attrib.keys()):
+                del self._element.attrib[key]
+            for key, value in attributes.items():
+                if isinstance(key, tuple):
+                    name = "{%s}%s" % (key[2], key[1])
+                else:
+                    name = key
+                self._element.set(name, value)
+
+        attributes = property(_getAttributes, _setAttributes)
+
+        def _getChildNodes(self):
+            return self._childNodes
+
+        def _setChildNodes(self, value):
+            del self._element[:]
+            self._childNodes = []
+            for element in value:
+                self.insertChild(element)
+
+        childNodes = property(_getChildNodes, _setChildNodes)
+
+        def hasContent(self):
+            """Return true if the node has children or text"""
+            return bool(self._element.text or len(self._element))
+
+        def appendChild(self, node):
+            self._childNodes.append(node)
+            self._element.append(node._element)
+            node.parent = self
+
+        def insertBefore(self, node, refNode):
+            index = list(self._element).index(refNode._element)
+            self._element.insert(index, node._element)
+            node.parent = self
+
+        def removeChild(self, node):
+            self._childNodes.remove(node)
+            self._element.remove(node._element)
+            node.parent = None
+
+        def insertText(self, data, insertBefore=None):
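+            # ElementTree has no text-node type: text before the first child
+            # is stored on this element's .text, while text between or after
+            # children lives on the preceding sibling's .tail. The branches
+            # below append "data" to whichever slot applies.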
+            if not len(self._element):
+                if not self._element.text:
+                    self._element.text = ""
+                self._element.text += data
+            elif insertBefore is None:
+                # Insert the text as the tail of the last child element
+                if not self._element[-1].tail:
+                    self._element[-1].tail = ""
+                self._element[-1].tail += data
+            else:
+                # Insert the text before the specified node
+                children = list(self._element)
+                index = children.index(insertBefore._element)
+                if index > 0:
+                    if not self._element[index - 1].tail:
+                        self._element[index - 1].tail = ""
+                    self._element[index - 1].tail += data
+                else:
+                    if not self._element.text:
+                        self._element.text = ""
+                    self._element.text += data
+
+        def cloneNode(self):
+            element = type(self)(self.name, self.namespace)
+            for name, value in self.attributes.items():
+                element.attributes[name] = value
+            return element
+
+        def reparentChildren(self, newParent):
+            if newParent.childNodes:
+                # ElementTree stores missing text/tail as None rather than
+                # "", so guard both operands before concatenating.
+                last = newParent.childNodes[-1]._element
+                last.tail = (last.tail or "") + (self._element.text or "")
+            else:
+                if not newParent._element.text:
+                    newParent._element.text = ""
+                if self._element.text is not None:
+                    newParent._element.text += self._element.text
+            self._element.text = ""
+            base.Node.reparentChildren(self, newParent)
+
+    class Comment(Element):
+        def __init__(self, data):
+            # Use the superclass constructor to set all properties on the
+            # wrapper element
+            self._element = ElementTree.Comment(data)
+            self.parent = None
+            self._childNodes = []
+            self._flags = []
+
+        def _getData(self):
+            return self._element.text
+
+        def _setData(self, value):
+            self._element.text = value
+
+        data = property(_getData, _setData)
+
+    class DocumentType(Element):
+        def __init__(self, name, publicId, systemId):
+            Element.__init__(self, "<!DOCTYPE>")
+            self._element.text = name
+            self.publicId = publicId
+            self.systemId = systemId
+
+        def _getPublicId(self):
+            return self._element.get("publicId", "")
+
+        def _setPublicId(self, value):
+            if value is not None:
+                self._element.set("publicId", value)
+
+        publicId = property(_getPublicId, _setPublicId)
+
+        def _getSystemId(self):
+            return self._element.get("systemId", "")
+
+        def _setSystemId(self, value):
+            if value is not None:
+                self._element.set("systemId", value)
+
+        systemId = property(_getSystemId, _setSystemId)
+
+    class Document(Element):
+        def __init__(self):
+            Element.__init__(self, "DOCUMENT_ROOT")
+
+    class DocumentFragment(Element):
+        def __init__(self):
+            Element.__init__(self, "DOCUMENT_FRAGMENT")
+
+    def testSerializer(element):
+        rv = []
+
+        def serializeElement(element, indent=0):
+            if not(hasattr(element, "tag")):
+                element = element.getroot()
+            if element.tag == "<!DOCTYPE>":
+                if element.get("publicId") or element.get("systemId"):
+                    publicId = element.get("publicId") or ""
+                    systemId = element.get("systemId") or ""
+                    rv.append("""<!DOCTYPE %s "%s" "%s">""" %
+                              (element.text, publicId, systemId))
+                else:
+                    rv.append("<!DOCTYPE %s>" % (element.text,))
+            elif element.tag == "DOCUMENT_ROOT":
+                rv.append("#document")
+                if element.text is not None:
+                    rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
+                if element.tail is not None:
+                    raise TypeError("Document node cannot have tail")
+                if hasattr(element, "attrib") and len(element.attrib):
+                    raise TypeError("Document node cannot have attributes")
+            elif element.tag == ElementTreeCommentType:
+                rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
+            else:
+                assert isinstance(element.tag, text_type), \
+                    "Expected unicode, got %s, %s" % (type(element.tag), element.tag)
+                nsmatch = tag_regexp.match(element.tag)
+
+                if nsmatch is None:
+                    name = element.tag
+                else:
+                    ns, name = nsmatch.groups()
+                    prefix = constants.prefixes[ns]
+                    name = "%s %s" % (prefix, name)
+                rv.append("|%s<%s>" % (' ' * indent, name))
+
+                if hasattr(element, "attrib"):
+                    attributes = []
+                    for name, value in element.attrib.items():
+                        nsmatch = tag_regexp.match(name)
+                        if nsmatch is not None:
+                            ns, name = nsmatch.groups()
+                            prefix = constants.prefixes[ns]
+                            attr_string = "%s %s" % (prefix, name)
+                        else:
+                            attr_string = name
+                        attributes.append((attr_string, value))
+
+                    for name, value in sorted(attributes):
+                        rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
+                if element.text:
+                    rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
+            indent += 2
+            for child in element:
+                serializeElement(child, indent)
+            if element.tail:
+                rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
+        serializeElement(element, 0)
+
+        return "\n".join(rv)
+
+    def tostring(element):  # pylint:disable=unused-variable
+        """Serialize an element and its child nodes to a string"""
+        rv = []
+        filter = _ihatexml.InfosetFilter()
+
+        def serializeElement(element):
+            if isinstance(element, ElementTree.ElementTree):
+                element = element.getroot()
+
+            if element.tag == "<!DOCTYPE>":
+                if element.get("publicId") or element.get("systemId"):
+                    publicId = element.get("publicId") or ""
+                    systemId = element.get("systemId") or ""
+                    rv.append("""<!DOCTYPE %s PUBLIC "%s" "%s">""" %
+                              (element.text, publicId, systemId))
+                else:
+                    rv.append("<!DOCTYPE %s>" % (element.text,))
+            elif element.tag == "DOCUMENT_ROOT":
+                if element.text is not None:
+                    rv.append(element.text)
+                if element.tail is not None:
+                    raise TypeError("Document node cannot have tail")
+                if hasattr(element, "attrib") and len(element.attrib):
+                    raise TypeError("Document node cannot have attributes")
+
+                for child in element:
+                    serializeElement(child)
+
+            elif element.tag == ElementTreeCommentType:
+                rv.append("<!--%s-->" % (element.text,))
+            else:
+                # This is assumed to be an ordinary element
+                if not element.attrib:
+                    rv.append("<%s>" % (filter.fromXmlName(element.tag),))
+                else:
+                    attr = " ".join(["%s=\"%s\"" % (
+                        filter.fromXmlName(name), value)
+                        for name, value in element.attrib.items()])
+                    rv.append("<%s %s>" % (element.tag, attr))
+                if element.text:
+                    rv.append(element.text)
+
+                for child in element:
+                    serializeElement(child)
+
+                rv.append("</%s>" % (element.tag,))
+
+            if element.tail:
+                rv.append(element.tail)
+
+        serializeElement(element)
+
+        return "".join(rv)
+
+    class TreeBuilder(base.TreeBuilder):  # pylint:disable=unused-variable
+        documentClass = Document
+        doctypeClass = DocumentType
+        elementClass = Element
+        commentClass = Comment
+        fragmentClass = DocumentFragment
+        implementation = ElementTreeImplementation
+
+        def testSerializer(self, element):
+            return testSerializer(element)
+
+        def getDocument(self):
+            if fullTree:
+                return self.document._element
+            else:
+                if self.defaultNamespace is not None:
+                    return self.document._element.find(
+                        "{%s}html" % self.defaultNamespace)
+                else:
+                    return self.document._element.find("html")
+
+        def getFragment(self):
+            return base.TreeBuilder.getFragment(self)._element
+
+    return locals()
+
+
+getETreeModule = moduleFactoryFactory(getETreeBuilder)
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca12a99cccf98a1be4bc463bce29c402ff634e62
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py
@@ -0,0 +1,366 @@
+"""Module for supporting the lxml.etree library. The idea here is to use as much
+of the native library as possible, without using fragile hacks like custom element
+names that break between releases. The downside of this is that we cannot represent
+all possible trees; specifically the following are known to cause problems:
+
+Text or comments as siblings of the root element
+Doctypes with no name
+
+When any of these things occur, we emit a DataLossWarning
+"""
+
+from __future__ import absolute_import, division, unicode_literals
+# pylint:disable=protected-access
+
+import warnings
+import re
+import sys
+
+from . import base
+from ..constants import DataLossWarning
+from .. import constants
+from . import etree as etree_builders
+from .. import _ihatexml
+
+import lxml.etree as etree
+
+
+fullTree = True
+tag_regexp = re.compile("{([^}]*)}(.*)")
+
+comment_type = etree.Comment("asd").tag
+
+
+class DocumentType(object):
+    def __init__(self, name, publicId, systemId):
+        self.name = name
+        self.publicId = publicId
+        self.systemId = systemId
+
+
+class Document(object):
+    def __init__(self):
+        self._elementTree = None
+        self._childNodes = []
+
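+    # lxml documents cannot have ordinary children appended directly;
+    # siblings of the root element (such as comments) are attached with
+    # lxml's addnext() instead.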
+    def appendChild(self, element):
+        self._elementTree.getroot().addnext(element._element)
+
+    def _getChildNodes(self):
+        return self._childNodes
+
+    childNodes = property(_getChildNodes)
+
+
+def testSerializer(element):
+    rv = []
+    infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
+
+    def serializeElement(element, indent=0):
+        if not hasattr(element, "tag"):
+            if hasattr(element, "getroot"):
+                # Full tree case
+                rv.append("#document")
+                if element.docinfo.internalDTD:
+                    if not (element.docinfo.public_id or
+                            element.docinfo.system_url):
+                        dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
+                    else:
+                        dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
+                            element.docinfo.root_name,
+                            element.docinfo.public_id,
+                            element.docinfo.system_url)
+                    rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
+                next_element = element.getroot()
+                while next_element.getprevious() is not None:
+                    next_element = next_element.getprevious()
+                while next_element is not None:
+                    serializeElement(next_element, indent + 2)
+                    next_element = next_element.getnext()
+            elif isinstance(element, (str, bytes)):
+                # Text in a fragment
+                assert isinstance(element, str) or sys.version_info[0] == 2
+                rv.append("|%s\"%s\"" % (' ' * indent, element))
+            else:
+                # Fragment case
+                rv.append("#document-fragment")
+                for next_element in element:
+                    serializeElement(next_element, indent + 2)
+        elif element.tag == comment_type:
+            rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
+            if hasattr(element, "tail") and element.tail:
+                rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
+        else:
+            assert isinstance(element, etree._Element)
+            nsmatch = etree_builders.tag_regexp.match(element.tag)
+            if nsmatch is not None:
+                ns = nsmatch.group(1)
+                tag = nsmatch.group(2)
+                prefix = constants.prefixes[ns]
+                rv.append("|%s<%s %s>" % (' ' * indent, prefix,
+                                          infosetFilter.fromXmlName(tag)))
+            else:
+                rv.append("|%s<%s>" % (' ' * indent,
+                                       infosetFilter.fromXmlName(element.tag)))
+
+            if hasattr(element, "attrib"):
+                attributes = []
+                for name, value in element.attrib.items():
+                    nsmatch = tag_regexp.match(name)
+                    if nsmatch is not None:
+                        ns, name = nsmatch.groups()
+                        name = infosetFilter.fromXmlName(name)
+                        prefix = constants.prefixes[ns]
+                        attr_string = "%s %s" % (prefix, name)
+                    else:
+                        attr_string = infosetFilter.fromXmlName(name)
+                    attributes.append((attr_string, value))
+
+                for name, value in sorted(attributes):
+                    rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
+
+            if element.text:
+                rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
+            indent += 2
+            for child in element:
+                serializeElement(child, indent)
+            if hasattr(element, "tail") and element.tail:
+                rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
+    serializeElement(element, 0)
+
+    return "\n".join(rv)
+
+
+def tostring(element):
+    """Serialize an element and its child nodes to a string"""
+    rv = []
+
+    def serializeElement(element):
+        if not hasattr(element, "tag"):
+            if element.docinfo.internalDTD:
+                if element.docinfo.doctype:
+                    dtd_str = element.docinfo.doctype
+                else:
+                    dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
+                rv.append(dtd_str)
+            serializeElement(element.getroot())
+
+        elif element.tag == comment_type:
+            rv.append("<!--%s-->" % (element.text,))
+
+        else:
+            # This is assumed to be an ordinary element
+            if not element.attrib:
+                rv.append("<%s>" % (element.tag,))
+            else:
+                attr = " ".join(["%s=\"%s\"" % (name, value)
+                                 for name, value in element.attrib.items()])
+                rv.append("<%s %s>" % (element.tag, attr))
+            if element.text:
+                rv.append(element.text)
+
+            for child in element:
+                serializeElement(child)
+
+            rv.append("</%s>" % (element.tag,))
+
+        if hasattr(element, "tail") and element.tail:
+            rv.append(element.tail)
+
+    serializeElement(element)
+
+    return "".join(rv)
+
+
+class TreeBuilder(base.TreeBuilder):
+    documentClass = Document
+    doctypeClass = DocumentType
+    elementClass = None
+    commentClass = None
+    fragmentClass = Document
+    implementation = etree
+
+    def __init__(self, namespaceHTMLElements, fullTree=False):
+        builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
+        infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
+        self.namespaceHTMLElements = namespaceHTMLElements
+
+        class Attributes(dict):
+            def __init__(self, element, value=None):
+                if value is None:
+                    value = {}
+                self._element = element
+                dict.__init__(self, value)  # pylint:disable=non-parent-init-called
+                for key, value in self.items():
+                    if isinstance(key, tuple):
+                        name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
+                    else:
+                        name = infosetFilter.coerceAttribute(key)
+                    self._element._element.attrib[name] = value
+
+            def __setitem__(self, key, value):
+                dict.__setitem__(self, key, value)
+                if isinstance(key, tuple):
+                    name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
+                else:
+                    name = infosetFilter.coerceAttribute(key)
+                self._element._element.attrib[name] = value
+
+        class Element(builder.Element):
+            def __init__(self, name, namespace):
+                name = infosetFilter.coerceElement(name)
+                builder.Element.__init__(self, name, namespace=namespace)
+                self._attributes = Attributes(self)
+
+            def _setName(self, name):
+                self._name = infosetFilter.coerceElement(name)
+                self._element.tag = self._getETreeTag(
+                    self._name, self._namespace)
+
+            def _getName(self):
+                return infosetFilter.fromXmlName(self._name)
+
+            name = property(_getName, _setName)
+
+            def _getAttributes(self):
+                return self._attributes
+
+            def _setAttributes(self, attributes):
+                self._attributes = Attributes(self, attributes)
+
+            attributes = property(_getAttributes, _setAttributes)
+
+            def insertText(self, data, insertBefore=None):
+                data = infosetFilter.coerceCharacters(data)
+                builder.Element.insertText(self, data, insertBefore)
+
+            def appendChild(self, child):
+                builder.Element.appendChild(self, child)
+
+        class Comment(builder.Comment):
+            def __init__(self, data):
+                data = infosetFilter.coerceComment(data)
+                builder.Comment.__init__(self, data)
+
+            def _setData(self, data):
+                data = infosetFilter.coerceComment(data)
+                self._element.text = data
+
+            def _getData(self):
+                return self._element.text
+
+            data = property(_getData, _setData)
+
+        self.elementClass = Element
+        self.commentClass = Comment
+        # self.fragmentClass = builder.DocumentFragment
+        base.TreeBuilder.__init__(self, namespaceHTMLElements)
+
+    def reset(self):
+        base.TreeBuilder.reset(self)
+        self.insertComment = self.insertCommentInitial
+        self.initial_comments = []
+        self.doctype = None
+
+    def testSerializer(self, element):
+        return testSerializer(element)
+
+    def getDocument(self):
+        if fullTree:
+            return self.document._elementTree
+        else:
+            return self.document._elementTree.getroot()
+
+    def getFragment(self):
+        fragment = []
+        element = self.openElements[0]._element
+        if element.text:
+            fragment.append(element.text)
+        fragment.extend(list(element))
+        if element.tail:
+            fragment.append(element.tail)
+        return fragment
+
+    def insertDoctype(self, token):
+        name = token["name"]
+        publicId = token["publicId"]
+        systemId = token["systemId"]
+
+        if not name:
+            warnings.warn("lxml cannot represent empty doctype", DataLossWarning)
+            self.doctype = None
+        else:
+            coercedName = self.infosetFilter.coerceElement(name)
+            if coercedName != name:
+                warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning)
+
+            doctype = self.doctypeClass(coercedName, publicId, systemId)
+            self.doctype = doctype
+
+    def insertCommentInitial(self, data, parent=None):
+        assert parent is None or parent is self.document
+        assert self.document._elementTree is None
+        self.initial_comments.append(data)
+
+    def insertCommentMain(self, data, parent=None):
+        if (parent == self.document and
+                self.document._elementTree.getroot()[-1].tag == comment_type):
+            warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
+        super(TreeBuilder, self).insertComment(data, parent)
+
+    def insertRoot(self, token):
+        # Because of the way libxml2 works, it doesn't seem to be possible to
+        # alter information like the doctype after the tree has been parsed.
+        # Therefore we need to use the built-in parser to create our initial
+        # tree, after which we can add elements like normal
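+        # For a plain <!DOCTYPE html>, for instance, the string built below
+        # is '<!DOCTYPE html><THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>'; the
+        # placeholder root is renamed to the real root tag further down.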
+        docStr = ""
+        if self.doctype:
+            assert self.doctype.name
+            docStr += "<!DOCTYPE %s" % self.doctype.name
+            if (self.doctype.publicId is not None or
+                    self.doctype.systemId is not None):
+                docStr += (' PUBLIC "%s" ' %
+                           (self.infosetFilter.coercePubid(self.doctype.publicId or "")))
+                if self.doctype.systemId:
+                    sysid = self.doctype.systemId
+                    if sysid.find("'") >= 0 and sysid.find('"') >= 0:
+                        warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning)
+                        sysid = sysid.replace("'", 'U00027')
+                    if sysid.find("'") >= 0:
+                        docStr += '"%s"' % sysid
+                    else:
+                        docStr += "'%s'" % sysid
+                else:
+                    docStr += "''"
+            docStr += ">"
+            if self.doctype.name != token["name"]:
+                warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning)
+        docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
+        root = etree.fromstring(docStr)
+
+        # Append the initial comments:
+        for comment_token in self.initial_comments:
+            comment = self.commentClass(comment_token["data"])
+            root.addprevious(comment._element)
+
+        # Create the root document and add the ElementTree to it
+        self.document = self.documentClass()
+        self.document._elementTree = root.getroottree()
+
+        # Give the root element the right name
+        name = token["name"]
+        namespace = token.get("namespace", self.defaultNamespace)
+        if namespace is None:
+            etree_tag = name
+        else:
+            etree_tag = "{%s}%s" % (namespace, name)
+        root.tag = etree_tag
+
+        # Add the root element to the internal child/open data structures
+        root_element = self.elementClass(name, namespace)
+        root_element._element = root
+        self.document._childNodes.append(root_element)
+        self.openElements.append(root_element)
+
+        # Reset to the default insert comment function
+        self.insertComment = self.insertCommentMain
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bec2076f3f60e644cdd3265d8e113ca17149dfd
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py
@@ -0,0 +1,154 @@
+"""A collection of modules for iterating through different kinds of
+tree, generating tokens identical to those produced by the tokenizer
+module.
+
+To create a tree walker for a new type of tree, you need to
+implement a tree walker object (called TreeWalker by convention) that
+implements a 'serialize' method taking a tree as sole argument and
+returning an iterator generating tokens.
+"""
+
+from __future__ import absolute_import, division, unicode_literals
+
+from .. import constants
+from .._utils import default_etree
+
+__all__ = ["getTreeWalker", "pprint"]
+
+treeWalkerCache = {}
+
+
+def getTreeWalker(treeType, implementation=None, **kwargs):
+    """Get a TreeWalker class for various types of tree with built-in support
+
+    :arg str treeType: the name of the tree type required (case-insensitive).
+        Supported values are:
+
+        * "dom": The xml.dom.minidom DOM implementation
+        * "etree": A generic walker for tree implementations exposing an
+          elementtree-like interface (known to work with ElementTree,
+          cElementTree and lxml.etree).
+        * "lxml": Optimized walker for lxml.etree
+        * "genshi": a Genshi stream
+
+    :arg implementation: A module implementing the tree type e.g.
+        xml.etree.ElementTree or cElementTree (Currently applies to the "etree"
+        tree type only).
+
+    :arg kwargs: keyword arguments passed to the etree walker--for other
+        walkers, this has no effect
+
+    :returns: a TreeWalker class
+
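+    Example (a minimal sketch, assuming the vendored package is importable
+    as ``pip._vendor.html5lib``):
+
+        >>> from pip._vendor import html5lib
+        >>> TreeWalker = html5lib.getTreeWalker("etree")
+        >>> tree = html5lib.parse("<p>hello</p>", treebuilder="etree")
+        >>> walker = TreeWalker(tree)  # iterating it yields token dicts
+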
+    """
+
+    treeType = treeType.lower()
+    if treeType not in treeWalkerCache:
+        if treeType == "dom":
+            from . import dom
+            treeWalkerCache[treeType] = dom.TreeWalker
+        elif treeType == "genshi":
+            from . import genshi
+            treeWalkerCache[treeType] = genshi.TreeWalker
+        elif treeType == "lxml":
+            from . import etree_lxml
+            treeWalkerCache[treeType] = etree_lxml.TreeWalker
+        elif treeType == "etree":
+            from . import etree
+            if implementation is None:
+                implementation = default_etree
+            # XXX: NEVER cache here, caching is done in the etree submodule
+            return etree.getETreeModule(implementation, **kwargs).TreeWalker
+    return treeWalkerCache.get(treeType)
+
+
+def concatenateCharacterTokens(tokens):
+    pendingCharacters = []
+    for token in tokens:
+        type = token["type"]
+        if type in ("Characters", "SpaceCharacters"):
+            pendingCharacters.append(token["data"])
+        else:
+            if pendingCharacters:
+                yield {"type": "Characters", "data": "".join(pendingCharacters)}
+                pendingCharacters = []
+            yield token
+    if pendingCharacters:
+        yield {"type": "Characters", "data": "".join(pendingCharacters)}
+
+
+def pprint(walker):
+    """Pretty printer for tree walkers
+
+    Takes a TreeWalker instance and pretty prints the output of walking the tree.
+
+    :arg walker: a TreeWalker instance
+
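+    Example (a sketch; the exact output depends on the parsed document):
+
+        >>> from pip._vendor import html5lib
+        >>> tree = html5lib.parse("<p>hi</p>", treebuilder="etree")
+        >>> walker = html5lib.getTreeWalker("etree")(tree)
+        >>> print(pprint(walker))  # doctest: +SKIP
+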
+    """
+    output = []
+    indent = 0
+    for token in concatenateCharacterTokens(walker):
+        type = token["type"]
+        if type in ("StartTag", "EmptyTag"):
+            # tag name
+            if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
+                if token["namespace"] in constants.prefixes:
+                    ns = constants.prefixes[token["namespace"]]
+                else:
+                    ns = token["namespace"]
+                name = "%s %s" % (ns, token["name"])
+            else:
+                name = token["name"]
+            output.append("%s<%s>" % (" " * indent, name))
+            indent += 2
+            # attributes (sorted for consistent ordering)
+            attrs = token["data"]
+            for (namespace, localname), value in sorted(attrs.items()):
+                if namespace:
+                    if namespace in constants.prefixes:
+                        ns = constants.prefixes[namespace]
+                    else:
+                        ns = namespace
+                    name = "%s %s" % (ns, localname)
+                else:
+                    name = localname
+                output.append("%s%s=\"%s\"" % (" " * indent, name, value))
+            # self-closing
+            if type == "EmptyTag":
+                indent -= 2
+
+        elif type == "EndTag":
+            indent -= 2
+
+        elif type == "Comment":
+            output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
+
+        elif type == "Doctype":
+            if token["name"]:
+                if token["publicId"]:
+                    output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
+                                  (" " * indent,
+                                   token["name"],
+                                   token["publicId"],
+                                   token["systemId"] if token["systemId"] else ""))
+                elif token["systemId"]:
+                    output.append("""%s<!DOCTYPE %s "" "%s">""" %
+                                  (" " * indent,
+                                   token["name"],
+                                   token["systemId"]))
+                else:
+                    output.append("%s<!DOCTYPE %s>" % (" " * indent,
+                                                       token["name"]))
+            else:
+                output.append("%s<!DOCTYPE >" % (" " * indent,))
+
+        elif type == "Characters":
+            output.append("%s\"%s\"" % (" " * indent, token["data"]))
+
+        elif type == "SpaceCharacters":
+            assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
+
+        else:
+            raise ValueError("Unknown token type, %s" % type)
+
+    return "\n".join(output)
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/base.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..80c474c4e939c149a22e811a5a1a5419313b7cc7
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/base.py
@@ -0,0 +1,252 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from xml.dom import Node
+from ..constants import namespaces, voidElements, spaceCharacters
+
+__all__ = ["DOCUMENT", "DOCTYPE", "TEXT", "ELEMENT", "COMMENT", "ENTITY", "UNKNOWN",
+           "TreeWalker", "NonRecursiveTreeWalker"]
+
+DOCUMENT = Node.DOCUMENT_NODE
+DOCTYPE = Node.DOCUMENT_TYPE_NODE
+TEXT = Node.TEXT_NODE
+ELEMENT = Node.ELEMENT_NODE
+COMMENT = Node.COMMENT_NODE
+ENTITY = Node.ENTITY_NODE
+UNKNOWN = "<#UNKNOWN#>"
+
+spaceCharacters = "".join(spaceCharacters)
+
+
+class TreeWalker(object):
+    """Walks a tree yielding tokens
+
+    Tokens are dicts that all have a ``type`` field specifying the type of the
+    token.
+
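+    For example, a start tag token has roughly the shape
+    ``{"type": "StartTag", "name": "p", "namespace": ..., "data": {...}}``
+    (compare the helper methods below).
+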
+    """
+    def __init__(self, tree):
+        """Creates a TreeWalker
+
+        :arg tree: the tree to walk
+
+        """
+        self.tree = tree
+
+    def __iter__(self):
+        raise NotImplementedError
+
+    def error(self, msg):
+        """Generates an error token with the given message
+
+        :arg msg: the error message
+
+        :returns: SerializeError token
+
+        """
+        return {"type": "SerializeError", "data": msg}
+
+    def emptyTag(self, namespace, name, attrs, hasChildren=False):
+        """Generates an EmptyTag token
+
+        :arg namespace: the namespace of the token--can be ``None``
+
+        :arg name: the name of the element
+
+        :arg attrs: the attributes of the element as a dict
+
+        :arg hasChildren: whether or not to yield a SerializeError token
+            because this tag shouldn't have children
+
+        :returns: EmptyTag token
+
+        """
+        yield {"type": "EmptyTag", "name": name,
+               "namespace": namespace,
+               "data": attrs}
+        if hasChildren:
+            yield self.error("Void element has children")
+
+    def startTag(self, namespace, name, attrs):
+        """Generates a StartTag token
+
+        :arg namespace: the namespace of the token--can be ``None``
+
+        :arg name: the name of the element
+
+        :arg attrs: the attributes of the element as a dict
+
+        :returns: StartTag token
+
+        """
+        return {"type": "StartTag",
+                "name": name,
+                "namespace": namespace,
+                "data": attrs}
+
+    def endTag(self, namespace, name):
+        """Generates an EndTag token
+
+        :arg namespace: the namespace of the token--can be ``None``
+
+        :arg name: the name of the element
+
+        :returns: EndTag token
+
+        """
+        return {"type": "EndTag",
+                "name": name,
+                "namespace": namespace}
+
+    def text(self, data):
+        """Generates SpaceCharacters and Characters tokens
+
+        Depending on what's in the data, this generates one or more
+        ``SpaceCharacters`` and ``Characters`` tokens.
+
+        For example:
+
+            >>> from html5lib.treewalkers.base import TreeWalker
+            >>> # Give it an empty tree just so it instantiates
+            >>> walker = TreeWalker([])
+            >>> list(walker.text(''))
+            []
+            >>> list(walker.text('  '))
+            [{'type': 'SpaceCharacters', 'data': '  '}]
+            >>> list(walker.text(' abc '))  # doctest: +NORMALIZE_WHITESPACE
+            [{'type': 'SpaceCharacters', 'data': ' '},
+            {'type': 'Characters', 'data': 'abc'},
+            {'type': 'SpaceCharacters', 'data': ' '}]
+
+        :arg data: the text data
+
+        :returns: one or more ``SpaceCharacters`` and ``Characters`` tokens
+
+        """
+        middle = data.lstrip(spaceCharacters)
+        left = data[:len(data) - len(middle)]
+        if left:
+            yield {"type": "SpaceCharacters", "data": left}
+        data = middle
+        middle = data.rstrip(spaceCharacters)
+        right = data[len(middle):]
+        if middle:
+            yield {"type": "Characters", "data": middle}
+        if right:
+            yield {"type": "SpaceCharacters", "data": right}
+
+    def comment(self, data):
+        """Generates a Comment token
+
+        :arg data: the comment
+
+        :returns: Comment token
+
+        """
+        return {"type": "Comment", "data": data}
+
+    def doctype(self, name, publicId=None, systemId=None):
+        """Generates a Doctype token
+
+        :arg name: the doctype name
+
+        :arg publicId: the doctype public identifier, if any
+
+        :arg systemId: the doctype system identifier, if any
+
+        :returns: the Doctype token
+
+        """
+        return {"type": "Doctype",
+                "name": name,
+                "publicId": publicId,
+                "systemId": systemId}
+
+    def entity(self, name):
+        """Generates an Entity token
+
+        :arg name: the entity name
+
+        :returns: an Entity token
+
+        """
+        return {"type": "Entity", "name": name}
+
+    def unknown(self, nodeType):
+        """Handles unknown node types"""
+        return self.error("Unknown node type: " + nodeType)
+
+
+class NonRecursiveTreeWalker(TreeWalker):
+    def getNodeDetails(self, node):
+        raise NotImplementedError
+
+    def getFirstChild(self, node):
+        raise NotImplementedError
+
+    def getNextSibling(self, node):
+        raise NotImplementedError
+
+    def getParentNode(self, node):
+        raise NotImplementedError
+
+    def __iter__(self):
+        currentNode = self.tree
+        while currentNode is not None:
+            details = self.getNodeDetails(currentNode)
+            type, details = details[0], details[1:]
+            hasChildren = False
+
+            if type == DOCTYPE:
+                yield self.doctype(*details)
+
+            elif type == TEXT:
+                for token in self.text(*details):
+                    yield token
+
+            elif type == ELEMENT:
+                namespace, name, attributes, hasChildren = details
+                if (not namespace or namespace == namespaces["html"]) and name in voidElements:
+                    for token in self.emptyTag(namespace, name, attributes,
+                                               hasChildren):
+                        yield token
+                    hasChildren = False
+                else:
+                    yield self.startTag(namespace, name, attributes)
+
+            elif type == COMMENT:
+                yield self.comment(details[0])
+
+            elif type == ENTITY:
+                yield self.entity(details[0])
+
+            elif type == DOCUMENT:
+                hasChildren = True
+
+            else:
+                yield self.unknown(details[0])
+
+            if hasChildren:
+                firstChild = self.getFirstChild(currentNode)
+            else:
+                firstChild = None
+
+            if firstChild is not None:
+                currentNode = firstChild
+            else:
+                while currentNode is not None:
+                    details = self.getNodeDetails(currentNode)
+                    type, details = details[0], details[1:]
+                    if type == ELEMENT:
+                        namespace, name, attributes, hasChildren = details
+                        if (namespace and namespace != namespaces["html"]) or name not in voidElements:
+                            yield self.endTag(namespace, name)
+                    if self.tree is currentNode:
+                        currentNode = None
+                        break
+                    nextSibling = self.getNextSibling(currentNode)
+                    if nextSibling is not None:
+                        currentNode = nextSibling
+                        break
+                    else:
+                        currentNode = self.getParentNode(currentNode)
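A minimal sketch (not part of the vendored file) of how the NonRecursiveTreeWalker hooks above fit together; the dict-based node format, the make() helper, and the DictWalker class are all invented here for illustration:

    from pip._vendor.html5lib.treewalkers import base

    def make(name, *children):
        # Toy tree: plain dicts with parent links so siblings can be found.
        node = {"name": name, "children": list(children), "parent": None}
        for child in node["children"]:
            child["parent"] = node
        return node

    class DictWalker(base.NonRecursiveTreeWalker):
        def getNodeDetails(self, node):
            if node["name"] == "#document":
                return (base.DOCUMENT,)
            return (base.ELEMENT, None, node["name"], {}, bool(node["children"]))

        def getFirstChild(self, node):
            return node["children"][0] if node["children"] else None

        def getNextSibling(self, node):
            siblings = node["parent"]["children"]
            i = siblings.index(node)
            return siblings[i + 1] if i + 1 < len(siblings) else None

        def getParentNode(self, node):
            return node["parent"]

    tree = make("#document", make("div", make("p"), make("br")))
    for token in DictWalker(tree):
        print(token)
    # Token types in order: StartTag div, StartTag p, EndTag p,
    # EmptyTag br (void element), EndTag div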
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/dom.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/dom.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0c89b001fd3b60511734c31e452c1d2053468d0
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/dom.py
@@ -0,0 +1,43 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from xml.dom import Node
+
+from . import base
+
+
+class TreeWalker(base.NonRecursiveTreeWalker):
+    def getNodeDetails(self, node):
+        if node.nodeType == Node.DOCUMENT_TYPE_NODE:
+            return base.DOCTYPE, node.name, node.publicId, node.systemId
+
+        elif node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
+            return base.TEXT, node.nodeValue
+
+        elif node.nodeType == Node.ELEMENT_NODE:
+            attrs = {}
+            for attr in list(node.attributes.keys()):
+                attr = node.getAttributeNode(attr)
+                if attr.namespaceURI:
+                    attrs[(attr.namespaceURI, attr.localName)] = attr.value
+                else:
+                    attrs[(None, attr.name)] = attr.value
+            return (base.ELEMENT, node.namespaceURI, node.nodeName,
+                    attrs, node.hasChildNodes())
+
+        elif node.nodeType == Node.COMMENT_NODE:
+            return base.COMMENT, node.nodeValue
+
+        elif node.nodeType in (Node.DOCUMENT_NODE, Node.DOCUMENT_FRAGMENT_NODE):
+            return (base.DOCUMENT,)
+
+        else:
+            return base.UNKNOWN, node.nodeType
+
+    def getFirstChild(self, node):
+        return node.firstChild
+
+    def getNextSibling(self, node):
+        return node.nextSibling
+
+    def getParentNode(self, node):
+        return node.parentNode
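A hedged usage sketch (the import path assumes the vendored copy above): the DOM walker can be driven directly from an xml.dom.minidom document.

    from xml.dom.minidom import parseString
    from pip._vendor.html5lib.treewalkers.dom import TreeWalker

    doc = parseString("<p class='x'>hi<br/></p>")
    for token in TreeWalker(doc):
        print(token)
    # Token types in order: StartTag p, Characters hi, EmptyTag br, EndTag p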
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.py
new file mode 100644
index 0000000000000000000000000000000000000000..95fc0c170301532cf2b499cd1697fe1179ae80c5
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.py
@@ -0,0 +1,130 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from collections import OrderedDict
+import re
+
+from pip._vendor.six import string_types
+
+from . import base
+from .._utils import moduleFactoryFactory
+
+tag_regexp = re.compile("{([^}]*)}(.*)")
+
+
+def getETreeBuilder(ElementTreeImplementation):
+    ElementTree = ElementTreeImplementation
+    ElementTreeCommentType = ElementTree.Comment("asd").tag
+
+    class TreeWalker(base.NonRecursiveTreeWalker):  # pylint:disable=unused-variable
+        """Given the particular ElementTree representation, this implementation,
+        to avoid using recursion, returns "nodes" as tuples with the following
+        content:
+
+        1. The current element
+
+        2. The index of the element relative to its parent
+
+        3. A stack of ancestor elements
+
+        4. A flag "text", "tail" or None to indicate if the current node is a
+           text node; either the text or tail of the current element (1)
+        """
+        def getNodeDetails(self, node):
+            if isinstance(node, tuple):  # It might be the root Element
+                elt, _, _, flag = node
+                if flag in ("text", "tail"):
+                    return base.TEXT, getattr(elt, flag)
+                else:
+                    node = elt
+
+            if not(hasattr(node, "tag")):
+                node = node.getroot()
+
+            if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
+                return (base.DOCUMENT,)
+
+            elif node.tag == "<!DOCTYPE>":
+                return (base.DOCTYPE, node.text,
+                        node.get("publicId"), node.get("systemId"))
+
+            elif node.tag == ElementTreeCommentType:
+                return base.COMMENT, node.text
+
+            else:
+                assert isinstance(node.tag, string_types), type(node.tag)
+                # This is assumed to be an ordinary element
+                match = tag_regexp.match(node.tag)
+                if match:
+                    namespace, tag = match.groups()
+                else:
+                    namespace = None
+                    tag = node.tag
+                attrs = OrderedDict()
+                for name, value in list(node.attrib.items()):
+                    match = tag_regexp.match(name)
+                    if match:
+                        attrs[(match.group(1), match.group(2))] = value
+                    else:
+                        attrs[(None, name)] = value
+                return (base.ELEMENT, namespace, tag,
+                        attrs, len(node) or node.text)
+
+        def getFirstChild(self, node):
+            if isinstance(node, tuple):
+                element, key, parents, flag = node
+            else:
+                element, key, parents, flag = node, None, [], None
+
+            if flag in ("text", "tail"):
+                return None
+            else:
+                if element.text:
+                    return element, key, parents, "text"
+                elif len(element):
+                    parents.append(element)
+                    return element[0], 0, parents, None
+                else:
+                    return None
+
+        def getNextSibling(self, node):
+            if isinstance(node, tuple):
+                element, key, parents, flag = node
+            else:
+                return None
+
+            if flag == "text":
+                if len(element):
+                    parents.append(element)
+                    return element[0], 0, parents, None
+                else:
+                    return None
+            else:
+                if element.tail and flag != "tail":
+                    return element, key, parents, "tail"
+                elif key < len(parents[-1]) - 1:
+                    return parents[-1][key + 1], key + 1, parents, None
+                else:
+                    return None
+
+        def getParentNode(self, node):
+            if isinstance(node, tuple):
+                element, key, parents, flag = node
+            else:
+                return None
+
+            if flag == "text":
+                if not parents:
+                    return element
+                else:
+                    return element, key, parents, None
+            else:
+                parent = parents.pop()
+                if not parents:
+                    return parent
+                else:
+                    assert list(parents[-1]).count(parent) == 1
+                    return parent, list(parents[-1]).index(parent), parents, None
+
+    return locals()
+
+getETreeModule = moduleFactoryFactory(getETreeBuilder)
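A hedged sketch of how getETreeModule is used: the factory takes an ElementTree implementation and returns a module-like object exposing the TreeWalker built above (walker_mod is a name chosen here for the demo).

    import xml.etree.ElementTree as ElementTree
    from pip._vendor.html5lib.treewalkers import etree

    walker_mod = etree.getETreeModule(ElementTree)
    root = ElementTree.fromstring("<div><span>hi</span></div>")
    for token in walker_mod.TreeWalker(root):
        print(token)
    # Token types in order: StartTag div, StartTag span, Characters hi,
    # EndTag span, EndTag div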
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py
new file mode 100644
index 0000000000000000000000000000000000000000..e81ddf33b2eff3b278effd4602ef1511311ee4f6
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/etree_lxml.py
@@ -0,0 +1,213 @@
+from __future__ import absolute_import, division, unicode_literals
+from pip._vendor.six import text_type
+
+from lxml import etree
+from ..treebuilders.etree import tag_regexp
+
+from . import base
+
+from .. import _ihatexml
+
+
+def ensure_str(s):
+    if s is None:
+        return None
+    elif isinstance(s, text_type):
+        return s
+    else:
+        return s.decode("ascii", "strict")
+
+
+class Root(object):
+    def __init__(self, et):
+        self.elementtree = et
+        self.children = []
+
+        try:
+            if et.docinfo.internalDTD:
+                self.children.append(Doctype(self,
+                                             ensure_str(et.docinfo.root_name),
+                                             ensure_str(et.docinfo.public_id),
+                                             ensure_str(et.docinfo.system_url)))
+        except AttributeError:
+            pass
+
+        try:
+            node = et.getroot()
+        except AttributeError:
+            node = et
+
+        while node.getprevious() is not None:
+            node = node.getprevious()
+        while node is not None:
+            self.children.append(node)
+            node = node.getnext()
+
+        self.text = None
+        self.tail = None
+
+    def __getitem__(self, key):
+        return self.children[key]
+
+    def getnext(self):
+        return None
+
+    def __len__(self):
+        return 1
+
+
+class Doctype(object):
+    def __init__(self, root_node, name, public_id, system_id):
+        self.root_node = root_node
+        self.name = name
+        self.public_id = public_id
+        self.system_id = system_id
+
+        self.text = None
+        self.tail = None
+
+    def getnext(self):
+        return self.root_node.children[1]
+
+
+class FragmentRoot(Root):
+    def __init__(self, children):
+        self.children = [FragmentWrapper(self, child) for child in children]
+        self.text = self.tail = None
+
+    def getnext(self):
+        return None
+
+
+class FragmentWrapper(object):
+    def __init__(self, fragment_root, obj):
+        self.root_node = fragment_root
+        self.obj = obj
+        if hasattr(self.obj, 'text'):
+            self.text = ensure_str(self.obj.text)
+        else:
+            self.text = None
+        if hasattr(self.obj, 'tail'):
+            self.tail = ensure_str(self.obj.tail)
+        else:
+            self.tail = None
+
+    def __getattr__(self, name):
+        return getattr(self.obj, name)
+
+    def getnext(self):
+        siblings = self.root_node.children
+        idx = siblings.index(self)
+        if idx < len(siblings) - 1:
+            return siblings[idx + 1]
+        else:
+            return None
+
+    def __getitem__(self, key):
+        return self.obj[key]
+
+    def __bool__(self):
+        return bool(self.obj)
+
+    def getparent(self):
+        return None
+
+    def __str__(self):
+        return str(self.obj)
+
+    def __unicode__(self):
+        return str(self.obj)
+
+    def __len__(self):
+        return len(self.obj)
+
+
+class TreeWalker(base.NonRecursiveTreeWalker):
+    def __init__(self, tree):
+        # pylint:disable=redefined-variable-type
+        if isinstance(tree, list):
+            self.fragmentChildren = set(tree)
+            tree = FragmentRoot(tree)
+        else:
+            self.fragmentChildren = set()
+            tree = Root(tree)
+        base.NonRecursiveTreeWalker.__init__(self, tree)
+        self.filter = _ihatexml.InfosetFilter()
+
+    def getNodeDetails(self, node):
+        if isinstance(node, tuple):  # Text node
+            node, key = node
+            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
+            return base.TEXT, ensure_str(getattr(node, key))
+
+        elif isinstance(node, Root):
+            return (base.DOCUMENT,)
+
+        elif isinstance(node, Doctype):
+            return base.DOCTYPE, node.name, node.public_id, node.system_id
+
+        elif isinstance(node, FragmentWrapper) and not hasattr(node, "tag"):
+            return base.TEXT, ensure_str(node.obj)
+
+        elif node.tag == etree.Comment:
+            return base.COMMENT, ensure_str(node.text)
+
+        elif node.tag == etree.Entity:
+            return base.ENTITY, ensure_str(node.text)[1:-1]  # strip &;
+
+        else:
+            # This is assumed to be an ordinary element
+            match = tag_regexp.match(ensure_str(node.tag))
+            if match:
+                namespace, tag = match.groups()
+            else:
+                namespace = None
+                tag = ensure_str(node.tag)
+            attrs = {}
+            for name, value in list(node.attrib.items()):
+                name = ensure_str(name)
+                value = ensure_str(value)
+                match = tag_regexp.match(name)
+                if match:
+                    attrs[(match.group(1), match.group(2))] = value
+                else:
+                    attrs[(None, name)] = value
+            return (base.ELEMENT, namespace, self.filter.fromXmlName(tag),
+                    attrs, len(node) > 0 or node.text)
+
+    def getFirstChild(self, node):
+        assert not isinstance(node, tuple), "Text nodes have no children"
+
+        assert len(node) or node.text, "Node has no children"
+        if node.text:
+            return (node, "text")
+        else:
+            return node[0]
+
+    def getNextSibling(self, node):
+        if isinstance(node, tuple):  # Text node
+            node, key = node
+            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
+            if key == "text":
+                # XXX: we cannot use a "bool(node) and node[0] or None" construct here
+                # because node[0] might evaluate to False if it has no child element
+                if len(node):
+                    return node[0]
+                else:
+                    return None
+            else:  # tail
+                return node.getnext()
+
+        return (node, "tail") if node.tail else node.getnext()
+
+    def getParentNode(self, node):
+        if isinstance(node, tuple):  # Text node
+            node, key = node
+            assert key in ("text", "tail"), "Text nodes are text or tail, found %s" % key
+            if key == "text":
+                return node
+            # else: fallback to "normal" processing
+        elif node in self.fragmentChildren:
+            return None
+
+        return node.getparent()
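A hedged usage sketch (this module imports lxml unconditionally, so lxml must be installed): the walker accepts either an lxml tree or a list of fragment nodes.

    from lxml import etree as lxml_etree
    from pip._vendor.html5lib.treewalkers.etree_lxml import TreeWalker

    tree = lxml_etree.fromstring("<div>hi</div>").getroottree()
    for token in TreeWalker(tree):
        print(token)
    # Token types in order: StartTag div, Characters hi, EndTag div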
diff --git a/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py b/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py
new file mode 100644
index 0000000000000000000000000000000000000000..7483be27d4d24f845e56b6954ee63eec730c00aa
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/html5lib/treewalkers/genshi.py
@@ -0,0 +1,69 @@
+from __future__ import absolute_import, division, unicode_literals
+
+from genshi.core import QName
+from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
+from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
+
+from . import base
+
+from ..constants import voidElements, namespaces
+
+
+class TreeWalker(base.TreeWalker):
+    def __iter__(self):
+        # Buffer the events so we can pass in the following one
+        previous = None
+        for event in self.tree:
+            if previous is not None:
+                for token in self.tokens(previous, event):
+                    yield token
+            previous = event
+
+        # Don't forget the final event!
+        if previous is not None:
+            for token in self.tokens(previous, None):
+                yield token
+
+    def tokens(self, event, next):
+        kind, data, _ = event
+        if kind == START:
+            tag, attribs = data
+            name = tag.localname
+            namespace = tag.namespace
+            converted_attribs = {}
+            for k, v in attribs:
+                if isinstance(k, QName):
+                    converted_attribs[(k.namespace, k.localname)] = v
+                else:
+                    converted_attribs[(None, k)] = v
+
+            if namespace == namespaces["html"] and name in voidElements:
+                for token in self.emptyTag(namespace, name, converted_attribs,
+                                           not next or next[0] != END or
+                                           next[1] != tag):
+                    yield token
+            else:
+                yield self.startTag(namespace, name, converted_attribs)
+
+        elif kind == END:
+            name = data.localname
+            namespace = data.namespace
+            if namespace != namespaces["html"] or name not in voidElements:
+                yield self.endTag(namespace, name)
+
+        elif kind == COMMENT:
+            yield self.comment(data)
+
+        elif kind == TEXT:
+            for token in self.text(data):
+                yield token
+
+        elif kind == DOCTYPE:
+            yield self.doctype(*data)
+
+        elif kind in (XML_NAMESPACE, START_NS, END_NS,
+                      START_CDATA, END_CDATA, PI):
+            pass
+
+        else:
+            yield self.unknown(kind)
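A hedged sketch (needs the third-party genshi package): unlike the other walkers, this one consumes a genshi event stream rather than a tree, buffering one event of lookahead so void elements can be matched against their END events.

    from genshi.input import HTML
    from pip._vendor.html5lib.treewalkers.genshi import TreeWalker

    for token in TreeWalker(HTML(u"<p>hi<br></p>")):
        print(token)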
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/__init__.py b/lib/python3.7/site-packages/pip/_vendor/idna/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..847bf9354781fddc44ed2f47055a67c025972fd4
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/idna/__init__.py
@@ -0,0 +1,2 @@
+from .package_data import __version__
+from .core import *
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..76665824059648e5cd8aa3c3b3bc893a053121ec
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e0d55c41e0c1e16d402c0cd454bca15f47a663a
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/codec.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..af16a48433a9b6ced08753d3ec56924379eaf6df
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/core.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/core.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6070993883c0a3a1be9a7a9e6f3a0ef4cb5be830
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/core.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dfb1719b38d24b55940504f08432e047cf468d8f
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/idnadata.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c106f2273f21edbab89a4fc770a9fc70efb43a7d
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/intranges.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9dd58da7ca60c7ee73260a274abecef73dda209
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/package_data.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..330de2b8b10eee91cd0bff2cdd636094e4d35f21
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/idna/__pycache__/uts46data.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/codec.py b/lib/python3.7/site-packages/pip/_vendor/idna/codec.py
new file mode 100644
index 0000000000000000000000000000000000000000..98c65ead146e42b359a11703d719bafe916b1ad7
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/idna/codec.py
@@ -0,0 +1,118 @@
+from .core import encode, decode, alabel, ulabel, IDNAError
+import codecs
+import re
+
+_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')
+
+class Codec(codecs.Codec):
+
+    def encode(self, data, errors='strict'):
+
+        if errors != 'strict':
+            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
+
+        if not data:
+            return "", 0
+
+        return encode(data), len(data)
+
+    def decode(self, data, errors='strict'):
+
+        if errors != 'strict':
+            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
+
+        if not data:
+            return u"", 0
+
+        return decode(data), len(data)
+
+class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
+    def _buffer_encode(self, data, errors, final):
+        if errors != 'strict':
+            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
+
+        if not data:
+            return ("", 0)
+
+        labels = _unicode_dots_re.split(data)
+        trailing_dot = u''
+        if labels:
+            if not labels[-1]:
+                trailing_dot = '.'
+                del labels[-1]
+            elif not final:
+                # Keep potentially unfinished label until the next call
+                del labels[-1]
+                if labels:
+                    trailing_dot = '.'
+
+        result = []
+        size = 0
+        for label in labels:
+            result.append(alabel(label))
+            if size:
+                size += 1
+            size += len(label)
+
+        # Join with U+002E
+        result = ".".join(result) + trailing_dot
+        size += len(trailing_dot)
+        return (result, size)
+
+class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
+    def _buffer_decode(self, data, errors, final):
+        if errors != 'strict':
+            raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
+
+        if not data:
+            return (u"", 0)
+
+        # IDNA allows decoding to operate on Unicode strings, too.
+        if isinstance(data, str):
+            labels = _unicode_dots_re.split(data)
+        else:
+            # Must be ASCII-only bytes; decoding validates that and
+            # yields a text string we can split on.
+            data = data.decode("ascii")
+            labels = data.split(".")
+
+        trailing_dot = u''
+        if labels:
+            if not labels[-1]:
+                trailing_dot = u'.'
+                del labels[-1]
+            elif not final:
+                # Keep potentially unfinished label until the next call
+                del labels[-1]
+                if labels:
+                    trailing_dot = u'.'
+
+        result = []
+        size = 0
+        for label in labels:
+            result.append(ulabel(label))
+            if size:
+                size += 1
+            size += len(label)
+
+        result = u".".join(result) + trailing_dot
+        size += len(trailing_dot)
+        return (result, size)
+
+
+class StreamWriter(Codec, codecs.StreamWriter):
+    pass
+
+class StreamReader(Codec, codecs.StreamReader):
+    pass
+
+def getregentry():
+    return codecs.CodecInfo(
+        name='idna',
+        encode=Codec().encode,
+        decode=Codec().decode,
+        incrementalencoder=IncrementalEncoder,
+        incrementaldecoder=IncrementalDecoder,
+        streamwriter=StreamWriter,
+        streamreader=StreamReader,
+    )
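As vendored here the module only defines getregentry() and never calls codecs.register itself, so a hedged sketch of wiring it up by hand (the 'idna2008' lookup name and _search helper are invented to avoid clashing with the stdlib 'idna' codec):

    import codecs
    from pip._vendor.idna import codec

    def _search(name):
        # Return the CodecInfo only for our chosen lookup name.
        return codec.getregentry() if name == 'idna2008' else None

    codecs.register(_search)
    print(u'ドメイン.テスト'.encode('idna2008'))
    # -> b'xn--eckwd4c7c.xn--zckzah'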
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/compat.py b/lib/python3.7/site-packages/pip/_vendor/idna/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d47f336dbc55ca7141150c14ec1a7924dd0e854
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/idna/compat.py
@@ -0,0 +1,12 @@
+from .core import *
+from .codec import *
+
+def ToASCII(label):
+    return encode(label)
+
+def ToUnicode(label):
+    return decode(label)
+
+def nameprep(s):
+    raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol")
+
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/core.py b/lib/python3.7/site-packages/pip/_vendor/idna/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..104624ad2ddd7b18e255a74dcaa099a84223d959
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/idna/core.py
@@ -0,0 +1,396 @@
+from . import idnadata
+import bisect
+import unicodedata
+import re
+import sys
+from .intranges import intranges_contain
+
+_virama_combining_class = 9
+_alabel_prefix = b'xn--'
+_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')
+
+if sys.version_info[0] == 3:
+    unicode = str
+    unichr = chr
+
+class IDNAError(UnicodeError):
+    """ Base exception for all IDNA-encoding related problems """
+    pass
+
+
+class IDNABidiError(IDNAError):
+    """ Exception when bidirectional requirements are not satisfied """
+    pass
+
+
+class InvalidCodepoint(IDNAError):
+    """ Exception when a disallowed or unallocated codepoint is used """
+    pass
+
+
+class InvalidCodepointContext(IDNAError):
+    """ Exception when the codepoint is not valid in the context it is used """
+    pass
+
+
+def _combining_class(cp):
+    v = unicodedata.combining(unichr(cp))
+    if v == 0:
+        if not unicodedata.name(unichr(cp)):
+            raise ValueError("Unknown character in unicodedata")
+    return v
+
+def _is_script(cp, script):
+    return intranges_contain(ord(cp), idnadata.scripts[script])
+
+def _punycode(s):
+    return s.encode('punycode')
+
+def _unot(s):
+    return 'U+{0:04X}'.format(s)
+
+
+def valid_label_length(label):
+
+    if len(label) > 63:
+        return False
+    return True
+
+
+def valid_string_length(label, trailing_dot):
+
+    if len(label) > (254 if trailing_dot else 253):
+        return False
+    return True
+
+
+def check_bidi(label, check_ltr=False):
+
+    # Bidi rules should only be applied if string contains RTL characters
+    bidi_label = False
+    for (idx, cp) in enumerate(label, 1):
+        direction = unicodedata.bidirectional(cp)
+        if direction == '':
+            # String likely comes from a newer version of Unicode
+            raise IDNABidiError('Unknown directionality in label {0} at position {1}'.format(repr(label), idx))
+        if direction in ['R', 'AL', 'AN']:
+            bidi_label = True
+    if not bidi_label and not check_ltr:
+        return True
+
+    # Bidi rule 1
+    direction = unicodedata.bidirectional(label[0])
+    if direction in ['R', 'AL']:
+        rtl = True
+    elif direction == 'L':
+        rtl = False
+    else:
+        raise IDNABidiError('First codepoint in label {0} must be directionality L, R or AL'.format(repr(label)))
+
+    valid_ending = False
+    number_type = False
+    for (idx, cp) in enumerate(label, 1):
+        direction = unicodedata.bidirectional(cp)
+
+        if rtl:
+            # Bidi rule 2
+            if direction not in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
+                raise IDNABidiError('Invalid direction for codepoint at position {0} in a right-to-left label'.format(idx))
+            # Bidi rule 3
+            if direction in ['R', 'AL', 'EN', 'AN']:
+                valid_ending = True
+            elif direction != 'NSM':
+                valid_ending = False
+            # Bidi rule 4
+            if direction in ['AN', 'EN']:
+                if not number_type:
+                    number_type = direction
+                else:
+                    if number_type != direction:
+                        raise IDNABidiError('Can not mix numeral types in a right-to-left label')
+        else:
+            # Bidi rule 5
+            if direction not in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
+                raise IDNABidiError('Invalid direction for codepoint at position {0} in a left-to-right label'.format(idx))
+            # Bidi rule 6
+            if direction in ['L', 'EN']:
+                valid_ending = True
+            elif direction != 'NSM':
+                valid_ending = False
+
+    if not valid_ending:
+        raise IDNABidiError('Label ends with illegal codepoint directionality')
+
+    return True
+
+
+def check_initial_combiner(label):
+
+    if unicodedata.category(label[0])[0] == 'M':
+        raise IDNAError('Label begins with an illegal combining character')
+    return True
+
+
+def check_hyphen_ok(label):
+
+    if label[2:4] == '--':
+        raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
+    if label[0] == '-' or label[-1] == '-':
+        raise IDNAError('Label must not start or end with a hyphen')
+    return True
+
+
+def check_nfc(label):
+
+    if unicodedata.normalize('NFC', label) != label:
+        raise IDNAError('Label must be in Normalization Form C')
+
+
+def valid_contextj(label, pos):
+
+    cp_value = ord(label[pos])
+
+    if cp_value == 0x200c:
+
+        if pos > 0:
+            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
+                return True
+
+        ok = False
+        for i in range(pos-1, -1, -1):
+            joining_type = idnadata.joining_types.get(ord(label[i]))
+            if joining_type == ord('T'):
+                continue
+            if joining_type in [ord('L'), ord('D')]:
+                ok = True
+                break
+
+        if not ok:
+            return False
+
+        ok = False
+        for i in range(pos+1, len(label)):
+            joining_type = idnadata.joining_types.get(ord(label[i]))
+            if joining_type == ord('T'):
+                continue
+            if joining_type in [ord('R'), ord('D')]:
+                ok = True
+                break
+        return ok
+
+    if cp_value == 0x200d:
+
+        if pos > 0:
+            if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
+                return True
+        return False
+
+    else:
+
+        return False
+
+
+def valid_contexto(label, pos, exception=False):
+
+    cp_value = ord(label[pos])
+
+    if cp_value == 0x00b7:
+        if 0 < pos < len(label)-1:
+            if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c:
+                return True
+        return False
+
+    elif cp_value == 0x0375:
+        if pos < len(label)-1 and len(label) > 1:
+            return _is_script(label[pos + 1], 'Greek')
+        return False
+
+    elif cp_value == 0x05f3 or cp_value == 0x05f4:
+        if pos > 0:
+            return _is_script(label[pos - 1], 'Hebrew')
+        return False
+
+    elif cp_value == 0x30fb:
+        for cp in label:
+            if cp == u'\u30fb':
+                continue
+            if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
+                return True
+        return False
+
+    elif 0x660 <= cp_value <= 0x669:
+        for cp in label:
+            if 0x6f0 <= ord(cp) <= 0x06f9:
+                return False
+        return True
+
+    elif 0x6f0 <= cp_value <= 0x6f9:
+        for cp in label:
+            if 0x660 <= ord(cp) <= 0x0669:
+                return False
+        return True
+
+
+def check_label(label):
+
+    if isinstance(label, (bytes, bytearray)):
+        label = label.decode('utf-8')
+    if len(label) == 0:
+        raise IDNAError('Empty Label')
+
+    check_nfc(label)
+    check_hyphen_ok(label)
+    check_initial_combiner(label)
+
+    for (pos, cp) in enumerate(label):
+        cp_value = ord(cp)
+        if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']):
+            continue
+        elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
+            try:
+                if not valid_contextj(label, pos):
+                    raise InvalidCodepointContext('Joiner {0} not allowed at position {1} in {2}'.format(
+                        _unot(cp_value), pos+1, repr(label)))
+            except ValueError:
+                raise IDNAError('Unknown codepoint adjacent to joiner {0} at position {1} in {2}'.format(
+                    _unot(cp_value), pos+1, repr(label)))
+        elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
+            if not valid_contexto(label, pos):
+                raise InvalidCodepointContext('Codepoint {0} not allowed at position {1} in {2}'.format(_unot(cp_value), pos+1, repr(label)))
+        else:
+            raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label)))
+
+    check_bidi(label)
+
+
+def alabel(label):
+
+    try:
+        label = label.encode('ascii')
+        ulabel(label)
+        if not valid_label_length(label):
+            raise IDNAError('Label too long')
+        return label
+    except UnicodeEncodeError:
+        pass
+
+    if not label:
+        raise IDNAError('No Input')
+
+    label = unicode(label)
+    check_label(label)
+    label = _punycode(label)
+    label = _alabel_prefix + label
+
+    if not valid_label_length(label):
+        raise IDNAError('Label too long')
+
+    return label
+
+
+def ulabel(label):
+
+    if not isinstance(label, (bytes, bytearray)):
+        try:
+            label = label.encode('ascii')
+        except UnicodeEncodeError:
+            check_label(label)
+            return label
+
+    label = label.lower()
+    if label.startswith(_alabel_prefix):
+        label = label[len(_alabel_prefix):]
+    else:
+        check_label(label)
+        return label.decode('ascii')
+
+    label = label.decode('punycode')
+    check_label(label)
+    return label
+
+
+def uts46_remap(domain, std3_rules=True, transitional=False):
+    """Re-map the characters in the string according to UTS46 processing."""
+    from .uts46data import uts46data
+    output = u""
+    try:
+        for pos, char in enumerate(domain):
+            code_point = ord(char)
+            uts46row = uts46data[code_point if code_point < 256 else
+                bisect.bisect_left(uts46data, (code_point, "Z")) - 1]
+            status = uts46row[1]
+            replacement = uts46row[2] if len(uts46row) == 3 else None
+            if (status == "V" or
+                    (status == "D" and not transitional) or
+                    (status == "3" and not std3_rules and replacement is None)):
+                output += char
+            elif replacement is not None and (status == "M" or
+                    (status == "3" and not std3_rules) or
+                    (status == "D" and transitional)):
+                output += replacement
+            elif status != "I":
+                raise IndexError()
+        return unicodedata.normalize("NFC", output)
+    except IndexError:
+        raise InvalidCodepoint(
+            "Codepoint {0} not allowed at position {1} in {2}".format(
+            _unot(code_point), pos + 1, repr(domain)))
+
+
+def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False):
+
+    if isinstance(s, (bytes, bytearray)):
+        s = s.decode("ascii")
+    if uts46:
+        s = uts46_remap(s, std3_rules, transitional)
+    trailing_dot = False
+    result = []
+    if strict:
+        labels = s.split('.')
+    else:
+        labels = _unicode_dots_re.split(s)
+    if not labels or labels == ['']:
+        raise IDNAError('Empty domain')
+    if labels[-1] == '':
+        del labels[-1]
+        trailing_dot = True
+    for label in labels:
+        s = alabel(label)
+        if s:
+            result.append(s)
+        else:
+            raise IDNAError('Empty label')
+    if trailing_dot:
+        result.append(b'')
+    s = b'.'.join(result)
+    if not valid_string_length(s, trailing_dot):
+        raise IDNAError('Domain too long')
+    return s
+
+
+def decode(s, strict=False, uts46=False, std3_rules=False):
+
+    if isinstance(s, (bytes, bytearray)):
+        s = s.decode("ascii")
+    if uts46:
+        s = uts46_remap(s, std3_rules, False)
+    trailing_dot = False
+    result = []
+    if not strict:
+        labels = _unicode_dots_re.split(s)
+    else:
+        labels = s.split(u'.')
+    if not labels or labels == ['']:
+        raise IDNAError('Empty domain')
+    if not labels[-1]:
+        del labels[-1]
+        trailing_dot = True
+    for label in labels:
+        s = ulabel(label)
+        if s:
+            result.append(s)
+        else:
+            raise IDNAError('Empty label')
+    if trailing_dot:
+        result.append(u'')
+    return u'.'.join(result)
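A hedged usage sketch against the vendored package path; the sample domains mirror the examples in the idna project's README.

    from pip._vendor import idna

    print(idna.encode(u'ドメイン.テスト'))
    # -> b'xn--eckwd4c7c.xn--zckzah'
    print(idna.decode(b'xn--eckwd4c7c.xn--zckzah'))
    # -> ドメイン.テスト

    # UTS 46 preprocessing maps forms such as upper case before encoding:
    print(idna.encode(u'Königsgäßchen', uts46=True))
    # -> b'xn--knigsgchen-b4a3dun'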
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/idnadata.py b/lib/python3.7/site-packages/pip/_vendor/idna/idnadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..a80c959d2a7127633cd696f86844d1f356a0bc83
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/idna/idnadata.py
@@ -0,0 +1,1979 @@
+# This file is automatically generated by tools/idna-data
+
+__version__ = "11.0.0"
+scripts = {
+    'Greek': (
+        0x37000000374,
+        0x37500000378,
+        0x37a0000037e,
+        0x37f00000380,
+        0x38400000385,
+        0x38600000387,
+        0x3880000038b,
+        0x38c0000038d,
+        0x38e000003a2,
+        0x3a3000003e2,
+        0x3f000000400,
+        0x1d2600001d2b,
+        0x1d5d00001d62,
+        0x1d6600001d6b,
+        0x1dbf00001dc0,
+        0x1f0000001f16,
+        0x1f1800001f1e,
+        0x1f2000001f46,
+        0x1f4800001f4e,
+        0x1f5000001f58,
+        0x1f5900001f5a,
+        0x1f5b00001f5c,
+        0x1f5d00001f5e,
+        0x1f5f00001f7e,
+        0x1f8000001fb5,
+        0x1fb600001fc5,
+        0x1fc600001fd4,
+        0x1fd600001fdc,
+        0x1fdd00001ff0,
+        0x1ff200001ff5,
+        0x1ff600001fff,
+        0x212600002127,
+        0xab650000ab66,
+        0x101400001018f,
+        0x101a0000101a1,
+        0x1d2000001d246,
+    ),
+    'Han': (
+        0x2e8000002e9a,
+        0x2e9b00002ef4,
+        0x2f0000002fd6,
+        0x300500003006,
+        0x300700003008,
+        0x30210000302a,
+        0x30380000303c,
+        0x340000004db6,
+        0x4e0000009ff0,
+        0xf9000000fa6e,
+        0xfa700000fada,
+        0x200000002a6d7,
+        0x2a7000002b735,
+        0x2b7400002b81e,
+        0x2b8200002cea2,
+        0x2ceb00002ebe1,
+        0x2f8000002fa1e,
+    ),
+    'Hebrew': (
+        0x591000005c8,
+        0x5d0000005eb,
+        0x5ef000005f5,
+        0xfb1d0000fb37,
+        0xfb380000fb3d,
+        0xfb3e0000fb3f,
+        0xfb400000fb42,
+        0xfb430000fb45,
+        0xfb460000fb50,
+    ),
+    'Hiragana': (
+        0x304100003097,
+        0x309d000030a0,
+        0x1b0010001b11f,
+        0x1f2000001f201,
+    ),
+    'Katakana': (
+        0x30a1000030fb,
+        0x30fd00003100,
+        0x31f000003200,
+        0x32d0000032ff,
+        0x330000003358,
+        0xff660000ff70,
+        0xff710000ff9e,
+        0x1b0000001b001,
+    ),
+}
+joining_types = {
+    0x600: 85,
+    0x601: 85,
+    0x602: 85,
+    0x603: 85,
+    0x604: 85,
+    0x605: 85,
+    0x608: 85,
+    0x60b: 85,
+    0x620: 68,
+    0x621: 85,
+    0x622: 82,
+    0x623: 82,
+    0x624: 82,
+    0x625: 82,
+    0x626: 68,
+    0x627: 82,
+    0x628: 68,
+    0x629: 82,
+    0x62a: 68,
+    0x62b: 68,
+    0x62c: 68,
+    0x62d: 68,
+    0x62e: 68,
+    0x62f: 82,
+    0x630: 82,
+    0x631: 82,
+    0x632: 82,
+    0x633: 68,
+    0x634: 68,
+    0x635: 68,
+    0x636: 68,
+    0x637: 68,
+    0x638: 68,
+    0x639: 68,
+    0x63a: 68,
+    0x63b: 68,
+    0x63c: 68,
+    0x63d: 68,
+    0x63e: 68,
+    0x63f: 68,
+    0x640: 67,
+    0x641: 68,
+    0x642: 68,
+    0x643: 68,
+    0x644: 68,
+    0x645: 68,
+    0x646: 68,
+    0x647: 68,
+    0x648: 82,
+    0x649: 68,
+    0x64a: 68,
+    0x66e: 68,
+    0x66f: 68,
+    0x671: 82,
+    0x672: 82,
+    0x673: 82,
+    0x674: 85,
+    0x675: 82,
+    0x676: 82,
+    0x677: 82,
+    0x678: 68,
+    0x679: 68,
+    0x67a: 68,
+    0x67b: 68,
+    0x67c: 68,
+    0x67d: 68,
+    0x67e: 68,
+    0x67f: 68,
+    0x680: 68,
+    0x681: 68,
+    0x682: 68,
+    0x683: 68,
+    0x684: 68,
+    0x685: 68,
+    0x686: 68,
+    0x687: 68,
+    0x688: 82,
+    0x689: 82,
+    0x68a: 82,
+    0x68b: 82,
+    0x68c: 82,
+    0x68d: 82,
+    0x68e: 82,
+    0x68f: 82,
+    0x690: 82,
+    0x691: 82,
+    0x692: 82,
+    0x693: 82,
+    0x694: 82,
+    0x695: 82,
+    0x696: 82,
+    0x697: 82,
+    0x698: 82,
+    0x699: 82,
+    0x69a: 68,
+    0x69b: 68,
+    0x69c: 68,
+    0x69d: 68,
+    0x69e: 68,
+    0x69f: 68,
+    0x6a0: 68,
+    0x6a1: 68,
+    0x6a2: 68,
+    0x6a3: 68,
+    0x6a4: 68,
+    0x6a5: 68,
+    0x6a6: 68,
+    0x6a7: 68,
+    0x6a8: 68,
+    0x6a9: 68,
+    0x6aa: 68,
+    0x6ab: 68,
+    0x6ac: 68,
+    0x6ad: 68,
+    0x6ae: 68,
+    0x6af: 68,
+    0x6b0: 68,
+    0x6b1: 68,
+    0x6b2: 68,
+    0x6b3: 68,
+    0x6b4: 68,
+    0x6b5: 68,
+    0x6b6: 68,
+    0x6b7: 68,
+    0x6b8: 68,
+    0x6b9: 68,
+    0x6ba: 68,
+    0x6bb: 68,
+    0x6bc: 68,
+    0x6bd: 68,
+    0x6be: 68,
+    0x6bf: 68,
+    0x6c0: 82,
+    0x6c1: 68,
+    0x6c2: 68,
+    0x6c3: 82,
+    0x6c4: 82,
+    0x6c5: 82,
+    0x6c6: 82,
+    0x6c7: 82,
+    0x6c8: 82,
+    0x6c9: 82,
+    0x6ca: 82,
+    0x6cb: 82,
+    0x6cc: 68,
+    0x6cd: 82,
+    0x6ce: 68,
+    0x6cf: 82,
+    0x6d0: 68,
+    0x6d1: 68,
+    0x6d2: 82,
+    0x6d3: 82,
+    0x6d5: 82,
+    0x6dd: 85,
+    0x6ee: 82,
+    0x6ef: 82,
+    0x6fa: 68,
+    0x6fb: 68,
+    0x6fc: 68,
+    0x6ff: 68,
+    0x70f: 84,
+    0x710: 82,
+    0x712: 68,
+    0x713: 68,
+    0x714: 68,
+    0x715: 82,
+    0x716: 82,
+    0x717: 82,
+    0x718: 82,
+    0x719: 82,
+    0x71a: 68,
+    0x71b: 68,
+    0x71c: 68,
+    0x71d: 68,
+    0x71e: 82,
+    0x71f: 68,
+    0x720: 68,
+    0x721: 68,
+    0x722: 68,
+    0x723: 68,
+    0x724: 68,
+    0x725: 68,
+    0x726: 68,
+    0x727: 68,
+    0x728: 82,
+    0x729: 68,
+    0x72a: 82,
+    0x72b: 68,
+    0x72c: 82,
+    0x72d: 68,
+    0x72e: 68,
+    0x72f: 82,
+    0x74d: 82,
+    0x74e: 68,
+    0x74f: 68,
+    0x750: 68,
+    0x751: 68,
+    0x752: 68,
+    0x753: 68,
+    0x754: 68,
+    0x755: 68,
+    0x756: 68,
+    0x757: 68,
+    0x758: 68,
+    0x759: 82,
+    0x75a: 82,
+    0x75b: 82,
+    0x75c: 68,
+    0x75d: 68,
+    0x75e: 68,
+    0x75f: 68,
+    0x760: 68,
+    0x761: 68,
+    0x762: 68,
+    0x763: 68,
+    0x764: 68,
+    0x765: 68,
+    0x766: 68,
+    0x767: 68,
+    0x768: 68,
+    0x769: 68,
+    0x76a: 68,
+    0x76b: 82,
+    0x76c: 82,
+    0x76d: 68,
+    0x76e: 68,
+    0x76f: 68,
+    0x770: 68,
+    0x771: 82,
+    0x772: 68,
+    0x773: 82,
+    0x774: 82,
+    0x775: 68,
+    0x776: 68,
+    0x777: 68,
+    0x778: 82,
+    0x779: 82,
+    0x77a: 68,
+    0x77b: 68,
+    0x77c: 68,
+    0x77d: 68,
+    0x77e: 68,
+    0x77f: 68,
+    0x7ca: 68,
+    0x7cb: 68,
+    0x7cc: 68,
+    0x7cd: 68,
+    0x7ce: 68,
+    0x7cf: 68,
+    0x7d0: 68,
+    0x7d1: 68,
+    0x7d2: 68,
+    0x7d3: 68,
+    0x7d4: 68,
+    0x7d5: 68,
+    0x7d6: 68,
+    0x7d7: 68,
+    0x7d8: 68,
+    0x7d9: 68,
+    0x7da: 68,
+    0x7db: 68,
+    0x7dc: 68,
+    0x7dd: 68,
+    0x7de: 68,
+    0x7df: 68,
+    0x7e0: 68,
+    0x7e1: 68,
+    0x7e2: 68,
+    0x7e3: 68,
+    0x7e4: 68,
+    0x7e5: 68,
+    0x7e6: 68,
+    0x7e7: 68,
+    0x7e8: 68,
+    0x7e9: 68,
+    0x7ea: 68,
+    0x7fa: 67,
+    0x840: 82,
+    0x841: 68,
+    0x842: 68,
+    0x843: 68,
+    0x844: 68,
+    0x845: 68,
+    0x846: 82,
+    0x847: 82,
+    0x848: 68,
+    0x849: 82,
+    0x84a: 68,
+    0x84b: 68,
+    0x84c: 68,
+    0x84d: 68,
+    0x84e: 68,
+    0x84f: 68,
+    0x850: 68,
+    0x851: 68,
+    0x852: 68,
+    0x853: 68,
+    0x854: 82,
+    0x855: 68,
+    0x856: 85,
+    0x857: 85,
+    0x858: 85,
+    0x860: 68,
+    0x861: 85,
+    0x862: 68,
+    0x863: 68,
+    0x864: 68,
+    0x865: 68,
+    0x866: 85,
+    0x867: 82,
+    0x868: 68,
+    0x869: 82,
+    0x86a: 82,
+    0x8a0: 68,
+    0x8a1: 68,
+    0x8a2: 68,
+    0x8a3: 68,
+    0x8a4: 68,
+    0x8a5: 68,
+    0x8a6: 68,
+    0x8a7: 68,
+    0x8a8: 68,
+    0x8a9: 68,
+    0x8aa: 82,
+    0x8ab: 82,
+    0x8ac: 82,
+    0x8ad: 85,
+    0x8ae: 82,
+    0x8af: 68,
+    0x8b0: 68,
+    0x8b1: 82,
+    0x8b2: 82,
+    0x8b3: 68,
+    0x8b4: 68,
+    0x8b6: 68,
+    0x8b7: 68,
+    0x8b8: 68,
+    0x8b9: 82,
+    0x8ba: 68,
+    0x8bb: 68,
+    0x8bc: 68,
+    0x8bd: 68,
+    0x8e2: 85,
+    0x1806: 85,
+    0x1807: 68,
+    0x180a: 67,
+    0x180e: 85,
+    0x1820: 68,
+    0x1821: 68,
+    0x1822: 68,
+    0x1823: 68,
+    0x1824: 68,
+    0x1825: 68,
+    0x1826: 68,
+    0x1827: 68,
+    0x1828: 68,
+    0x1829: 68,
+    0x182a: 68,
+    0x182b: 68,
+    0x182c: 68,
+    0x182d: 68,
+    0x182e: 68,
+    0x182f: 68,
+    0x1830: 68,
+    0x1831: 68,
+    0x1832: 68,
+    0x1833: 68,
+    0x1834: 68,
+    0x1835: 68,
+    0x1836: 68,
+    0x1837: 68,
+    0x1838: 68,
+    0x1839: 68,
+    0x183a: 68,
+    0x183b: 68,
+    0x183c: 68,
+    0x183d: 68,
+    0x183e: 68,
+    0x183f: 68,
+    0x1840: 68,
+    0x1841: 68,
+    0x1842: 68,
+    0x1843: 68,
+    0x1844: 68,
+    0x1845: 68,
+    0x1846: 68,
+    0x1847: 68,
+    0x1848: 68,
+    0x1849: 68,
+    0x184a: 68,
+    0x184b: 68,
+    0x184c: 68,
+    0x184d: 68,
+    0x184e: 68,
+    0x184f: 68,
+    0x1850: 68,
+    0x1851: 68,
+    0x1852: 68,
+    0x1853: 68,
+    0x1854: 68,
+    0x1855: 68,
+    0x1856: 68,
+    0x1857: 68,
+    0x1858: 68,
+    0x1859: 68,
+    0x185a: 68,
+    0x185b: 68,
+    0x185c: 68,
+    0x185d: 68,
+    0x185e: 68,
+    0x185f: 68,
+    0x1860: 68,
+    0x1861: 68,
+    0x1862: 68,
+    0x1863: 68,
+    0x1864: 68,
+    0x1865: 68,
+    0x1866: 68,
+    0x1867: 68,
+    0x1868: 68,
+    0x1869: 68,
+    0x186a: 68,
+    0x186b: 68,
+    0x186c: 68,
+    0x186d: 68,
+    0x186e: 68,
+    0x186f: 68,
+    0x1870: 68,
+    0x1871: 68,
+    0x1872: 68,
+    0x1873: 68,
+    0x1874: 68,
+    0x1875: 68,
+    0x1876: 68,
+    0x1877: 68,
+    0x1878: 68,
+    0x1880: 85,
+    0x1881: 85,
+    0x1882: 85,
+    0x1883: 85,
+    0x1884: 85,
+    0x1885: 84,
+    0x1886: 84,
+    0x1887: 68,
+    0x1888: 68,
+    0x1889: 68,
+    0x188a: 68,
+    0x188b: 68,
+    0x188c: 68,
+    0x188d: 68,
+    0x188e: 68,
+    0x188f: 68,
+    0x1890: 68,
+    0x1891: 68,
+    0x1892: 68,
+    0x1893: 68,
+    0x1894: 68,
+    0x1895: 68,
+    0x1896: 68,
+    0x1897: 68,
+    0x1898: 68,
+    0x1899: 68,
+    0x189a: 68,
+    0x189b: 68,
+    0x189c: 68,
+    0x189d: 68,
+    0x189e: 68,
+    0x189f: 68,
+    0x18a0: 68,
+    0x18a1: 68,
+    0x18a2: 68,
+    0x18a3: 68,
+    0x18a4: 68,
+    0x18a5: 68,
+    0x18a6: 68,
+    0x18a7: 68,
+    0x18a8: 68,
+    0x18aa: 68,
+    0x200c: 85,
+    0x200d: 67,
+    0x202f: 85,
+    0x2066: 85,
+    0x2067: 85,
+    0x2068: 85,
+    0x2069: 85,
+    0xa840: 68,
+    0xa841: 68,
+    0xa842: 68,
+    0xa843: 68,
+    0xa844: 68,
+    0xa845: 68,
+    0xa846: 68,
+    0xa847: 68,
+    0xa848: 68,
+    0xa849: 68,
+    0xa84a: 68,
+    0xa84b: 68,
+    0xa84c: 68,
+    0xa84d: 68,
+    0xa84e: 68,
+    0xa84f: 68,
+    0xa850: 68,
+    0xa851: 68,
+    0xa852: 68,
+    0xa853: 68,
+    0xa854: 68,
+    0xa855: 68,
+    0xa856: 68,
+    0xa857: 68,
+    0xa858: 68,
+    0xa859: 68,
+    0xa85a: 68,
+    0xa85b: 68,
+    0xa85c: 68,
+    0xa85d: 68,
+    0xa85e: 68,
+    0xa85f: 68,
+    0xa860: 68,
+    0xa861: 68,
+    0xa862: 68,
+    0xa863: 68,
+    0xa864: 68,
+    0xa865: 68,
+    0xa866: 68,
+    0xa867: 68,
+    0xa868: 68,
+    0xa869: 68,
+    0xa86a: 68,
+    0xa86b: 68,
+    0xa86c: 68,
+    0xa86d: 68,
+    0xa86e: 68,
+    0xa86f: 68,
+    0xa870: 68,
+    0xa871: 68,
+    0xa872: 76,
+    0xa873: 85,
+    0x10ac0: 68,
+    0x10ac1: 68,
+    0x10ac2: 68,
+    0x10ac3: 68,
+    0x10ac4: 68,
+    0x10ac5: 82,
+    0x10ac6: 85,
+    0x10ac7: 82,
+    0x10ac8: 85,
+    0x10ac9: 82,
+    0x10aca: 82,
+    0x10acb: 85,
+    0x10acc: 85,
+    0x10acd: 76,
+    0x10ace: 82,
+    0x10acf: 82,
+    0x10ad0: 82,
+    0x10ad1: 82,
+    0x10ad2: 82,
+    0x10ad3: 68,
+    0x10ad4: 68,
+    0x10ad5: 68,
+    0x10ad6: 68,
+    0x10ad7: 76,
+    0x10ad8: 68,
+    0x10ad9: 68,
+    0x10ada: 68,
+    0x10adb: 68,
+    0x10adc: 68,
+    0x10add: 82,
+    0x10ade: 68,
+    0x10adf: 68,
+    0x10ae0: 68,
+    0x10ae1: 82,
+    0x10ae2: 85,
+    0x10ae3: 85,
+    0x10ae4: 82,
+    0x10aeb: 68,
+    0x10aec: 68,
+    0x10aed: 68,
+    0x10aee: 68,
+    0x10aef: 82,
+    0x10b80: 68,
+    0x10b81: 82,
+    0x10b82: 68,
+    0x10b83: 82,
+    0x10b84: 82,
+    0x10b85: 82,
+    0x10b86: 68,
+    0x10b87: 68,
+    0x10b88: 68,
+    0x10b89: 82,
+    0x10b8a: 68,
+    0x10b8b: 68,
+    0x10b8c: 82,
+    0x10b8d: 68,
+    0x10b8e: 82,
+    0x10b8f: 82,
+    0x10b90: 68,
+    0x10b91: 82,
+    0x10ba9: 82,
+    0x10baa: 82,
+    0x10bab: 82,
+    0x10bac: 82,
+    0x10bad: 68,
+    0x10bae: 68,
+    0x10baf: 85,
+    0x10d00: 76,
+    0x10d01: 68,
+    0x10d02: 68,
+    0x10d03: 68,
+    0x10d04: 68,
+    0x10d05: 68,
+    0x10d06: 68,
+    0x10d07: 68,
+    0x10d08: 68,
+    0x10d09: 68,
+    0x10d0a: 68,
+    0x10d0b: 68,
+    0x10d0c: 68,
+    0x10d0d: 68,
+    0x10d0e: 68,
+    0x10d0f: 68,
+    0x10d10: 68,
+    0x10d11: 68,
+    0x10d12: 68,
+    0x10d13: 68,
+    0x10d14: 68,
+    0x10d15: 68,
+    0x10d16: 68,
+    0x10d17: 68,
+    0x10d18: 68,
+    0x10d19: 68,
+    0x10d1a: 68,
+    0x10d1b: 68,
+    0x10d1c: 68,
+    0x10d1d: 68,
+    0x10d1e: 68,
+    0x10d1f: 68,
+    0x10d20: 68,
+    0x10d21: 68,
+    0x10d22: 82,
+    0x10d23: 68,
+    0x10f30: 68,
+    0x10f31: 68,
+    0x10f32: 68,
+    0x10f33: 82,
+    0x10f34: 68,
+    0x10f35: 68,
+    0x10f36: 68,
+    0x10f37: 68,
+    0x10f38: 68,
+    0x10f39: 68,
+    0x10f3a: 68,
+    0x10f3b: 68,
+    0x10f3c: 68,
+    0x10f3d: 68,
+    0x10f3e: 68,
+    0x10f3f: 68,
+    0x10f40: 68,
+    0x10f41: 68,
+    0x10f42: 68,
+    0x10f43: 68,
+    0x10f44: 68,
+    0x10f45: 85,
+    0x10f51: 68,
+    0x10f52: 68,
+    0x10f53: 68,
+    0x10f54: 82,
+    0x110bd: 85,
+    0x110cd: 85,
+    0x1e900: 68,
+    0x1e901: 68,
+    0x1e902: 68,
+    0x1e903: 68,
+    0x1e904: 68,
+    0x1e905: 68,
+    0x1e906: 68,
+    0x1e907: 68,
+    0x1e908: 68,
+    0x1e909: 68,
+    0x1e90a: 68,
+    0x1e90b: 68,
+    0x1e90c: 68,
+    0x1e90d: 68,
+    0x1e90e: 68,
+    0x1e90f: 68,
+    0x1e910: 68,
+    0x1e911: 68,
+    0x1e912: 68,
+    0x1e913: 68,
+    0x1e914: 68,
+    0x1e915: 68,
+    0x1e916: 68,
+    0x1e917: 68,
+    0x1e918: 68,
+    0x1e919: 68,
+    0x1e91a: 68,
+    0x1e91b: 68,
+    0x1e91c: 68,
+    0x1e91d: 68,
+    0x1e91e: 68,
+    0x1e91f: 68,
+    0x1e920: 68,
+    0x1e921: 68,
+    0x1e922: 68,
+    0x1e923: 68,
+    0x1e924: 68,
+    0x1e925: 68,
+    0x1e926: 68,
+    0x1e927: 68,
+    0x1e928: 68,
+    0x1e929: 68,
+    0x1e92a: 68,
+    0x1e92b: 68,
+    0x1e92c: 68,
+    0x1e92d: 68,
+    0x1e92e: 68,
+    0x1e92f: 68,
+    0x1e930: 68,
+    0x1e931: 68,
+    0x1e932: 68,
+    0x1e933: 68,
+    0x1e934: 68,
+    0x1e935: 68,
+    0x1e936: 68,
+    0x1e937: 68,
+    0x1e938: 68,
+    0x1e939: 68,
+    0x1e93a: 68,
+    0x1e93b: 68,
+    0x1e93c: 68,
+    0x1e93d: 68,
+    0x1e93e: 68,
+    0x1e93f: 68,
+    0x1e940: 68,
+    0x1e941: 68,
+    0x1e942: 68,
+    0x1e943: 68,
+}
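The integer values in the joining_types mapping that closes above are ASCII codes for the Unicode Arabic joining-type letters: 68 is 'D' (dual-joining), 76 'L' (left-joining), 82 'R' (right-joining), 85 'U' (non-joining), 67 'C' (join-causing), 84 'T' (transparent). A minimal sketch of reading the table back, assuming only what the data above shows:

# Sketch: joining_types stores ord() of the joining-type letter.
JOINING_TYPES = {0x10ac0: 68, 0x10ac5: 82, 0x10ac6: 85}  # excerpt from above

assert chr(JOINING_TYPES[0x10ac0]) == 'D'  # U+10AC0 is dual-joining
assert chr(JOINING_TYPES[0x10ac5]) == 'R'  # U+10AC5 is right-joining
assert chr(JOINING_TYPES[0x10ac6]) == 'U'  # U+10AC6 is non-joining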
+codepoint_classes = {
+    'PVALID': (
+        0x2d0000002e,
+        0x300000003a,
+        0x610000007b,
+        0xdf000000f7,
+        0xf800000100,
+        0x10100000102,
+        0x10300000104,
+        0x10500000106,
+        0x10700000108,
+        0x1090000010a,
+        0x10b0000010c,
+        0x10d0000010e,
+        0x10f00000110,
+        0x11100000112,
+        0x11300000114,
+        0x11500000116,
+        0x11700000118,
+        0x1190000011a,
+        0x11b0000011c,
+        0x11d0000011e,
+        0x11f00000120,
+        0x12100000122,
+        0x12300000124,
+        0x12500000126,
+        0x12700000128,
+        0x1290000012a,
+        0x12b0000012c,
+        0x12d0000012e,
+        0x12f00000130,
+        0x13100000132,
+        0x13500000136,
+        0x13700000139,
+        0x13a0000013b,
+        0x13c0000013d,
+        0x13e0000013f,
+        0x14200000143,
+        0x14400000145,
+        0x14600000147,
+        0x14800000149,
+        0x14b0000014c,
+        0x14d0000014e,
+        0x14f00000150,
+        0x15100000152,
+        0x15300000154,
+        0x15500000156,
+        0x15700000158,
+        0x1590000015a,
+        0x15b0000015c,
+        0x15d0000015e,
+        0x15f00000160,
+        0x16100000162,
+        0x16300000164,
+        0x16500000166,
+        0x16700000168,
+        0x1690000016a,
+        0x16b0000016c,
+        0x16d0000016e,
+        0x16f00000170,
+        0x17100000172,
+        0x17300000174,
+        0x17500000176,
+        0x17700000178,
+        0x17a0000017b,
+        0x17c0000017d,
+        0x17e0000017f,
+        0x18000000181,
+        0x18300000184,
+        0x18500000186,
+        0x18800000189,
+        0x18c0000018e,
+        0x19200000193,
+        0x19500000196,
+        0x1990000019c,
+        0x19e0000019f,
+        0x1a1000001a2,
+        0x1a3000001a4,
+        0x1a5000001a6,
+        0x1a8000001a9,
+        0x1aa000001ac,
+        0x1ad000001ae,
+        0x1b0000001b1,
+        0x1b4000001b5,
+        0x1b6000001b7,
+        0x1b9000001bc,
+        0x1bd000001c4,
+        0x1ce000001cf,
+        0x1d0000001d1,
+        0x1d2000001d3,
+        0x1d4000001d5,
+        0x1d6000001d7,
+        0x1d8000001d9,
+        0x1da000001db,
+        0x1dc000001de,
+        0x1df000001e0,
+        0x1e1000001e2,
+        0x1e3000001e4,
+        0x1e5000001e6,
+        0x1e7000001e8,
+        0x1e9000001ea,
+        0x1eb000001ec,
+        0x1ed000001ee,
+        0x1ef000001f1,
+        0x1f5000001f6,
+        0x1f9000001fa,
+        0x1fb000001fc,
+        0x1fd000001fe,
+        0x1ff00000200,
+        0x20100000202,
+        0x20300000204,
+        0x20500000206,
+        0x20700000208,
+        0x2090000020a,
+        0x20b0000020c,
+        0x20d0000020e,
+        0x20f00000210,
+        0x21100000212,
+        0x21300000214,
+        0x21500000216,
+        0x21700000218,
+        0x2190000021a,
+        0x21b0000021c,
+        0x21d0000021e,
+        0x21f00000220,
+        0x22100000222,
+        0x22300000224,
+        0x22500000226,
+        0x22700000228,
+        0x2290000022a,
+        0x22b0000022c,
+        0x22d0000022e,
+        0x22f00000230,
+        0x23100000232,
+        0x2330000023a,
+        0x23c0000023d,
+        0x23f00000241,
+        0x24200000243,
+        0x24700000248,
+        0x2490000024a,
+        0x24b0000024c,
+        0x24d0000024e,
+        0x24f000002b0,
+        0x2b9000002c2,
+        0x2c6000002d2,
+        0x2ec000002ed,
+        0x2ee000002ef,
+        0x30000000340,
+        0x34200000343,
+        0x3460000034f,
+        0x35000000370,
+        0x37100000372,
+        0x37300000374,
+        0x37700000378,
+        0x37b0000037e,
+        0x39000000391,
+        0x3ac000003cf,
+        0x3d7000003d8,
+        0x3d9000003da,
+        0x3db000003dc,
+        0x3dd000003de,
+        0x3df000003e0,
+        0x3e1000003e2,
+        0x3e3000003e4,
+        0x3e5000003e6,
+        0x3e7000003e8,
+        0x3e9000003ea,
+        0x3eb000003ec,
+        0x3ed000003ee,
+        0x3ef000003f0,
+        0x3f3000003f4,
+        0x3f8000003f9,
+        0x3fb000003fd,
+        0x43000000460,
+        0x46100000462,
+        0x46300000464,
+        0x46500000466,
+        0x46700000468,
+        0x4690000046a,
+        0x46b0000046c,
+        0x46d0000046e,
+        0x46f00000470,
+        0x47100000472,
+        0x47300000474,
+        0x47500000476,
+        0x47700000478,
+        0x4790000047a,
+        0x47b0000047c,
+        0x47d0000047e,
+        0x47f00000480,
+        0x48100000482,
+        0x48300000488,
+        0x48b0000048c,
+        0x48d0000048e,
+        0x48f00000490,
+        0x49100000492,
+        0x49300000494,
+        0x49500000496,
+        0x49700000498,
+        0x4990000049a,
+        0x49b0000049c,
+        0x49d0000049e,
+        0x49f000004a0,
+        0x4a1000004a2,
+        0x4a3000004a4,
+        0x4a5000004a6,
+        0x4a7000004a8,
+        0x4a9000004aa,
+        0x4ab000004ac,
+        0x4ad000004ae,
+        0x4af000004b0,
+        0x4b1000004b2,
+        0x4b3000004b4,
+        0x4b5000004b6,
+        0x4b7000004b8,
+        0x4b9000004ba,
+        0x4bb000004bc,
+        0x4bd000004be,
+        0x4bf000004c0,
+        0x4c2000004c3,
+        0x4c4000004c5,
+        0x4c6000004c7,
+        0x4c8000004c9,
+        0x4ca000004cb,
+        0x4cc000004cd,
+        0x4ce000004d0,
+        0x4d1000004d2,
+        0x4d3000004d4,
+        0x4d5000004d6,
+        0x4d7000004d8,
+        0x4d9000004da,
+        0x4db000004dc,
+        0x4dd000004de,
+        0x4df000004e0,
+        0x4e1000004e2,
+        0x4e3000004e4,
+        0x4e5000004e6,
+        0x4e7000004e8,
+        0x4e9000004ea,
+        0x4eb000004ec,
+        0x4ed000004ee,
+        0x4ef000004f0,
+        0x4f1000004f2,
+        0x4f3000004f4,
+        0x4f5000004f6,
+        0x4f7000004f8,
+        0x4f9000004fa,
+        0x4fb000004fc,
+        0x4fd000004fe,
+        0x4ff00000500,
+        0x50100000502,
+        0x50300000504,
+        0x50500000506,
+        0x50700000508,
+        0x5090000050a,
+        0x50b0000050c,
+        0x50d0000050e,
+        0x50f00000510,
+        0x51100000512,
+        0x51300000514,
+        0x51500000516,
+        0x51700000518,
+        0x5190000051a,
+        0x51b0000051c,
+        0x51d0000051e,
+        0x51f00000520,
+        0x52100000522,
+        0x52300000524,
+        0x52500000526,
+        0x52700000528,
+        0x5290000052a,
+        0x52b0000052c,
+        0x52d0000052e,
+        0x52f00000530,
+        0x5590000055a,
+        0x56000000587,
+        0x58800000589,
+        0x591000005be,
+        0x5bf000005c0,
+        0x5c1000005c3,
+        0x5c4000005c6,
+        0x5c7000005c8,
+        0x5d0000005eb,
+        0x5ef000005f3,
+        0x6100000061b,
+        0x62000000640,
+        0x64100000660,
+        0x66e00000675,
+        0x679000006d4,
+        0x6d5000006dd,
+        0x6df000006e9,
+        0x6ea000006f0,
+        0x6fa00000700,
+        0x7100000074b,
+        0x74d000007b2,
+        0x7c0000007f6,
+        0x7fd000007fe,
+        0x8000000082e,
+        0x8400000085c,
+        0x8600000086b,
+        0x8a0000008b5,
+        0x8b6000008be,
+        0x8d3000008e2,
+        0x8e300000958,
+        0x96000000964,
+        0x96600000970,
+        0x97100000984,
+        0x9850000098d,
+        0x98f00000991,
+        0x993000009a9,
+        0x9aa000009b1,
+        0x9b2000009b3,
+        0x9b6000009ba,
+        0x9bc000009c5,
+        0x9c7000009c9,
+        0x9cb000009cf,
+        0x9d7000009d8,
+        0x9e0000009e4,
+        0x9e6000009f2,
+        0x9fc000009fd,
+        0x9fe000009ff,
+        0xa0100000a04,
+        0xa0500000a0b,
+        0xa0f00000a11,
+        0xa1300000a29,
+        0xa2a00000a31,
+        0xa3200000a33,
+        0xa3500000a36,
+        0xa3800000a3a,
+        0xa3c00000a3d,
+        0xa3e00000a43,
+        0xa4700000a49,
+        0xa4b00000a4e,
+        0xa5100000a52,
+        0xa5c00000a5d,
+        0xa6600000a76,
+        0xa8100000a84,
+        0xa8500000a8e,
+        0xa8f00000a92,
+        0xa9300000aa9,
+        0xaaa00000ab1,
+        0xab200000ab4,
+        0xab500000aba,
+        0xabc00000ac6,
+        0xac700000aca,
+        0xacb00000ace,
+        0xad000000ad1,
+        0xae000000ae4,
+        0xae600000af0,
+        0xaf900000b00,
+        0xb0100000b04,
+        0xb0500000b0d,
+        0xb0f00000b11,
+        0xb1300000b29,
+        0xb2a00000b31,
+        0xb3200000b34,
+        0xb3500000b3a,
+        0xb3c00000b45,
+        0xb4700000b49,
+        0xb4b00000b4e,
+        0xb5600000b58,
+        0xb5f00000b64,
+        0xb6600000b70,
+        0xb7100000b72,
+        0xb8200000b84,
+        0xb8500000b8b,
+        0xb8e00000b91,
+        0xb9200000b96,
+        0xb9900000b9b,
+        0xb9c00000b9d,
+        0xb9e00000ba0,
+        0xba300000ba5,
+        0xba800000bab,
+        0xbae00000bba,
+        0xbbe00000bc3,
+        0xbc600000bc9,
+        0xbca00000bce,
+        0xbd000000bd1,
+        0xbd700000bd8,
+        0xbe600000bf0,
+        0xc0000000c0d,
+        0xc0e00000c11,
+        0xc1200000c29,
+        0xc2a00000c3a,
+        0xc3d00000c45,
+        0xc4600000c49,
+        0xc4a00000c4e,
+        0xc5500000c57,
+        0xc5800000c5b,
+        0xc6000000c64,
+        0xc6600000c70,
+        0xc8000000c84,
+        0xc8500000c8d,
+        0xc8e00000c91,
+        0xc9200000ca9,
+        0xcaa00000cb4,
+        0xcb500000cba,
+        0xcbc00000cc5,
+        0xcc600000cc9,
+        0xcca00000cce,
+        0xcd500000cd7,
+        0xcde00000cdf,
+        0xce000000ce4,
+        0xce600000cf0,
+        0xcf100000cf3,
+        0xd0000000d04,
+        0xd0500000d0d,
+        0xd0e00000d11,
+        0xd1200000d45,
+        0xd4600000d49,
+        0xd4a00000d4f,
+        0xd5400000d58,
+        0xd5f00000d64,
+        0xd6600000d70,
+        0xd7a00000d80,
+        0xd8200000d84,
+        0xd8500000d97,
+        0xd9a00000db2,
+        0xdb300000dbc,
+        0xdbd00000dbe,
+        0xdc000000dc7,
+        0xdca00000dcb,
+        0xdcf00000dd5,
+        0xdd600000dd7,
+        0xdd800000de0,
+        0xde600000df0,
+        0xdf200000df4,
+        0xe0100000e33,
+        0xe3400000e3b,
+        0xe4000000e4f,
+        0xe5000000e5a,
+        0xe8100000e83,
+        0xe8400000e85,
+        0xe8700000e89,
+        0xe8a00000e8b,
+        0xe8d00000e8e,
+        0xe9400000e98,
+        0xe9900000ea0,
+        0xea100000ea4,
+        0xea500000ea6,
+        0xea700000ea8,
+        0xeaa00000eac,
+        0xead00000eb3,
+        0xeb400000eba,
+        0xebb00000ebe,
+        0xec000000ec5,
+        0xec600000ec7,
+        0xec800000ece,
+        0xed000000eda,
+        0xede00000ee0,
+        0xf0000000f01,
+        0xf0b00000f0c,
+        0xf1800000f1a,
+        0xf2000000f2a,
+        0xf3500000f36,
+        0xf3700000f38,
+        0xf3900000f3a,
+        0xf3e00000f43,
+        0xf4400000f48,
+        0xf4900000f4d,
+        0xf4e00000f52,
+        0xf5300000f57,
+        0xf5800000f5c,
+        0xf5d00000f69,
+        0xf6a00000f6d,
+        0xf7100000f73,
+        0xf7400000f75,
+        0xf7a00000f81,
+        0xf8200000f85,
+        0xf8600000f93,
+        0xf9400000f98,
+        0xf9900000f9d,
+        0xf9e00000fa2,
+        0xfa300000fa7,
+        0xfa800000fac,
+        0xfad00000fb9,
+        0xfba00000fbd,
+        0xfc600000fc7,
+        0x10000000104a,
+        0x10500000109e,
+        0x10d0000010fb,
+        0x10fd00001100,
+        0x120000001249,
+        0x124a0000124e,
+        0x125000001257,
+        0x125800001259,
+        0x125a0000125e,
+        0x126000001289,
+        0x128a0000128e,
+        0x1290000012b1,
+        0x12b2000012b6,
+        0x12b8000012bf,
+        0x12c0000012c1,
+        0x12c2000012c6,
+        0x12c8000012d7,
+        0x12d800001311,
+        0x131200001316,
+        0x13180000135b,
+        0x135d00001360,
+        0x138000001390,
+        0x13a0000013f6,
+        0x14010000166d,
+        0x166f00001680,
+        0x16810000169b,
+        0x16a0000016eb,
+        0x16f1000016f9,
+        0x17000000170d,
+        0x170e00001715,
+        0x172000001735,
+        0x174000001754,
+        0x17600000176d,
+        0x176e00001771,
+        0x177200001774,
+        0x1780000017b4,
+        0x17b6000017d4,
+        0x17d7000017d8,
+        0x17dc000017de,
+        0x17e0000017ea,
+        0x18100000181a,
+        0x182000001879,
+        0x1880000018ab,
+        0x18b0000018f6,
+        0x19000000191f,
+        0x19200000192c,
+        0x19300000193c,
+        0x19460000196e,
+        0x197000001975,
+        0x1980000019ac,
+        0x19b0000019ca,
+        0x19d0000019da,
+        0x1a0000001a1c,
+        0x1a2000001a5f,
+        0x1a6000001a7d,
+        0x1a7f00001a8a,
+        0x1a9000001a9a,
+        0x1aa700001aa8,
+        0x1ab000001abe,
+        0x1b0000001b4c,
+        0x1b5000001b5a,
+        0x1b6b00001b74,
+        0x1b8000001bf4,
+        0x1c0000001c38,
+        0x1c4000001c4a,
+        0x1c4d00001c7e,
+        0x1cd000001cd3,
+        0x1cd400001cfa,
+        0x1d0000001d2c,
+        0x1d2f00001d30,
+        0x1d3b00001d3c,
+        0x1d4e00001d4f,
+        0x1d6b00001d78,
+        0x1d7900001d9b,
+        0x1dc000001dfa,
+        0x1dfb00001e00,
+        0x1e0100001e02,
+        0x1e0300001e04,
+        0x1e0500001e06,
+        0x1e0700001e08,
+        0x1e0900001e0a,
+        0x1e0b00001e0c,
+        0x1e0d00001e0e,
+        0x1e0f00001e10,
+        0x1e1100001e12,
+        0x1e1300001e14,
+        0x1e1500001e16,
+        0x1e1700001e18,
+        0x1e1900001e1a,
+        0x1e1b00001e1c,
+        0x1e1d00001e1e,
+        0x1e1f00001e20,
+        0x1e2100001e22,
+        0x1e2300001e24,
+        0x1e2500001e26,
+        0x1e2700001e28,
+        0x1e2900001e2a,
+        0x1e2b00001e2c,
+        0x1e2d00001e2e,
+        0x1e2f00001e30,
+        0x1e3100001e32,
+        0x1e3300001e34,
+        0x1e3500001e36,
+        0x1e3700001e38,
+        0x1e3900001e3a,
+        0x1e3b00001e3c,
+        0x1e3d00001e3e,
+        0x1e3f00001e40,
+        0x1e4100001e42,
+        0x1e4300001e44,
+        0x1e4500001e46,
+        0x1e4700001e48,
+        0x1e4900001e4a,
+        0x1e4b00001e4c,
+        0x1e4d00001e4e,
+        0x1e4f00001e50,
+        0x1e5100001e52,
+        0x1e5300001e54,
+        0x1e5500001e56,
+        0x1e5700001e58,
+        0x1e5900001e5a,
+        0x1e5b00001e5c,
+        0x1e5d00001e5e,
+        0x1e5f00001e60,
+        0x1e6100001e62,
+        0x1e6300001e64,
+        0x1e6500001e66,
+        0x1e6700001e68,
+        0x1e6900001e6a,
+        0x1e6b00001e6c,
+        0x1e6d00001e6e,
+        0x1e6f00001e70,
+        0x1e7100001e72,
+        0x1e7300001e74,
+        0x1e7500001e76,
+        0x1e7700001e78,
+        0x1e7900001e7a,
+        0x1e7b00001e7c,
+        0x1e7d00001e7e,
+        0x1e7f00001e80,
+        0x1e8100001e82,
+        0x1e8300001e84,
+        0x1e8500001e86,
+        0x1e8700001e88,
+        0x1e8900001e8a,
+        0x1e8b00001e8c,
+        0x1e8d00001e8e,
+        0x1e8f00001e90,
+        0x1e9100001e92,
+        0x1e9300001e94,
+        0x1e9500001e9a,
+        0x1e9c00001e9e,
+        0x1e9f00001ea0,
+        0x1ea100001ea2,
+        0x1ea300001ea4,
+        0x1ea500001ea6,
+        0x1ea700001ea8,
+        0x1ea900001eaa,
+        0x1eab00001eac,
+        0x1ead00001eae,
+        0x1eaf00001eb0,
+        0x1eb100001eb2,
+        0x1eb300001eb4,
+        0x1eb500001eb6,
+        0x1eb700001eb8,
+        0x1eb900001eba,
+        0x1ebb00001ebc,
+        0x1ebd00001ebe,
+        0x1ebf00001ec0,
+        0x1ec100001ec2,
+        0x1ec300001ec4,
+        0x1ec500001ec6,
+        0x1ec700001ec8,
+        0x1ec900001eca,
+        0x1ecb00001ecc,
+        0x1ecd00001ece,
+        0x1ecf00001ed0,
+        0x1ed100001ed2,
+        0x1ed300001ed4,
+        0x1ed500001ed6,
+        0x1ed700001ed8,
+        0x1ed900001eda,
+        0x1edb00001edc,
+        0x1edd00001ede,
+        0x1edf00001ee0,
+        0x1ee100001ee2,
+        0x1ee300001ee4,
+        0x1ee500001ee6,
+        0x1ee700001ee8,
+        0x1ee900001eea,
+        0x1eeb00001eec,
+        0x1eed00001eee,
+        0x1eef00001ef0,
+        0x1ef100001ef2,
+        0x1ef300001ef4,
+        0x1ef500001ef6,
+        0x1ef700001ef8,
+        0x1ef900001efa,
+        0x1efb00001efc,
+        0x1efd00001efe,
+        0x1eff00001f08,
+        0x1f1000001f16,
+        0x1f2000001f28,
+        0x1f3000001f38,
+        0x1f4000001f46,
+        0x1f5000001f58,
+        0x1f6000001f68,
+        0x1f7000001f71,
+        0x1f7200001f73,
+        0x1f7400001f75,
+        0x1f7600001f77,
+        0x1f7800001f79,
+        0x1f7a00001f7b,
+        0x1f7c00001f7d,
+        0x1fb000001fb2,
+        0x1fb600001fb7,
+        0x1fc600001fc7,
+        0x1fd000001fd3,
+        0x1fd600001fd8,
+        0x1fe000001fe3,
+        0x1fe400001fe8,
+        0x1ff600001ff7,
+        0x214e0000214f,
+        0x218400002185,
+        0x2c3000002c5f,
+        0x2c6100002c62,
+        0x2c6500002c67,
+        0x2c6800002c69,
+        0x2c6a00002c6b,
+        0x2c6c00002c6d,
+        0x2c7100002c72,
+        0x2c7300002c75,
+        0x2c7600002c7c,
+        0x2c8100002c82,
+        0x2c8300002c84,
+        0x2c8500002c86,
+        0x2c8700002c88,
+        0x2c8900002c8a,
+        0x2c8b00002c8c,
+        0x2c8d00002c8e,
+        0x2c8f00002c90,
+        0x2c9100002c92,
+        0x2c9300002c94,
+        0x2c9500002c96,
+        0x2c9700002c98,
+        0x2c9900002c9a,
+        0x2c9b00002c9c,
+        0x2c9d00002c9e,
+        0x2c9f00002ca0,
+        0x2ca100002ca2,
+        0x2ca300002ca4,
+        0x2ca500002ca6,
+        0x2ca700002ca8,
+        0x2ca900002caa,
+        0x2cab00002cac,
+        0x2cad00002cae,
+        0x2caf00002cb0,
+        0x2cb100002cb2,
+        0x2cb300002cb4,
+        0x2cb500002cb6,
+        0x2cb700002cb8,
+        0x2cb900002cba,
+        0x2cbb00002cbc,
+        0x2cbd00002cbe,
+        0x2cbf00002cc0,
+        0x2cc100002cc2,
+        0x2cc300002cc4,
+        0x2cc500002cc6,
+        0x2cc700002cc8,
+        0x2cc900002cca,
+        0x2ccb00002ccc,
+        0x2ccd00002cce,
+        0x2ccf00002cd0,
+        0x2cd100002cd2,
+        0x2cd300002cd4,
+        0x2cd500002cd6,
+        0x2cd700002cd8,
+        0x2cd900002cda,
+        0x2cdb00002cdc,
+        0x2cdd00002cde,
+        0x2cdf00002ce0,
+        0x2ce100002ce2,
+        0x2ce300002ce5,
+        0x2cec00002ced,
+        0x2cee00002cf2,
+        0x2cf300002cf4,
+        0x2d0000002d26,
+        0x2d2700002d28,
+        0x2d2d00002d2e,
+        0x2d3000002d68,
+        0x2d7f00002d97,
+        0x2da000002da7,
+        0x2da800002daf,
+        0x2db000002db7,
+        0x2db800002dbf,
+        0x2dc000002dc7,
+        0x2dc800002dcf,
+        0x2dd000002dd7,
+        0x2dd800002ddf,
+        0x2de000002e00,
+        0x2e2f00002e30,
+        0x300500003008,
+        0x302a0000302e,
+        0x303c0000303d,
+        0x304100003097,
+        0x30990000309b,
+        0x309d0000309f,
+        0x30a1000030fb,
+        0x30fc000030ff,
+        0x310500003130,
+        0x31a0000031bb,
+        0x31f000003200,
+        0x340000004db6,
+        0x4e0000009ff0,
+        0xa0000000a48d,
+        0xa4d00000a4fe,
+        0xa5000000a60d,
+        0xa6100000a62c,
+        0xa6410000a642,
+        0xa6430000a644,
+        0xa6450000a646,
+        0xa6470000a648,
+        0xa6490000a64a,
+        0xa64b0000a64c,
+        0xa64d0000a64e,
+        0xa64f0000a650,
+        0xa6510000a652,
+        0xa6530000a654,
+        0xa6550000a656,
+        0xa6570000a658,
+        0xa6590000a65a,
+        0xa65b0000a65c,
+        0xa65d0000a65e,
+        0xa65f0000a660,
+        0xa6610000a662,
+        0xa6630000a664,
+        0xa6650000a666,
+        0xa6670000a668,
+        0xa6690000a66a,
+        0xa66b0000a66c,
+        0xa66d0000a670,
+        0xa6740000a67e,
+        0xa67f0000a680,
+        0xa6810000a682,
+        0xa6830000a684,
+        0xa6850000a686,
+        0xa6870000a688,
+        0xa6890000a68a,
+        0xa68b0000a68c,
+        0xa68d0000a68e,
+        0xa68f0000a690,
+        0xa6910000a692,
+        0xa6930000a694,
+        0xa6950000a696,
+        0xa6970000a698,
+        0xa6990000a69a,
+        0xa69b0000a69c,
+        0xa69e0000a6e6,
+        0xa6f00000a6f2,
+        0xa7170000a720,
+        0xa7230000a724,
+        0xa7250000a726,
+        0xa7270000a728,
+        0xa7290000a72a,
+        0xa72b0000a72c,
+        0xa72d0000a72e,
+        0xa72f0000a732,
+        0xa7330000a734,
+        0xa7350000a736,
+        0xa7370000a738,
+        0xa7390000a73a,
+        0xa73b0000a73c,
+        0xa73d0000a73e,
+        0xa73f0000a740,
+        0xa7410000a742,
+        0xa7430000a744,
+        0xa7450000a746,
+        0xa7470000a748,
+        0xa7490000a74a,
+        0xa74b0000a74c,
+        0xa74d0000a74e,
+        0xa74f0000a750,
+        0xa7510000a752,
+        0xa7530000a754,
+        0xa7550000a756,
+        0xa7570000a758,
+        0xa7590000a75a,
+        0xa75b0000a75c,
+        0xa75d0000a75e,
+        0xa75f0000a760,
+        0xa7610000a762,
+        0xa7630000a764,
+        0xa7650000a766,
+        0xa7670000a768,
+        0xa7690000a76a,
+        0xa76b0000a76c,
+        0xa76d0000a76e,
+        0xa76f0000a770,
+        0xa7710000a779,
+        0xa77a0000a77b,
+        0xa77c0000a77d,
+        0xa77f0000a780,
+        0xa7810000a782,
+        0xa7830000a784,
+        0xa7850000a786,
+        0xa7870000a789,
+        0xa78c0000a78d,
+        0xa78e0000a790,
+        0xa7910000a792,
+        0xa7930000a796,
+        0xa7970000a798,
+        0xa7990000a79a,
+        0xa79b0000a79c,
+        0xa79d0000a79e,
+        0xa79f0000a7a0,
+        0xa7a10000a7a2,
+        0xa7a30000a7a4,
+        0xa7a50000a7a6,
+        0xa7a70000a7a8,
+        0xa7a90000a7aa,
+        0xa7af0000a7b0,
+        0xa7b50000a7b6,
+        0xa7b70000a7b8,
+        0xa7b90000a7ba,
+        0xa7f70000a7f8,
+        0xa7fa0000a828,
+        0xa8400000a874,
+        0xa8800000a8c6,
+        0xa8d00000a8da,
+        0xa8e00000a8f8,
+        0xa8fb0000a8fc,
+        0xa8fd0000a92e,
+        0xa9300000a954,
+        0xa9800000a9c1,
+        0xa9cf0000a9da,
+        0xa9e00000a9ff,
+        0xaa000000aa37,
+        0xaa400000aa4e,
+        0xaa500000aa5a,
+        0xaa600000aa77,
+        0xaa7a0000aac3,
+        0xaadb0000aade,
+        0xaae00000aaf0,
+        0xaaf20000aaf7,
+        0xab010000ab07,
+        0xab090000ab0f,
+        0xab110000ab17,
+        0xab200000ab27,
+        0xab280000ab2f,
+        0xab300000ab5b,
+        0xab600000ab66,
+        0xabc00000abeb,
+        0xabec0000abee,
+        0xabf00000abfa,
+        0xac000000d7a4,
+        0xfa0e0000fa10,
+        0xfa110000fa12,
+        0xfa130000fa15,
+        0xfa1f0000fa20,
+        0xfa210000fa22,
+        0xfa230000fa25,
+        0xfa270000fa2a,
+        0xfb1e0000fb1f,
+        0xfe200000fe30,
+        0xfe730000fe74,
+        0x100000001000c,
+        0x1000d00010027,
+        0x100280001003b,
+        0x1003c0001003e,
+        0x1003f0001004e,
+        0x100500001005e,
+        0x10080000100fb,
+        0x101fd000101fe,
+        0x102800001029d,
+        0x102a0000102d1,
+        0x102e0000102e1,
+        0x1030000010320,
+        0x1032d00010341,
+        0x103420001034a,
+        0x103500001037b,
+        0x103800001039e,
+        0x103a0000103c4,
+        0x103c8000103d0,
+        0x104280001049e,
+        0x104a0000104aa,
+        0x104d8000104fc,
+        0x1050000010528,
+        0x1053000010564,
+        0x1060000010737,
+        0x1074000010756,
+        0x1076000010768,
+        0x1080000010806,
+        0x1080800010809,
+        0x1080a00010836,
+        0x1083700010839,
+        0x1083c0001083d,
+        0x1083f00010856,
+        0x1086000010877,
+        0x108800001089f,
+        0x108e0000108f3,
+        0x108f4000108f6,
+        0x1090000010916,
+        0x109200001093a,
+        0x10980000109b8,
+        0x109be000109c0,
+        0x10a0000010a04,
+        0x10a0500010a07,
+        0x10a0c00010a14,
+        0x10a1500010a18,
+        0x10a1900010a36,
+        0x10a3800010a3b,
+        0x10a3f00010a40,
+        0x10a6000010a7d,
+        0x10a8000010a9d,
+        0x10ac000010ac8,
+        0x10ac900010ae7,
+        0x10b0000010b36,
+        0x10b4000010b56,
+        0x10b6000010b73,
+        0x10b8000010b92,
+        0x10c0000010c49,
+        0x10cc000010cf3,
+        0x10d0000010d28,
+        0x10d3000010d3a,
+        0x10f0000010f1d,
+        0x10f2700010f28,
+        0x10f3000010f51,
+        0x1100000011047,
+        0x1106600011070,
+        0x1107f000110bb,
+        0x110d0000110e9,
+        0x110f0000110fa,
+        0x1110000011135,
+        0x1113600011140,
+        0x1114400011147,
+        0x1115000011174,
+        0x1117600011177,
+        0x11180000111c5,
+        0x111c9000111cd,
+        0x111d0000111db,
+        0x111dc000111dd,
+        0x1120000011212,
+        0x1121300011238,
+        0x1123e0001123f,
+        0x1128000011287,
+        0x1128800011289,
+        0x1128a0001128e,
+        0x1128f0001129e,
+        0x1129f000112a9,
+        0x112b0000112eb,
+        0x112f0000112fa,
+        0x1130000011304,
+        0x113050001130d,
+        0x1130f00011311,
+        0x1131300011329,
+        0x1132a00011331,
+        0x1133200011334,
+        0x113350001133a,
+        0x1133b00011345,
+        0x1134700011349,
+        0x1134b0001134e,
+        0x1135000011351,
+        0x1135700011358,
+        0x1135d00011364,
+        0x113660001136d,
+        0x1137000011375,
+        0x114000001144b,
+        0x114500001145a,
+        0x1145e0001145f,
+        0x11480000114c6,
+        0x114c7000114c8,
+        0x114d0000114da,
+        0x11580000115b6,
+        0x115b8000115c1,
+        0x115d8000115de,
+        0x1160000011641,
+        0x1164400011645,
+        0x116500001165a,
+        0x11680000116b8,
+        0x116c0000116ca,
+        0x117000001171b,
+        0x1171d0001172c,
+        0x117300001173a,
+        0x118000001183b,
+        0x118c0000118ea,
+        0x118ff00011900,
+        0x11a0000011a3f,
+        0x11a4700011a48,
+        0x11a5000011a84,
+        0x11a8600011a9a,
+        0x11a9d00011a9e,
+        0x11ac000011af9,
+        0x11c0000011c09,
+        0x11c0a00011c37,
+        0x11c3800011c41,
+        0x11c5000011c5a,
+        0x11c7200011c90,
+        0x11c9200011ca8,
+        0x11ca900011cb7,
+        0x11d0000011d07,
+        0x11d0800011d0a,
+        0x11d0b00011d37,
+        0x11d3a00011d3b,
+        0x11d3c00011d3e,
+        0x11d3f00011d48,
+        0x11d5000011d5a,
+        0x11d6000011d66,
+        0x11d6700011d69,
+        0x11d6a00011d8f,
+        0x11d9000011d92,
+        0x11d9300011d99,
+        0x11da000011daa,
+        0x11ee000011ef7,
+        0x120000001239a,
+        0x1248000012544,
+        0x130000001342f,
+        0x1440000014647,
+        0x1680000016a39,
+        0x16a4000016a5f,
+        0x16a6000016a6a,
+        0x16ad000016aee,
+        0x16af000016af5,
+        0x16b0000016b37,
+        0x16b4000016b44,
+        0x16b5000016b5a,
+        0x16b6300016b78,
+        0x16b7d00016b90,
+        0x16e6000016e80,
+        0x16f0000016f45,
+        0x16f5000016f7f,
+        0x16f8f00016fa0,
+        0x16fe000016fe2,
+        0x17000000187f2,
+        0x1880000018af3,
+        0x1b0000001b11f,
+        0x1b1700001b2fc,
+        0x1bc000001bc6b,
+        0x1bc700001bc7d,
+        0x1bc800001bc89,
+        0x1bc900001bc9a,
+        0x1bc9d0001bc9f,
+        0x1da000001da37,
+        0x1da3b0001da6d,
+        0x1da750001da76,
+        0x1da840001da85,
+        0x1da9b0001daa0,
+        0x1daa10001dab0,
+        0x1e0000001e007,
+        0x1e0080001e019,
+        0x1e01b0001e022,
+        0x1e0230001e025,
+        0x1e0260001e02b,
+        0x1e8000001e8c5,
+        0x1e8d00001e8d7,
+        0x1e9220001e94b,
+        0x1e9500001e95a,
+        0x200000002a6d7,
+        0x2a7000002b735,
+        0x2b7400002b81e,
+        0x2b8200002cea2,
+        0x2ceb00002ebe1,
+    ),
+    'CONTEXTJ': (
+        0x200c0000200e,
+    ),
+    'CONTEXTO': (
+        0xb7000000b8,
+        0x37500000376,
+        0x5f3000005f5,
+        0x6600000066a,
+        0x6f0000006fa,
+        0x30fb000030fc,
+    ),
+}
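Every value in the codepoint_classes tuples above packs one half-open codepoint range into a single integer as (start << 32) | end, the same encoding that intranges.py below documents and produces. A minimal decoding sketch (decode_range is an illustrative helper mirroring the module's private _decode_range):

# Sketch: unpack a codepoint_classes entry into its (start, end) range.
def decode_range(r):
    return (r >> 32), (r & ((1 << 32) - 1))

# First 'PVALID' entry above: [0x2D, 0x2E), i.e. just the hyphen '-'.
assert decode_range(0x2d0000002e) == (0x2D, 0x2E)
# 0x610000007b covers [0x61, 0x7B): the ASCII letters 'a'..'z'.
assert decode_range(0x610000007b) == (0x61, 0x7B)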
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/intranges.py b/lib/python3.7/site-packages/pip/_vendor/idna/intranges.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa8a735662d0afc1950ba75bc57e99e8480dac88
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/idna/intranges.py
@@ -0,0 +1,53 @@
+"""
+Given a list of integers, made up of (hopefully) a small number of long runs
+of consecutive integers, compute a representation of the form
+((start1, end1), (start2, end2) ...). Then answer the question "was x present
+in the original list?" in time O(log(# runs)).
+"""
+
+import bisect
+
+def intranges_from_list(list_):
+    """Represent a list of integers as a sequence of ranges:
+    ((start_0, end_0), (start_1, end_1), ...), such that the original
+    integers are exactly those x such that start_i <= x < end_i for some i.
+
+    Ranges are encoded as single integers (start << 32 | end), not as tuples.
+    """
+
+    sorted_list = sorted(list_)
+    ranges = []
+    last_write = -1
+    for i in range(len(sorted_list)):
+        if i+1 < len(sorted_list):
+            if sorted_list[i] == sorted_list[i+1]-1:
+                continue
+        current_range = sorted_list[last_write+1:i+1]
+        ranges.append(_encode_range(current_range[0], current_range[-1] + 1))
+        last_write = i
+
+    return tuple(ranges)
+
+def _encode_range(start, end):
+    return (start << 32) | end
+
+def _decode_range(r):
+    return (r >> 32), (r & ((1 << 32) - 1))
+
+
+def intranges_contain(int_, ranges):
+    """Determine if `int_` falls into one of the ranges in `ranges`."""
+    tuple_ = _encode_range(int_, 0)
+    pos = bisect.bisect_left(ranges, tuple_)
+    # we could be immediately ahead of a tuple (start, end)
+    # with start < int_ <= end
+    if pos > 0:
+        left, right = _decode_range(ranges[pos-1])
+        if left <= int_ < right:
+            return True
+    # or we could be immediately behind a tuple (int_, end)
+    if pos < len(ranges):
+        left, _ = _decode_range(ranges[pos])
+        if left == int_:
+            return True
+    return False
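A usage sketch for the two public helpers above; this is not part of the vendored file, and the import path is simply read off the diff header:

from pip._vendor.idna.intranges import intranges_from_list, intranges_contain

# Two runs, [1, 4) and [10, 13), become two packed (start << 32) | end ints.
ranges = intranges_from_list([1, 2, 3, 10, 11, 12])

assert intranges_contain(2, ranges)
assert intranges_contain(12, ranges)
assert not intranges_contain(5, ranges)  # falls in the gap between the runs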
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/package_data.py b/lib/python3.7/site-packages/pip/_vendor/idna/package_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..257e8989393055bec9bf86010828fb8bab7a9645
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/idna/package_data.py
@@ -0,0 +1,2 @@
+__version__ = '2.8'
+
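The uts46data.py file added next encodes the UTS46 mapping table as roughly 8,200 (codepoint, status[, mapping]) rows, split across _seg_N helper functions, presumably to keep each literal small enough for the parser. Each row applies from its codepoint up to the start of the next row. As I read the status codes: 'V' valid, 'M' mapped to the given string, 'D' deviation, 'I' ignored, 'X' disallowed, and '3' disallowed under STD3 rules but valid (or mapped) otherwise. A hedged lookup sketch over a table of this shape (lookup is an illustrative helper, not the library's API):

import bisect

def lookup(table, cp):
    # Rows are sorted by starting codepoint; take the last row at or
    # below cp, since each row covers codepoints up to the next row.
    pos = bisect.bisect_right([row[0] for row in table], cp) - 1
    return table[pos]

table = [(0x40, '3'), (0x41, 'M', u'a'), (0x42, 'M', u'b'), (0x43, 'V')]
assert lookup(table, 0x41)[1:] == ('M', u'a')  # 'A' maps to 'a'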
diff --git a/lib/python3.7/site-packages/pip/_vendor/idna/uts46data.py b/lib/python3.7/site-packages/pip/_vendor/idna/uts46data.py
new file mode 100644
index 0000000000000000000000000000000000000000..a68ed4c0ec3d3a5ffd400dbe8ffb0ec470eb2b66
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/idna/uts46data.py
@@ -0,0 +1,8205 @@
+# This file is automatically generated by tools/idna-data
+# vim: set fileencoding=utf-8 :
+
+"""IDNA Mapping Table from UTS46."""
+
+
+__version__ = "11.0.0"
+def _seg_0():
+    return [
+    (0x0, '3'),
+    (0x1, '3'),
+    (0x2, '3'),
+    (0x3, '3'),
+    (0x4, '3'),
+    (0x5, '3'),
+    (0x6, '3'),
+    (0x7, '3'),
+    (0x8, '3'),
+    (0x9, '3'),
+    (0xA, '3'),
+    (0xB, '3'),
+    (0xC, '3'),
+    (0xD, '3'),
+    (0xE, '3'),
+    (0xF, '3'),
+    (0x10, '3'),
+    (0x11, '3'),
+    (0x12, '3'),
+    (0x13, '3'),
+    (0x14, '3'),
+    (0x15, '3'),
+    (0x16, '3'),
+    (0x17, '3'),
+    (0x18, '3'),
+    (0x19, '3'),
+    (0x1A, '3'),
+    (0x1B, '3'),
+    (0x1C, '3'),
+    (0x1D, '3'),
+    (0x1E, '3'),
+    (0x1F, '3'),
+    (0x20, '3'),
+    (0x21, '3'),
+    (0x22, '3'),
+    (0x23, '3'),
+    (0x24, '3'),
+    (0x25, '3'),
+    (0x26, '3'),
+    (0x27, '3'),
+    (0x28, '3'),
+    (0x29, '3'),
+    (0x2A, '3'),
+    (0x2B, '3'),
+    (0x2C, '3'),
+    (0x2D, 'V'),
+    (0x2E, 'V'),
+    (0x2F, '3'),
+    (0x30, 'V'),
+    (0x31, 'V'),
+    (0x32, 'V'),
+    (0x33, 'V'),
+    (0x34, 'V'),
+    (0x35, 'V'),
+    (0x36, 'V'),
+    (0x37, 'V'),
+    (0x38, 'V'),
+    (0x39, 'V'),
+    (0x3A, '3'),
+    (0x3B, '3'),
+    (0x3C, '3'),
+    (0x3D, '3'),
+    (0x3E, '3'),
+    (0x3F, '3'),
+    (0x40, '3'),
+    (0x41, 'M', u'a'),
+    (0x42, 'M', u'b'),
+    (0x43, 'M', u'c'),
+    (0x44, 'M', u'd'),
+    (0x45, 'M', u'e'),
+    (0x46, 'M', u'f'),
+    (0x47, 'M', u'g'),
+    (0x48, 'M', u'h'),
+    (0x49, 'M', u'i'),
+    (0x4A, 'M', u'j'),
+    (0x4B, 'M', u'k'),
+    (0x4C, 'M', u'l'),
+    (0x4D, 'M', u'm'),
+    (0x4E, 'M', u'n'),
+    (0x4F, 'M', u'o'),
+    (0x50, 'M', u'p'),
+    (0x51, 'M', u'q'),
+    (0x52, 'M', u'r'),
+    (0x53, 'M', u's'),
+    (0x54, 'M', u't'),
+    (0x55, 'M', u'u'),
+    (0x56, 'M', u'v'),
+    (0x57, 'M', u'w'),
+    (0x58, 'M', u'x'),
+    (0x59, 'M', u'y'),
+    (0x5A, 'M', u'z'),
+    (0x5B, '3'),
+    (0x5C, '3'),
+    (0x5D, '3'),
+    (0x5E, '3'),
+    (0x5F, '3'),
+    (0x60, '3'),
+    (0x61, 'V'),
+    (0x62, 'V'),
+    (0x63, 'V'),
+    ]
+
+def _seg_1():
+    return [
+    (0x64, 'V'),
+    (0x65, 'V'),
+    (0x66, 'V'),
+    (0x67, 'V'),
+    (0x68, 'V'),
+    (0x69, 'V'),
+    (0x6A, 'V'),
+    (0x6B, 'V'),
+    (0x6C, 'V'),
+    (0x6D, 'V'),
+    (0x6E, 'V'),
+    (0x6F, 'V'),
+    (0x70, 'V'),
+    (0x71, 'V'),
+    (0x72, 'V'),
+    (0x73, 'V'),
+    (0x74, 'V'),
+    (0x75, 'V'),
+    (0x76, 'V'),
+    (0x77, 'V'),
+    (0x78, 'V'),
+    (0x79, 'V'),
+    (0x7A, 'V'),
+    (0x7B, '3'),
+    (0x7C, '3'),
+    (0x7D, '3'),
+    (0x7E, '3'),
+    (0x7F, '3'),
+    (0x80, 'X'),
+    (0x81, 'X'),
+    (0x82, 'X'),
+    (0x83, 'X'),
+    (0x84, 'X'),
+    (0x85, 'X'),
+    (0x86, 'X'),
+    (0x87, 'X'),
+    (0x88, 'X'),
+    (0x89, 'X'),
+    (0x8A, 'X'),
+    (0x8B, 'X'),
+    (0x8C, 'X'),
+    (0x8D, 'X'),
+    (0x8E, 'X'),
+    (0x8F, 'X'),
+    (0x90, 'X'),
+    (0x91, 'X'),
+    (0x92, 'X'),
+    (0x93, 'X'),
+    (0x94, 'X'),
+    (0x95, 'X'),
+    (0x96, 'X'),
+    (0x97, 'X'),
+    (0x98, 'X'),
+    (0x99, 'X'),
+    (0x9A, 'X'),
+    (0x9B, 'X'),
+    (0x9C, 'X'),
+    (0x9D, 'X'),
+    (0x9E, 'X'),
+    (0x9F, 'X'),
+    (0xA0, '3', u' '),
+    (0xA1, 'V'),
+    (0xA2, 'V'),
+    (0xA3, 'V'),
+    (0xA4, 'V'),
+    (0xA5, 'V'),
+    (0xA6, 'V'),
+    (0xA7, 'V'),
+    (0xA8, '3', u' ̈'),
+    (0xA9, 'V'),
+    (0xAA, 'M', u'a'),
+    (0xAB, 'V'),
+    (0xAC, 'V'),
+    (0xAD, 'I'),
+    (0xAE, 'V'),
+    (0xAF, '3', u' ̄'),
+    (0xB0, 'V'),
+    (0xB1, 'V'),
+    (0xB2, 'M', u'2'),
+    (0xB3, 'M', u'3'),
+    (0xB4, '3', u' ́'),
+    (0xB5, 'M', u'μ'),
+    (0xB6, 'V'),
+    (0xB7, 'V'),
+    (0xB8, '3', u' ̧'),
+    (0xB9, 'M', u'1'),
+    (0xBA, 'M', u'o'),
+    (0xBB, 'V'),
+    (0xBC, 'M', u'1⁄4'),
+    (0xBD, 'M', u'1⁄2'),
+    (0xBE, 'M', u'3⁄4'),
+    (0xBF, 'V'),
+    (0xC0, 'M', u'à'),
+    (0xC1, 'M', u'á'),
+    (0xC2, 'M', u'â'),
+    (0xC3, 'M', u'ã'),
+    (0xC4, 'M', u'ä'),
+    (0xC5, 'M', u'å'),
+    (0xC6, 'M', u'æ'),
+    (0xC7, 'M', u'ç'),
+    ]
+
+def _seg_2():
+    return [
+    (0xC8, 'M', u'è'),
+    (0xC9, 'M', u'é'),
+    (0xCA, 'M', u'ê'),
+    (0xCB, 'M', u'ë'),
+    (0xCC, 'M', u'ì'),
+    (0xCD, 'M', u'í'),
+    (0xCE, 'M', u'î'),
+    (0xCF, 'M', u'ï'),
+    (0xD0, 'M', u'ð'),
+    (0xD1, 'M', u'ñ'),
+    (0xD2, 'M', u'ò'),
+    (0xD3, 'M', u'ó'),
+    (0xD4, 'M', u'ô'),
+    (0xD5, 'M', u'õ'),
+    (0xD6, 'M', u'ö'),
+    (0xD7, 'V'),
+    (0xD8, 'M', u'ø'),
+    (0xD9, 'M', u'ù'),
+    (0xDA, 'M', u'ú'),
+    (0xDB, 'M', u'û'),
+    (0xDC, 'M', u'ü'),
+    (0xDD, 'M', u'ý'),
+    (0xDE, 'M', u'þ'),
+    (0xDF, 'D', u'ss'),
+    (0xE0, 'V'),
+    (0xE1, 'V'),
+    (0xE2, 'V'),
+    (0xE3, 'V'),
+    (0xE4, 'V'),
+    (0xE5, 'V'),
+    (0xE6, 'V'),
+    (0xE7, 'V'),
+    (0xE8, 'V'),
+    (0xE9, 'V'),
+    (0xEA, 'V'),
+    (0xEB, 'V'),
+    (0xEC, 'V'),
+    (0xED, 'V'),
+    (0xEE, 'V'),
+    (0xEF, 'V'),
+    (0xF0, 'V'),
+    (0xF1, 'V'),
+    (0xF2, 'V'),
+    (0xF3, 'V'),
+    (0xF4, 'V'),
+    (0xF5, 'V'),
+    (0xF6, 'V'),
+    (0xF7, 'V'),
+    (0xF8, 'V'),
+    (0xF9, 'V'),
+    (0xFA, 'V'),
+    (0xFB, 'V'),
+    (0xFC, 'V'),
+    (0xFD, 'V'),
+    (0xFE, 'V'),
+    (0xFF, 'V'),
+    (0x100, 'M', u'ā'),
+    (0x101, 'V'),
+    (0x102, 'M', u'ă'),
+    (0x103, 'V'),
+    (0x104, 'M', u'ą'),
+    (0x105, 'V'),
+    (0x106, 'M', u'ć'),
+    (0x107, 'V'),
+    (0x108, 'M', u'ĉ'),
+    (0x109, 'V'),
+    (0x10A, 'M', u'ċ'),
+    (0x10B, 'V'),
+    (0x10C, 'M', u'č'),
+    (0x10D, 'V'),
+    (0x10E, 'M', u'ď'),
+    (0x10F, 'V'),
+    (0x110, 'M', u'đ'),
+    (0x111, 'V'),
+    (0x112, 'M', u'ē'),
+    (0x113, 'V'),
+    (0x114, 'M', u'ĕ'),
+    (0x115, 'V'),
+    (0x116, 'M', u'ė'),
+    (0x117, 'V'),
+    (0x118, 'M', u'ę'),
+    (0x119, 'V'),
+    (0x11A, 'M', u'ě'),
+    (0x11B, 'V'),
+    (0x11C, 'M', u'ĝ'),
+    (0x11D, 'V'),
+    (0x11E, 'M', u'ğ'),
+    (0x11F, 'V'),
+    (0x120, 'M', u'ġ'),
+    (0x121, 'V'),
+    (0x122, 'M', u'ģ'),
+    (0x123, 'V'),
+    (0x124, 'M', u'ĥ'),
+    (0x125, 'V'),
+    (0x126, 'M', u'ħ'),
+    (0x127, 'V'),
+    (0x128, 'M', u'ĩ'),
+    (0x129, 'V'),
+    (0x12A, 'M', u'ī'),
+    (0x12B, 'V'),
+    ]
+
+def _seg_3():
+    return [
+    (0x12C, 'M', u'ĭ'),
+    (0x12D, 'V'),
+    (0x12E, 'M', u'į'),
+    (0x12F, 'V'),
+    (0x130, 'M', u'i̇'),
+    (0x131, 'V'),
+    (0x132, 'M', u'ĳ'),
+    (0x134, 'M', u'ĵ'),
+    (0x135, 'V'),
+    (0x136, 'M', u'ķ'),
+    (0x137, 'V'),
+    (0x139, 'M', u'ĺ'),
+    (0x13A, 'V'),
+    (0x13B, 'M', u'ļ'),
+    (0x13C, 'V'),
+    (0x13D, 'M', u'ľ'),
+    (0x13E, 'V'),
+    (0x13F, 'M', u'l·'),
+    (0x141, 'M', u'ł'),
+    (0x142, 'V'),
+    (0x143, 'M', u'ń'),
+    (0x144, 'V'),
+    (0x145, 'M', u'ņ'),
+    (0x146, 'V'),
+    (0x147, 'M', u'ň'),
+    (0x148, 'V'),
+    (0x149, 'M', u'ʼn'),
+    (0x14A, 'M', u'ŋ'),
+    (0x14B, 'V'),
+    (0x14C, 'M', u'ō'),
+    (0x14D, 'V'),
+    (0x14E, 'M', u'ŏ'),
+    (0x14F, 'V'),
+    (0x150, 'M', u'ő'),
+    (0x151, 'V'),
+    (0x152, 'M', u'œ'),
+    (0x153, 'V'),
+    (0x154, 'M', u'ŕ'),
+    (0x155, 'V'),
+    (0x156, 'M', u'ŗ'),
+    (0x157, 'V'),
+    (0x158, 'M', u'ř'),
+    (0x159, 'V'),
+    (0x15A, 'M', u'ś'),
+    (0x15B, 'V'),
+    (0x15C, 'M', u'ŝ'),
+    (0x15D, 'V'),
+    (0x15E, 'M', u'ş'),
+    (0x15F, 'V'),
+    (0x160, 'M', u'š'),
+    (0x161, 'V'),
+    (0x162, 'M', u'ţ'),
+    (0x163, 'V'),
+    (0x164, 'M', u'ť'),
+    (0x165, 'V'),
+    (0x166, 'M', u'ŧ'),
+    (0x167, 'V'),
+    (0x168, 'M', u'ũ'),
+    (0x169, 'V'),
+    (0x16A, 'M', u'ū'),
+    (0x16B, 'V'),
+    (0x16C, 'M', u'ŭ'),
+    (0x16D, 'V'),
+    (0x16E, 'M', u'ů'),
+    (0x16F, 'V'),
+    (0x170, 'M', u'ű'),
+    (0x171, 'V'),
+    (0x172, 'M', u'ų'),
+    (0x173, 'V'),
+    (0x174, 'M', u'ŵ'),
+    (0x175, 'V'),
+    (0x176, 'M', u'ŷ'),
+    (0x177, 'V'),
+    (0x178, 'M', u'ÿ'),
+    (0x179, 'M', u'ź'),
+    (0x17A, 'V'),
+    (0x17B, 'M', u'ż'),
+    (0x17C, 'V'),
+    (0x17D, 'M', u'ž'),
+    (0x17E, 'V'),
+    (0x17F, 'M', u's'),
+    (0x180, 'V'),
+    (0x181, 'M', u'ɓ'),
+    (0x182, 'M', u'ƃ'),
+    (0x183, 'V'),
+    (0x184, 'M', u'ƅ'),
+    (0x185, 'V'),
+    (0x186, 'M', u'ɔ'),
+    (0x187, 'M', u'ƈ'),
+    (0x188, 'V'),
+    (0x189, 'M', u'ɖ'),
+    (0x18A, 'M', u'ɗ'),
+    (0x18B, 'M', u'ƌ'),
+    (0x18C, 'V'),
+    (0x18E, 'M', u'ǝ'),
+    (0x18F, 'M', u'ə'),
+    (0x190, 'M', u'ɛ'),
+    (0x191, 'M', u'ƒ'),
+    (0x192, 'V'),
+    (0x193, 'M', u'ɠ'),
+    ]
+
+def _seg_4():
+    return [
+    (0x194, 'M', u'ɣ'),
+    (0x195, 'V'),
+    (0x196, 'M', u'ɩ'),
+    (0x197, 'M', u'ɨ'),
+    (0x198, 'M', u'ƙ'),
+    (0x199, 'V'),
+    (0x19C, 'M', u'ɯ'),
+    (0x19D, 'M', u'ɲ'),
+    (0x19E, 'V'),
+    (0x19F, 'M', u'ɵ'),
+    (0x1A0, 'M', u'ơ'),
+    (0x1A1, 'V'),
+    (0x1A2, 'M', u'ƣ'),
+    (0x1A3, 'V'),
+    (0x1A4, 'M', u'ƥ'),
+    (0x1A5, 'V'),
+    (0x1A6, 'M', u'ʀ'),
+    (0x1A7, 'M', u'ƨ'),
+    (0x1A8, 'V'),
+    (0x1A9, 'M', u'ʃ'),
+    (0x1AA, 'V'),
+    (0x1AC, 'M', u'ƭ'),
+    (0x1AD, 'V'),
+    (0x1AE, 'M', u'ʈ'),
+    (0x1AF, 'M', u'ư'),
+    (0x1B0, 'V'),
+    (0x1B1, 'M', u'ʊ'),
+    (0x1B2, 'M', u'ʋ'),
+    (0x1B3, 'M', u'ƴ'),
+    (0x1B4, 'V'),
+    (0x1B5, 'M', u'ƶ'),
+    (0x1B6, 'V'),
+    (0x1B7, 'M', u'ʒ'),
+    (0x1B8, 'M', u'ƹ'),
+    (0x1B9, 'V'),
+    (0x1BC, 'M', u'ƽ'),
+    (0x1BD, 'V'),
+    (0x1C4, 'M', u'ǆ'),
+    (0x1C7, 'M', u'ǉ'),
+    (0x1CA, 'M', u'ǌ'),
+    (0x1CD, 'M', u'ǎ'),
+    (0x1CE, 'V'),
+    (0x1CF, 'M', u'ǐ'),
+    (0x1D0, 'V'),
+    (0x1D1, 'M', u'ǒ'),
+    (0x1D2, 'V'),
+    (0x1D3, 'M', u'ǔ'),
+    (0x1D4, 'V'),
+    (0x1D5, 'M', u'ǖ'),
+    (0x1D6, 'V'),
+    (0x1D7, 'M', u'ǘ'),
+    (0x1D8, 'V'),
+    (0x1D9, 'M', u'ǚ'),
+    (0x1DA, 'V'),
+    (0x1DB, 'M', u'ǜ'),
+    (0x1DC, 'V'),
+    (0x1DE, 'M', u'ǟ'),
+    (0x1DF, 'V'),
+    (0x1E0, 'M', u'ǡ'),
+    (0x1E1, 'V'),
+    (0x1E2, 'M', u'ǣ'),
+    (0x1E3, 'V'),
+    (0x1E4, 'M', u'ǥ'),
+    (0x1E5, 'V'),
+    (0x1E6, 'M', u'ǧ'),
+    (0x1E7, 'V'),
+    (0x1E8, 'M', u'ǩ'),
+    (0x1E9, 'V'),
+    (0x1EA, 'M', u'ǫ'),
+    (0x1EB, 'V'),
+    (0x1EC, 'M', u'ǭ'),
+    (0x1ED, 'V'),
+    (0x1EE, 'M', u'ǯ'),
+    (0x1EF, 'V'),
+    (0x1F1, 'M', u'ǳ'),
+    (0x1F4, 'M', u'ǵ'),
+    (0x1F5, 'V'),
+    (0x1F6, 'M', u'ƕ'),
+    (0x1F7, 'M', u'ƿ'),
+    (0x1F8, 'M', u'ǹ'),
+    (0x1F9, 'V'),
+    (0x1FA, 'M', u'ǻ'),
+    (0x1FB, 'V'),
+    (0x1FC, 'M', u'ǽ'),
+    (0x1FD, 'V'),
+    (0x1FE, 'M', u'ǿ'),
+    (0x1FF, 'V'),
+    (0x200, 'M', u'ȁ'),
+    (0x201, 'V'),
+    (0x202, 'M', u'ȃ'),
+    (0x203, 'V'),
+    (0x204, 'M', u'ȅ'),
+    (0x205, 'V'),
+    (0x206, 'M', u'ȇ'),
+    (0x207, 'V'),
+    (0x208, 'M', u'ȉ'),
+    (0x209, 'V'),
+    (0x20A, 'M', u'ȋ'),
+    (0x20B, 'V'),
+    (0x20C, 'M', u'ȍ'),
+    ]
+
+def _seg_5():
+    return [
+    (0x20D, 'V'),
+    (0x20E, 'M', u'ȏ'),
+    (0x20F, 'V'),
+    (0x210, 'M', u'ȑ'),
+    (0x211, 'V'),
+    (0x212, 'M', u'ȓ'),
+    (0x213, 'V'),
+    (0x214, 'M', u'ȕ'),
+    (0x215, 'V'),
+    (0x216, 'M', u'ȗ'),
+    (0x217, 'V'),
+    (0x218, 'M', u'ș'),
+    (0x219, 'V'),
+    (0x21A, 'M', u'ț'),
+    (0x21B, 'V'),
+    (0x21C, 'M', u'ȝ'),
+    (0x21D, 'V'),
+    (0x21E, 'M', u'ȟ'),
+    (0x21F, 'V'),
+    (0x220, 'M', u'ƞ'),
+    (0x221, 'V'),
+    (0x222, 'M', u'ȣ'),
+    (0x223, 'V'),
+    (0x224, 'M', u'ȥ'),
+    (0x225, 'V'),
+    (0x226, 'M', u'ȧ'),
+    (0x227, 'V'),
+    (0x228, 'M', u'ȩ'),
+    (0x229, 'V'),
+    (0x22A, 'M', u'ȫ'),
+    (0x22B, 'V'),
+    (0x22C, 'M', u'ȭ'),
+    (0x22D, 'V'),
+    (0x22E, 'M', u'ȯ'),
+    (0x22F, 'V'),
+    (0x230, 'M', u'ȱ'),
+    (0x231, 'V'),
+    (0x232, 'M', u'ȳ'),
+    (0x233, 'V'),
+    (0x23A, 'M', u'ⱥ'),
+    (0x23B, 'M', u'ȼ'),
+    (0x23C, 'V'),
+    (0x23D, 'M', u'ƚ'),
+    (0x23E, 'M', u'ⱦ'),
+    (0x23F, 'V'),
+    (0x241, 'M', u'ɂ'),
+    (0x242, 'V'),
+    (0x243, 'M', u'ƀ'),
+    (0x244, 'M', u'ʉ'),
+    (0x245, 'M', u'ʌ'),
+    (0x246, 'M', u'ɇ'),
+    (0x247, 'V'),
+    (0x248, 'M', u'ɉ'),
+    (0x249, 'V'),
+    (0x24A, 'M', u'ɋ'),
+    (0x24B, 'V'),
+    (0x24C, 'M', u'ɍ'),
+    (0x24D, 'V'),
+    (0x24E, 'M', u'ɏ'),
+    (0x24F, 'V'),
+    (0x2B0, 'M', u'h'),
+    (0x2B1, 'M', u'ɦ'),
+    (0x2B2, 'M', u'j'),
+    (0x2B3, 'M', u'r'),
+    (0x2B4, 'M', u'ɹ'),
+    (0x2B5, 'M', u'ɻ'),
+    (0x2B6, 'M', u'ʁ'),
+    (0x2B7, 'M', u'w'),
+    (0x2B8, 'M', u'y'),
+    (0x2B9, 'V'),
+    (0x2D8, '3', u' ̆'),
+    (0x2D9, '3', u' ̇'),
+    (0x2DA, '3', u' ̊'),
+    (0x2DB, '3', u' ̨'),
+    (0x2DC, '3', u' ̃'),
+    (0x2DD, '3', u' ̋'),
+    (0x2DE, 'V'),
+    (0x2E0, 'M', u'ɣ'),
+    (0x2E1, 'M', u'l'),
+    (0x2E2, 'M', u's'),
+    (0x2E3, 'M', u'x'),
+    (0x2E4, 'M', u'ʕ'),
+    (0x2E5, 'V'),
+    (0x340, 'M', u'̀'),
+    (0x341, 'M', u'́'),
+    (0x342, 'V'),
+    (0x343, 'M', u'̓'),
+    (0x344, 'M', u'̈́'),
+    (0x345, 'M', u'ι'),
+    (0x346, 'V'),
+    (0x34F, 'I'),
+    (0x350, 'V'),
+    (0x370, 'M', u'ͱ'),
+    (0x371, 'V'),
+    (0x372, 'M', u'ͳ'),
+    (0x373, 'V'),
+    (0x374, 'M', u'ʹ'),
+    (0x375, 'V'),
+    (0x376, 'M', u'ͷ'),
+    (0x377, 'V'),
+    ]
+
+def _seg_6():
+    return [
+    (0x378, 'X'),
+    (0x37A, '3', u' ι'),
+    (0x37B, 'V'),
+    (0x37E, '3', u';'),
+    (0x37F, 'M', u'ϳ'),
+    (0x380, 'X'),
+    (0x384, '3', u' ́'),
+    (0x385, '3', u' ̈́'),
+    (0x386, 'M', u'ά'),
+    (0x387, 'M', u'·'),
+    (0x388, 'M', u'έ'),
+    (0x389, 'M', u'ή'),
+    (0x38A, 'M', u'ί'),
+    (0x38B, 'X'),
+    (0x38C, 'M', u'ό'),
+    (0x38D, 'X'),
+    (0x38E, 'M', u'ύ'),
+    (0x38F, 'M', u'ώ'),
+    (0x390, 'V'),
+    (0x391, 'M', u'α'),
+    (0x392, 'M', u'β'),
+    (0x393, 'M', u'γ'),
+    (0x394, 'M', u'δ'),
+    (0x395, 'M', u'ε'),
+    (0x396, 'M', u'ζ'),
+    (0x397, 'M', u'η'),
+    (0x398, 'M', u'θ'),
+    (0x399, 'M', u'ι'),
+    (0x39A, 'M', u'κ'),
+    (0x39B, 'M', u'λ'),
+    (0x39C, 'M', u'μ'),
+    (0x39D, 'M', u'ν'),
+    (0x39E, 'M', u'ξ'),
+    (0x39F, 'M', u'ο'),
+    (0x3A0, 'M', u'π'),
+    (0x3A1, 'M', u'ρ'),
+    (0x3A2, 'X'),
+    (0x3A3, 'M', u'σ'),
+    (0x3A4, 'M', u'τ'),
+    (0x3A5, 'M', u'υ'),
+    (0x3A6, 'M', u'φ'),
+    (0x3A7, 'M', u'χ'),
+    (0x3A8, 'M', u'ψ'),
+    (0x3A9, 'M', u'ω'),
+    (0x3AA, 'M', u'ϊ'),
+    (0x3AB, 'M', u'ϋ'),
+    (0x3AC, 'V'),
+    (0x3C2, 'D', u'σ'),
+    (0x3C3, 'V'),
+    (0x3CF, 'M', u'ϗ'),
+    (0x3D0, 'M', u'β'),
+    (0x3D1, 'M', u'θ'),
+    (0x3D2, 'M', u'υ'),
+    (0x3D3, 'M', u'ύ'),
+    (0x3D4, 'M', u'ϋ'),
+    (0x3D5, 'M', u'φ'),
+    (0x3D6, 'M', u'π'),
+    (0x3D7, 'V'),
+    (0x3D8, 'M', u'ϙ'),
+    (0x3D9, 'V'),
+    (0x3DA, 'M', u'ϛ'),
+    (0x3DB, 'V'),
+    (0x3DC, 'M', u'ϝ'),
+    (0x3DD, 'V'),
+    (0x3DE, 'M', u'ϟ'),
+    (0x3DF, 'V'),
+    (0x3E0, 'M', u'ϡ'),
+    (0x3E1, 'V'),
+    (0x3E2, 'M', u'ϣ'),
+    (0x3E3, 'V'),
+    (0x3E4, 'M', u'ϥ'),
+    (0x3E5, 'V'),
+    (0x3E6, 'M', u'ϧ'),
+    (0x3E7, 'V'),
+    (0x3E8, 'M', u'ϩ'),
+    (0x3E9, 'V'),
+    (0x3EA, 'M', u'ϫ'),
+    (0x3EB, 'V'),
+    (0x3EC, 'M', u'ϭ'),
+    (0x3ED, 'V'),
+    (0x3EE, 'M', u'ϯ'),
+    (0x3EF, 'V'),
+    (0x3F0, 'M', u'κ'),
+    (0x3F1, 'M', u'ρ'),
+    (0x3F2, 'M', u'σ'),
+    (0x3F3, 'V'),
+    (0x3F4, 'M', u'θ'),
+    (0x3F5, 'M', u'ε'),
+    (0x3F6, 'V'),
+    (0x3F7, 'M', u'ϸ'),
+    (0x3F8, 'V'),
+    (0x3F9, 'M', u'σ'),
+    (0x3FA, 'M', u'ϻ'),
+    (0x3FB, 'V'),
+    (0x3FD, 'M', u'ͻ'),
+    (0x3FE, 'M', u'ͼ'),
+    (0x3FF, 'M', u'ͽ'),
+    (0x400, 'M', u'ѐ'),
+    (0x401, 'M', u'ё'),
+    (0x402, 'M', u'ђ'),
+    ]
+
+def _seg_7():
+    return [
+    (0x403, 'M', u'ѓ'),
+    (0x404, 'M', u'є'),
+    (0x405, 'M', u'ѕ'),
+    (0x406, 'M', u'і'),
+    (0x407, 'M', u'ї'),
+    (0x408, 'M', u'ј'),
+    (0x409, 'M', u'љ'),
+    (0x40A, 'M', u'њ'),
+    (0x40B, 'M', u'ћ'),
+    (0x40C, 'M', u'ќ'),
+    (0x40D, 'M', u'ѝ'),
+    (0x40E, 'M', u'ў'),
+    (0x40F, 'M', u'џ'),
+    (0x410, 'M', u'а'),
+    (0x411, 'M', u'б'),
+    (0x412, 'M', u'в'),
+    (0x413, 'M', u'г'),
+    (0x414, 'M', u'д'),
+    (0x415, 'M', u'е'),
+    (0x416, 'M', u'ж'),
+    (0x417, 'M', u'з'),
+    (0x418, 'M', u'и'),
+    (0x419, 'M', u'й'),
+    (0x41A, 'M', u'к'),
+    (0x41B, 'M', u'л'),
+    (0x41C, 'M', u'м'),
+    (0x41D, 'M', u'н'),
+    (0x41E, 'M', u'о'),
+    (0x41F, 'M', u'п'),
+    (0x420, 'M', u'р'),
+    (0x421, 'M', u'с'),
+    (0x422, 'M', u'т'),
+    (0x423, 'M', u'у'),
+    (0x424, 'M', u'ф'),
+    (0x425, 'M', u'х'),
+    (0x426, 'M', u'ц'),
+    (0x427, 'M', u'ч'),
+    (0x428, 'M', u'ш'),
+    (0x429, 'M', u'щ'),
+    (0x42A, 'M', u'ъ'),
+    (0x42B, 'M', u'ы'),
+    (0x42C, 'M', u'ь'),
+    (0x42D, 'M', u'э'),
+    (0x42E, 'M', u'ю'),
+    (0x42F, 'M', u'я'),
+    (0x430, 'V'),
+    (0x460, 'M', u'ѡ'),
+    (0x461, 'V'),
+    (0x462, 'M', u'ѣ'),
+    (0x463, 'V'),
+    (0x464, 'M', u'ѥ'),
+    (0x465, 'V'),
+    (0x466, 'M', u'ѧ'),
+    (0x467, 'V'),
+    (0x468, 'M', u'ѩ'),
+    (0x469, 'V'),
+    (0x46A, 'M', u'ѫ'),
+    (0x46B, 'V'),
+    (0x46C, 'M', u'ѭ'),
+    (0x46D, 'V'),
+    (0x46E, 'M', u'ѯ'),
+    (0x46F, 'V'),
+    (0x470, 'M', u'ѱ'),
+    (0x471, 'V'),
+    (0x472, 'M', u'ѳ'),
+    (0x473, 'V'),
+    (0x474, 'M', u'ѵ'),
+    (0x475, 'V'),
+    (0x476, 'M', u'ѷ'),
+    (0x477, 'V'),
+    (0x478, 'M', u'ѹ'),
+    (0x479, 'V'),
+    (0x47A, 'M', u'ѻ'),
+    (0x47B, 'V'),
+    (0x47C, 'M', u'ѽ'),
+    (0x47D, 'V'),
+    (0x47E, 'M', u'ѿ'),
+    (0x47F, 'V'),
+    (0x480, 'M', u'ҁ'),
+    (0x481, 'V'),
+    (0x48A, 'M', u'ҋ'),
+    (0x48B, 'V'),
+    (0x48C, 'M', u'ҍ'),
+    (0x48D, 'V'),
+    (0x48E, 'M', u'ҏ'),
+    (0x48F, 'V'),
+    (0x490, 'M', u'ґ'),
+    (0x491, 'V'),
+    (0x492, 'M', u'ғ'),
+    (0x493, 'V'),
+    (0x494, 'M', u'ҕ'),
+    (0x495, 'V'),
+    (0x496, 'M', u'җ'),
+    (0x497, 'V'),
+    (0x498, 'M', u'ҙ'),
+    (0x499, 'V'),
+    (0x49A, 'M', u'қ'),
+    (0x49B, 'V'),
+    (0x49C, 'M', u'ҝ'),
+    (0x49D, 'V'),
+    ]
+
+def _seg_8():
+    return [
+    (0x49E, 'M', u'ҟ'),
+    (0x49F, 'V'),
+    (0x4A0, 'M', u'ҡ'),
+    (0x4A1, 'V'),
+    (0x4A2, 'M', u'ң'),
+    (0x4A3, 'V'),
+    (0x4A4, 'M', u'ҥ'),
+    (0x4A5, 'V'),
+    (0x4A6, 'M', u'ҧ'),
+    (0x4A7, 'V'),
+    (0x4A8, 'M', u'ҩ'),
+    (0x4A9, 'V'),
+    (0x4AA, 'M', u'ҫ'),
+    (0x4AB, 'V'),
+    (0x4AC, 'M', u'ҭ'),
+    (0x4AD, 'V'),
+    (0x4AE, 'M', u'ү'),
+    (0x4AF, 'V'),
+    (0x4B0, 'M', u'ұ'),
+    (0x4B1, 'V'),
+    (0x4B2, 'M', u'ҳ'),
+    (0x4B3, 'V'),
+    (0x4B4, 'M', u'ҵ'),
+    (0x4B5, 'V'),
+    (0x4B6, 'M', u'ҷ'),
+    (0x4B7, 'V'),
+    (0x4B8, 'M', u'ҹ'),
+    (0x4B9, 'V'),
+    (0x4BA, 'M', u'һ'),
+    (0x4BB, 'V'),
+    (0x4BC, 'M', u'ҽ'),
+    (0x4BD, 'V'),
+    (0x4BE, 'M', u'ҿ'),
+    (0x4BF, 'V'),
+    (0x4C0, 'X'),
+    (0x4C1, 'M', u'ӂ'),
+    (0x4C2, 'V'),
+    (0x4C3, 'M', u'ӄ'),
+    (0x4C4, 'V'),
+    (0x4C5, 'M', u'ӆ'),
+    (0x4C6, 'V'),
+    (0x4C7, 'M', u'ӈ'),
+    (0x4C8, 'V'),
+    (0x4C9, 'M', u'ӊ'),
+    (0x4CA, 'V'),
+    (0x4CB, 'M', u'ӌ'),
+    (0x4CC, 'V'),
+    (0x4CD, 'M', u'ӎ'),
+    (0x4CE, 'V'),
+    (0x4D0, 'M', u'ӑ'),
+    (0x4D1, 'V'),
+    (0x4D2, 'M', u'ӓ'),
+    (0x4D3, 'V'),
+    (0x4D4, 'M', u'ӕ'),
+    (0x4D5, 'V'),
+    (0x4D6, 'M', u'ӗ'),
+    (0x4D7, 'V'),
+    (0x4D8, 'M', u'ә'),
+    (0x4D9, 'V'),
+    (0x4DA, 'M', u'ӛ'),
+    (0x4DB, 'V'),
+    (0x4DC, 'M', u'ӝ'),
+    (0x4DD, 'V'),
+    (0x4DE, 'M', u'ӟ'),
+    (0x4DF, 'V'),
+    (0x4E0, 'M', u'ӡ'),
+    (0x4E1, 'V'),
+    (0x4E2, 'M', u'ӣ'),
+    (0x4E3, 'V'),
+    (0x4E4, 'M', u'ӥ'),
+    (0x4E5, 'V'),
+    (0x4E6, 'M', u'ӧ'),
+    (0x4E7, 'V'),
+    (0x4E8, 'M', u'ө'),
+    (0x4E9, 'V'),
+    (0x4EA, 'M', u'ӫ'),
+    (0x4EB, 'V'),
+    (0x4EC, 'M', u'ӭ'),
+    (0x4ED, 'V'),
+    (0x4EE, 'M', u'ӯ'),
+    (0x4EF, 'V'),
+    (0x4F0, 'M', u'ӱ'),
+    (0x4F1, 'V'),
+    (0x4F2, 'M', u'ӳ'),
+    (0x4F3, 'V'),
+    (0x4F4, 'M', u'ӵ'),
+    (0x4F5, 'V'),
+    (0x4F6, 'M', u'ӷ'),
+    (0x4F7, 'V'),
+    (0x4F8, 'M', u'ӹ'),
+    (0x4F9, 'V'),
+    (0x4FA, 'M', u'ӻ'),
+    (0x4FB, 'V'),
+    (0x4FC, 'M', u'ӽ'),
+    (0x4FD, 'V'),
+    (0x4FE, 'M', u'ӿ'),
+    (0x4FF, 'V'),
+    (0x500, 'M', u'ԁ'),
+    (0x501, 'V'),
+    (0x502, 'M', u'ԃ'),
+    ]
+
+def _seg_9():
+    return [
+    (0x503, 'V'),
+    (0x504, 'M', u'ԅ'),
+    (0x505, 'V'),
+    (0x506, 'M', u'ԇ'),
+    (0x507, 'V'),
+    (0x508, 'M', u'ԉ'),
+    (0x509, 'V'),
+    (0x50A, 'M', u'ԋ'),
+    (0x50B, 'V'),
+    (0x50C, 'M', u'ԍ'),
+    (0x50D, 'V'),
+    (0x50E, 'M', u'ԏ'),
+    (0x50F, 'V'),
+    (0x510, 'M', u'ԑ'),
+    (0x511, 'V'),
+    (0x512, 'M', u'ԓ'),
+    (0x513, 'V'),
+    (0x514, 'M', u'ԕ'),
+    (0x515, 'V'),
+    (0x516, 'M', u'ԗ'),
+    (0x517, 'V'),
+    (0x518, 'M', u'ԙ'),
+    (0x519, 'V'),
+    (0x51A, 'M', u'ԛ'),
+    (0x51B, 'V'),
+    (0x51C, 'M', u'ԝ'),
+    (0x51D, 'V'),
+    (0x51E, 'M', u'ԟ'),
+    (0x51F, 'V'),
+    (0x520, 'M', u'ԡ'),
+    (0x521, 'V'),
+    (0x522, 'M', u'ԣ'),
+    (0x523, 'V'),
+    (0x524, 'M', u'ԥ'),
+    (0x525, 'V'),
+    (0x526, 'M', u'ԧ'),
+    (0x527, 'V'),
+    (0x528, 'M', u'ԩ'),
+    (0x529, 'V'),
+    (0x52A, 'M', u'ԫ'),
+    (0x52B, 'V'),
+    (0x52C, 'M', u'ԭ'),
+    (0x52D, 'V'),
+    (0x52E, 'M', u'ԯ'),
+    (0x52F, 'V'),
+    (0x530, 'X'),
+    (0x531, 'M', u'ա'),
+    (0x532, 'M', u'բ'),
+    (0x533, 'M', u'գ'),
+    (0x534, 'M', u'դ'),
+    (0x535, 'M', u'ե'),
+    (0x536, 'M', u'զ'),
+    (0x537, 'M', u'է'),
+    (0x538, 'M', u'ը'),
+    (0x539, 'M', u'թ'),
+    (0x53A, 'M', u'ժ'),
+    (0x53B, 'M', u'ի'),
+    (0x53C, 'M', u'լ'),
+    (0x53D, 'M', u'խ'),
+    (0x53E, 'M', u'ծ'),
+    (0x53F, 'M', u'կ'),
+    (0x540, 'M', u'հ'),
+    (0x541, 'M', u'ձ'),
+    (0x542, 'M', u'ղ'),
+    (0x543, 'M', u'ճ'),
+    (0x544, 'M', u'մ'),
+    (0x545, 'M', u'յ'),
+    (0x546, 'M', u'ն'),
+    (0x547, 'M', u'շ'),
+    (0x548, 'M', u'ո'),
+    (0x549, 'M', u'չ'),
+    (0x54A, 'M', u'պ'),
+    (0x54B, 'M', u'ջ'),
+    (0x54C, 'M', u'ռ'),
+    (0x54D, 'M', u'ս'),
+    (0x54E, 'M', u'վ'),
+    (0x54F, 'M', u'տ'),
+    (0x550, 'M', u'ր'),
+    (0x551, 'M', u'ց'),
+    (0x552, 'M', u'ւ'),
+    (0x553, 'M', u'փ'),
+    (0x554, 'M', u'ք'),
+    (0x555, 'M', u'օ'),
+    (0x556, 'M', u'ֆ'),
+    (0x557, 'X'),
+    (0x559, 'V'),
+    (0x587, 'M', u'եւ'),
+    (0x588, 'V'),
+    (0x58B, 'X'),
+    (0x58D, 'V'),
+    (0x590, 'X'),
+    (0x591, 'V'),
+    (0x5C8, 'X'),
+    (0x5D0, 'V'),
+    (0x5EB, 'X'),
+    (0x5EF, 'V'),
+    (0x5F5, 'X'),
+    (0x606, 'V'),
+    (0x61C, 'X'),
+    (0x61E, 'V'),
+    ]
+
+def _seg_10():
+    return [
+    (0x675, 'M', u'اٴ'),
+    (0x676, 'M', u'وٴ'),
+    (0x677, 'M', u'ۇٴ'),
+    (0x678, 'M', u'يٴ'),
+    (0x679, 'V'),
+    (0x6DD, 'X'),
+    (0x6DE, 'V'),
+    (0x70E, 'X'),
+    (0x710, 'V'),
+    (0x74B, 'X'),
+    (0x74D, 'V'),
+    (0x7B2, 'X'),
+    (0x7C0, 'V'),
+    (0x7FB, 'X'),
+    (0x7FD, 'V'),
+    (0x82E, 'X'),
+    (0x830, 'V'),
+    (0x83F, 'X'),
+    (0x840, 'V'),
+    (0x85C, 'X'),
+    (0x85E, 'V'),
+    (0x85F, 'X'),
+    (0x860, 'V'),
+    (0x86B, 'X'),
+    (0x8A0, 'V'),
+    (0x8B5, 'X'),
+    (0x8B6, 'V'),
+    (0x8BE, 'X'),
+    (0x8D3, 'V'),
+    (0x8E2, 'X'),
+    (0x8E3, 'V'),
+    (0x958, 'M', u'क़'),
+    (0x959, 'M', u'ख़'),
+    (0x95A, 'M', u'ग़'),
+    (0x95B, 'M', u'ज़'),
+    (0x95C, 'M', u'ड़'),
+    (0x95D, 'M', u'ढ़'),
+    (0x95E, 'M', u'फ़'),
+    (0x95F, 'M', u'य़'),
+    (0x960, 'V'),
+    (0x984, 'X'),
+    (0x985, 'V'),
+    (0x98D, 'X'),
+    (0x98F, 'V'),
+    (0x991, 'X'),
+    (0x993, 'V'),
+    (0x9A9, 'X'),
+    (0x9AA, 'V'),
+    (0x9B1, 'X'),
+    (0x9B2, 'V'),
+    (0x9B3, 'X'),
+    (0x9B6, 'V'),
+    (0x9BA, 'X'),
+    (0x9BC, 'V'),
+    (0x9C5, 'X'),
+    (0x9C7, 'V'),
+    (0x9C9, 'X'),
+    (0x9CB, 'V'),
+    (0x9CF, 'X'),
+    (0x9D7, 'V'),
+    (0x9D8, 'X'),
+    (0x9DC, 'M', u'ড়'),
+    (0x9DD, 'M', u'ঢ়'),
+    (0x9DE, 'X'),
+    (0x9DF, 'M', u'য়'),
+    (0x9E0, 'V'),
+    (0x9E4, 'X'),
+    (0x9E6, 'V'),
+    (0x9FF, 'X'),
+    (0xA01, 'V'),
+    (0xA04, 'X'),
+    (0xA05, 'V'),
+    (0xA0B, 'X'),
+    (0xA0F, 'V'),
+    (0xA11, 'X'),
+    (0xA13, 'V'),
+    (0xA29, 'X'),
+    (0xA2A, 'V'),
+    (0xA31, 'X'),
+    (0xA32, 'V'),
+    (0xA33, 'M', u'ਲ਼'),
+    (0xA34, 'X'),
+    (0xA35, 'V'),
+    (0xA36, 'M', u'ਸ਼'),
+    (0xA37, 'X'),
+    (0xA38, 'V'),
+    (0xA3A, 'X'),
+    (0xA3C, 'V'),
+    (0xA3D, 'X'),
+    (0xA3E, 'V'),
+    (0xA43, 'X'),
+    (0xA47, 'V'),
+    (0xA49, 'X'),
+    (0xA4B, 'V'),
+    (0xA4E, 'X'),
+    (0xA51, 'V'),
+    (0xA52, 'X'),
+    (0xA59, 'M', u'ਖ਼'),
+    (0xA5A, 'M', u'ਗ਼'),
+    (0xA5B, 'M', u'ਜ਼'),
+    ]
+
+def _seg_11():
+    return [
+    (0xA5C, 'V'),
+    (0xA5D, 'X'),
+    (0xA5E, 'M', u'ਫ਼'),
+    (0xA5F, 'X'),
+    (0xA66, 'V'),
+    (0xA77, 'X'),
+    (0xA81, 'V'),
+    (0xA84, 'X'),
+    (0xA85, 'V'),
+    (0xA8E, 'X'),
+    (0xA8F, 'V'),
+    (0xA92, 'X'),
+    (0xA93, 'V'),
+    (0xAA9, 'X'),
+    (0xAAA, 'V'),
+    (0xAB1, 'X'),
+    (0xAB2, 'V'),
+    (0xAB4, 'X'),
+    (0xAB5, 'V'),
+    (0xABA, 'X'),
+    (0xABC, 'V'),
+    (0xAC6, 'X'),
+    (0xAC7, 'V'),
+    (0xACA, 'X'),
+    (0xACB, 'V'),
+    (0xACE, 'X'),
+    (0xAD0, 'V'),
+    (0xAD1, 'X'),
+    (0xAE0, 'V'),
+    (0xAE4, 'X'),
+    (0xAE6, 'V'),
+    (0xAF2, 'X'),
+    (0xAF9, 'V'),
+    (0xB00, 'X'),
+    (0xB01, 'V'),
+    (0xB04, 'X'),
+    (0xB05, 'V'),
+    (0xB0D, 'X'),
+    (0xB0F, 'V'),
+    (0xB11, 'X'),
+    (0xB13, 'V'),
+    (0xB29, 'X'),
+    (0xB2A, 'V'),
+    (0xB31, 'X'),
+    (0xB32, 'V'),
+    (0xB34, 'X'),
+    (0xB35, 'V'),
+    (0xB3A, 'X'),
+    (0xB3C, 'V'),
+    (0xB45, 'X'),
+    (0xB47, 'V'),
+    (0xB49, 'X'),
+    (0xB4B, 'V'),
+    (0xB4E, 'X'),
+    (0xB56, 'V'),
+    (0xB58, 'X'),
+    (0xB5C, 'M', u'ଡ଼'),
+    (0xB5D, 'M', u'ଢ଼'),
+    (0xB5E, 'X'),
+    (0xB5F, 'V'),
+    (0xB64, 'X'),
+    (0xB66, 'V'),
+    (0xB78, 'X'),
+    (0xB82, 'V'),
+    (0xB84, 'X'),
+    (0xB85, 'V'),
+    (0xB8B, 'X'),
+    (0xB8E, 'V'),
+    (0xB91, 'X'),
+    (0xB92, 'V'),
+    (0xB96, 'X'),
+    (0xB99, 'V'),
+    (0xB9B, 'X'),
+    (0xB9C, 'V'),
+    (0xB9D, 'X'),
+    (0xB9E, 'V'),
+    (0xBA0, 'X'),
+    (0xBA3, 'V'),
+    (0xBA5, 'X'),
+    (0xBA8, 'V'),
+    (0xBAB, 'X'),
+    (0xBAE, 'V'),
+    (0xBBA, 'X'),
+    (0xBBE, 'V'),
+    (0xBC3, 'X'),
+    (0xBC6, 'V'),
+    (0xBC9, 'X'),
+    (0xBCA, 'V'),
+    (0xBCE, 'X'),
+    (0xBD0, 'V'),
+    (0xBD1, 'X'),
+    (0xBD7, 'V'),
+    (0xBD8, 'X'),
+    (0xBE6, 'V'),
+    (0xBFB, 'X'),
+    (0xC00, 'V'),
+    (0xC0D, 'X'),
+    (0xC0E, 'V'),
+    (0xC11, 'X'),
+    (0xC12, 'V'),
+    ]
+
+def _seg_12():
+    return [
+    (0xC29, 'X'),
+    (0xC2A, 'V'),
+    (0xC3A, 'X'),
+    (0xC3D, 'V'),
+    (0xC45, 'X'),
+    (0xC46, 'V'),
+    (0xC49, 'X'),
+    (0xC4A, 'V'),
+    (0xC4E, 'X'),
+    (0xC55, 'V'),
+    (0xC57, 'X'),
+    (0xC58, 'V'),
+    (0xC5B, 'X'),
+    (0xC60, 'V'),
+    (0xC64, 'X'),
+    (0xC66, 'V'),
+    (0xC70, 'X'),
+    (0xC78, 'V'),
+    (0xC8D, 'X'),
+    (0xC8E, 'V'),
+    (0xC91, 'X'),
+    (0xC92, 'V'),
+    (0xCA9, 'X'),
+    (0xCAA, 'V'),
+    (0xCB4, 'X'),
+    (0xCB5, 'V'),
+    (0xCBA, 'X'),
+    (0xCBC, 'V'),
+    (0xCC5, 'X'),
+    (0xCC6, 'V'),
+    (0xCC9, 'X'),
+    (0xCCA, 'V'),
+    (0xCCE, 'X'),
+    (0xCD5, 'V'),
+    (0xCD7, 'X'),
+    (0xCDE, 'V'),
+    (0xCDF, 'X'),
+    (0xCE0, 'V'),
+    (0xCE4, 'X'),
+    (0xCE6, 'V'),
+    (0xCF0, 'X'),
+    (0xCF1, 'V'),
+    (0xCF3, 'X'),
+    (0xD00, 'V'),
+    (0xD04, 'X'),
+    (0xD05, 'V'),
+    (0xD0D, 'X'),
+    (0xD0E, 'V'),
+    (0xD11, 'X'),
+    (0xD12, 'V'),
+    (0xD45, 'X'),
+    (0xD46, 'V'),
+    (0xD49, 'X'),
+    (0xD4A, 'V'),
+    (0xD50, 'X'),
+    (0xD54, 'V'),
+    (0xD64, 'X'),
+    (0xD66, 'V'),
+    (0xD80, 'X'),
+    (0xD82, 'V'),
+    (0xD84, 'X'),
+    (0xD85, 'V'),
+    (0xD97, 'X'),
+    (0xD9A, 'V'),
+    (0xDB2, 'X'),
+    (0xDB3, 'V'),
+    (0xDBC, 'X'),
+    (0xDBD, 'V'),
+    (0xDBE, 'X'),
+    (0xDC0, 'V'),
+    (0xDC7, 'X'),
+    (0xDCA, 'V'),
+    (0xDCB, 'X'),
+    (0xDCF, 'V'),
+    (0xDD5, 'X'),
+    (0xDD6, 'V'),
+    (0xDD7, 'X'),
+    (0xDD8, 'V'),
+    (0xDE0, 'X'),
+    (0xDE6, 'V'),
+    (0xDF0, 'X'),
+    (0xDF2, 'V'),
+    (0xDF5, 'X'),
+    (0xE01, 'V'),
+    (0xE33, 'M', u'ํา'),
+    (0xE34, 'V'),
+    (0xE3B, 'X'),
+    (0xE3F, 'V'),
+    (0xE5C, 'X'),
+    (0xE81, 'V'),
+    (0xE83, 'X'),
+    (0xE84, 'V'),
+    (0xE85, 'X'),
+    (0xE87, 'V'),
+    (0xE89, 'X'),
+    (0xE8A, 'V'),
+    (0xE8B, 'X'),
+    (0xE8D, 'V'),
+    (0xE8E, 'X'),
+    (0xE94, 'V'),
+    ]
+
+def _seg_13():
+    return [
+    (0xE98, 'X'),
+    (0xE99, 'V'),
+    (0xEA0, 'X'),
+    (0xEA1, 'V'),
+    (0xEA4, 'X'),
+    (0xEA5, 'V'),
+    (0xEA6, 'X'),
+    (0xEA7, 'V'),
+    (0xEA8, 'X'),
+    (0xEAA, 'V'),
+    (0xEAC, 'X'),
+    (0xEAD, 'V'),
+    (0xEB3, 'M', u'ໍາ'),
+    (0xEB4, 'V'),
+    (0xEBA, 'X'),
+    (0xEBB, 'V'),
+    (0xEBE, 'X'),
+    (0xEC0, 'V'),
+    (0xEC5, 'X'),
+    (0xEC6, 'V'),
+    (0xEC7, 'X'),
+    (0xEC8, 'V'),
+    (0xECE, 'X'),
+    (0xED0, 'V'),
+    (0xEDA, 'X'),
+    (0xEDC, 'M', u'ຫນ'),
+    (0xEDD, 'M', u'ຫມ'),
+    (0xEDE, 'V'),
+    (0xEE0, 'X'),
+    (0xF00, 'V'),
+    (0xF0C, 'M', u'་'),
+    (0xF0D, 'V'),
+    (0xF43, 'M', u'གྷ'),
+    (0xF44, 'V'),
+    (0xF48, 'X'),
+    (0xF49, 'V'),
+    (0xF4D, 'M', u'ཌྷ'),
+    (0xF4E, 'V'),
+    (0xF52, 'M', u'དྷ'),
+    (0xF53, 'V'),
+    (0xF57, 'M', u'བྷ'),
+    (0xF58, 'V'),
+    (0xF5C, 'M', u'ཛྷ'),
+    (0xF5D, 'V'),
+    (0xF69, 'M', u'ཀྵ'),
+    (0xF6A, 'V'),
+    (0xF6D, 'X'),
+    (0xF71, 'V'),
+    (0xF73, 'M', u'ཱི'),
+    (0xF74, 'V'),
+    (0xF75, 'M', u'ཱུ'),
+    (0xF76, 'M', u'ྲྀ'),
+    (0xF77, 'M', u'ྲཱྀ'),
+    (0xF78, 'M', u'ླྀ'),
+    (0xF79, 'M', u'ླཱྀ'),
+    (0xF7A, 'V'),
+    (0xF81, 'M', u'ཱྀ'),
+    (0xF82, 'V'),
+    (0xF93, 'M', u'ྒྷ'),
+    (0xF94, 'V'),
+    (0xF98, 'X'),
+    (0xF99, 'V'),
+    (0xF9D, 'M', u'ྜྷ'),
+    (0xF9E, 'V'),
+    (0xFA2, 'M', u'ྡྷ'),
+    (0xFA3, 'V'),
+    (0xFA7, 'M', u'ྦྷ'),
+    (0xFA8, 'V'),
+    (0xFAC, 'M', u'ྫྷ'),
+    (0xFAD, 'V'),
+    (0xFB9, 'M', u'ྐྵ'),
+    (0xFBA, 'V'),
+    (0xFBD, 'X'),
+    (0xFBE, 'V'),
+    (0xFCD, 'X'),
+    (0xFCE, 'V'),
+    (0xFDB, 'X'),
+    (0x1000, 'V'),
+    (0x10A0, 'X'),
+    (0x10C7, 'M', u'ⴧ'),
+    (0x10C8, 'X'),
+    (0x10CD, 'M', u'ⴭ'),
+    (0x10CE, 'X'),
+    (0x10D0, 'V'),
+    (0x10FC, 'M', u'ნ'),
+    (0x10FD, 'V'),
+    (0x115F, 'X'),
+    (0x1161, 'V'),
+    (0x1249, 'X'),
+    (0x124A, 'V'),
+    (0x124E, 'X'),
+    (0x1250, 'V'),
+    (0x1257, 'X'),
+    (0x1258, 'V'),
+    (0x1259, 'X'),
+    (0x125A, 'V'),
+    (0x125E, 'X'),
+    (0x1260, 'V'),
+    (0x1289, 'X'),
+    (0x128A, 'V'),
+    ]
+
+def _seg_14():
+    return [
+    (0x128E, 'X'),
+    (0x1290, 'V'),
+    (0x12B1, 'X'),
+    (0x12B2, 'V'),
+    (0x12B6, 'X'),
+    (0x12B8, 'V'),
+    (0x12BF, 'X'),
+    (0x12C0, 'V'),
+    (0x12C1, 'X'),
+    (0x12C2, 'V'),
+    (0x12C6, 'X'),
+    (0x12C8, 'V'),
+    (0x12D7, 'X'),
+    (0x12D8, 'V'),
+    (0x1311, 'X'),
+    (0x1312, 'V'),
+    (0x1316, 'X'),
+    (0x1318, 'V'),
+    (0x135B, 'X'),
+    (0x135D, 'V'),
+    (0x137D, 'X'),
+    (0x1380, 'V'),
+    (0x139A, 'X'),
+    (0x13A0, 'V'),
+    (0x13F6, 'X'),
+    (0x13F8, 'M', u'Ᏸ'),
+    (0x13F9, 'M', u'Ᏹ'),
+    (0x13FA, 'M', u'Ᏺ'),
+    (0x13FB, 'M', u'Ᏻ'),
+    (0x13FC, 'M', u'Ᏼ'),
+    (0x13FD, 'M', u'Ᏽ'),
+    (0x13FE, 'X'),
+    (0x1400, 'V'),
+    (0x1680, 'X'),
+    (0x1681, 'V'),
+    (0x169D, 'X'),
+    (0x16A0, 'V'),
+    (0x16F9, 'X'),
+    (0x1700, 'V'),
+    (0x170D, 'X'),
+    (0x170E, 'V'),
+    (0x1715, 'X'),
+    (0x1720, 'V'),
+    (0x1737, 'X'),
+    (0x1740, 'V'),
+    (0x1754, 'X'),
+    (0x1760, 'V'),
+    (0x176D, 'X'),
+    (0x176E, 'V'),
+    (0x1771, 'X'),
+    (0x1772, 'V'),
+    (0x1774, 'X'),
+    (0x1780, 'V'),
+    (0x17B4, 'X'),
+    (0x17B6, 'V'),
+    (0x17DE, 'X'),
+    (0x17E0, 'V'),
+    (0x17EA, 'X'),
+    (0x17F0, 'V'),
+    (0x17FA, 'X'),
+    (0x1800, 'V'),
+    (0x1806, 'X'),
+    (0x1807, 'V'),
+    (0x180B, 'I'),
+    (0x180E, 'X'),
+    (0x1810, 'V'),
+    (0x181A, 'X'),
+    (0x1820, 'V'),
+    (0x1879, 'X'),
+    (0x1880, 'V'),
+    (0x18AB, 'X'),
+    (0x18B0, 'V'),
+    (0x18F6, 'X'),
+    (0x1900, 'V'),
+    (0x191F, 'X'),
+    (0x1920, 'V'),
+    (0x192C, 'X'),
+    (0x1930, 'V'),
+    (0x193C, 'X'),
+    (0x1940, 'V'),
+    (0x1941, 'X'),
+    (0x1944, 'V'),
+    (0x196E, 'X'),
+    (0x1970, 'V'),
+    (0x1975, 'X'),
+    (0x1980, 'V'),
+    (0x19AC, 'X'),
+    (0x19B0, 'V'),
+    (0x19CA, 'X'),
+    (0x19D0, 'V'),
+    (0x19DB, 'X'),
+    (0x19DE, 'V'),
+    (0x1A1C, 'X'),
+    (0x1A1E, 'V'),
+    (0x1A5F, 'X'),
+    (0x1A60, 'V'),
+    (0x1A7D, 'X'),
+    (0x1A7F, 'V'),
+    (0x1A8A, 'X'),
+    (0x1A90, 'V'),
+    ]
+
+def _seg_15():
+    return [
+    (0x1A9A, 'X'),
+    (0x1AA0, 'V'),
+    (0x1AAE, 'X'),
+    (0x1AB0, 'V'),
+    (0x1ABF, 'X'),
+    (0x1B00, 'V'),
+    (0x1B4C, 'X'),
+    (0x1B50, 'V'),
+    (0x1B7D, 'X'),
+    (0x1B80, 'V'),
+    (0x1BF4, 'X'),
+    (0x1BFC, 'V'),
+    (0x1C38, 'X'),
+    (0x1C3B, 'V'),
+    (0x1C4A, 'X'),
+    (0x1C4D, 'V'),
+    (0x1C80, 'M', u'в'),
+    (0x1C81, 'M', u'д'),
+    (0x1C82, 'M', u'о'),
+    (0x1C83, 'M', u'с'),
+    (0x1C84, 'M', u'т'),
+    (0x1C86, 'M', u'ъ'),
+    (0x1C87, 'M', u'ѣ'),
+    (0x1C88, 'M', u'ꙋ'),
+    (0x1C89, 'X'),
+    (0x1CC0, 'V'),
+    (0x1CC8, 'X'),
+    (0x1CD0, 'V'),
+    (0x1CFA, 'X'),
+    (0x1D00, 'V'),
+    (0x1D2C, 'M', u'a'),
+    (0x1D2D, 'M', u'æ'),
+    (0x1D2E, 'M', u'b'),
+    (0x1D2F, 'V'),
+    (0x1D30, 'M', u'd'),
+    (0x1D31, 'M', u'e'),
+    (0x1D32, 'M', u'ǝ'),
+    (0x1D33, 'M', u'g'),
+    (0x1D34, 'M', u'h'),
+    (0x1D35, 'M', u'i'),
+    (0x1D36, 'M', u'j'),
+    (0x1D37, 'M', u'k'),
+    (0x1D38, 'M', u'l'),
+    (0x1D39, 'M', u'm'),
+    (0x1D3A, 'M', u'n'),
+    (0x1D3B, 'V'),
+    (0x1D3C, 'M', u'o'),
+    (0x1D3D, 'M', u'ȣ'),
+    (0x1D3E, 'M', u'p'),
+    (0x1D3F, 'M', u'r'),
+    (0x1D40, 'M', u't'),
+    (0x1D41, 'M', u'u'),
+    (0x1D42, 'M', u'w'),
+    (0x1D43, 'M', u'a'),
+    (0x1D44, 'M', u'ɐ'),
+    (0x1D45, 'M', u'ɑ'),
+    (0x1D46, 'M', u'ᴂ'),
+    (0x1D47, 'M', u'b'),
+    (0x1D48, 'M', u'd'),
+    (0x1D49, 'M', u'e'),
+    (0x1D4A, 'M', u'ə'),
+    (0x1D4B, 'M', u'ɛ'),
+    (0x1D4C, 'M', u'ɜ'),
+    (0x1D4D, 'M', u'g'),
+    (0x1D4E, 'V'),
+    (0x1D4F, 'M', u'k'),
+    (0x1D50, 'M', u'm'),
+    (0x1D51, 'M', u'ŋ'),
+    (0x1D52, 'M', u'o'),
+    (0x1D53, 'M', u'ɔ'),
+    (0x1D54, 'M', u'ᴖ'),
+    (0x1D55, 'M', u'ᴗ'),
+    (0x1D56, 'M', u'p'),
+    (0x1D57, 'M', u't'),
+    (0x1D58, 'M', u'u'),
+    (0x1D59, 'M', u'ᴝ'),
+    (0x1D5A, 'M', u'ɯ'),
+    (0x1D5B, 'M', u'v'),
+    (0x1D5C, 'M', u'ᴥ'),
+    (0x1D5D, 'M', u'β'),
+    (0x1D5E, 'M', u'γ'),
+    (0x1D5F, 'M', u'δ'),
+    (0x1D60, 'M', u'φ'),
+    (0x1D61, 'M', u'χ'),
+    (0x1D62, 'M', u'i'),
+    (0x1D63, 'M', u'r'),
+    (0x1D64, 'M', u'u'),
+    (0x1D65, 'M', u'v'),
+    (0x1D66, 'M', u'β'),
+    (0x1D67, 'M', u'γ'),
+    (0x1D68, 'M', u'ρ'),
+    (0x1D69, 'M', u'φ'),
+    (0x1D6A, 'M', u'χ'),
+    (0x1D6B, 'V'),
+    (0x1D78, 'M', u'н'),
+    (0x1D79, 'V'),
+    (0x1D9B, 'M', u'ɒ'),
+    (0x1D9C, 'M', u'c'),
+    (0x1D9D, 'M', u'ɕ'),
+    (0x1D9E, 'M', u'ð'),
+    ]
+
+def _seg_16():
+    return [
+    (0x1D9F, 'M', u'ɜ'),
+    (0x1DA0, 'M', u'f'),
+    (0x1DA1, 'M', u'ɟ'),
+    (0x1DA2, 'M', u'ɡ'),
+    (0x1DA3, 'M', u'ɥ'),
+    (0x1DA4, 'M', u'ɨ'),
+    (0x1DA5, 'M', u'ɩ'),
+    (0x1DA6, 'M', u'ɪ'),
+    (0x1DA7, 'M', u'ᵻ'),
+    (0x1DA8, 'M', u'ʝ'),
+    (0x1DA9, 'M', u'ɭ'),
+    (0x1DAA, 'M', u'ᶅ'),
+    (0x1DAB, 'M', u'ʟ'),
+    (0x1DAC, 'M', u'ɱ'),
+    (0x1DAD, 'M', u'ɰ'),
+    (0x1DAE, 'M', u'ɲ'),
+    (0x1DAF, 'M', u'ɳ'),
+    (0x1DB0, 'M', u'ɴ'),
+    (0x1DB1, 'M', u'ɵ'),
+    (0x1DB2, 'M', u'ɸ'),
+    (0x1DB3, 'M', u'ʂ'),
+    (0x1DB4, 'M', u'ʃ'),
+    (0x1DB5, 'M', u'ƫ'),
+    (0x1DB6, 'M', u'ʉ'),
+    (0x1DB7, 'M', u'ʊ'),
+    (0x1DB8, 'M', u'ᴜ'),
+    (0x1DB9, 'M', u'ʋ'),
+    (0x1DBA, 'M', u'ʌ'),
+    (0x1DBB, 'M', u'z'),
+    (0x1DBC, 'M', u'ʐ'),
+    (0x1DBD, 'M', u'ʑ'),
+    (0x1DBE, 'M', u'ʒ'),
+    (0x1DBF, 'M', u'θ'),
+    (0x1DC0, 'V'),
+    (0x1DFA, 'X'),
+    (0x1DFB, 'V'),
+    (0x1E00, 'M', u'ḁ'),
+    (0x1E01, 'V'),
+    (0x1E02, 'M', u'ḃ'),
+    (0x1E03, 'V'),
+    (0x1E04, 'M', u'ḅ'),
+    (0x1E05, 'V'),
+    (0x1E06, 'M', u'ḇ'),
+    (0x1E07, 'V'),
+    (0x1E08, 'M', u'ḉ'),
+    (0x1E09, 'V'),
+    (0x1E0A, 'M', u'ḋ'),
+    (0x1E0B, 'V'),
+    (0x1E0C, 'M', u'ḍ'),
+    (0x1E0D, 'V'),
+    (0x1E0E, 'M', u'ḏ'),
+    (0x1E0F, 'V'),
+    (0x1E10, 'M', u'ḑ'),
+    (0x1E11, 'V'),
+    (0x1E12, 'M', u'ḓ'),
+    (0x1E13, 'V'),
+    (0x1E14, 'M', u'ḕ'),
+    (0x1E15, 'V'),
+    (0x1E16, 'M', u'ḗ'),
+    (0x1E17, 'V'),
+    (0x1E18, 'M', u'ḙ'),
+    (0x1E19, 'V'),
+    (0x1E1A, 'M', u'ḛ'),
+    (0x1E1B, 'V'),
+    (0x1E1C, 'M', u'ḝ'),
+    (0x1E1D, 'V'),
+    (0x1E1E, 'M', u'ḟ'),
+    (0x1E1F, 'V'),
+    (0x1E20, 'M', u'ḡ'),
+    (0x1E21, 'V'),
+    (0x1E22, 'M', u'ḣ'),
+    (0x1E23, 'V'),
+    (0x1E24, 'M', u'ḥ'),
+    (0x1E25, 'V'),
+    (0x1E26, 'M', u'ḧ'),
+    (0x1E27, 'V'),
+    (0x1E28, 'M', u'ḩ'),
+    (0x1E29, 'V'),
+    (0x1E2A, 'M', u'ḫ'),
+    (0x1E2B, 'V'),
+    (0x1E2C, 'M', u'ḭ'),
+    (0x1E2D, 'V'),
+    (0x1E2E, 'M', u'ḯ'),
+    (0x1E2F, 'V'),
+    (0x1E30, 'M', u'ḱ'),
+    (0x1E31, 'V'),
+    (0x1E32, 'M', u'ḳ'),
+    (0x1E33, 'V'),
+    (0x1E34, 'M', u'ḵ'),
+    (0x1E35, 'V'),
+    (0x1E36, 'M', u'ḷ'),
+    (0x1E37, 'V'),
+    (0x1E38, 'M', u'ḹ'),
+    (0x1E39, 'V'),
+    (0x1E3A, 'M', u'ḻ'),
+    (0x1E3B, 'V'),
+    (0x1E3C, 'M', u'ḽ'),
+    (0x1E3D, 'V'),
+    (0x1E3E, 'M', u'ḿ'),
+    (0x1E3F, 'V'),
+    ]
+
+def _seg_17():
+    return [
+    (0x1E40, 'M', u'ṁ'),
+    (0x1E41, 'V'),
+    (0x1E42, 'M', u'ṃ'),
+    (0x1E43, 'V'),
+    (0x1E44, 'M', u'ṅ'),
+    (0x1E45, 'V'),
+    (0x1E46, 'M', u'ṇ'),
+    (0x1E47, 'V'),
+    (0x1E48, 'M', u'ṉ'),
+    (0x1E49, 'V'),
+    (0x1E4A, 'M', u'ṋ'),
+    (0x1E4B, 'V'),
+    (0x1E4C, 'M', u'ṍ'),
+    (0x1E4D, 'V'),
+    (0x1E4E, 'M', u'ṏ'),
+    (0x1E4F, 'V'),
+    (0x1E50, 'M', u'ṑ'),
+    (0x1E51, 'V'),
+    (0x1E52, 'M', u'ṓ'),
+    (0x1E53, 'V'),
+    (0x1E54, 'M', u'ṕ'),
+    (0x1E55, 'V'),
+    (0x1E56, 'M', u'ṗ'),
+    (0x1E57, 'V'),
+    (0x1E58, 'M', u'ṙ'),
+    (0x1E59, 'V'),
+    (0x1E5A, 'M', u'ṛ'),
+    (0x1E5B, 'V'),
+    (0x1E5C, 'M', u'ṝ'),
+    (0x1E5D, 'V'),
+    (0x1E5E, 'M', u'ṟ'),
+    (0x1E5F, 'V'),
+    (0x1E60, 'M', u'ṡ'),
+    (0x1E61, 'V'),
+    (0x1E62, 'M', u'ṣ'),
+    (0x1E63, 'V'),
+    (0x1E64, 'M', u'ṥ'),
+    (0x1E65, 'V'),
+    (0x1E66, 'M', u'ṧ'),
+    (0x1E67, 'V'),
+    (0x1E68, 'M', u'ṩ'),
+    (0x1E69, 'V'),
+    (0x1E6A, 'M', u'ṫ'),
+    (0x1E6B, 'V'),
+    (0x1E6C, 'M', u'ṭ'),
+    (0x1E6D, 'V'),
+    (0x1E6E, 'M', u'ṯ'),
+    (0x1E6F, 'V'),
+    (0x1E70, 'M', u'ṱ'),
+    (0x1E71, 'V'),
+    (0x1E72, 'M', u'ṳ'),
+    (0x1E73, 'V'),
+    (0x1E74, 'M', u'ṵ'),
+    (0x1E75, 'V'),
+    (0x1E76, 'M', u'ṷ'),
+    (0x1E77, 'V'),
+    (0x1E78, 'M', u'ṹ'),
+    (0x1E79, 'V'),
+    (0x1E7A, 'M', u'ṻ'),
+    (0x1E7B, 'V'),
+    (0x1E7C, 'M', u'ṽ'),
+    (0x1E7D, 'V'),
+    (0x1E7E, 'M', u'ṿ'),
+    (0x1E7F, 'V'),
+    (0x1E80, 'M', u'ẁ'),
+    (0x1E81, 'V'),
+    (0x1E82, 'M', u'ẃ'),
+    (0x1E83, 'V'),
+    (0x1E84, 'M', u'ẅ'),
+    (0x1E85, 'V'),
+    (0x1E86, 'M', u'ẇ'),
+    (0x1E87, 'V'),
+    (0x1E88, 'M', u'ẉ'),
+    (0x1E89, 'V'),
+    (0x1E8A, 'M', u'ẋ'),
+    (0x1E8B, 'V'),
+    (0x1E8C, 'M', u'ẍ'),
+    (0x1E8D, 'V'),
+    (0x1E8E, 'M', u'ẏ'),
+    (0x1E8F, 'V'),
+    (0x1E90, 'M', u'ẑ'),
+    (0x1E91, 'V'),
+    (0x1E92, 'M', u'ẓ'),
+    (0x1E93, 'V'),
+    (0x1E94, 'M', u'ẕ'),
+    (0x1E95, 'V'),
+    (0x1E9A, 'M', u'aʾ'),
+    (0x1E9B, 'M', u'ṡ'),
+    (0x1E9C, 'V'),
+    (0x1E9E, 'M', u'ss'),
+    (0x1E9F, 'V'),
+    (0x1EA0, 'M', u'ạ'),
+    (0x1EA1, 'V'),
+    (0x1EA2, 'M', u'ả'),
+    (0x1EA3, 'V'),
+    (0x1EA4, 'M', u'ấ'),
+    (0x1EA5, 'V'),
+    (0x1EA6, 'M', u'ầ'),
+    (0x1EA7, 'V'),
+    (0x1EA8, 'M', u'ẩ'),
+    ]
+
+def _seg_18():
+    return [
+    (0x1EA9, 'V'),
+    (0x1EAA, 'M', u'ẫ'),
+    (0x1EAB, 'V'),
+    (0x1EAC, 'M', u'ậ'),
+    (0x1EAD, 'V'),
+    (0x1EAE, 'M', u'ắ'),
+    (0x1EAF, 'V'),
+    (0x1EB0, 'M', u'ằ'),
+    (0x1EB1, 'V'),
+    (0x1EB2, 'M', u'ẳ'),
+    (0x1EB3, 'V'),
+    (0x1EB4, 'M', u'ẵ'),
+    (0x1EB5, 'V'),
+    (0x1EB6, 'M', u'ặ'),
+    (0x1EB7, 'V'),
+    (0x1EB8, 'M', u'ẹ'),
+    (0x1EB9, 'V'),
+    (0x1EBA, 'M', u'ẻ'),
+    (0x1EBB, 'V'),
+    (0x1EBC, 'M', u'ẽ'),
+    (0x1EBD, 'V'),
+    (0x1EBE, 'M', u'ế'),
+    (0x1EBF, 'V'),
+    (0x1EC0, 'M', u'ề'),
+    (0x1EC1, 'V'),
+    (0x1EC2, 'M', u'ể'),
+    (0x1EC3, 'V'),
+    (0x1EC4, 'M', u'ễ'),
+    (0x1EC5, 'V'),
+    (0x1EC6, 'M', u'ệ'),
+    (0x1EC7, 'V'),
+    (0x1EC8, 'M', u'ỉ'),
+    (0x1EC9, 'V'),
+    (0x1ECA, 'M', u'ị'),
+    (0x1ECB, 'V'),
+    (0x1ECC, 'M', u'ọ'),
+    (0x1ECD, 'V'),
+    (0x1ECE, 'M', u'ỏ'),
+    (0x1ECF, 'V'),
+    (0x1ED0, 'M', u'ố'),
+    (0x1ED1, 'V'),
+    (0x1ED2, 'M', u'ồ'),
+    (0x1ED3, 'V'),
+    (0x1ED4, 'M', u'ổ'),
+    (0x1ED5, 'V'),
+    (0x1ED6, 'M', u'ỗ'),
+    (0x1ED7, 'V'),
+    (0x1ED8, 'M', u'ộ'),
+    (0x1ED9, 'V'),
+    (0x1EDA, 'M', u'ớ'),
+    (0x1EDB, 'V'),
+    (0x1EDC, 'M', u'ờ'),
+    (0x1EDD, 'V'),
+    (0x1EDE, 'M', u'ở'),
+    (0x1EDF, 'V'),
+    (0x1EE0, 'M', u'ỡ'),
+    (0x1EE1, 'V'),
+    (0x1EE2, 'M', u'ợ'),
+    (0x1EE3, 'V'),
+    (0x1EE4, 'M', u'ụ'),
+    (0x1EE5, 'V'),
+    (0x1EE6, 'M', u'ủ'),
+    (0x1EE7, 'V'),
+    (0x1EE8, 'M', u'ứ'),
+    (0x1EE9, 'V'),
+    (0x1EEA, 'M', u'ừ'),
+    (0x1EEB, 'V'),
+    (0x1EEC, 'M', u'ử'),
+    (0x1EED, 'V'),
+    (0x1EEE, 'M', u'ữ'),
+    (0x1EEF, 'V'),
+    (0x1EF0, 'M', u'ự'),
+    (0x1EF1, 'V'),
+    (0x1EF2, 'M', u'ỳ'),
+    (0x1EF3, 'V'),
+    (0x1EF4, 'M', u'ỵ'),
+    (0x1EF5, 'V'),
+    (0x1EF6, 'M', u'ỷ'),
+    (0x1EF7, 'V'),
+    (0x1EF8, 'M', u'ỹ'),
+    (0x1EF9, 'V'),
+    (0x1EFA, 'M', u'ỻ'),
+    (0x1EFB, 'V'),
+    (0x1EFC, 'M', u'ỽ'),
+    (0x1EFD, 'V'),
+    (0x1EFE, 'M', u'ỿ'),
+    (0x1EFF, 'V'),
+    (0x1F08, 'M', u'ἀ'),
+    (0x1F09, 'M', u'ἁ'),
+    (0x1F0A, 'M', u'ἂ'),
+    (0x1F0B, 'M', u'ἃ'),
+    (0x1F0C, 'M', u'ἄ'),
+    (0x1F0D, 'M', u'ἅ'),
+    (0x1F0E, 'M', u'ἆ'),
+    (0x1F0F, 'M', u'ἇ'),
+    (0x1F10, 'V'),
+    (0x1F16, 'X'),
+    (0x1F18, 'M', u'ἐ'),
+    (0x1F19, 'M', u'ἑ'),
+    (0x1F1A, 'M', u'ἒ'),
+    ]
+
+def _seg_19():
+    return [
+    (0x1F1B, 'M', u'ἓ'),
+    (0x1F1C, 'M', u'ἔ'),
+    (0x1F1D, 'M', u'ἕ'),
+    (0x1F1E, 'X'),
+    (0x1F20, 'V'),
+    (0x1F28, 'M', u'ἠ'),
+    (0x1F29, 'M', u'ἡ'),
+    (0x1F2A, 'M', u'ἢ'),
+    (0x1F2B, 'M', u'ἣ'),
+    (0x1F2C, 'M', u'ἤ'),
+    (0x1F2D, 'M', u'ἥ'),
+    (0x1F2E, 'M', u'ἦ'),
+    (0x1F2F, 'M', u'ἧ'),
+    (0x1F30, 'V'),
+    (0x1F38, 'M', u'ἰ'),
+    (0x1F39, 'M', u'ἱ'),
+    (0x1F3A, 'M', u'ἲ'),
+    (0x1F3B, 'M', u'ἳ'),
+    (0x1F3C, 'M', u'ἴ'),
+    (0x1F3D, 'M', u'ἵ'),
+    (0x1F3E, 'M', u'ἶ'),
+    (0x1F3F, 'M', u'ἷ'),
+    (0x1F40, 'V'),
+    (0x1F46, 'X'),
+    (0x1F48, 'M', u'ὀ'),
+    (0x1F49, 'M', u'ὁ'),
+    (0x1F4A, 'M', u'ὂ'),
+    (0x1F4B, 'M', u'ὃ'),
+    (0x1F4C, 'M', u'ὄ'),
+    (0x1F4D, 'M', u'ὅ'),
+    (0x1F4E, 'X'),
+    (0x1F50, 'V'),
+    (0x1F58, 'X'),
+    (0x1F59, 'M', u'ὑ'),
+    (0x1F5A, 'X'),
+    (0x1F5B, 'M', u'ὓ'),
+    (0x1F5C, 'X'),
+    (0x1F5D, 'M', u'ὕ'),
+    (0x1F5E, 'X'),
+    (0x1F5F, 'M', u'ὗ'),
+    (0x1F60, 'V'),
+    (0x1F68, 'M', u'ὠ'),
+    (0x1F69, 'M', u'ὡ'),
+    (0x1F6A, 'M', u'ὢ'),
+    (0x1F6B, 'M', u'ὣ'),
+    (0x1F6C, 'M', u'ὤ'),
+    (0x1F6D, 'M', u'ὥ'),
+    (0x1F6E, 'M', u'ὦ'),
+    (0x1F6F, 'M', u'ὧ'),
+    (0x1F70, 'V'),
+    (0x1F71, 'M', u'ά'),
+    (0x1F72, 'V'),
+    (0x1F73, 'M', u'έ'),
+    (0x1F74, 'V'),
+    (0x1F75, 'M', u'ή'),
+    (0x1F76, 'V'),
+    (0x1F77, 'M', u'ί'),
+    (0x1F78, 'V'),
+    (0x1F79, 'M', u'ό'),
+    (0x1F7A, 'V'),
+    (0x1F7B, 'M', u'ύ'),
+    (0x1F7C, 'V'),
+    (0x1F7D, 'M', u'ώ'),
+    (0x1F7E, 'X'),
+    (0x1F80, 'M', u'ἀι'),
+    (0x1F81, 'M', u'ἁι'),
+    (0x1F82, 'M', u'ἂι'),
+    (0x1F83, 'M', u'ἃι'),
+    (0x1F84, 'M', u'ἄι'),
+    (0x1F85, 'M', u'ἅι'),
+    (0x1F86, 'M', u'ἆι'),
+    (0x1F87, 'M', u'ἇι'),
+    (0x1F88, 'M', u'ἀι'),
+    (0x1F89, 'M', u'ἁι'),
+    (0x1F8A, 'M', u'ἂι'),
+    (0x1F8B, 'M', u'ἃι'),
+    (0x1F8C, 'M', u'ἄι'),
+    (0x1F8D, 'M', u'ἅι'),
+    (0x1F8E, 'M', u'ἆι'),
+    (0x1F8F, 'M', u'ἇι'),
+    (0x1F90, 'M', u'ἠι'),
+    (0x1F91, 'M', u'ἡι'),
+    (0x1F92, 'M', u'ἢι'),
+    (0x1F93, 'M', u'ἣι'),
+    (0x1F94, 'M', u'ἤι'),
+    (0x1F95, 'M', u'ἥι'),
+    (0x1F96, 'M', u'ἦι'),
+    (0x1F97, 'M', u'ἧι'),
+    (0x1F98, 'M', u'ἠι'),
+    (0x1F99, 'M', u'ἡι'),
+    (0x1F9A, 'M', u'ἢι'),
+    (0x1F9B, 'M', u'ἣι'),
+    (0x1F9C, 'M', u'ἤι'),
+    (0x1F9D, 'M', u'ἥι'),
+    (0x1F9E, 'M', u'ἦι'),
+    (0x1F9F, 'M', u'ἧι'),
+    (0x1FA0, 'M', u'ὠι'),
+    (0x1FA1, 'M', u'ὡι'),
+    (0x1FA2, 'M', u'ὢι'),
+    (0x1FA3, 'M', u'ὣι'),
+    ]
+
+def _seg_20():
+    return [
+    (0x1FA4, 'M', u'ὤι'),
+    (0x1FA5, 'M', u'ὥι'),
+    (0x1FA6, 'M', u'ὦι'),
+    (0x1FA7, 'M', u'ὧι'),
+    (0x1FA8, 'M', u'ὠι'),
+    (0x1FA9, 'M', u'ὡι'),
+    (0x1FAA, 'M', u'ὢι'),
+    (0x1FAB, 'M', u'ὣι'),
+    (0x1FAC, 'M', u'ὤι'),
+    (0x1FAD, 'M', u'ὥι'),
+    (0x1FAE, 'M', u'ὦι'),
+    (0x1FAF, 'M', u'ὧι'),
+    (0x1FB0, 'V'),
+    (0x1FB2, 'M', u'ὰι'),
+    (0x1FB3, 'M', u'αι'),
+    (0x1FB4, 'M', u'άι'),
+    (0x1FB5, 'X'),
+    (0x1FB6, 'V'),
+    (0x1FB7, 'M', u'ᾶι'),
+    (0x1FB8, 'M', u'ᾰ'),
+    (0x1FB9, 'M', u'ᾱ'),
+    (0x1FBA, 'M', u'ὰ'),
+    (0x1FBB, 'M', u'ά'),
+    (0x1FBC, 'M', u'αι'),
+    (0x1FBD, '3', u' ̓'),
+    (0x1FBE, 'M', u'ι'),
+    (0x1FBF, '3', u' ̓'),
+    (0x1FC0, '3', u' ͂'),
+    (0x1FC1, '3', u' ̈͂'),
+    (0x1FC2, 'M', u'ὴι'),
+    (0x1FC3, 'M', u'ηι'),
+    (0x1FC4, 'M', u'ήι'),
+    (0x1FC5, 'X'),
+    (0x1FC6, 'V'),
+    (0x1FC7, 'M', u'ῆι'),
+    (0x1FC8, 'M', u'ὲ'),
+    (0x1FC9, 'M', u'έ'),
+    (0x1FCA, 'M', u'ὴ'),
+    (0x1FCB, 'M', u'ή'),
+    (0x1FCC, 'M', u'ηι'),
+    (0x1FCD, '3', u' ̓̀'),
+    (0x1FCE, '3', u' ̓́'),
+    (0x1FCF, '3', u' ̓͂'),
+    (0x1FD0, 'V'),
+    (0x1FD3, 'M', u'ΐ'),
+    (0x1FD4, 'X'),
+    (0x1FD6, 'V'),
+    (0x1FD8, 'M', u'ῐ'),
+    (0x1FD9, 'M', u'ῑ'),
+    (0x1FDA, 'M', u'ὶ'),
+    (0x1FDB, 'M', u'ί'),
+    (0x1FDC, 'X'),
+    (0x1FDD, '3', u' ̔̀'),
+    (0x1FDE, '3', u' ̔́'),
+    (0x1FDF, '3', u' ̔͂'),
+    (0x1FE0, 'V'),
+    (0x1FE3, 'M', u'ΰ'),
+    (0x1FE4, 'V'),
+    (0x1FE8, 'M', u'ῠ'),
+    (0x1FE9, 'M', u'ῡ'),
+    (0x1FEA, 'M', u'ὺ'),
+    (0x1FEB, 'M', u'ύ'),
+    (0x1FEC, 'M', u'ῥ'),
+    (0x1FED, '3', u' ̈̀'),
+    (0x1FEE, '3', u' ̈́'),
+    (0x1FEF, '3', u'`'),
+    (0x1FF0, 'X'),
+    (0x1FF2, 'M', u'ὼι'),
+    (0x1FF3, 'M', u'ωι'),
+    (0x1FF4, 'M', u'ώι'),
+    (0x1FF5, 'X'),
+    (0x1FF6, 'V'),
+    (0x1FF7, 'M', u'ῶι'),
+    (0x1FF8, 'M', u'ὸ'),
+    (0x1FF9, 'M', u'ό'),
+    (0x1FFA, 'M', u'ὼ'),
+    (0x1FFB, 'M', u'ώ'),
+    (0x1FFC, 'M', u'ωι'),
+    (0x1FFD, '3', u' ́'),
+    (0x1FFE, '3', u' ̔'),
+    (0x1FFF, 'X'),
+    (0x2000, '3', u' '),
+    (0x200B, 'I'),
+    (0x200C, 'D', u''),
+    (0x200E, 'X'),
+    (0x2010, 'V'),
+    (0x2011, 'M', u'‐'),
+    (0x2012, 'V'),
+    (0x2017, '3', u' ̳'),
+    (0x2018, 'V'),
+    (0x2024, 'X'),
+    (0x2027, 'V'),
+    (0x2028, 'X'),
+    (0x202F, '3', u' '),
+    (0x2030, 'V'),
+    (0x2033, 'M', u'′′'),
+    (0x2034, 'M', u'′′′'),
+    (0x2035, 'V'),
+    (0x2036, 'M', u'‵‵'),
+    (0x2037, 'M', u'‵‵‵'),
+    ]
+
+def _seg_21():
+    return [
+    (0x2038, 'V'),
+    (0x203C, '3', u'!!'),
+    (0x203D, 'V'),
+    (0x203E, '3', u' ̅'),
+    (0x203F, 'V'),
+    (0x2047, '3', u'??'),
+    (0x2048, '3', u'?!'),
+    (0x2049, '3', u'!?'),
+    (0x204A, 'V'),
+    (0x2057, 'M', u'′′′′'),
+    (0x2058, 'V'),
+    (0x205F, '3', u' '),
+    (0x2060, 'I'),
+    (0x2061, 'X'),
+    (0x2064, 'I'),
+    (0x2065, 'X'),
+    (0x2070, 'M', u'0'),
+    (0x2071, 'M', u'i'),
+    (0x2072, 'X'),
+    (0x2074, 'M', u'4'),
+    (0x2075, 'M', u'5'),
+    (0x2076, 'M', u'6'),
+    (0x2077, 'M', u'7'),
+    (0x2078, 'M', u'8'),
+    (0x2079, 'M', u'9'),
+    (0x207A, '3', u'+'),
+    (0x207B, 'M', u'−'),
+    (0x207C, '3', u'='),
+    (0x207D, '3', u'('),
+    (0x207E, '3', u')'),
+    (0x207F, 'M', u'n'),
+    (0x2080, 'M', u'0'),
+    (0x2081, 'M', u'1'),
+    (0x2082, 'M', u'2'),
+    (0x2083, 'M', u'3'),
+    (0x2084, 'M', u'4'),
+    (0x2085, 'M', u'5'),
+    (0x2086, 'M', u'6'),
+    (0x2087, 'M', u'7'),
+    (0x2088, 'M', u'8'),
+    (0x2089, 'M', u'9'),
+    (0x208A, '3', u'+'),
+    (0x208B, 'M', u'−'),
+    (0x208C, '3', u'='),
+    (0x208D, '3', u'('),
+    (0x208E, '3', u')'),
+    (0x208F, 'X'),
+    (0x2090, 'M', u'a'),
+    (0x2091, 'M', u'e'),
+    (0x2092, 'M', u'o'),
+    (0x2093, 'M', u'x'),
+    (0x2094, 'M', u'ə'),
+    (0x2095, 'M', u'h'),
+    (0x2096, 'M', u'k'),
+    (0x2097, 'M', u'l'),
+    (0x2098, 'M', u'm'),
+    (0x2099, 'M', u'n'),
+    (0x209A, 'M', u'p'),
+    (0x209B, 'M', u's'),
+    (0x209C, 'M', u't'),
+    (0x209D, 'X'),
+    (0x20A0, 'V'),
+    (0x20A8, 'M', u'rs'),
+    (0x20A9, 'V'),
+    (0x20C0, 'X'),
+    (0x20D0, 'V'),
+    (0x20F1, 'X'),
+    (0x2100, '3', u'a/c'),
+    (0x2101, '3', u'a/s'),
+    (0x2102, 'M', u'c'),
+    (0x2103, 'M', u'°c'),
+    (0x2104, 'V'),
+    (0x2105, '3', u'c/o'),
+    (0x2106, '3', u'c/u'),
+    (0x2107, 'M', u'ɛ'),
+    (0x2108, 'V'),
+    (0x2109, 'M', u'°f'),
+    (0x210A, 'M', u'g'),
+    (0x210B, 'M', u'h'),
+    (0x210F, 'M', u'ħ'),
+    (0x2110, 'M', u'i'),
+    (0x2112, 'M', u'l'),
+    (0x2114, 'V'),
+    (0x2115, 'M', u'n'),
+    (0x2116, 'M', u'no'),
+    (0x2117, 'V'),
+    (0x2119, 'M', u'p'),
+    (0x211A, 'M', u'q'),
+    (0x211B, 'M', u'r'),
+    (0x211E, 'V'),
+    (0x2120, 'M', u'sm'),
+    (0x2121, 'M', u'tel'),
+    (0x2122, 'M', u'tm'),
+    (0x2123, 'V'),
+    (0x2124, 'M', u'z'),
+    (0x2125, 'V'),
+    (0x2126, 'M', u'ω'),
+    (0x2127, 'V'),
+    (0x2128, 'M', u'z'),
+    (0x2129, 'V'),
+    ]
+
+def _seg_22():
+    return [
+    (0x212A, 'M', u'k'),
+    (0x212B, 'M', u'å'),
+    (0x212C, 'M', u'b'),
+    (0x212D, 'M', u'c'),
+    (0x212E, 'V'),
+    (0x212F, 'M', u'e'),
+    (0x2131, 'M', u'f'),
+    (0x2132, 'X'),
+    (0x2133, 'M', u'm'),
+    (0x2134, 'M', u'o'),
+    (0x2135, 'M', u'א'),
+    (0x2136, 'M', u'ב'),
+    (0x2137, 'M', u'ג'),
+    (0x2138, 'M', u'ד'),
+    (0x2139, 'M', u'i'),
+    (0x213A, 'V'),
+    (0x213B, 'M', u'fax'),
+    (0x213C, 'M', u'π'),
+    (0x213D, 'M', u'γ'),
+    (0x213F, 'M', u'π'),
+    (0x2140, 'M', u'∑'),
+    (0x2141, 'V'),
+    (0x2145, 'M', u'd'),
+    (0x2147, 'M', u'e'),
+    (0x2148, 'M', u'i'),
+    (0x2149, 'M', u'j'),
+    (0x214A, 'V'),
+    (0x2150, 'M', u'1⁄7'),
+    (0x2151, 'M', u'1⁄9'),
+    (0x2152, 'M', u'1⁄10'),
+    (0x2153, 'M', u'1⁄3'),
+    (0x2154, 'M', u'2⁄3'),
+    (0x2155, 'M', u'1⁄5'),
+    (0x2156, 'M', u'2⁄5'),
+    (0x2157, 'M', u'3⁄5'),
+    (0x2158, 'M', u'4⁄5'),
+    (0x2159, 'M', u'1⁄6'),
+    (0x215A, 'M', u'5⁄6'),
+    (0x215B, 'M', u'1⁄8'),
+    (0x215C, 'M', u'3⁄8'),
+    (0x215D, 'M', u'5⁄8'),
+    (0x215E, 'M', u'7⁄8'),
+    (0x215F, 'M', u'1⁄'),
+    (0x2160, 'M', u'i'),
+    (0x2161, 'M', u'ii'),
+    (0x2162, 'M', u'iii'),
+    (0x2163, 'M', u'iv'),
+    (0x2164, 'M', u'v'),
+    (0x2165, 'M', u'vi'),
+    (0x2166, 'M', u'vii'),
+    (0x2167, 'M', u'viii'),
+    (0x2168, 'M', u'ix'),
+    (0x2169, 'M', u'x'),
+    (0x216A, 'M', u'xi'),
+    (0x216B, 'M', u'xii'),
+    (0x216C, 'M', u'l'),
+    (0x216D, 'M', u'c'),
+    (0x216E, 'M', u'd'),
+    (0x216F, 'M', u'm'),
+    (0x2170, 'M', u'i'),
+    (0x2171, 'M', u'ii'),
+    (0x2172, 'M', u'iii'),
+    (0x2173, 'M', u'iv'),
+    (0x2174, 'M', u'v'),
+    (0x2175, 'M', u'vi'),
+    (0x2176, 'M', u'vii'),
+    (0x2177, 'M', u'viii'),
+    (0x2178, 'M', u'ix'),
+    (0x2179, 'M', u'x'),
+    (0x217A, 'M', u'xi'),
+    (0x217B, 'M', u'xii'),
+    (0x217C, 'M', u'l'),
+    (0x217D, 'M', u'c'),
+    (0x217E, 'M', u'd'),
+    (0x217F, 'M', u'm'),
+    (0x2180, 'V'),
+    (0x2183, 'X'),
+    (0x2184, 'V'),
+    (0x2189, 'M', u'0⁄3'),
+    (0x218A, 'V'),
+    (0x218C, 'X'),
+    (0x2190, 'V'),
+    (0x222C, 'M', u'∫∫'),
+    (0x222D, 'M', u'∫∫∫'),
+    (0x222E, 'V'),
+    (0x222F, 'M', u'∮∮'),
+    (0x2230, 'M', u'∮∮∮'),
+    (0x2231, 'V'),
+    (0x2260, '3'),
+    (0x2261, 'V'),
+    (0x226E, '3'),
+    (0x2270, 'V'),
+    (0x2329, 'M', u'〈'),
+    (0x232A, 'M', u'〉'),
+    (0x232B, 'V'),
+    (0x2427, 'X'),
+    (0x2440, 'V'),
+    (0x244B, 'X'),
+    (0x2460, 'M', u'1'),
+    (0x2461, 'M', u'2'),
+    ]
+
+def _seg_23():
+    return [
+    (0x2462, 'M', u'3'),
+    (0x2463, 'M', u'4'),
+    (0x2464, 'M', u'5'),
+    (0x2465, 'M', u'6'),
+    (0x2466, 'M', u'7'),
+    (0x2467, 'M', u'8'),
+    (0x2468, 'M', u'9'),
+    (0x2469, 'M', u'10'),
+    (0x246A, 'M', u'11'),
+    (0x246B, 'M', u'12'),
+    (0x246C, 'M', u'13'),
+    (0x246D, 'M', u'14'),
+    (0x246E, 'M', u'15'),
+    (0x246F, 'M', u'16'),
+    (0x2470, 'M', u'17'),
+    (0x2471, 'M', u'18'),
+    (0x2472, 'M', u'19'),
+    (0x2473, 'M', u'20'),
+    (0x2474, '3', u'(1)'),
+    (0x2475, '3', u'(2)'),
+    (0x2476, '3', u'(3)'),
+    (0x2477, '3', u'(4)'),
+    (0x2478, '3', u'(5)'),
+    (0x2479, '3', u'(6)'),
+    (0x247A, '3', u'(7)'),
+    (0x247B, '3', u'(8)'),
+    (0x247C, '3', u'(9)'),
+    (0x247D, '3', u'(10)'),
+    (0x247E, '3', u'(11)'),
+    (0x247F, '3', u'(12)'),
+    (0x2480, '3', u'(13)'),
+    (0x2481, '3', u'(14)'),
+    (0x2482, '3', u'(15)'),
+    (0x2483, '3', u'(16)'),
+    (0x2484, '3', u'(17)'),
+    (0x2485, '3', u'(18)'),
+    (0x2486, '3', u'(19)'),
+    (0x2487, '3', u'(20)'),
+    (0x2488, 'X'),
+    (0x249C, '3', u'(a)'),
+    (0x249D, '3', u'(b)'),
+    (0x249E, '3', u'(c)'),
+    (0x249F, '3', u'(d)'),
+    (0x24A0, '3', u'(e)'),
+    (0x24A1, '3', u'(f)'),
+    (0x24A2, '3', u'(g)'),
+    (0x24A3, '3', u'(h)'),
+    (0x24A4, '3', u'(i)'),
+    (0x24A5, '3', u'(j)'),
+    (0x24A6, '3', u'(k)'),
+    (0x24A7, '3', u'(l)'),
+    (0x24A8, '3', u'(m)'),
+    (0x24A9, '3', u'(n)'),
+    (0x24AA, '3', u'(o)'),
+    (0x24AB, '3', u'(p)'),
+    (0x24AC, '3', u'(q)'),
+    (0x24AD, '3', u'(r)'),
+    (0x24AE, '3', u'(s)'),
+    (0x24AF, '3', u'(t)'),
+    (0x24B0, '3', u'(u)'),
+    (0x24B1, '3', u'(v)'),
+    (0x24B2, '3', u'(w)'),
+    (0x24B3, '3', u'(x)'),
+    (0x24B4, '3', u'(y)'),
+    (0x24B5, '3', u'(z)'),
+    (0x24B6, 'M', u'a'),
+    (0x24B7, 'M', u'b'),
+    (0x24B8, 'M', u'c'),
+    (0x24B9, 'M', u'd'),
+    (0x24BA, 'M', u'e'),
+    (0x24BB, 'M', u'f'),
+    (0x24BC, 'M', u'g'),
+    (0x24BD, 'M', u'h'),
+    (0x24BE, 'M', u'i'),
+    (0x24BF, 'M', u'j'),
+    (0x24C0, 'M', u'k'),
+    (0x24C1, 'M', u'l'),
+    (0x24C2, 'M', u'm'),
+    (0x24C3, 'M', u'n'),
+    (0x24C4, 'M', u'o'),
+    (0x24C5, 'M', u'p'),
+    (0x24C6, 'M', u'q'),
+    (0x24C7, 'M', u'r'),
+    (0x24C8, 'M', u's'),
+    (0x24C9, 'M', u't'),
+    (0x24CA, 'M', u'u'),
+    (0x24CB, 'M', u'v'),
+    (0x24CC, 'M', u'w'),
+    (0x24CD, 'M', u'x'),
+    (0x24CE, 'M', u'y'),
+    (0x24CF, 'M', u'z'),
+    (0x24D0, 'M', u'a'),
+    (0x24D1, 'M', u'b'),
+    (0x24D2, 'M', u'c'),
+    (0x24D3, 'M', u'd'),
+    (0x24D4, 'M', u'e'),
+    (0x24D5, 'M', u'f'),
+    (0x24D6, 'M', u'g'),
+    (0x24D7, 'M', u'h'),
+    (0x24D8, 'M', u'i'),
+    ]
+
+def _seg_24():
+    return [
+    (0x24D9, 'M', u'j'),
+    (0x24DA, 'M', u'k'),
+    (0x24DB, 'M', u'l'),
+    (0x24DC, 'M', u'm'),
+    (0x24DD, 'M', u'n'),
+    (0x24DE, 'M', u'o'),
+    (0x24DF, 'M', u'p'),
+    (0x24E0, 'M', u'q'),
+    (0x24E1, 'M', u'r'),
+    (0x24E2, 'M', u's'),
+    (0x24E3, 'M', u't'),
+    (0x24E4, 'M', u'u'),
+    (0x24E5, 'M', u'v'),
+    (0x24E6, 'M', u'w'),
+    (0x24E7, 'M', u'x'),
+    (0x24E8, 'M', u'y'),
+    (0x24E9, 'M', u'z'),
+    (0x24EA, 'M', u'0'),
+    (0x24EB, 'V'),
+    (0x2A0C, 'M', u'∫∫∫∫'),
+    (0x2A0D, 'V'),
+    (0x2A74, '3', u'::='),
+    (0x2A75, '3', u'=='),
+    (0x2A76, '3', u'==='),
+    (0x2A77, 'V'),
+    (0x2ADC, 'M', u'⫝̸'),
+    (0x2ADD, 'V'),
+    (0x2B74, 'X'),
+    (0x2B76, 'V'),
+    (0x2B96, 'X'),
+    (0x2B98, 'V'),
+    (0x2BC9, 'X'),
+    (0x2BCA, 'V'),
+    (0x2BFF, 'X'),
+    (0x2C00, 'M', u'ⰰ'),
+    (0x2C01, 'M', u'ⰱ'),
+    (0x2C02, 'M', u'ⰲ'),
+    (0x2C03, 'M', u'ⰳ'),
+    (0x2C04, 'M', u'ⰴ'),
+    (0x2C05, 'M', u'ⰵ'),
+    (0x2C06, 'M', u'ⰶ'),
+    (0x2C07, 'M', u'ⰷ'),
+    (0x2C08, 'M', u'ⰸ'),
+    (0x2C09, 'M', u'ⰹ'),
+    (0x2C0A, 'M', u'ⰺ'),
+    (0x2C0B, 'M', u'ⰻ'),
+    (0x2C0C, 'M', u'ⰼ'),
+    (0x2C0D, 'M', u'ⰽ'),
+    (0x2C0E, 'M', u'ⰾ'),
+    (0x2C0F, 'M', u'ⰿ'),
+    (0x2C10, 'M', u'ⱀ'),
+    (0x2C11, 'M', u'ⱁ'),
+    (0x2C12, 'M', u'ⱂ'),
+    (0x2C13, 'M', u'ⱃ'),
+    (0x2C14, 'M', u'ⱄ'),
+    (0x2C15, 'M', u'ⱅ'),
+    (0x2C16, 'M', u'ⱆ'),
+    (0x2C17, 'M', u'ⱇ'),
+    (0x2C18, 'M', u'ⱈ'),
+    (0x2C19, 'M', u'ⱉ'),
+    (0x2C1A, 'M', u'ⱊ'),
+    (0x2C1B, 'M', u'ⱋ'),
+    (0x2C1C, 'M', u'ⱌ'),
+    (0x2C1D, 'M', u'ⱍ'),
+    (0x2C1E, 'M', u'ⱎ'),
+    (0x2C1F, 'M', u'ⱏ'),
+    (0x2C20, 'M', u'ⱐ'),
+    (0x2C21, 'M', u'ⱑ'),
+    (0x2C22, 'M', u'ⱒ'),
+    (0x2C23, 'M', u'ⱓ'),
+    (0x2C24, 'M', u'ⱔ'),
+    (0x2C25, 'M', u'ⱕ'),
+    (0x2C26, 'M', u'ⱖ'),
+    (0x2C27, 'M', u'ⱗ'),
+    (0x2C28, 'M', u'ⱘ'),
+    (0x2C29, 'M', u'ⱙ'),
+    (0x2C2A, 'M', u'ⱚ'),
+    (0x2C2B, 'M', u'ⱛ'),
+    (0x2C2C, 'M', u'ⱜ'),
+    (0x2C2D, 'M', u'ⱝ'),
+    (0x2C2E, 'M', u'ⱞ'),
+    (0x2C2F, 'X'),
+    (0x2C30, 'V'),
+    (0x2C5F, 'X'),
+    (0x2C60, 'M', u'ⱡ'),
+    (0x2C61, 'V'),
+    (0x2C62, 'M', u'ɫ'),
+    (0x2C63, 'M', u'ᵽ'),
+    (0x2C64, 'M', u'ɽ'),
+    (0x2C65, 'V'),
+    (0x2C67, 'M', u'ⱨ'),
+    (0x2C68, 'V'),
+    (0x2C69, 'M', u'ⱪ'),
+    (0x2C6A, 'V'),
+    (0x2C6B, 'M', u'ⱬ'),
+    (0x2C6C, 'V'),
+    (0x2C6D, 'M', u'ɑ'),
+    (0x2C6E, 'M', u'ɱ'),
+    (0x2C6F, 'M', u'ɐ'),
+    (0x2C70, 'M', u'ɒ'),
+    ]
+
+def _seg_25():
+    return [
+    (0x2C71, 'V'),
+    (0x2C72, 'M', u'ⱳ'),
+    (0x2C73, 'V'),
+    (0x2C75, 'M', u'ⱶ'),
+    (0x2C76, 'V'),
+    (0x2C7C, 'M', u'j'),
+    (0x2C7D, 'M', u'v'),
+    (0x2C7E, 'M', u'È¿'),
+    (0x2C7F, 'M', u'É€'),
+    (0x2C80, 'M', u'ⲁ'),
+    (0x2C81, 'V'),
+    (0x2C82, 'M', u'ⲃ'),
+    (0x2C83, 'V'),
+    (0x2C84, 'M', u'ⲅ'),
+    (0x2C85, 'V'),
+    (0x2C86, 'M', u'ⲇ'),
+    (0x2C87, 'V'),
+    (0x2C88, 'M', u'ⲉ'),
+    (0x2C89, 'V'),
+    (0x2C8A, 'M', u'ⲋ'),
+    (0x2C8B, 'V'),
+    (0x2C8C, 'M', u'ⲍ'),
+    (0x2C8D, 'V'),
+    (0x2C8E, 'M', u'ⲏ'),
+    (0x2C8F, 'V'),
+    (0x2C90, 'M', u'ⲑ'),
+    (0x2C91, 'V'),
+    (0x2C92, 'M', u'ⲓ'),
+    (0x2C93, 'V'),
+    (0x2C94, 'M', u'ⲕ'),
+    (0x2C95, 'V'),
+    (0x2C96, 'M', u'ⲗ'),
+    (0x2C97, 'V'),
+    (0x2C98, 'M', u'ⲙ'),
+    (0x2C99, 'V'),
+    (0x2C9A, 'M', u'ⲛ'),
+    (0x2C9B, 'V'),
+    (0x2C9C, 'M', u'ⲝ'),
+    (0x2C9D, 'V'),
+    (0x2C9E, 'M', u'ⲟ'),
+    (0x2C9F, 'V'),
+    (0x2CA0, 'M', u'ⲡ'),
+    (0x2CA1, 'V'),
+    (0x2CA2, 'M', u'ⲣ'),
+    (0x2CA3, 'V'),
+    (0x2CA4, 'M', u'ⲥ'),
+    (0x2CA5, 'V'),
+    (0x2CA6, 'M', u'ⲧ'),
+    (0x2CA7, 'V'),
+    (0x2CA8, 'M', u'ⲩ'),
+    (0x2CA9, 'V'),
+    (0x2CAA, 'M', u'ⲫ'),
+    (0x2CAB, 'V'),
+    (0x2CAC, 'M', u'ⲭ'),
+    (0x2CAD, 'V'),
+    (0x2CAE, 'M', u'ⲯ'),
+    (0x2CAF, 'V'),
+    (0x2CB0, 'M', u'ⲱ'),
+    (0x2CB1, 'V'),
+    (0x2CB2, 'M', u'ⲳ'),
+    (0x2CB3, 'V'),
+    (0x2CB4, 'M', u'ⲵ'),
+    (0x2CB5, 'V'),
+    (0x2CB6, 'M', u'ⲷ'),
+    (0x2CB7, 'V'),
+    (0x2CB8, 'M', u'ⲹ'),
+    (0x2CB9, 'V'),
+    (0x2CBA, 'M', u'ⲻ'),
+    (0x2CBB, 'V'),
+    (0x2CBC, 'M', u'ⲽ'),
+    (0x2CBD, 'V'),
+    (0x2CBE, 'M', u'ⲿ'),
+    (0x2CBF, 'V'),
+    (0x2CC0, 'M', u'ⳁ'),
+    (0x2CC1, 'V'),
+    (0x2CC2, 'M', u'ⳃ'),
+    (0x2CC3, 'V'),
+    (0x2CC4, 'M', u'ⳅ'),
+    (0x2CC5, 'V'),
+    (0x2CC6, 'M', u'ⳇ'),
+    (0x2CC7, 'V'),
+    (0x2CC8, 'M', u'ⳉ'),
+    (0x2CC9, 'V'),
+    (0x2CCA, 'M', u'ⳋ'),
+    (0x2CCB, 'V'),
+    (0x2CCC, 'M', u'ⳍ'),
+    (0x2CCD, 'V'),
+    (0x2CCE, 'M', u'ⳏ'),
+    (0x2CCF, 'V'),
+    (0x2CD0, 'M', u'ⳑ'),
+    (0x2CD1, 'V'),
+    (0x2CD2, 'M', u'ⳓ'),
+    (0x2CD3, 'V'),
+    (0x2CD4, 'M', u'ⳕ'),
+    (0x2CD5, 'V'),
+    (0x2CD6, 'M', u'ⳗ'),
+    (0x2CD7, 'V'),
+    (0x2CD8, 'M', u'ⳙ'),
+    (0x2CD9, 'V'),
+    (0x2CDA, 'M', u'ⳛ'),
+    ]
+
+def _seg_26():
+    return [
+    (0x2CDB, 'V'),
+    (0x2CDC, 'M', u'ⳝ'),
+    (0x2CDD, 'V'),
+    (0x2CDE, 'M', u'ⳟ'),
+    (0x2CDF, 'V'),
+    (0x2CE0, 'M', u'ⳡ'),
+    (0x2CE1, 'V'),
+    (0x2CE2, 'M', u'ⳣ'),
+    (0x2CE3, 'V'),
+    (0x2CEB, 'M', u'ⳬ'),
+    (0x2CEC, 'V'),
+    (0x2CED, 'M', u'ⳮ'),
+    (0x2CEE, 'V'),
+    (0x2CF2, 'M', u'ⳳ'),
+    (0x2CF3, 'V'),
+    (0x2CF4, 'X'),
+    (0x2CF9, 'V'),
+    (0x2D26, 'X'),
+    (0x2D27, 'V'),
+    (0x2D28, 'X'),
+    (0x2D2D, 'V'),
+    (0x2D2E, 'X'),
+    (0x2D30, 'V'),
+    (0x2D68, 'X'),
+    (0x2D6F, 'M', u'ⵡ'),
+    (0x2D70, 'V'),
+    (0x2D71, 'X'),
+    (0x2D7F, 'V'),
+    (0x2D97, 'X'),
+    (0x2DA0, 'V'),
+    (0x2DA7, 'X'),
+    (0x2DA8, 'V'),
+    (0x2DAF, 'X'),
+    (0x2DB0, 'V'),
+    (0x2DB7, 'X'),
+    (0x2DB8, 'V'),
+    (0x2DBF, 'X'),
+    (0x2DC0, 'V'),
+    (0x2DC7, 'X'),
+    (0x2DC8, 'V'),
+    (0x2DCF, 'X'),
+    (0x2DD0, 'V'),
+    (0x2DD7, 'X'),
+    (0x2DD8, 'V'),
+    (0x2DDF, 'X'),
+    (0x2DE0, 'V'),
+    (0x2E4F, 'X'),
+    (0x2E80, 'V'),
+    (0x2E9A, 'X'),
+    (0x2E9B, 'V'),
+    (0x2E9F, 'M', u'母'),
+    (0x2EA0, 'V'),
+    (0x2EF3, 'M', u'龟'),
+    (0x2EF4, 'X'),
+    (0x2F00, 'M', u'一'),
+    (0x2F01, 'M', u'丨'),
+    (0x2F02, 'M', u'丶'),
+    (0x2F03, 'M', u'丿'),
+    (0x2F04, 'M', u'乙'),
+    (0x2F05, 'M', u'亅'),
+    (0x2F06, 'M', u'二'),
+    (0x2F07, 'M', u'亠'),
+    (0x2F08, 'M', u'人'),
+    (0x2F09, 'M', u'儿'),
+    (0x2F0A, 'M', u'入'),
+    (0x2F0B, 'M', u'八'),
+    (0x2F0C, 'M', u'冂'),
+    (0x2F0D, 'M', u'冖'),
+    (0x2F0E, 'M', u'冫'),
+    (0x2F0F, 'M', u'几'),
+    (0x2F10, 'M', u'凵'),
+    (0x2F11, 'M', u'刀'),
+    (0x2F12, 'M', u'力'),
+    (0x2F13, 'M', u'勹'),
+    (0x2F14, 'M', u'匕'),
+    (0x2F15, 'M', u'匚'),
+    (0x2F16, 'M', u'匸'),
+    (0x2F17, 'M', u'十'),
+    (0x2F18, 'M', u'卜'),
+    (0x2F19, 'M', u'卩'),
+    (0x2F1A, 'M', u'厂'),
+    (0x2F1B, 'M', u'厶'),
+    (0x2F1C, 'M', u'又'),
+    (0x2F1D, 'M', u'口'),
+    (0x2F1E, 'M', u'囗'),
+    (0x2F1F, 'M', u'土'),
+    (0x2F20, 'M', u'士'),
+    (0x2F21, 'M', u'夂'),
+    (0x2F22, 'M', u'夊'),
+    (0x2F23, 'M', u'夕'),
+    (0x2F24, 'M', u'大'),
+    (0x2F25, 'M', u'女'),
+    (0x2F26, 'M', u'子'),
+    (0x2F27, 'M', u'宀'),
+    (0x2F28, 'M', u'寸'),
+    (0x2F29, 'M', u'小'),
+    (0x2F2A, 'M', u'尢'),
+    (0x2F2B, 'M', u'尸'),
+    (0x2F2C, 'M', u'屮'),
+    (0x2F2D, 'M', u'山'),
+    ]
+
+def _seg_27():
+    return [
+    (0x2F2E, 'M', u'巛'),
+    (0x2F2F, 'M', u'工'),
+    (0x2F30, 'M', u'己'),
+    (0x2F31, 'M', u'巾'),
+    (0x2F32, 'M', u'干'),
+    (0x2F33, 'M', u'幺'),
+    (0x2F34, 'M', u'广'),
+    (0x2F35, 'M', u'廴'),
+    (0x2F36, 'M', u'廾'),
+    (0x2F37, 'M', u'弋'),
+    (0x2F38, 'M', u'弓'),
+    (0x2F39, 'M', u'彐'),
+    (0x2F3A, 'M', u'彡'),
+    (0x2F3B, 'M', u'彳'),
+    (0x2F3C, 'M', u'心'),
+    (0x2F3D, 'M', u'戈'),
+    (0x2F3E, 'M', u'戶'),
+    (0x2F3F, 'M', u'手'),
+    (0x2F40, 'M', u'支'),
+    (0x2F41, 'M', u'攴'),
+    (0x2F42, 'M', u'文'),
+    (0x2F43, 'M', u'斗'),
+    (0x2F44, 'M', u'斤'),
+    (0x2F45, 'M', u'方'),
+    (0x2F46, 'M', u'无'),
+    (0x2F47, 'M', u'日'),
+    (0x2F48, 'M', u'曰'),
+    (0x2F49, 'M', u'月'),
+    (0x2F4A, 'M', u'木'),
+    (0x2F4B, 'M', u'欠'),
+    (0x2F4C, 'M', u'止'),
+    (0x2F4D, 'M', u'歹'),
+    (0x2F4E, 'M', u'殳'),
+    (0x2F4F, 'M', u'毋'),
+    (0x2F50, 'M', u'比'),
+    (0x2F51, 'M', u'毛'),
+    (0x2F52, 'M', u'氏'),
+    (0x2F53, 'M', u'气'),
+    (0x2F54, 'M', u'水'),
+    (0x2F55, 'M', u'火'),
+    (0x2F56, 'M', u'爪'),
+    (0x2F57, 'M', u'父'),
+    (0x2F58, 'M', u'爻'),
+    (0x2F59, 'M', u'爿'),
+    (0x2F5A, 'M', u'片'),
+    (0x2F5B, 'M', u'牙'),
+    (0x2F5C, 'M', u'牛'),
+    (0x2F5D, 'M', u'犬'),
+    (0x2F5E, 'M', u'玄'),
+    (0x2F5F, 'M', u'玉'),
+    (0x2F60, 'M', u'瓜'),
+    (0x2F61, 'M', u'瓦'),
+    (0x2F62, 'M', u'甘'),
+    (0x2F63, 'M', u'生'),
+    (0x2F64, 'M', u'用'),
+    (0x2F65, 'M', u'田'),
+    (0x2F66, 'M', u'疋'),
+    (0x2F67, 'M', u'疒'),
+    (0x2F68, 'M', u'癶'),
+    (0x2F69, 'M', u'白'),
+    (0x2F6A, 'M', u'皮'),
+    (0x2F6B, 'M', u'皿'),
+    (0x2F6C, 'M', u'目'),
+    (0x2F6D, 'M', u'矛'),
+    (0x2F6E, 'M', u'矢'),
+    (0x2F6F, 'M', u'石'),
+    (0x2F70, 'M', u'示'),
+    (0x2F71, 'M', u'禸'),
+    (0x2F72, 'M', u'禾'),
+    (0x2F73, 'M', u'穴'),
+    (0x2F74, 'M', u'立'),
+    (0x2F75, 'M', u'竹'),
+    (0x2F76, 'M', u'米'),
+    (0x2F77, 'M', u'糸'),
+    (0x2F78, 'M', u'缶'),
+    (0x2F79, 'M', u'网'),
+    (0x2F7A, 'M', u'羊'),
+    (0x2F7B, 'M', u'羽'),
+    (0x2F7C, 'M', u'老'),
+    (0x2F7D, 'M', u'而'),
+    (0x2F7E, 'M', u'耒'),
+    (0x2F7F, 'M', u'耳'),
+    (0x2F80, 'M', u'聿'),
+    (0x2F81, 'M', u'肉'),
+    (0x2F82, 'M', u'臣'),
+    (0x2F83, 'M', u'自'),
+    (0x2F84, 'M', u'至'),
+    (0x2F85, 'M', u'臼'),
+    (0x2F86, 'M', u'舌'),
+    (0x2F87, 'M', u'舛'),
+    (0x2F88, 'M', u'舟'),
+    (0x2F89, 'M', u'艮'),
+    (0x2F8A, 'M', u'色'),
+    (0x2F8B, 'M', u'艸'),
+    (0x2F8C, 'M', u'虍'),
+    (0x2F8D, 'M', u'虫'),
+    (0x2F8E, 'M', u'血'),
+    (0x2F8F, 'M', u'行'),
+    (0x2F90, 'M', u'衣'),
+    (0x2F91, 'M', u'襾'),
+    ]
+
+def _seg_28():
+    return [
+    (0x2F92, 'M', u'見'),
+    (0x2F93, 'M', u'角'),
+    (0x2F94, 'M', u'言'),
+    (0x2F95, 'M', u'谷'),
+    (0x2F96, 'M', u'豆'),
+    (0x2F97, 'M', u'豕'),
+    (0x2F98, 'M', u'豸'),
+    (0x2F99, 'M', u'貝'),
+    (0x2F9A, 'M', u'赤'),
+    (0x2F9B, 'M', u'走'),
+    (0x2F9C, 'M', u'足'),
+    (0x2F9D, 'M', u'身'),
+    (0x2F9E, 'M', u'車'),
+    (0x2F9F, 'M', u'辛'),
+    (0x2FA0, 'M', u'辰'),
+    (0x2FA1, 'M', u'辵'),
+    (0x2FA2, 'M', u'邑'),
+    (0x2FA3, 'M', u'酉'),
+    (0x2FA4, 'M', u'釆'),
+    (0x2FA5, 'M', u'里'),
+    (0x2FA6, 'M', u'金'),
+    (0x2FA7, 'M', u'長'),
+    (0x2FA8, 'M', u'門'),
+    (0x2FA9, 'M', u'阜'),
+    (0x2FAA, 'M', u'隶'),
+    (0x2FAB, 'M', u'隹'),
+    (0x2FAC, 'M', u'雨'),
+    (0x2FAD, 'M', u'靑'),
+    (0x2FAE, 'M', u'非'),
+    (0x2FAF, 'M', u'面'),
+    (0x2FB0, 'M', u'革'),
+    (0x2FB1, 'M', u'韋'),
+    (0x2FB2, 'M', u'韭'),
+    (0x2FB3, 'M', u'音'),
+    (0x2FB4, 'M', u'頁'),
+    (0x2FB5, 'M', u'風'),
+    (0x2FB6, 'M', u'飛'),
+    (0x2FB7, 'M', u'食'),
+    (0x2FB8, 'M', u'首'),
+    (0x2FB9, 'M', u'香'),
+    (0x2FBA, 'M', u'馬'),
+    (0x2FBB, 'M', u'骨'),
+    (0x2FBC, 'M', u'高'),
+    (0x2FBD, 'M', u'髟'),
+    (0x2FBE, 'M', u'鬥'),
+    (0x2FBF, 'M', u'鬯'),
+    (0x2FC0, 'M', u'鬲'),
+    (0x2FC1, 'M', u'鬼'),
+    (0x2FC2, 'M', u'魚'),
+    (0x2FC3, 'M', u'鳥'),
+    (0x2FC4, 'M', u'鹵'),
+    (0x2FC5, 'M', u'鹿'),
+    (0x2FC6, 'M', u'麥'),
+    (0x2FC7, 'M', u'麻'),
+    (0x2FC8, 'M', u'黃'),
+    (0x2FC9, 'M', u'黍'),
+    (0x2FCA, 'M', u'黑'),
+    (0x2FCB, 'M', u'黹'),
+    (0x2FCC, 'M', u'黽'),
+    (0x2FCD, 'M', u'鼎'),
+    (0x2FCE, 'M', u'鼓'),
+    (0x2FCF, 'M', u'鼠'),
+    (0x2FD0, 'M', u'鼻'),
+    (0x2FD1, 'M', u'齊'),
+    (0x2FD2, 'M', u'齒'),
+    (0x2FD3, 'M', u'龍'),
+    (0x2FD4, 'M', u'龜'),
+    (0x2FD5, 'M', u'龠'),
+    (0x2FD6, 'X'),
+    (0x3000, '3', u' '),
+    (0x3001, 'V'),
+    (0x3002, 'M', u'.'),
+    (0x3003, 'V'),
+    (0x3036, 'M', u'〒'),
+    (0x3037, 'V'),
+    (0x3038, 'M', u'十'),
+    (0x3039, 'M', u'卄'),
+    (0x303A, 'M', u'卅'),
+    (0x303B, 'V'),
+    (0x3040, 'X'),
+    (0x3041, 'V'),
+    (0x3097, 'X'),
+    (0x3099, 'V'),
+    (0x309B, '3', u' ゙'),
+    (0x309C, '3', u' ゚'),
+    (0x309D, 'V'),
+    (0x309F, 'M', u'より'),
+    (0x30A0, 'V'),
+    (0x30FF, 'M', u'コト'),
+    (0x3100, 'X'),
+    (0x3105, 'V'),
+    (0x3130, 'X'),
+    (0x3131, 'M', u'ᄀ'),
+    (0x3132, 'M', u'ᄁ'),
+    (0x3133, 'M', u'ᆪ'),
+    (0x3134, 'M', u'ᄂ'),
+    (0x3135, 'M', u'ᆬ'),
+    (0x3136, 'M', u'ᆭ'),
+    (0x3137, 'M', u'ᄃ'),
+    (0x3138, 'M', u'ᄄ'),
+    ]
+
+def _seg_29():
+    return [
+    (0x3139, 'M', u'ᄅ'),
+    (0x313A, 'M', u'ᆰ'),
+    (0x313B, 'M', u'ᆱ'),
+    (0x313C, 'M', u'ᆲ'),
+    (0x313D, 'M', u'ᆳ'),
+    (0x313E, 'M', u'ᆴ'),
+    (0x313F, 'M', u'ᆵ'),
+    (0x3140, 'M', u'ᄚ'),
+    (0x3141, 'M', u'ᄆ'),
+    (0x3142, 'M', u'ᄇ'),
+    (0x3143, 'M', u'ᄈ'),
+    (0x3144, 'M', u'ᄡ'),
+    (0x3145, 'M', u'ᄉ'),
+    (0x3146, 'M', u'ᄊ'),
+    (0x3147, 'M', u'ᄋ'),
+    (0x3148, 'M', u'ᄌ'),
+    (0x3149, 'M', u'ᄍ'),
+    (0x314A, 'M', u'ᄎ'),
+    (0x314B, 'M', u'ᄏ'),
+    (0x314C, 'M', u'ᄐ'),
+    (0x314D, 'M', u'ᄑ'),
+    (0x314E, 'M', u'ᄒ'),
+    (0x314F, 'M', u'ᅡ'),
+    (0x3150, 'M', u'ᅢ'),
+    (0x3151, 'M', u'ᅣ'),
+    (0x3152, 'M', u'ᅤ'),
+    (0x3153, 'M', u'ᅥ'),
+    (0x3154, 'M', u'ᅦ'),
+    (0x3155, 'M', u'ᅧ'),
+    (0x3156, 'M', u'ᅨ'),
+    (0x3157, 'M', u'ᅩ'),
+    (0x3158, 'M', u'ᅪ'),
+    (0x3159, 'M', u'ᅫ'),
+    (0x315A, 'M', u'ᅬ'),
+    (0x315B, 'M', u'ᅭ'),
+    (0x315C, 'M', u'ᅮ'),
+    (0x315D, 'M', u'ᅯ'),
+    (0x315E, 'M', u'ᅰ'),
+    (0x315F, 'M', u'ᅱ'),
+    (0x3160, 'M', u'ᅲ'),
+    (0x3161, 'M', u'ᅳ'),
+    (0x3162, 'M', u'ᅴ'),
+    (0x3163, 'M', u'ᅵ'),
+    (0x3164, 'X'),
+    (0x3165, 'M', u'ᄔ'),
+    (0x3166, 'M', u'ᄕ'),
+    (0x3167, 'M', u'ᇇ'),
+    (0x3168, 'M', u'ᇈ'),
+    (0x3169, 'M', u'ᇌ'),
+    (0x316A, 'M', u'ᇎ'),
+    (0x316B, 'M', u'ᇓ'),
+    (0x316C, 'M', u'ᇗ'),
+    (0x316D, 'M', u'ᇙ'),
+    (0x316E, 'M', u'ᄜ'),
+    (0x316F, 'M', u'ᇝ'),
+    (0x3170, 'M', u'ᇟ'),
+    (0x3171, 'M', u'ᄝ'),
+    (0x3172, 'M', u'ᄞ'),
+    (0x3173, 'M', u'ᄠ'),
+    (0x3174, 'M', u'ᄢ'),
+    (0x3175, 'M', u'ᄣ'),
+    (0x3176, 'M', u'ᄧ'),
+    (0x3177, 'M', u'ᄩ'),
+    (0x3178, 'M', u'ᄫ'),
+    (0x3179, 'M', u'ᄬ'),
+    (0x317A, 'M', u'ᄭ'),
+    (0x317B, 'M', u'ᄮ'),
+    (0x317C, 'M', u'ᄯ'),
+    (0x317D, 'M', u'ᄲ'),
+    (0x317E, 'M', u'ᄶ'),
+    (0x317F, 'M', u'ᅀ'),
+    (0x3180, 'M', u'ᅇ'),
+    (0x3181, 'M', u'ᅌ'),
+    (0x3182, 'M', u'ᇱ'),
+    (0x3183, 'M', u'ᇲ'),
+    (0x3184, 'M', u'ᅗ'),
+    (0x3185, 'M', u'ᅘ'),
+    (0x3186, 'M', u'ᅙ'),
+    (0x3187, 'M', u'ᆄ'),
+    (0x3188, 'M', u'ᆅ'),
+    (0x3189, 'M', u'ᆈ'),
+    (0x318A, 'M', u'ᆑ'),
+    (0x318B, 'M', u'ᆒ'),
+    (0x318C, 'M', u'ᆔ'),
+    (0x318D, 'M', u'ᆞ'),
+    (0x318E, 'M', u'ᆡ'),
+    (0x318F, 'X'),
+    (0x3190, 'V'),
+    (0x3192, 'M', u'一'),
+    (0x3193, 'M', u'二'),
+    (0x3194, 'M', u'三'),
+    (0x3195, 'M', u'四'),
+    (0x3196, 'M', u'上'),
+    (0x3197, 'M', u'中'),
+    (0x3198, 'M', u'下'),
+    (0x3199, 'M', u'甲'),
+    (0x319A, 'M', u'乙'),
+    (0x319B, 'M', u'丙'),
+    (0x319C, 'M', u'丁'),
+    (0x319D, 'M', u'天'),
+    ]
+
+def _seg_30():
+    return [
+    (0x319E, 'M', u'地'),
+    (0x319F, 'M', u'人'),
+    (0x31A0, 'V'),
+    (0x31BB, 'X'),
+    (0x31C0, 'V'),
+    (0x31E4, 'X'),
+    (0x31F0, 'V'),
+    (0x3200, '3', u'(ᄀ)'),
+    (0x3201, '3', u'(ᄂ)'),
+    (0x3202, '3', u'(ᄃ)'),
+    (0x3203, '3', u'(ᄅ)'),
+    (0x3204, '3', u'(ᄆ)'),
+    (0x3205, '3', u'(ᄇ)'),
+    (0x3206, '3', u'(ᄉ)'),
+    (0x3207, '3', u'(ᄋ)'),
+    (0x3208, '3', u'(ᄌ)'),
+    (0x3209, '3', u'(ᄎ)'),
+    (0x320A, '3', u'(ᄏ)'),
+    (0x320B, '3', u'(ᄐ)'),
+    (0x320C, '3', u'(ᄑ)'),
+    (0x320D, '3', u'(ᄒ)'),
+    (0x320E, '3', u'(가)'),
+    (0x320F, '3', u'(나)'),
+    (0x3210, '3', u'(다)'),
+    (0x3211, '3', u'(라)'),
+    (0x3212, '3', u'(마)'),
+    (0x3213, '3', u'(바)'),
+    (0x3214, '3', u'(사)'),
+    (0x3215, '3', u'(아)'),
+    (0x3216, '3', u'(자)'),
+    (0x3217, '3', u'(차)'),
+    (0x3218, '3', u'(카)'),
+    (0x3219, '3', u'(타)'),
+    (0x321A, '3', u'(파)'),
+    (0x321B, '3', u'(하)'),
+    (0x321C, '3', u'(주)'),
+    (0x321D, '3', u'(오전)'),
+    (0x321E, '3', u'(오후)'),
+    (0x321F, 'X'),
+    (0x3220, '3', u'(一)'),
+    (0x3221, '3', u'(二)'),
+    (0x3222, '3', u'(三)'),
+    (0x3223, '3', u'(四)'),
+    (0x3224, '3', u'(五)'),
+    (0x3225, '3', u'(六)'),
+    (0x3226, '3', u'(七)'),
+    (0x3227, '3', u'(八)'),
+    (0x3228, '3', u'(九)'),
+    (0x3229, '3', u'(十)'),
+    (0x322A, '3', u'(月)'),
+    (0x322B, '3', u'(火)'),
+    (0x322C, '3', u'(水)'),
+    (0x322D, '3', u'(木)'),
+    (0x322E, '3', u'(金)'),
+    (0x322F, '3', u'(土)'),
+    (0x3230, '3', u'(日)'),
+    (0x3231, '3', u'(株)'),
+    (0x3232, '3', u'(有)'),
+    (0x3233, '3', u'(社)'),
+    (0x3234, '3', u'(名)'),
+    (0x3235, '3', u'(特)'),
+    (0x3236, '3', u'(財)'),
+    (0x3237, '3', u'(祝)'),
+    (0x3238, '3', u'(労)'),
+    (0x3239, '3', u'(代)'),
+    (0x323A, '3', u'(呼)'),
+    (0x323B, '3', u'(学)'),
+    (0x323C, '3', u'(監)'),
+    (0x323D, '3', u'(企)'),
+    (0x323E, '3', u'(資)'),
+    (0x323F, '3', u'(協)'),
+    (0x3240, '3', u'(祭)'),
+    (0x3241, '3', u'(休)'),
+    (0x3242, '3', u'(自)'),
+    (0x3243, '3', u'(至)'),
+    (0x3244, 'M', u'問'),
+    (0x3245, 'M', u'幼'),
+    (0x3246, 'M', u'文'),
+    (0x3247, 'M', u'箏'),
+    (0x3248, 'V'),
+    (0x3250, 'M', u'pte'),
+    (0x3251, 'M', u'21'),
+    (0x3252, 'M', u'22'),
+    (0x3253, 'M', u'23'),
+    (0x3254, 'M', u'24'),
+    (0x3255, 'M', u'25'),
+    (0x3256, 'M', u'26'),
+    (0x3257, 'M', u'27'),
+    (0x3258, 'M', u'28'),
+    (0x3259, 'M', u'29'),
+    (0x325A, 'M', u'30'),
+    (0x325B, 'M', u'31'),
+    (0x325C, 'M', u'32'),
+    (0x325D, 'M', u'33'),
+    (0x325E, 'M', u'34'),
+    (0x325F, 'M', u'35'),
+    (0x3260, 'M', u'ᄀ'),
+    (0x3261, 'M', u'ᄂ'),
+    (0x3262, 'M', u'ᄃ'),
+    (0x3263, 'M', u'ᄅ'),
+    ]
+
+def _seg_31():
+    return [
+    (0x3264, 'M', u'ᄆ'),
+    (0x3265, 'M', u'ᄇ'),
+    (0x3266, 'M', u'ᄉ'),
+    (0x3267, 'M', u'ᄋ'),
+    (0x3268, 'M', u'ᄌ'),
+    (0x3269, 'M', u'ᄎ'),
+    (0x326A, 'M', u'ᄏ'),
+    (0x326B, 'M', u'ᄐ'),
+    (0x326C, 'M', u'ᄑ'),
+    (0x326D, 'M', u'ᄒ'),
+    (0x326E, 'M', u'가'),
+    (0x326F, 'M', u'나'),
+    (0x3270, 'M', u'다'),
+    (0x3271, 'M', u'라'),
+    (0x3272, 'M', u'마'),
+    (0x3273, 'M', u'바'),
+    (0x3274, 'M', u'사'),
+    (0x3275, 'M', u'아'),
+    (0x3276, 'M', u'자'),
+    (0x3277, 'M', u'차'),
+    (0x3278, 'M', u'카'),
+    (0x3279, 'M', u'타'),
+    (0x327A, 'M', u'파'),
+    (0x327B, 'M', u'하'),
+    (0x327C, 'M', u'참고'),
+    (0x327D, 'M', u'주의'),
+    (0x327E, 'M', u'우'),
+    (0x327F, 'V'),
+    (0x3280, 'M', u'一'),
+    (0x3281, 'M', u'二'),
+    (0x3282, 'M', u'三'),
+    (0x3283, 'M', u'四'),
+    (0x3284, 'M', u'五'),
+    (0x3285, 'M', u'六'),
+    (0x3286, 'M', u'七'),
+    (0x3287, 'M', u'八'),
+    (0x3288, 'M', u'九'),
+    (0x3289, 'M', u'十'),
+    (0x328A, 'M', u'月'),
+    (0x328B, 'M', u'火'),
+    (0x328C, 'M', u'水'),
+    (0x328D, 'M', u'木'),
+    (0x328E, 'M', u'金'),
+    (0x328F, 'M', u'土'),
+    (0x3290, 'M', u'日'),
+    (0x3291, 'M', u'株'),
+    (0x3292, 'M', u'有'),
+    (0x3293, 'M', u'社'),
+    (0x3294, 'M', u'名'),
+    (0x3295, 'M', u'特'),
+    (0x3296, 'M', u'財'),
+    (0x3297, 'M', u'祝'),
+    (0x3298, 'M', u'労'),
+    (0x3299, 'M', u'秘'),
+    (0x329A, 'M', u'男'),
+    (0x329B, 'M', u'女'),
+    (0x329C, 'M', u'適'),
+    (0x329D, 'M', u'優'),
+    (0x329E, 'M', u'印'),
+    (0x329F, 'M', u'注'),
+    (0x32A0, 'M', u'項'),
+    (0x32A1, 'M', u'休'),
+    (0x32A2, 'M', u'写'),
+    (0x32A3, 'M', u'正'),
+    (0x32A4, 'M', u'上'),
+    (0x32A5, 'M', u'中'),
+    (0x32A6, 'M', u'下'),
+    (0x32A7, 'M', u'左'),
+    (0x32A8, 'M', u'右'),
+    (0x32A9, 'M', u'医'),
+    (0x32AA, 'M', u'宗'),
+    (0x32AB, 'M', u'学'),
+    (0x32AC, 'M', u'監'),
+    (0x32AD, 'M', u'企'),
+    (0x32AE, 'M', u'資'),
+    (0x32AF, 'M', u'協'),
+    (0x32B0, 'M', u'夜'),
+    (0x32B1, 'M', u'36'),
+    (0x32B2, 'M', u'37'),
+    (0x32B3, 'M', u'38'),
+    (0x32B4, 'M', u'39'),
+    (0x32B5, 'M', u'40'),
+    (0x32B6, 'M', u'41'),
+    (0x32B7, 'M', u'42'),
+    (0x32B8, 'M', u'43'),
+    (0x32B9, 'M', u'44'),
+    (0x32BA, 'M', u'45'),
+    (0x32BB, 'M', u'46'),
+    (0x32BC, 'M', u'47'),
+    (0x32BD, 'M', u'48'),
+    (0x32BE, 'M', u'49'),
+    (0x32BF, 'M', u'50'),
+    (0x32C0, 'M', u'1月'),
+    (0x32C1, 'M', u'2月'),
+    (0x32C2, 'M', u'3月'),
+    (0x32C3, 'M', u'4月'),
+    (0x32C4, 'M', u'5月'),
+    (0x32C5, 'M', u'6月'),
+    (0x32C6, 'M', u'7月'),
+    (0x32C7, 'M', u'8月'),
+    ]
+
+def _seg_32():
+    return [
+    (0x32C8, 'M', u'9月'),
+    (0x32C9, 'M', u'10月'),
+    (0x32CA, 'M', u'11月'),
+    (0x32CB, 'M', u'12月'),
+    (0x32CC, 'M', u'hg'),
+    (0x32CD, 'M', u'erg'),
+    (0x32CE, 'M', u'ev'),
+    (0x32CF, 'M', u'ltd'),
+    (0x32D0, 'M', u'ア'),
+    (0x32D1, 'M', u'イ'),
+    (0x32D2, 'M', u'ウ'),
+    (0x32D3, 'M', u'エ'),
+    (0x32D4, 'M', u'オ'),
+    (0x32D5, 'M', u'カ'),
+    (0x32D6, 'M', u'キ'),
+    (0x32D7, 'M', u'ク'),
+    (0x32D8, 'M', u'ケ'),
+    (0x32D9, 'M', u'コ'),
+    (0x32DA, 'M', u'サ'),
+    (0x32DB, 'M', u'シ'),
+    (0x32DC, 'M', u'ス'),
+    (0x32DD, 'M', u'セ'),
+    (0x32DE, 'M', u'ソ'),
+    (0x32DF, 'M', u'タ'),
+    (0x32E0, 'M', u'チ'),
+    (0x32E1, 'M', u'ツ'),
+    (0x32E2, 'M', u'テ'),
+    (0x32E3, 'M', u'ト'),
+    (0x32E4, 'M', u'ナ'),
+    (0x32E5, 'M', u'ニ'),
+    (0x32E6, 'M', u'ヌ'),
+    (0x32E7, 'M', u'ネ'),
+    (0x32E8, 'M', u'ノ'),
+    (0x32E9, 'M', u'ハ'),
+    (0x32EA, 'M', u'ヒ'),
+    (0x32EB, 'M', u'フ'),
+    (0x32EC, 'M', u'ヘ'),
+    (0x32ED, 'M', u'ホ'),
+    (0x32EE, 'M', u'マ'),
+    (0x32EF, 'M', u'ミ'),
+    (0x32F0, 'M', u'ム'),
+    (0x32F1, 'M', u'メ'),
+    (0x32F2, 'M', u'モ'),
+    (0x32F3, 'M', u'ヤ'),
+    (0x32F4, 'M', u'ユ'),
+    (0x32F5, 'M', u'ヨ'),
+    (0x32F6, 'M', u'ラ'),
+    (0x32F7, 'M', u'リ'),
+    (0x32F8, 'M', u'ル'),
+    (0x32F9, 'M', u'レ'),
+    (0x32FA, 'M', u'ロ'),
+    (0x32FB, 'M', u'ワ'),
+    (0x32FC, 'M', u'ヰ'),
+    (0x32FD, 'M', u'ヱ'),
+    (0x32FE, 'M', u'ヲ'),
+    (0x32FF, 'X'),
+    (0x3300, 'M', u'アパート'),
+    (0x3301, 'M', u'アルファ'),
+    (0x3302, 'M', u'アンペア'),
+    (0x3303, 'M', u'アール'),
+    (0x3304, 'M', u'イニング'),
+    (0x3305, 'M', u'インチ'),
+    (0x3306, 'M', u'ウォン'),
+    (0x3307, 'M', u'エスクード'),
+    (0x3308, 'M', u'エーカー'),
+    (0x3309, 'M', u'オンス'),
+    (0x330A, 'M', u'オーム'),
+    (0x330B, 'M', u'カイリ'),
+    (0x330C, 'M', u'カラット'),
+    (0x330D, 'M', u'カロリー'),
+    (0x330E, 'M', u'ガロン'),
+    (0x330F, 'M', u'ガンマ'),
+    (0x3310, 'M', u'ギガ'),
+    (0x3311, 'M', u'ギニー'),
+    (0x3312, 'M', u'キュリー'),
+    (0x3313, 'M', u'ギルダー'),
+    (0x3314, 'M', u'キロ'),
+    (0x3315, 'M', u'キログラム'),
+    (0x3316, 'M', u'キロメートル'),
+    (0x3317, 'M', u'キロワット'),
+    (0x3318, 'M', u'グラム'),
+    (0x3319, 'M', u'グラムトン'),
+    (0x331A, 'M', u'クルゼイロ'),
+    (0x331B, 'M', u'クローネ'),
+    (0x331C, 'M', u'ケース'),
+    (0x331D, 'M', u'コルナ'),
+    (0x331E, 'M', u'コーポ'),
+    (0x331F, 'M', u'サイクル'),
+    (0x3320, 'M', u'サンチーム'),
+    (0x3321, 'M', u'シリング'),
+    (0x3322, 'M', u'センチ'),
+    (0x3323, 'M', u'セント'),
+    (0x3324, 'M', u'ダース'),
+    (0x3325, 'M', u'デシ'),
+    (0x3326, 'M', u'ドル'),
+    (0x3327, 'M', u'トン'),
+    (0x3328, 'M', u'ナノ'),
+    (0x3329, 'M', u'ノット'),
+    (0x332A, 'M', u'ハイツ'),
+    (0x332B, 'M', u'パーセント'),
+    ]
+
+def _seg_33():
+    return [
+    (0x332C, 'M', u'パーツ'),
+    (0x332D, 'M', u'バーレル'),
+    (0x332E, 'M', u'ピアストル'),
+    (0x332F, 'M', u'ピクル'),
+    (0x3330, 'M', u'ピコ'),
+    (0x3331, 'M', u'ビル'),
+    (0x3332, 'M', u'ファラッド'),
+    (0x3333, 'M', u'フィート'),
+    (0x3334, 'M', u'ブッシェル'),
+    (0x3335, 'M', u'フラン'),
+    (0x3336, 'M', u'ヘクタール'),
+    (0x3337, 'M', u'ペソ'),
+    (0x3338, 'M', u'ペニヒ'),
+    (0x3339, 'M', u'ヘルツ'),
+    (0x333A, 'M', u'ペンス'),
+    (0x333B, 'M', u'ページ'),
+    (0x333C, 'M', u'ベータ'),
+    (0x333D, 'M', u'ポイント'),
+    (0x333E, 'M', u'ボルト'),
+    (0x333F, 'M', u'ホン'),
+    (0x3340, 'M', u'ポンド'),
+    (0x3341, 'M', u'ホール'),
+    (0x3342, 'M', u'ホーン'),
+    (0x3343, 'M', u'マイクロ'),
+    (0x3344, 'M', u'マイル'),
+    (0x3345, 'M', u'マッハ'),
+    (0x3346, 'M', u'マルク'),
+    (0x3347, 'M', u'マンション'),
+    (0x3348, 'M', u'ミクロン'),
+    (0x3349, 'M', u'ミリ'),
+    (0x334A, 'M', u'ミリバール'),
+    (0x334B, 'M', u'メガ'),
+    (0x334C, 'M', u'メガトン'),
+    (0x334D, 'M', u'メートル'),
+    (0x334E, 'M', u'ヤード'),
+    (0x334F, 'M', u'ヤール'),
+    (0x3350, 'M', u'ユアン'),
+    (0x3351, 'M', u'リットル'),
+    (0x3352, 'M', u'リラ'),
+    (0x3353, 'M', u'ルピー'),
+    (0x3354, 'M', u'ルーブル'),
+    (0x3355, 'M', u'レム'),
+    (0x3356, 'M', u'レントゲン'),
+    (0x3357, 'M', u'ワット'),
+    (0x3358, 'M', u'0点'),
+    (0x3359, 'M', u'1点'),
+    (0x335A, 'M', u'2点'),
+    (0x335B, 'M', u'3点'),
+    (0x335C, 'M', u'4点'),
+    (0x335D, 'M', u'5点'),
+    (0x335E, 'M', u'6点'),
+    (0x335F, 'M', u'7点'),
+    (0x3360, 'M', u'8点'),
+    (0x3361, 'M', u'9点'),
+    (0x3362, 'M', u'10点'),
+    (0x3363, 'M', u'11点'),
+    (0x3364, 'M', u'12点'),
+    (0x3365, 'M', u'13点'),
+    (0x3366, 'M', u'14点'),
+    (0x3367, 'M', u'15点'),
+    (0x3368, 'M', u'16点'),
+    (0x3369, 'M', u'17点'),
+    (0x336A, 'M', u'18点'),
+    (0x336B, 'M', u'19点'),
+    (0x336C, 'M', u'20点'),
+    (0x336D, 'M', u'21点'),
+    (0x336E, 'M', u'22点'),
+    (0x336F, 'M', u'23点'),
+    (0x3370, 'M', u'24点'),
+    (0x3371, 'M', u'hpa'),
+    (0x3372, 'M', u'da'),
+    (0x3373, 'M', u'au'),
+    (0x3374, 'M', u'bar'),
+    (0x3375, 'M', u'ov'),
+    (0x3376, 'M', u'pc'),
+    (0x3377, 'M', u'dm'),
+    (0x3378, 'M', u'dm2'),
+    (0x3379, 'M', u'dm3'),
+    (0x337A, 'M', u'iu'),
+    (0x337B, 'M', u'平成'),
+    (0x337C, 'M', u'昭和'),
+    (0x337D, 'M', u'大正'),
+    (0x337E, 'M', u'明治'),
+    (0x337F, 'M', u'株式会社'),
+    (0x3380, 'M', u'pa'),
+    (0x3381, 'M', u'na'),
+    (0x3382, 'M', u'μa'),
+    (0x3383, 'M', u'ma'),
+    (0x3384, 'M', u'ka'),
+    (0x3385, 'M', u'kb'),
+    (0x3386, 'M', u'mb'),
+    (0x3387, 'M', u'gb'),
+    (0x3388, 'M', u'cal'),
+    (0x3389, 'M', u'kcal'),
+    (0x338A, 'M', u'pf'),
+    (0x338B, 'M', u'nf'),
+    (0x338C, 'M', u'μf'),
+    (0x338D, 'M', u'μg'),
+    (0x338E, 'M', u'mg'),
+    (0x338F, 'M', u'kg'),
+    ]
+
+def _seg_34():
+    return [
+    (0x3390, 'M', u'hz'),
+    (0x3391, 'M', u'khz'),
+    (0x3392, 'M', u'mhz'),
+    (0x3393, 'M', u'ghz'),
+    (0x3394, 'M', u'thz'),
+    (0x3395, 'M', u'μl'),
+    (0x3396, 'M', u'ml'),
+    (0x3397, 'M', u'dl'),
+    (0x3398, 'M', u'kl'),
+    (0x3399, 'M', u'fm'),
+    (0x339A, 'M', u'nm'),
+    (0x339B, 'M', u'μm'),
+    (0x339C, 'M', u'mm'),
+    (0x339D, 'M', u'cm'),
+    (0x339E, 'M', u'km'),
+    (0x339F, 'M', u'mm2'),
+    (0x33A0, 'M', u'cm2'),
+    (0x33A1, 'M', u'm2'),
+    (0x33A2, 'M', u'km2'),
+    (0x33A3, 'M', u'mm3'),
+    (0x33A4, 'M', u'cm3'),
+    (0x33A5, 'M', u'm3'),
+    (0x33A6, 'M', u'km3'),
+    (0x33A7, 'M', u'm∕s'),
+    (0x33A8, 'M', u'm∕s2'),
+    (0x33A9, 'M', u'pa'),
+    (0x33AA, 'M', u'kpa'),
+    (0x33AB, 'M', u'mpa'),
+    (0x33AC, 'M', u'gpa'),
+    (0x33AD, 'M', u'rad'),
+    (0x33AE, 'M', u'rad∕s'),
+    (0x33AF, 'M', u'rad∕s2'),
+    (0x33B0, 'M', u'ps'),
+    (0x33B1, 'M', u'ns'),
+    (0x33B2, 'M', u'μs'),
+    (0x33B3, 'M', u'ms'),
+    (0x33B4, 'M', u'pv'),
+    (0x33B5, 'M', u'nv'),
+    (0x33B6, 'M', u'μv'),
+    (0x33B7, 'M', u'mv'),
+    (0x33B8, 'M', u'kv'),
+    (0x33B9, 'M', u'mv'),
+    (0x33BA, 'M', u'pw'),
+    (0x33BB, 'M', u'nw'),
+    (0x33BC, 'M', u'μw'),
+    (0x33BD, 'M', u'mw'),
+    (0x33BE, 'M', u'kw'),
+    (0x33BF, 'M', u'mw'),
+    (0x33C0, 'M', u'kω'),
+    (0x33C1, 'M', u'mω'),
+    (0x33C2, 'X'),
+    (0x33C3, 'M', u'bq'),
+    (0x33C4, 'M', u'cc'),
+    (0x33C5, 'M', u'cd'),
+    (0x33C6, 'M', u'c∕kg'),
+    (0x33C7, 'X'),
+    (0x33C8, 'M', u'db'),
+    (0x33C9, 'M', u'gy'),
+    (0x33CA, 'M', u'ha'),
+    (0x33CB, 'M', u'hp'),
+    (0x33CC, 'M', u'in'),
+    (0x33CD, 'M', u'kk'),
+    (0x33CE, 'M', u'km'),
+    (0x33CF, 'M', u'kt'),
+    (0x33D0, 'M', u'lm'),
+    (0x33D1, 'M', u'ln'),
+    (0x33D2, 'M', u'log'),
+    (0x33D3, 'M', u'lx'),
+    (0x33D4, 'M', u'mb'),
+    (0x33D5, 'M', u'mil'),
+    (0x33D6, 'M', u'mol'),
+    (0x33D7, 'M', u'ph'),
+    (0x33D8, 'X'),
+    (0x33D9, 'M', u'ppm'),
+    (0x33DA, 'M', u'pr'),
+    (0x33DB, 'M', u'sr'),
+    (0x33DC, 'M', u'sv'),
+    (0x33DD, 'M', u'wb'),
+    (0x33DE, 'M', u'v∕m'),
+    (0x33DF, 'M', u'a∕m'),
+    (0x33E0, 'M', u'1日'),
+    (0x33E1, 'M', u'2日'),
+    (0x33E2, 'M', u'3日'),
+    (0x33E3, 'M', u'4日'),
+    (0x33E4, 'M', u'5日'),
+    (0x33E5, 'M', u'6日'),
+    (0x33E6, 'M', u'7日'),
+    (0x33E7, 'M', u'8日'),
+    (0x33E8, 'M', u'9日'),
+    (0x33E9, 'M', u'10日'),
+    (0x33EA, 'M', u'11日'),
+    (0x33EB, 'M', u'12日'),
+    (0x33EC, 'M', u'13日'),
+    (0x33ED, 'M', u'14日'),
+    (0x33EE, 'M', u'15日'),
+    (0x33EF, 'M', u'16日'),
+    (0x33F0, 'M', u'17日'),
+    (0x33F1, 'M', u'18日'),
+    (0x33F2, 'M', u'19日'),
+    (0x33F3, 'M', u'20日'),
+    ]
+
+def _seg_35():
+    return [
+    (0x33F4, 'M', u'21日'),
+    (0x33F5, 'M', u'22日'),
+    (0x33F6, 'M', u'23日'),
+    (0x33F7, 'M', u'24日'),
+    (0x33F8, 'M', u'25日'),
+    (0x33F9, 'M', u'26日'),
+    (0x33FA, 'M', u'27日'),
+    (0x33FB, 'M', u'28日'),
+    (0x33FC, 'M', u'29日'),
+    (0x33FD, 'M', u'30日'),
+    (0x33FE, 'M', u'31日'),
+    (0x33FF, 'M', u'gal'),
+    (0x3400, 'V'),
+    (0x4DB6, 'X'),
+    (0x4DC0, 'V'),
+    (0x9FF0, 'X'),
+    (0xA000, 'V'),
+    (0xA48D, 'X'),
+    (0xA490, 'V'),
+    (0xA4C7, 'X'),
+    (0xA4D0, 'V'),
+    (0xA62C, 'X'),
+    (0xA640, 'M', u'ꙁ'),
+    (0xA641, 'V'),
+    (0xA642, 'M', u'ꙃ'),
+    (0xA643, 'V'),
+    (0xA644, 'M', u'ꙅ'),
+    (0xA645, 'V'),
+    (0xA646, 'M', u'ꙇ'),
+    (0xA647, 'V'),
+    (0xA648, 'M', u'ꙉ'),
+    (0xA649, 'V'),
+    (0xA64A, 'M', u'ꙋ'),
+    (0xA64B, 'V'),
+    (0xA64C, 'M', u'ꙍ'),
+    (0xA64D, 'V'),
+    (0xA64E, 'M', u'ꙏ'),
+    (0xA64F, 'V'),
+    (0xA650, 'M', u'ꙑ'),
+    (0xA651, 'V'),
+    (0xA652, 'M', u'ꙓ'),
+    (0xA653, 'V'),
+    (0xA654, 'M', u'ꙕ'),
+    (0xA655, 'V'),
+    (0xA656, 'M', u'ꙗ'),
+    (0xA657, 'V'),
+    (0xA658, 'M', u'ꙙ'),
+    (0xA659, 'V'),
+    (0xA65A, 'M', u'ꙛ'),
+    (0xA65B, 'V'),
+    (0xA65C, 'M', u'ꙝ'),
+    (0xA65D, 'V'),
+    (0xA65E, 'M', u'ꙟ'),
+    (0xA65F, 'V'),
+    (0xA660, 'M', u'ꙡ'),
+    (0xA661, 'V'),
+    (0xA662, 'M', u'ꙣ'),
+    (0xA663, 'V'),
+    (0xA664, 'M', u'ꙥ'),
+    (0xA665, 'V'),
+    (0xA666, 'M', u'ꙧ'),
+    (0xA667, 'V'),
+    (0xA668, 'M', u'ꙩ'),
+    (0xA669, 'V'),
+    (0xA66A, 'M', u'ꙫ'),
+    (0xA66B, 'V'),
+    (0xA66C, 'M', u'ꙭ'),
+    (0xA66D, 'V'),
+    (0xA680, 'M', u'ꚁ'),
+    (0xA681, 'V'),
+    (0xA682, 'M', u'ꚃ'),
+    (0xA683, 'V'),
+    (0xA684, 'M', u'ꚅ'),
+    (0xA685, 'V'),
+    (0xA686, 'M', u'ꚇ'),
+    (0xA687, 'V'),
+    (0xA688, 'M', u'ꚉ'),
+    (0xA689, 'V'),
+    (0xA68A, 'M', u'ꚋ'),
+    (0xA68B, 'V'),
+    (0xA68C, 'M', u'ꚍ'),
+    (0xA68D, 'V'),
+    (0xA68E, 'M', u'ꚏ'),
+    (0xA68F, 'V'),
+    (0xA690, 'M', u'ꚑ'),
+    (0xA691, 'V'),
+    (0xA692, 'M', u'ꚓ'),
+    (0xA693, 'V'),
+    (0xA694, 'M', u'ꚕ'),
+    (0xA695, 'V'),
+    (0xA696, 'M', u'ꚗ'),
+    (0xA697, 'V'),
+    (0xA698, 'M', u'ꚙ'),
+    (0xA699, 'V'),
+    (0xA69A, 'M', u'ꚛ'),
+    (0xA69B, 'V'),
+    (0xA69C, 'M', u'ъ'),
+    (0xA69D, 'M', u'ь'),
+    (0xA69E, 'V'),
+    (0xA6F8, 'X'),
+    ]
+
+def _seg_36():
+    return [
+    (0xA700, 'V'),
+    (0xA722, 'M', u'ꜣ'),
+    (0xA723, 'V'),
+    (0xA724, 'M', u'ꜥ'),
+    (0xA725, 'V'),
+    (0xA726, 'M', u'ꜧ'),
+    (0xA727, 'V'),
+    (0xA728, 'M', u'ꜩ'),
+    (0xA729, 'V'),
+    (0xA72A, 'M', u'ꜫ'),
+    (0xA72B, 'V'),
+    (0xA72C, 'M', u'ꜭ'),
+    (0xA72D, 'V'),
+    (0xA72E, 'M', u'ꜯ'),
+    (0xA72F, 'V'),
+    (0xA732, 'M', u'ꜳ'),
+    (0xA733, 'V'),
+    (0xA734, 'M', u'ꜵ'),
+    (0xA735, 'V'),
+    (0xA736, 'M', u'ꜷ'),
+    (0xA737, 'V'),
+    (0xA738, 'M', u'ꜹ'),
+    (0xA739, 'V'),
+    (0xA73A, 'M', u'ꜻ'),
+    (0xA73B, 'V'),
+    (0xA73C, 'M', u'ꜽ'),
+    (0xA73D, 'V'),
+    (0xA73E, 'M', u'ꜿ'),
+    (0xA73F, 'V'),
+    (0xA740, 'M', u'ꝁ'),
+    (0xA741, 'V'),
+    (0xA742, 'M', u'ꝃ'),
+    (0xA743, 'V'),
+    (0xA744, 'M', u'ꝅ'),
+    (0xA745, 'V'),
+    (0xA746, 'M', u'ꝇ'),
+    (0xA747, 'V'),
+    (0xA748, 'M', u'ꝉ'),
+    (0xA749, 'V'),
+    (0xA74A, 'M', u'ꝋ'),
+    (0xA74B, 'V'),
+    (0xA74C, 'M', u'ꝍ'),
+    (0xA74D, 'V'),
+    (0xA74E, 'M', u'ꝏ'),
+    (0xA74F, 'V'),
+    (0xA750, 'M', u'ꝑ'),
+    (0xA751, 'V'),
+    (0xA752, 'M', u'ꝓ'),
+    (0xA753, 'V'),
+    (0xA754, 'M', u'ꝕ'),
+    (0xA755, 'V'),
+    (0xA756, 'M', u'ꝗ'),
+    (0xA757, 'V'),
+    (0xA758, 'M', u'ꝙ'),
+    (0xA759, 'V'),
+    (0xA75A, 'M', u'ꝛ'),
+    (0xA75B, 'V'),
+    (0xA75C, 'M', u'ꝝ'),
+    (0xA75D, 'V'),
+    (0xA75E, 'M', u'ꝟ'),
+    (0xA75F, 'V'),
+    (0xA760, 'M', u'ꝡ'),
+    (0xA761, 'V'),
+    (0xA762, 'M', u'ꝣ'),
+    (0xA763, 'V'),
+    (0xA764, 'M', u'ꝥ'),
+    (0xA765, 'V'),
+    (0xA766, 'M', u'ꝧ'),
+    (0xA767, 'V'),
+    (0xA768, 'M', u'ꝩ'),
+    (0xA769, 'V'),
+    (0xA76A, 'M', u'ꝫ'),
+    (0xA76B, 'V'),
+    (0xA76C, 'M', u'ꝭ'),
+    (0xA76D, 'V'),
+    (0xA76E, 'M', u'ꝯ'),
+    (0xA76F, 'V'),
+    (0xA770, 'M', u'ꝯ'),
+    (0xA771, 'V'),
+    (0xA779, 'M', u'ꝺ'),
+    (0xA77A, 'V'),
+    (0xA77B, 'M', u'ꝼ'),
+    (0xA77C, 'V'),
+    (0xA77D, 'M', u'ᵹ'),
+    (0xA77E, 'M', u'ꝿ'),
+    (0xA77F, 'V'),
+    (0xA780, 'M', u'ꞁ'),
+    (0xA781, 'V'),
+    (0xA782, 'M', u'ꞃ'),
+    (0xA783, 'V'),
+    (0xA784, 'M', u'ꞅ'),
+    (0xA785, 'V'),
+    (0xA786, 'M', u'ꞇ'),
+    (0xA787, 'V'),
+    (0xA78B, 'M', u'ꞌ'),
+    (0xA78C, 'V'),
+    (0xA78D, 'M', u'ɥ'),
+    (0xA78E, 'V'),
+    (0xA790, 'M', u'ꞑ'),
+    (0xA791, 'V'),
+    ]
+
+def _seg_37():
+    return [
+    (0xA792, 'M', u'ꞓ'),
+    (0xA793, 'V'),
+    (0xA796, 'M', u'ꞗ'),
+    (0xA797, 'V'),
+    (0xA798, 'M', u'ꞙ'),
+    (0xA799, 'V'),
+    (0xA79A, 'M', u'ꞛ'),
+    (0xA79B, 'V'),
+    (0xA79C, 'M', u'ꞝ'),
+    (0xA79D, 'V'),
+    (0xA79E, 'M', u'ꞟ'),
+    (0xA79F, 'V'),
+    (0xA7A0, 'M', u'ꞡ'),
+    (0xA7A1, 'V'),
+    (0xA7A2, 'M', u'ꞣ'),
+    (0xA7A3, 'V'),
+    (0xA7A4, 'M', u'ꞥ'),
+    (0xA7A5, 'V'),
+    (0xA7A6, 'M', u'ꞧ'),
+    (0xA7A7, 'V'),
+    (0xA7A8, 'M', u'ꞩ'),
+    (0xA7A9, 'V'),
+    (0xA7AA, 'M', u'ɦ'),
+    (0xA7AB, 'M', u'ɜ'),
+    (0xA7AC, 'M', u'ɡ'),
+    (0xA7AD, 'M', u'ɬ'),
+    (0xA7AE, 'M', u'ɪ'),
+    (0xA7AF, 'V'),
+    (0xA7B0, 'M', u'ʞ'),
+    (0xA7B1, 'M', u'ʇ'),
+    (0xA7B2, 'M', u'ʝ'),
+    (0xA7B3, 'M', u'ꭓ'),
+    (0xA7B4, 'M', u'ꞵ'),
+    (0xA7B5, 'V'),
+    (0xA7B6, 'M', u'ꞷ'),
+    (0xA7B7, 'V'),
+    (0xA7B8, 'X'),
+    (0xA7B9, 'V'),
+    (0xA7BA, 'X'),
+    (0xA7F7, 'V'),
+    (0xA7F8, 'M', u'ħ'),
+    (0xA7F9, 'M', u'œ'),
+    (0xA7FA, 'V'),
+    (0xA82C, 'X'),
+    (0xA830, 'V'),
+    (0xA83A, 'X'),
+    (0xA840, 'V'),
+    (0xA878, 'X'),
+    (0xA880, 'V'),
+    (0xA8C6, 'X'),
+    (0xA8CE, 'V'),
+    (0xA8DA, 'X'),
+    (0xA8E0, 'V'),
+    (0xA954, 'X'),
+    (0xA95F, 'V'),
+    (0xA97D, 'X'),
+    (0xA980, 'V'),
+    (0xA9CE, 'X'),
+    (0xA9CF, 'V'),
+    (0xA9DA, 'X'),
+    (0xA9DE, 'V'),
+    (0xA9FF, 'X'),
+    (0xAA00, 'V'),
+    (0xAA37, 'X'),
+    (0xAA40, 'V'),
+    (0xAA4E, 'X'),
+    (0xAA50, 'V'),
+    (0xAA5A, 'X'),
+    (0xAA5C, 'V'),
+    (0xAAC3, 'X'),
+    (0xAADB, 'V'),
+    (0xAAF7, 'X'),
+    (0xAB01, 'V'),
+    (0xAB07, 'X'),
+    (0xAB09, 'V'),
+    (0xAB0F, 'X'),
+    (0xAB11, 'V'),
+    (0xAB17, 'X'),
+    (0xAB20, 'V'),
+    (0xAB27, 'X'),
+    (0xAB28, 'V'),
+    (0xAB2F, 'X'),
+    (0xAB30, 'V'),
+    (0xAB5C, 'M', u'ꜧ'),
+    (0xAB5D, 'M', u'ꬷ'),
+    (0xAB5E, 'M', u'ɫ'),
+    (0xAB5F, 'M', u'ꭒ'),
+    (0xAB60, 'V'),
+    (0xAB66, 'X'),
+    (0xAB70, 'M', u'Ꭰ'),
+    (0xAB71, 'M', u'Ꭱ'),
+    (0xAB72, 'M', u'Ꭲ'),
+    (0xAB73, 'M', u'Ꭳ'),
+    (0xAB74, 'M', u'Ꭴ'),
+    (0xAB75, 'M', u'Ꭵ'),
+    (0xAB76, 'M', u'Ꭶ'),
+    (0xAB77, 'M', u'Ꭷ'),
+    (0xAB78, 'M', u'Ꭸ'),
+    (0xAB79, 'M', u'Ꭹ'),
+    (0xAB7A, 'M', u'Ꭺ'),
+    ]
+
+def _seg_38():
+    return [
+    (0xAB7B, 'M', u'Ꭻ'),
+    (0xAB7C, 'M', u'Ꭼ'),
+    (0xAB7D, 'M', u'Ꭽ'),
+    (0xAB7E, 'M', u'Ꭾ'),
+    (0xAB7F, 'M', u'Ꭿ'),
+    (0xAB80, 'M', u'Ꮀ'),
+    (0xAB81, 'M', u'Ꮁ'),
+    (0xAB82, 'M', u'Ꮂ'),
+    (0xAB83, 'M', u'Ꮃ'),
+    (0xAB84, 'M', u'Ꮄ'),
+    (0xAB85, 'M', u'Ꮅ'),
+    (0xAB86, 'M', u'Ꮆ'),
+    (0xAB87, 'M', u'Ꮇ'),
+    (0xAB88, 'M', u'Ꮈ'),
+    (0xAB89, 'M', u'Ꮉ'),
+    (0xAB8A, 'M', u'Ꮊ'),
+    (0xAB8B, 'M', u'Ꮋ'),
+    (0xAB8C, 'M', u'Ꮌ'),
+    (0xAB8D, 'M', u'Ꮍ'),
+    (0xAB8E, 'M', u'Ꮎ'),
+    (0xAB8F, 'M', u'Ꮏ'),
+    (0xAB90, 'M', u'Ꮐ'),
+    (0xAB91, 'M', u'Ꮑ'),
+    (0xAB92, 'M', u'Ꮒ'),
+    (0xAB93, 'M', u'Ꮓ'),
+    (0xAB94, 'M', u'Ꮔ'),
+    (0xAB95, 'M', u'Ꮕ'),
+    (0xAB96, 'M', u'Ꮖ'),
+    (0xAB97, 'M', u'Ꮗ'),
+    (0xAB98, 'M', u'Ꮘ'),
+    (0xAB99, 'M', u'Ꮙ'),
+    (0xAB9A, 'M', u'Ꮚ'),
+    (0xAB9B, 'M', u'Ꮛ'),
+    (0xAB9C, 'M', u'Ꮜ'),
+    (0xAB9D, 'M', u'Ꮝ'),
+    (0xAB9E, 'M', u'Ꮞ'),
+    (0xAB9F, 'M', u'Ꮟ'),
+    (0xABA0, 'M', u'Ꮠ'),
+    (0xABA1, 'M', u'Ꮡ'),
+    (0xABA2, 'M', u'Ꮢ'),
+    (0xABA3, 'M', u'Ꮣ'),
+    (0xABA4, 'M', u'Ꮤ'),
+    (0xABA5, 'M', u'Ꮥ'),
+    (0xABA6, 'M', u'Ꮦ'),
+    (0xABA7, 'M', u'Ꮧ'),
+    (0xABA8, 'M', u'Ꮨ'),
+    (0xABA9, 'M', u'Ꮩ'),
+    (0xABAA, 'M', u'Ꮪ'),
+    (0xABAB, 'M', u'Ꮫ'),
+    (0xABAC, 'M', u'Ꮬ'),
+    (0xABAD, 'M', u'Ꮭ'),
+    (0xABAE, 'M', u'Ꮮ'),
+    (0xABAF, 'M', u'Ꮯ'),
+    (0xABB0, 'M', u'Ꮰ'),
+    (0xABB1, 'M', u'Ꮱ'),
+    (0xABB2, 'M', u'Ꮲ'),
+    (0xABB3, 'M', u'Ꮳ'),
+    (0xABB4, 'M', u'Ꮴ'),
+    (0xABB5, 'M', u'Ꮵ'),
+    (0xABB6, 'M', u'Ꮶ'),
+    (0xABB7, 'M', u'Ꮷ'),
+    (0xABB8, 'M', u'Ꮸ'),
+    (0xABB9, 'M', u'Ꮹ'),
+    (0xABBA, 'M', u'Ꮺ'),
+    (0xABBB, 'M', u'Ꮻ'),
+    (0xABBC, 'M', u'Ꮼ'),
+    (0xABBD, 'M', u'Ꮽ'),
+    (0xABBE, 'M', u'Ꮾ'),
+    (0xABBF, 'M', u'Ꮿ'),
+    (0xABC0, 'V'),
+    (0xABEE, 'X'),
+    (0xABF0, 'V'),
+    (0xABFA, 'X'),
+    (0xAC00, 'V'),
+    (0xD7A4, 'X'),
+    (0xD7B0, 'V'),
+    (0xD7C7, 'X'),
+    (0xD7CB, 'V'),
+    (0xD7FC, 'X'),
+    (0xF900, 'M', u'豈'),
+    (0xF901, 'M', u'更'),
+    (0xF902, 'M', u'車'),
+    (0xF903, 'M', u'賈'),
+    (0xF904, 'M', u'滑'),
+    (0xF905, 'M', u'串'),
+    (0xF906, 'M', u'句'),
+    (0xF907, 'M', u'龜'),
+    (0xF909, 'M', u'契'),
+    (0xF90A, 'M', u'金'),
+    (0xF90B, 'M', u'喇'),
+    (0xF90C, 'M', u'奈'),
+    (0xF90D, 'M', u'懶'),
+    (0xF90E, 'M', u'癩'),
+    (0xF90F, 'M', u'羅'),
+    (0xF910, 'M', u'蘿'),
+    (0xF911, 'M', u'螺'),
+    (0xF912, 'M', u'裸'),
+    (0xF913, 'M', u'邏'),
+    (0xF914, 'M', u'樂'),
+    (0xF915, 'M', u'洛'),
+    ]
+
+def _seg_39():
+    return [
+    (0xF916, 'M', u'烙'),
+    (0xF917, 'M', u'珞'),
+    (0xF918, 'M', u'落'),
+    (0xF919, 'M', u'酪'),
+    (0xF91A, 'M', u'駱'),
+    (0xF91B, 'M', u'亂'),
+    (0xF91C, 'M', u'卵'),
+    (0xF91D, 'M', u'欄'),
+    (0xF91E, 'M', u'爛'),
+    (0xF91F, 'M', u'蘭'),
+    (0xF920, 'M', u'鸞'),
+    (0xF921, 'M', u'嵐'),
+    (0xF922, 'M', u'濫'),
+    (0xF923, 'M', u'藍'),
+    (0xF924, 'M', u'襤'),
+    (0xF925, 'M', u'拉'),
+    (0xF926, 'M', u'臘'),
+    (0xF927, 'M', u'蠟'),
+    (0xF928, 'M', u'廊'),
+    (0xF929, 'M', u'朗'),
+    (0xF92A, 'M', u'浪'),
+    (0xF92B, 'M', u'狼'),
+    (0xF92C, 'M', u'郎'),
+    (0xF92D, 'M', u'來'),
+    (0xF92E, 'M', u'冷'),
+    (0xF92F, 'M', u'勞'),
+    (0xF930, 'M', u'擄'),
+    (0xF931, 'M', u'櫓'),
+    (0xF932, 'M', u'爐'),
+    (0xF933, 'M', u'盧'),
+    (0xF934, 'M', u'老'),
+    (0xF935, 'M', u'蘆'),
+    (0xF936, 'M', u'虜'),
+    (0xF937, 'M', u'路'),
+    (0xF938, 'M', u'露'),
+    (0xF939, 'M', u'魯'),
+    (0xF93A, 'M', u'鷺'),
+    (0xF93B, 'M', u'碌'),
+    (0xF93C, 'M', u'祿'),
+    (0xF93D, 'M', u'綠'),
+    (0xF93E, 'M', u'菉'),
+    (0xF93F, 'M', u'錄'),
+    (0xF940, 'M', u'鹿'),
+    (0xF941, 'M', u'論'),
+    (0xF942, 'M', u'壟'),
+    (0xF943, 'M', u'弄'),
+    (0xF944, 'M', u'籠'),
+    (0xF945, 'M', u'聾'),
+    (0xF946, 'M', u'牢'),
+    (0xF947, 'M', u'磊'),
+    (0xF948, 'M', u'賂'),
+    (0xF949, 'M', u'雷'),
+    (0xF94A, 'M', u'壘'),
+    (0xF94B, 'M', u'屢'),
+    (0xF94C, 'M', u'樓'),
+    (0xF94D, 'M', u'淚'),
+    (0xF94E, 'M', u'漏'),
+    (0xF94F, 'M', u'累'),
+    (0xF950, 'M', u'縷'),
+    (0xF951, 'M', u'陋'),
+    (0xF952, 'M', u'勒'),
+    (0xF953, 'M', u'肋'),
+    (0xF954, 'M', u'凜'),
+    (0xF955, 'M', u'凌'),
+    (0xF956, 'M', u'稜'),
+    (0xF957, 'M', u'綾'),
+    (0xF958, 'M', u'菱'),
+    (0xF959, 'M', u'陵'),
+    (0xF95A, 'M', u'讀'),
+    (0xF95B, 'M', u'拏'),
+    (0xF95C, 'M', u'樂'),
+    (0xF95D, 'M', u'諾'),
+    (0xF95E, 'M', u'丹'),
+    (0xF95F, 'M', u'寧'),
+    (0xF960, 'M', u'怒'),
+    (0xF961, 'M', u'率'),
+    (0xF962, 'M', u'異'),
+    (0xF963, 'M', u'北'),
+    (0xF964, 'M', u'磻'),
+    (0xF965, 'M', u'便'),
+    (0xF966, 'M', u'復'),
+    (0xF967, 'M', u'不'),
+    (0xF968, 'M', u'泌'),
+    (0xF969, 'M', u'數'),
+    (0xF96A, 'M', u'索'),
+    (0xF96B, 'M', u'參'),
+    (0xF96C, 'M', u'塞'),
+    (0xF96D, 'M', u'省'),
+    (0xF96E, 'M', u'葉'),
+    (0xF96F, 'M', u'說'),
+    (0xF970, 'M', u'殺'),
+    (0xF971, 'M', u'辰'),
+    (0xF972, 'M', u'沈'),
+    (0xF973, 'M', u'拾'),
+    (0xF974, 'M', u'若'),
+    (0xF975, 'M', u'掠'),
+    (0xF976, 'M', u'略'),
+    (0xF977, 'M', u'亮'),
+    (0xF978, 'M', u'兩'),
+    (0xF979, 'M', u'凉'),
+    ]
+
+def _seg_40():
+    return [
+    (0xF97A, 'M', u'梁'),
+    (0xF97B, 'M', u'糧'),
+    (0xF97C, 'M', u'良'),
+    (0xF97D, 'M', u'諒'),
+    (0xF97E, 'M', u'量'),
+    (0xF97F, 'M', u'勵'),
+    (0xF980, 'M', u'呂'),
+    (0xF981, 'M', u'女'),
+    (0xF982, 'M', u'廬'),
+    (0xF983, 'M', u'旅'),
+    (0xF984, 'M', u'濾'),
+    (0xF985, 'M', u'礪'),
+    (0xF986, 'M', u'閭'),
+    (0xF987, 'M', u'驪'),
+    (0xF988, 'M', u'麗'),
+    (0xF989, 'M', u'黎'),
+    (0xF98A, 'M', u'力'),
+    (0xF98B, 'M', u'曆'),
+    (0xF98C, 'M', u'歷'),
+    (0xF98D, 'M', u'轢'),
+    (0xF98E, 'M', u'年'),
+    (0xF98F, 'M', u'憐'),
+    (0xF990, 'M', u'戀'),
+    (0xF991, 'M', u'撚'),
+    (0xF992, 'M', u'漣'),
+    (0xF993, 'M', u'煉'),
+    (0xF994, 'M', u'璉'),
+    (0xF995, 'M', u'秊'),
+    (0xF996, 'M', u'練'),
+    (0xF997, 'M', u'聯'),
+    (0xF998, 'M', u'輦'),
+    (0xF999, 'M', u'蓮'),
+    (0xF99A, 'M', u'連'),
+    (0xF99B, 'M', u'鍊'),
+    (0xF99C, 'M', u'列'),
+    (0xF99D, 'M', u'劣'),
+    (0xF99E, 'M', u'咽'),
+    (0xF99F, 'M', u'烈'),
+    (0xF9A0, 'M', u'裂'),
+    (0xF9A1, 'M', u'說'),
+    (0xF9A2, 'M', u'廉'),
+    (0xF9A3, 'M', u'念'),
+    (0xF9A4, 'M', u'捻'),
+    (0xF9A5, 'M', u'殮'),
+    (0xF9A6, 'M', u'簾'),
+    (0xF9A7, 'M', u'獵'),
+    (0xF9A8, 'M', u'令'),
+    (0xF9A9, 'M', u'囹'),
+    (0xF9AA, 'M', u'寧'),
+    (0xF9AB, 'M', u'嶺'),
+    (0xF9AC, 'M', u'怜'),
+    (0xF9AD, 'M', u'玲'),
+    (0xF9AE, 'M', u'瑩'),
+    (0xF9AF, 'M', u'羚'),
+    (0xF9B0, 'M', u'聆'),
+    (0xF9B1, 'M', u'鈴'),
+    (0xF9B2, 'M', u'零'),
+    (0xF9B3, 'M', u'靈'),
+    (0xF9B4, 'M', u'領'),
+    (0xF9B5, 'M', u'例'),
+    (0xF9B6, 'M', u'禮'),
+    (0xF9B7, 'M', u'醴'),
+    (0xF9B8, 'M', u'隸'),
+    (0xF9B9, 'M', u'惡'),
+    (0xF9BA, 'M', u'了'),
+    (0xF9BB, 'M', u'僚'),
+    (0xF9BC, 'M', u'寮'),
+    (0xF9BD, 'M', u'尿'),
+    (0xF9BE, 'M', u'料'),
+    (0xF9BF, 'M', u'樂'),
+    (0xF9C0, 'M', u'燎'),
+    (0xF9C1, 'M', u'療'),
+    (0xF9C2, 'M', u'蓼'),
+    (0xF9C3, 'M', u'遼'),
+    (0xF9C4, 'M', u'龍'),
+    (0xF9C5, 'M', u'暈'),
+    (0xF9C6, 'M', u'阮'),
+    (0xF9C7, 'M', u'劉'),
+    (0xF9C8, 'M', u'杻'),
+    (0xF9C9, 'M', u'柳'),
+    (0xF9CA, 'M', u'流'),
+    (0xF9CB, 'M', u'溜'),
+    (0xF9CC, 'M', u'琉'),
+    (0xF9CD, 'M', u'留'),
+    (0xF9CE, 'M', u'硫'),
+    (0xF9CF, 'M', u'紐'),
+    (0xF9D0, 'M', u'類'),
+    (0xF9D1, 'M', u'六'),
+    (0xF9D2, 'M', u'戮'),
+    (0xF9D3, 'M', u'陸'),
+    (0xF9D4, 'M', u'倫'),
+    (0xF9D5, 'M', u'崙'),
+    (0xF9D6, 'M', u'淪'),
+    (0xF9D7, 'M', u'輪'),
+    (0xF9D8, 'M', u'律'),
+    (0xF9D9, 'M', u'慄'),
+    (0xF9DA, 'M', u'栗'),
+    (0xF9DB, 'M', u'率'),
+    (0xF9DC, 'M', u'隆'),
+    (0xF9DD, 'M', u'利'),
+    ]
+
+def _seg_41():
+    return [
+    (0xF9DE, 'M', u'吏'),
+    (0xF9DF, 'M', u'履'),
+    (0xF9E0, 'M', u'易'),
+    (0xF9E1, 'M', u'李'),
+    (0xF9E2, 'M', u'梨'),
+    (0xF9E3, 'M', u'泥'),
+    (0xF9E4, 'M', u'理'),
+    (0xF9E5, 'M', u'痢'),
+    (0xF9E6, 'M', u'罹'),
+    (0xF9E7, 'M', u'裏'),
+    (0xF9E8, 'M', u'裡'),
+    (0xF9E9, 'M', u'里'),
+    (0xF9EA, 'M', u'離'),
+    (0xF9EB, 'M', u'匿'),
+    (0xF9EC, 'M', u'溺'),
+    (0xF9ED, 'M', u'吝'),
+    (0xF9EE, 'M', u'燐'),
+    (0xF9EF, 'M', u'璘'),
+    (0xF9F0, 'M', u'藺'),
+    (0xF9F1, 'M', u'隣'),
+    (0xF9F2, 'M', u'鱗'),
+    (0xF9F3, 'M', u'麟'),
+    (0xF9F4, 'M', u'林'),
+    (0xF9F5, 'M', u'淋'),
+    (0xF9F6, 'M', u'臨'),
+    (0xF9F7, 'M', u'立'),
+    (0xF9F8, 'M', u'笠'),
+    (0xF9F9, 'M', u'粒'),
+    (0xF9FA, 'M', u'狀'),
+    (0xF9FB, 'M', u'炙'),
+    (0xF9FC, 'M', u'識'),
+    (0xF9FD, 'M', u'什'),
+    (0xF9FE, 'M', u'茶'),
+    (0xF9FF, 'M', u'刺'),
+    (0xFA00, 'M', u'切'),
+    (0xFA01, 'M', u'度'),
+    (0xFA02, 'M', u'拓'),
+    (0xFA03, 'M', u'糖'),
+    (0xFA04, 'M', u'宅'),
+    (0xFA05, 'M', u'洞'),
+    (0xFA06, 'M', u'暴'),
+    (0xFA07, 'M', u'輻'),
+    (0xFA08, 'M', u'行'),
+    (0xFA09, 'M', u'降'),
+    (0xFA0A, 'M', u'見'),
+    (0xFA0B, 'M', u'廓'),
+    (0xFA0C, 'M', u'兀'),
+    (0xFA0D, 'M', u'嗀'),
+    (0xFA0E, 'V'),
+    (0xFA10, 'M', u'塚'),
+    (0xFA11, 'V'),
+    (0xFA12, 'M', u'晴'),
+    (0xFA13, 'V'),
+    (0xFA15, 'M', u'凞'),
+    (0xFA16, 'M', u'猪'),
+    (0xFA17, 'M', u'益'),
+    (0xFA18, 'M', u'礼'),
+    (0xFA19, 'M', u'神'),
+    (0xFA1A, 'M', u'祥'),
+    (0xFA1B, 'M', u'福'),
+    (0xFA1C, 'M', u'靖'),
+    (0xFA1D, 'M', u'精'),
+    (0xFA1E, 'M', u'羽'),
+    (0xFA1F, 'V'),
+    (0xFA20, 'M', u'蘒'),
+    (0xFA21, 'V'),
+    (0xFA22, 'M', u'諸'),
+    (0xFA23, 'V'),
+    (0xFA25, 'M', u'逸'),
+    (0xFA26, 'M', u'都'),
+    (0xFA27, 'V'),
+    (0xFA2A, 'M', u'飯'),
+    (0xFA2B, 'M', u'飼'),
+    (0xFA2C, 'M', u'館'),
+    (0xFA2D, 'M', u'鶴'),
+    (0xFA2E, 'M', u'郞'),
+    (0xFA2F, 'M', u'隷'),
+    (0xFA30, 'M', u'侮'),
+    (0xFA31, 'M', u'僧'),
+    (0xFA32, 'M', u'免'),
+    (0xFA33, 'M', u'勉'),
+    (0xFA34, 'M', u'勤'),
+    (0xFA35, 'M', u'卑'),
+    (0xFA36, 'M', u'喝'),
+    (0xFA37, 'M', u'嘆'),
+    (0xFA38, 'M', u'器'),
+    (0xFA39, 'M', u'塀'),
+    (0xFA3A, 'M', u'墨'),
+    (0xFA3B, 'M', u'層'),
+    (0xFA3C, 'M', u'屮'),
+    (0xFA3D, 'M', u'悔'),
+    (0xFA3E, 'M', u'慨'),
+    (0xFA3F, 'M', u'憎'),
+    (0xFA40, 'M', u'懲'),
+    (0xFA41, 'M', u'敏'),
+    (0xFA42, 'M', u'既'),
+    (0xFA43, 'M', u'暑'),
+    (0xFA44, 'M', u'梅'),
+    (0xFA45, 'M', u'海'),
+    (0xFA46, 'M', u'渚'),
+    ]
+
+def _seg_42():
+    return [
+    (0xFA47, 'M', u'漢'),
+    (0xFA48, 'M', u'煮'),
+    (0xFA49, 'M', u'爫'),
+    (0xFA4A, 'M', u'琢'),
+    (0xFA4B, 'M', u'碑'),
+    (0xFA4C, 'M', u'社'),
+    (0xFA4D, 'M', u'祉'),
+    (0xFA4E, 'M', u'祈'),
+    (0xFA4F, 'M', u'祐'),
+    (0xFA50, 'M', u'祖'),
+    (0xFA51, 'M', u'祝'),
+    (0xFA52, 'M', u'禍'),
+    (0xFA53, 'M', u'禎'),
+    (0xFA54, 'M', u'穀'),
+    (0xFA55, 'M', u'突'),
+    (0xFA56, 'M', u'節'),
+    (0xFA57, 'M', u'練'),
+    (0xFA58, 'M', u'縉'),
+    (0xFA59, 'M', u'繁'),
+    (0xFA5A, 'M', u'署'),
+    (0xFA5B, 'M', u'者'),
+    (0xFA5C, 'M', u'臭'),
+    (0xFA5D, 'M', u'艹'),
+    (0xFA5F, 'M', u'著'),
+    (0xFA60, 'M', u'褐'),
+    (0xFA61, 'M', u'視'),
+    (0xFA62, 'M', u'謁'),
+    (0xFA63, 'M', u'謹'),
+    (0xFA64, 'M', u'賓'),
+    (0xFA65, 'M', u'贈'),
+    (0xFA66, 'M', u'辶'),
+    (0xFA67, 'M', u'逸'),
+    (0xFA68, 'M', u'難'),
+    (0xFA69, 'M', u'響'),
+    (0xFA6A, 'M', u'頻'),
+    (0xFA6B, 'M', u'恵'),
+    (0xFA6C, 'M', u'𤋮'),
+    (0xFA6D, 'M', u'舘'),
+    (0xFA6E, 'X'),
+    (0xFA70, 'M', u'並'),
+    (0xFA71, 'M', u'况'),
+    (0xFA72, 'M', u'全'),
+    (0xFA73, 'M', u'侀'),
+    (0xFA74, 'M', u'充'),
+    (0xFA75, 'M', u'冀'),
+    (0xFA76, 'M', u'勇'),
+    (0xFA77, 'M', u'勺'),
+    (0xFA78, 'M', u'喝'),
+    (0xFA79, 'M', u'啕'),
+    (0xFA7A, 'M', u'喙'),
+    (0xFA7B, 'M', u'嗢'),
+    (0xFA7C, 'M', u'塚'),
+    (0xFA7D, 'M', u'墳'),
+    (0xFA7E, 'M', u'奄'),
+    (0xFA7F, 'M', u'奔'),
+    (0xFA80, 'M', u'婢'),
+    (0xFA81, 'M', u'嬨'),
+    (0xFA82, 'M', u'廒'),
+    (0xFA83, 'M', u'廙'),
+    (0xFA84, 'M', u'彩'),
+    (0xFA85, 'M', u'彭'),
+    (0xFA86, 'M', u'惘'),
+    (0xFA87, 'M', u'慎'),
+    (0xFA88, 'M', u'愈'),
+    (0xFA89, 'M', u'憎'),
+    (0xFA8A, 'M', u'慠'),
+    (0xFA8B, 'M', u'懲'),
+    (0xFA8C, 'M', u'戴'),
+    (0xFA8D, 'M', u'揄'),
+    (0xFA8E, 'M', u'搜'),
+    (0xFA8F, 'M', u'摒'),
+    (0xFA90, 'M', u'敖'),
+    (0xFA91, 'M', u'晴'),
+    (0xFA92, 'M', u'朗'),
+    (0xFA93, 'M', u'望'),
+    (0xFA94, 'M', u'杖'),
+    (0xFA95, 'M', u'歹'),
+    (0xFA96, 'M', u'殺'),
+    (0xFA97, 'M', u'流'),
+    (0xFA98, 'M', u'滛'),
+    (0xFA99, 'M', u'滋'),
+    (0xFA9A, 'M', u'漢'),
+    (0xFA9B, 'M', u'瀞'),
+    (0xFA9C, 'M', u'煮'),
+    (0xFA9D, 'M', u'瞧'),
+    (0xFA9E, 'M', u'爵'),
+    (0xFA9F, 'M', u'犯'),
+    (0xFAA0, 'M', u'猪'),
+    (0xFAA1, 'M', u'瑱'),
+    (0xFAA2, 'M', u'甆'),
+    (0xFAA3, 'M', u'画'),
+    (0xFAA4, 'M', u'瘝'),
+    (0xFAA5, 'M', u'瘟'),
+    (0xFAA6, 'M', u'益'),
+    (0xFAA7, 'M', u'盛'),
+    (0xFAA8, 'M', u'直'),
+    (0xFAA9, 'M', u'睊'),
+    (0xFAAA, 'M', u'着'),
+    (0xFAAB, 'M', u'磌'),
+    (0xFAAC, 'M', u'窱'),
+    ]
+
+def _seg_43():
+    return [
+    (0xFAAD, 'M', u'節'),
+    (0xFAAE, 'M', u'类'),
+    (0xFAAF, 'M', u'絛'),
+    (0xFAB0, 'M', u'練'),
+    (0xFAB1, 'M', u'缾'),
+    (0xFAB2, 'M', u'者'),
+    (0xFAB3, 'M', u'荒'),
+    (0xFAB4, 'M', u'華'),
+    (0xFAB5, 'M', u'蝹'),
+    (0xFAB6, 'M', u'襁'),
+    (0xFAB7, 'M', u'覆'),
+    (0xFAB8, 'M', u'視'),
+    (0xFAB9, 'M', u'調'),
+    (0xFABA, 'M', u'諸'),
+    (0xFABB, 'M', u'請'),
+    (0xFABC, 'M', u'謁'),
+    (0xFABD, 'M', u'諾'),
+    (0xFABE, 'M', u'諭'),
+    (0xFABF, 'M', u'謹'),
+    (0xFAC0, 'M', u'變'),
+    (0xFAC1, 'M', u'贈'),
+    (0xFAC2, 'M', u'輸'),
+    (0xFAC3, 'M', u'遲'),
+    (0xFAC4, 'M', u'醙'),
+    (0xFAC5, 'M', u'鉶'),
+    (0xFAC6, 'M', u'陼'),
+    (0xFAC7, 'M', u'難'),
+    (0xFAC8, 'M', u'靖'),
+    (0xFAC9, 'M', u'韛'),
+    (0xFACA, 'M', u'響'),
+    (0xFACB, 'M', u'頋'),
+    (0xFACC, 'M', u'頻'),
+    (0xFACD, 'M', u'鬒'),
+    (0xFACE, 'M', u'龜'),
+    (0xFACF, 'M', u'𢡊'),
+    (0xFAD0, 'M', u'𢡄'),
+    (0xFAD1, 'M', u'𣏕'),
+    (0xFAD2, 'M', u'㮝'),
+    (0xFAD3, 'M', u'䀘'),
+    (0xFAD4, 'M', u'䀹'),
+    (0xFAD5, 'M', u'𥉉'),
+    (0xFAD6, 'M', u'𥳐'),
+    (0xFAD7, 'M', u'𧻓'),
+    (0xFAD8, 'M', u'齃'),
+    (0xFAD9, 'M', u'龎'),
+    (0xFADA, 'X'),
+    (0xFB00, 'M', u'ff'),
+    (0xFB01, 'M', u'fi'),
+    (0xFB02, 'M', u'fl'),
+    (0xFB03, 'M', u'ffi'),
+    (0xFB04, 'M', u'ffl'),
+    (0xFB05, 'M', u'st'),
+    (0xFB07, 'X'),
+    (0xFB13, 'M', u'մն'),
+    (0xFB14, 'M', u'մե'),
+    (0xFB15, 'M', u'մի'),
+    (0xFB16, 'M', u'վն'),
+    (0xFB17, 'M', u'մխ'),
+    (0xFB18, 'X'),
+    (0xFB1D, 'M', u'יִ'),
+    (0xFB1E, 'V'),
+    (0xFB1F, 'M', u'ײַ'),
+    (0xFB20, 'M', u'ע'),
+    (0xFB21, 'M', u'א'),
+    (0xFB22, 'M', u'ד'),
+    (0xFB23, 'M', u'ה'),
+    (0xFB24, 'M', u'כ'),
+    (0xFB25, 'M', u'ל'),
+    (0xFB26, 'M', u'ם'),
+    (0xFB27, 'M', u'ר'),
+    (0xFB28, 'M', u'ת'),
+    (0xFB29, '3', u'+'),
+    (0xFB2A, 'M', u'שׁ'),
+    (0xFB2B, 'M', u'שׂ'),
+    (0xFB2C, 'M', u'שּׁ'),
+    (0xFB2D, 'M', u'שּׂ'),
+    (0xFB2E, 'M', u'אַ'),
+    (0xFB2F, 'M', u'אָ'),
+    (0xFB30, 'M', u'אּ'),
+    (0xFB31, 'M', u'בּ'),
+    (0xFB32, 'M', u'גּ'),
+    (0xFB33, 'M', u'דּ'),
+    (0xFB34, 'M', u'הּ'),
+    (0xFB35, 'M', u'וּ'),
+    (0xFB36, 'M', u'זּ'),
+    (0xFB37, 'X'),
+    (0xFB38, 'M', u'טּ'),
+    (0xFB39, 'M', u'יּ'),
+    (0xFB3A, 'M', u'ךּ'),
+    (0xFB3B, 'M', u'כּ'),
+    (0xFB3C, 'M', u'לּ'),
+    (0xFB3D, 'X'),
+    (0xFB3E, 'M', u'מּ'),
+    (0xFB3F, 'X'),
+    (0xFB40, 'M', u'נּ'),
+    (0xFB41, 'M', u'סּ'),
+    (0xFB42, 'X'),
+    (0xFB43, 'M', u'ףּ'),
+    (0xFB44, 'M', u'פּ'),
+    (0xFB45, 'X'),
+    ]
+
+def _seg_44():
+    return [
+    (0xFB46, 'M', u'צּ'),
+    (0xFB47, 'M', u'קּ'),
+    (0xFB48, 'M', u'רּ'),
+    (0xFB49, 'M', u'שּ'),
+    (0xFB4A, 'M', u'תּ'),
+    (0xFB4B, 'M', u'וֹ'),
+    (0xFB4C, 'M', u'בֿ'),
+    (0xFB4D, 'M', u'כֿ'),
+    (0xFB4E, 'M', u'פֿ'),
+    (0xFB4F, 'M', u'אל'),
+    (0xFB50, 'M', u'ٱ'),
+    (0xFB52, 'M', u'ٻ'),
+    (0xFB56, 'M', u'پ'),
+    (0xFB5A, 'M', u'ڀ'),
+    (0xFB5E, 'M', u'ٺ'),
+    (0xFB62, 'M', u'ٿ'),
+    (0xFB66, 'M', u'ٹ'),
+    (0xFB6A, 'M', u'ڤ'),
+    (0xFB6E, 'M', u'ڦ'),
+    (0xFB72, 'M', u'ڄ'),
+    (0xFB76, 'M', u'ڃ'),
+    (0xFB7A, 'M', u'چ'),
+    (0xFB7E, 'M', u'ڇ'),
+    (0xFB82, 'M', u'ڍ'),
+    (0xFB84, 'M', u'ڌ'),
+    (0xFB86, 'M', u'ڎ'),
+    (0xFB88, 'M', u'ڈ'),
+    (0xFB8A, 'M', u'ژ'),
+    (0xFB8C, 'M', u'ڑ'),
+    (0xFB8E, 'M', u'ک'),
+    (0xFB92, 'M', u'گ'),
+    (0xFB96, 'M', u'ڳ'),
+    (0xFB9A, 'M', u'ڱ'),
+    (0xFB9E, 'M', u'ں'),
+    (0xFBA0, 'M', u'ڻ'),
+    (0xFBA4, 'M', u'ۀ'),
+    (0xFBA6, 'M', u'ہ'),
+    (0xFBAA, 'M', u'ھ'),
+    (0xFBAE, 'M', u'ے'),
+    (0xFBB0, 'M', u'ۓ'),
+    (0xFBB2, 'V'),
+    (0xFBC2, 'X'),
+    (0xFBD3, 'M', u'ڭ'),
+    (0xFBD7, 'M', u'ۇ'),
+    (0xFBD9, 'M', u'ۆ'),
+    (0xFBDB, 'M', u'ۈ'),
+    (0xFBDD, 'M', u'ۇٴ'),
+    (0xFBDE, 'M', u'ۋ'),
+    (0xFBE0, 'M', u'ۅ'),
+    (0xFBE2, 'M', u'ۉ'),
+    (0xFBE4, 'M', u'ې'),
+    (0xFBE8, 'M', u'ى'),
+    (0xFBEA, 'M', u'ئا'),
+    (0xFBEC, 'M', u'ئە'),
+    (0xFBEE, 'M', u'ئو'),
+    (0xFBF0, 'M', u'ئۇ'),
+    (0xFBF2, 'M', u'ئۆ'),
+    (0xFBF4, 'M', u'ئۈ'),
+    (0xFBF6, 'M', u'ئې'),
+    (0xFBF9, 'M', u'ئى'),
+    (0xFBFC, 'M', u'ی'),
+    (0xFC00, 'M', u'ئج'),
+    (0xFC01, 'M', u'ئح'),
+    (0xFC02, 'M', u'ئم'),
+    (0xFC03, 'M', u'ئى'),
+    (0xFC04, 'M', u'ئي'),
+    (0xFC05, 'M', u'بج'),
+    (0xFC06, 'M', u'بح'),
+    (0xFC07, 'M', u'بخ'),
+    (0xFC08, 'M', u'بم'),
+    (0xFC09, 'M', u'بى'),
+    (0xFC0A, 'M', u'بي'),
+    (0xFC0B, 'M', u'تج'),
+    (0xFC0C, 'M', u'تح'),
+    (0xFC0D, 'M', u'تخ'),
+    (0xFC0E, 'M', u'تم'),
+    (0xFC0F, 'M', u'تى'),
+    (0xFC10, 'M', u'تي'),
+    (0xFC11, 'M', u'ثج'),
+    (0xFC12, 'M', u'ثم'),
+    (0xFC13, 'M', u'ثى'),
+    (0xFC14, 'M', u'ثي'),
+    (0xFC15, 'M', u'جح'),
+    (0xFC16, 'M', u'جم'),
+    (0xFC17, 'M', u'حج'),
+    (0xFC18, 'M', u'حم'),
+    (0xFC19, 'M', u'خج'),
+    (0xFC1A, 'M', u'خح'),
+    (0xFC1B, 'M', u'خم'),
+    (0xFC1C, 'M', u'سج'),
+    (0xFC1D, 'M', u'سح'),
+    (0xFC1E, 'M', u'سخ'),
+    (0xFC1F, 'M', u'سم'),
+    (0xFC20, 'M', u'صح'),
+    (0xFC21, 'M', u'صم'),
+    (0xFC22, 'M', u'ضج'),
+    (0xFC23, 'M', u'ضح'),
+    (0xFC24, 'M', u'ضخ'),
+    (0xFC25, 'M', u'ضم'),
+    (0xFC26, 'M', u'طح'),
+    ]
+
+def _seg_45():
+    return [
+    (0xFC27, 'M', u'طم'),
+    (0xFC28, 'M', u'ظم'),
+    (0xFC29, 'M', u'عج'),
+    (0xFC2A, 'M', u'عم'),
+    (0xFC2B, 'M', u'غج'),
+    (0xFC2C, 'M', u'غم'),
+    (0xFC2D, 'M', u'فج'),
+    (0xFC2E, 'M', u'فح'),
+    (0xFC2F, 'M', u'فخ'),
+    (0xFC30, 'M', u'فم'),
+    (0xFC31, 'M', u'فى'),
+    (0xFC32, 'M', u'في'),
+    (0xFC33, 'M', u'قح'),
+    (0xFC34, 'M', u'قم'),
+    (0xFC35, 'M', u'قى'),
+    (0xFC36, 'M', u'قي'),
+    (0xFC37, 'M', u'كا'),
+    (0xFC38, 'M', u'كج'),
+    (0xFC39, 'M', u'كح'),
+    (0xFC3A, 'M', u'كخ'),
+    (0xFC3B, 'M', u'كل'),
+    (0xFC3C, 'M', u'كم'),
+    (0xFC3D, 'M', u'كى'),
+    (0xFC3E, 'M', u'كي'),
+    (0xFC3F, 'M', u'لج'),
+    (0xFC40, 'M', u'لح'),
+    (0xFC41, 'M', u'لخ'),
+    (0xFC42, 'M', u'لم'),
+    (0xFC43, 'M', u'لى'),
+    (0xFC44, 'M', u'لي'),
+    (0xFC45, 'M', u'مج'),
+    (0xFC46, 'M', u'مح'),
+    (0xFC47, 'M', u'مخ'),
+    (0xFC48, 'M', u'مم'),
+    (0xFC49, 'M', u'مى'),
+    (0xFC4A, 'M', u'مي'),
+    (0xFC4B, 'M', u'نج'),
+    (0xFC4C, 'M', u'نح'),
+    (0xFC4D, 'M', u'نخ'),
+    (0xFC4E, 'M', u'نم'),
+    (0xFC4F, 'M', u'نى'),
+    (0xFC50, 'M', u'ني'),
+    (0xFC51, 'M', u'هج'),
+    (0xFC52, 'M', u'هم'),
+    (0xFC53, 'M', u'هى'),
+    (0xFC54, 'M', u'هي'),
+    (0xFC55, 'M', u'يج'),
+    (0xFC56, 'M', u'يح'),
+    (0xFC57, 'M', u'يخ'),
+    (0xFC58, 'M', u'يم'),
+    (0xFC59, 'M', u'يى'),
+    (0xFC5A, 'M', u'يي'),
+    (0xFC5B, 'M', u'ذٰ'),
+    (0xFC5C, 'M', u'رٰ'),
+    (0xFC5D, 'M', u'ىٰ'),
+    (0xFC5E, '3', u' ٌّ'),
+    (0xFC5F, '3', u' ٍّ'),
+    (0xFC60, '3', u' َّ'),
+    (0xFC61, '3', u' ُّ'),
+    (0xFC62, '3', u' ِّ'),
+    (0xFC63, '3', u' ّٰ'),
+    (0xFC64, 'M', u'ئر'),
+    (0xFC65, 'M', u'ئز'),
+    (0xFC66, 'M', u'ئم'),
+    (0xFC67, 'M', u'ئن'),
+    (0xFC68, 'M', u'ئى'),
+    (0xFC69, 'M', u'ئي'),
+    (0xFC6A, 'M', u'بر'),
+    (0xFC6B, 'M', u'بز'),
+    (0xFC6C, 'M', u'بم'),
+    (0xFC6D, 'M', u'بن'),
+    (0xFC6E, 'M', u'بى'),
+    (0xFC6F, 'M', u'بي'),
+    (0xFC70, 'M', u'تر'),
+    (0xFC71, 'M', u'تز'),
+    (0xFC72, 'M', u'تم'),
+    (0xFC73, 'M', u'تن'),
+    (0xFC74, 'M', u'تى'),
+    (0xFC75, 'M', u'تي'),
+    (0xFC76, 'M', u'ثر'),
+    (0xFC77, 'M', u'ثز'),
+    (0xFC78, 'M', u'ثم'),
+    (0xFC79, 'M', u'ثن'),
+    (0xFC7A, 'M', u'ثى'),
+    (0xFC7B, 'M', u'ثي'),
+    (0xFC7C, 'M', u'فى'),
+    (0xFC7D, 'M', u'في'),
+    (0xFC7E, 'M', u'قى'),
+    (0xFC7F, 'M', u'قي'),
+    (0xFC80, 'M', u'كا'),
+    (0xFC81, 'M', u'كل'),
+    (0xFC82, 'M', u'كم'),
+    (0xFC83, 'M', u'كى'),
+    (0xFC84, 'M', u'كي'),
+    (0xFC85, 'M', u'لم'),
+    (0xFC86, 'M', u'لى'),
+    (0xFC87, 'M', u'لي'),
+    (0xFC88, 'M', u'ما'),
+    (0xFC89, 'M', u'مم'),
+    (0xFC8A, 'M', u'نر'),
+    ]
+
+def _seg_46():
+    return [
+    (0xFC8B, 'M', u'نز'),
+    (0xFC8C, 'M', u'نم'),
+    (0xFC8D, 'M', u'نن'),
+    (0xFC8E, 'M', u'نى'),
+    (0xFC8F, 'M', u'ني'),
+    (0xFC90, 'M', u'ىٰ'),
+    (0xFC91, 'M', u'ير'),
+    (0xFC92, 'M', u'يز'),
+    (0xFC93, 'M', u'يم'),
+    (0xFC94, 'M', u'ين'),
+    (0xFC95, 'M', u'يى'),
+    (0xFC96, 'M', u'يي'),
+    (0xFC97, 'M', u'ئج'),
+    (0xFC98, 'M', u'ئح'),
+    (0xFC99, 'M', u'ئخ'),
+    (0xFC9A, 'M', u'ئم'),
+    (0xFC9B, 'M', u'ئه'),
+    (0xFC9C, 'M', u'بج'),
+    (0xFC9D, 'M', u'بح'),
+    (0xFC9E, 'M', u'بخ'),
+    (0xFC9F, 'M', u'بم'),
+    (0xFCA0, 'M', u'به'),
+    (0xFCA1, 'M', u'تج'),
+    (0xFCA2, 'M', u'تح'),
+    (0xFCA3, 'M', u'تخ'),
+    (0xFCA4, 'M', u'تم'),
+    (0xFCA5, 'M', u'ته'),
+    (0xFCA6, 'M', u'ثم'),
+    (0xFCA7, 'M', u'جح'),
+    (0xFCA8, 'M', u'جم'),
+    (0xFCA9, 'M', u'حج'),
+    (0xFCAA, 'M', u'حم'),
+    (0xFCAB, 'M', u'خج'),
+    (0xFCAC, 'M', u'خم'),
+    (0xFCAD, 'M', u'سج'),
+    (0xFCAE, 'M', u'سح'),
+    (0xFCAF, 'M', u'سخ'),
+    (0xFCB0, 'M', u'سم'),
+    (0xFCB1, 'M', u'صح'),
+    (0xFCB2, 'M', u'صخ'),
+    (0xFCB3, 'M', u'صم'),
+    (0xFCB4, 'M', u'ضج'),
+    (0xFCB5, 'M', u'ضح'),
+    (0xFCB6, 'M', u'ضخ'),
+    (0xFCB7, 'M', u'ضم'),
+    (0xFCB8, 'M', u'طح'),
+    (0xFCB9, 'M', u'ظم'),
+    (0xFCBA, 'M', u'عج'),
+    (0xFCBB, 'M', u'عم'),
+    (0xFCBC, 'M', u'غج'),
+    (0xFCBD, 'M', u'غم'),
+    (0xFCBE, 'M', u'فج'),
+    (0xFCBF, 'M', u'فح'),
+    (0xFCC0, 'M', u'فخ'),
+    (0xFCC1, 'M', u'فم'),
+    (0xFCC2, 'M', u'قح'),
+    (0xFCC3, 'M', u'قم'),
+    (0xFCC4, 'M', u'كج'),
+    (0xFCC5, 'M', u'كح'),
+    (0xFCC6, 'M', u'كخ'),
+    (0xFCC7, 'M', u'كل'),
+    (0xFCC8, 'M', u'كم'),
+    (0xFCC9, 'M', u'لج'),
+    (0xFCCA, 'M', u'لح'),
+    (0xFCCB, 'M', u'لخ'),
+    (0xFCCC, 'M', u'لم'),
+    (0xFCCD, 'M', u'له'),
+    (0xFCCE, 'M', u'مج'),
+    (0xFCCF, 'M', u'مح'),
+    (0xFCD0, 'M', u'مخ'),
+    (0xFCD1, 'M', u'مم'),
+    (0xFCD2, 'M', u'نج'),
+    (0xFCD3, 'M', u'نح'),
+    (0xFCD4, 'M', u'نخ'),
+    (0xFCD5, 'M', u'نم'),
+    (0xFCD6, 'M', u'نه'),
+    (0xFCD7, 'M', u'هج'),
+    (0xFCD8, 'M', u'هم'),
+    (0xFCD9, 'M', u'هٰ'),
+    (0xFCDA, 'M', u'يج'),
+    (0xFCDB, 'M', u'يح'),
+    (0xFCDC, 'M', u'يخ'),
+    (0xFCDD, 'M', u'يم'),
+    (0xFCDE, 'M', u'يه'),
+    (0xFCDF, 'M', u'ئم'),
+    (0xFCE0, 'M', u'ئه'),
+    (0xFCE1, 'M', u'بم'),
+    (0xFCE2, 'M', u'به'),
+    (0xFCE3, 'M', u'تم'),
+    (0xFCE4, 'M', u'ته'),
+    (0xFCE5, 'M', u'ثم'),
+    (0xFCE6, 'M', u'ثه'),
+    (0xFCE7, 'M', u'سم'),
+    (0xFCE8, 'M', u'سه'),
+    (0xFCE9, 'M', u'شم'),
+    (0xFCEA, 'M', u'شه'),
+    (0xFCEB, 'M', u'كل'),
+    (0xFCEC, 'M', u'كم'),
+    (0xFCED, 'M', u'لم'),
+    (0xFCEE, 'M', u'نم'),
+    ]
+
+def _seg_47():
+    return [
+    (0xFCEF, 'M', u'نه'),
+    (0xFCF0, 'M', u'يم'),
+    (0xFCF1, 'M', u'يه'),
+    (0xFCF2, 'M', u'ـَّ'),
+    (0xFCF3, 'M', u'ـُّ'),
+    (0xFCF4, 'M', u'ـِّ'),
+    (0xFCF5, 'M', u'طى'),
+    (0xFCF6, 'M', u'طي'),
+    (0xFCF7, 'M', u'عى'),
+    (0xFCF8, 'M', u'عي'),
+    (0xFCF9, 'M', u'غى'),
+    (0xFCFA, 'M', u'غي'),
+    (0xFCFB, 'M', u'سى'),
+    (0xFCFC, 'M', u'سي'),
+    (0xFCFD, 'M', u'شى'),
+    (0xFCFE, 'M', u'شي'),
+    (0xFCFF, 'M', u'حى'),
+    (0xFD00, 'M', u'حي'),
+    (0xFD01, 'M', u'جى'),
+    (0xFD02, 'M', u'جي'),
+    (0xFD03, 'M', u'خى'),
+    (0xFD04, 'M', u'خي'),
+    (0xFD05, 'M', u'صى'),
+    (0xFD06, 'M', u'صي'),
+    (0xFD07, 'M', u'ضى'),
+    (0xFD08, 'M', u'ضي'),
+    (0xFD09, 'M', u'شج'),
+    (0xFD0A, 'M', u'شح'),
+    (0xFD0B, 'M', u'شخ'),
+    (0xFD0C, 'M', u'شم'),
+    (0xFD0D, 'M', u'شر'),
+    (0xFD0E, 'M', u'سر'),
+    (0xFD0F, 'M', u'صر'),
+    (0xFD10, 'M', u'ضر'),
+    (0xFD11, 'M', u'طى'),
+    (0xFD12, 'M', u'طي'),
+    (0xFD13, 'M', u'عى'),
+    (0xFD14, 'M', u'عي'),
+    (0xFD15, 'M', u'غى'),
+    (0xFD16, 'M', u'غي'),
+    (0xFD17, 'M', u'سى'),
+    (0xFD18, 'M', u'سي'),
+    (0xFD19, 'M', u'شى'),
+    (0xFD1A, 'M', u'شي'),
+    (0xFD1B, 'M', u'حى'),
+    (0xFD1C, 'M', u'حي'),
+    (0xFD1D, 'M', u'جى'),
+    (0xFD1E, 'M', u'جي'),
+    (0xFD1F, 'M', u'خى'),
+    (0xFD20, 'M', u'خي'),
+    (0xFD21, 'M', u'صى'),
+    (0xFD22, 'M', u'صي'),
+    (0xFD23, 'M', u'ضى'),
+    (0xFD24, 'M', u'ضي'),
+    (0xFD25, 'M', u'شج'),
+    (0xFD26, 'M', u'شح'),
+    (0xFD27, 'M', u'شخ'),
+    (0xFD28, 'M', u'شم'),
+    (0xFD29, 'M', u'شر'),
+    (0xFD2A, 'M', u'سر'),
+    (0xFD2B, 'M', u'صر'),
+    (0xFD2C, 'M', u'ضر'),
+    (0xFD2D, 'M', u'شج'),
+    (0xFD2E, 'M', u'شح'),
+    (0xFD2F, 'M', u'شخ'),
+    (0xFD30, 'M', u'شم'),
+    (0xFD31, 'M', u'سه'),
+    (0xFD32, 'M', u'شه'),
+    (0xFD33, 'M', u'طم'),
+    (0xFD34, 'M', u'سج'),
+    (0xFD35, 'M', u'سح'),
+    (0xFD36, 'M', u'سخ'),
+    (0xFD37, 'M', u'شج'),
+    (0xFD38, 'M', u'شح'),
+    (0xFD39, 'M', u'شخ'),
+    (0xFD3A, 'M', u'طم'),
+    (0xFD3B, 'M', u'ظم'),
+    (0xFD3C, 'M', u'اً'),
+    (0xFD3E, 'V'),
+    (0xFD40, 'X'),
+    (0xFD50, 'M', u'تجم'),
+    (0xFD51, 'M', u'تحج'),
+    (0xFD53, 'M', u'تحم'),
+    (0xFD54, 'M', u'تخم'),
+    (0xFD55, 'M', u'تمج'),
+    (0xFD56, 'M', u'تمح'),
+    (0xFD57, 'M', u'تمخ'),
+    (0xFD58, 'M', u'جمح'),
+    (0xFD5A, 'M', u'حمي'),
+    (0xFD5B, 'M', u'حمى'),
+    (0xFD5C, 'M', u'سحج'),
+    (0xFD5D, 'M', u'سجح'),
+    (0xFD5E, 'M', u'سجى'),
+    (0xFD5F, 'M', u'سمح'),
+    (0xFD61, 'M', u'سمج'),
+    (0xFD62, 'M', u'سمم'),
+    (0xFD64, 'M', u'صحح'),
+    (0xFD66, 'M', u'صمم'),
+    (0xFD67, 'M', u'شحم'),
+    (0xFD69, 'M', u'شجي'),
+    ]
+
+def _seg_48():
+    return [
+    (0xFD6A, 'M', u'شمخ'),
+    (0xFD6C, 'M', u'شمم'),
+    (0xFD6E, 'M', u'ضحى'),
+    (0xFD6F, 'M', u'ضخم'),
+    (0xFD71, 'M', u'طمح'),
+    (0xFD73, 'M', u'طمم'),
+    (0xFD74, 'M', u'طمي'),
+    (0xFD75, 'M', u'عجم'),
+    (0xFD76, 'M', u'عمم'),
+    (0xFD78, 'M', u'عمى'),
+    (0xFD79, 'M', u'غمم'),
+    (0xFD7A, 'M', u'غمي'),
+    (0xFD7B, 'M', u'غمى'),
+    (0xFD7C, 'M', u'فخم'),
+    (0xFD7E, 'M', u'قمح'),
+    (0xFD7F, 'M', u'قمم'),
+    (0xFD80, 'M', u'لحم'),
+    (0xFD81, 'M', u'لحي'),
+    (0xFD82, 'M', u'لحى'),
+    (0xFD83, 'M', u'لجج'),
+    (0xFD85, 'M', u'لخم'),
+    (0xFD87, 'M', u'لمح'),
+    (0xFD89, 'M', u'محج'),
+    (0xFD8A, 'M', u'محم'),
+    (0xFD8B, 'M', u'محي'),
+    (0xFD8C, 'M', u'مجح'),
+    (0xFD8D, 'M', u'مجم'),
+    (0xFD8E, 'M', u'مخج'),
+    (0xFD8F, 'M', u'مخم'),
+    (0xFD90, 'X'),
+    (0xFD92, 'M', u'مجخ'),
+    (0xFD93, 'M', u'همج'),
+    (0xFD94, 'M', u'همم'),
+    (0xFD95, 'M', u'نحم'),
+    (0xFD96, 'M', u'نحى'),
+    (0xFD97, 'M', u'نجم'),
+    (0xFD99, 'M', u'نجى'),
+    (0xFD9A, 'M', u'نمي'),
+    (0xFD9B, 'M', u'نمى'),
+    (0xFD9C, 'M', u'يمم'),
+    (0xFD9E, 'M', u'بخي'),
+    (0xFD9F, 'M', u'تجي'),
+    (0xFDA0, 'M', u'تجى'),
+    (0xFDA1, 'M', u'تخي'),
+    (0xFDA2, 'M', u'تخى'),
+    (0xFDA3, 'M', u'تمي'),
+    (0xFDA4, 'M', u'تمى'),
+    (0xFDA5, 'M', u'جمي'),
+    (0xFDA6, 'M', u'جحى'),
+    (0xFDA7, 'M', u'جمى'),
+    (0xFDA8, 'M', u'سخى'),
+    (0xFDA9, 'M', u'صحي'),
+    (0xFDAA, 'M', u'شحي'),
+    (0xFDAB, 'M', u'ضحي'),
+    (0xFDAC, 'M', u'لجي'),
+    (0xFDAD, 'M', u'لمي'),
+    (0xFDAE, 'M', u'يحي'),
+    (0xFDAF, 'M', u'يجي'),
+    (0xFDB0, 'M', u'يمي'),
+    (0xFDB1, 'M', u'ممي'),
+    (0xFDB2, 'M', u'قمي'),
+    (0xFDB3, 'M', u'نحي'),
+    (0xFDB4, 'M', u'قمح'),
+    (0xFDB5, 'M', u'لحم'),
+    (0xFDB6, 'M', u'عمي'),
+    (0xFDB7, 'M', u'كمي'),
+    (0xFDB8, 'M', u'نجح'),
+    (0xFDB9, 'M', u'مخي'),
+    (0xFDBA, 'M', u'لجم'),
+    (0xFDBB, 'M', u'كمم'),
+    (0xFDBC, 'M', u'لجم'),
+    (0xFDBD, 'M', u'نجح'),
+    (0xFDBE, 'M', u'جحي'),
+    (0xFDBF, 'M', u'حجي'),
+    (0xFDC0, 'M', u'مجي'),
+    (0xFDC1, 'M', u'فمي'),
+    (0xFDC2, 'M', u'بحي'),
+    (0xFDC3, 'M', u'كمم'),
+    (0xFDC4, 'M', u'عجم'),
+    (0xFDC5, 'M', u'صمم'),
+    (0xFDC6, 'M', u'سخي'),
+    (0xFDC7, 'M', u'نجي'),
+    (0xFDC8, 'X'),
+    (0xFDF0, 'M', u'صلے'),
+    (0xFDF1, 'M', u'قلے'),
+    (0xFDF2, 'M', u'الله'),
+    (0xFDF3, 'M', u'اكبر'),
+    (0xFDF4, 'M', u'محمد'),
+    (0xFDF5, 'M', u'صلعم'),
+    (0xFDF6, 'M', u'رسول'),
+    (0xFDF7, 'M', u'عليه'),
+    (0xFDF8, 'M', u'وسلم'),
+    (0xFDF9, 'M', u'صلى'),
+    (0xFDFA, '3', u'صلى الله عليه وسلم'),
+    (0xFDFB, '3', u'جل جلاله'),
+    (0xFDFC, 'M', u'ریال'),
+    (0xFDFD, 'V'),
+    (0xFDFE, 'X'),
+    (0xFE00, 'I'),
+    (0xFE10, '3', u','),
+    ]
+
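+# Note: 'I' rows such as (0xFE00, 'I') above (variation selectors) and
+# (0xFEFF, 'I') below (the zero-width no-break space) mark ignored code
+# points; under the lookup sketched earlier a remapper drops them outright
+# instead of mapping or rejecting them.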
+def _seg_49():
+    return [
+    (0xFE11, 'M', u'、'),
+    (0xFE12, 'X'),
+    (0xFE13, '3', u':'),
+    (0xFE14, '3', u';'),
+    (0xFE15, '3', u'!'),
+    (0xFE16, '3', u'?'),
+    (0xFE17, 'M', u'〖'),
+    (0xFE18, 'M', u'〗'),
+    (0xFE19, 'X'),
+    (0xFE20, 'V'),
+    (0xFE30, 'X'),
+    (0xFE31, 'M', u'—'),
+    (0xFE32, 'M', u'–'),
+    (0xFE33, '3', u'_'),
+    (0xFE35, '3', u'('),
+    (0xFE36, '3', u')'),
+    (0xFE37, '3', u'{'),
+    (0xFE38, '3', u'}'),
+    (0xFE39, 'M', u'〔'),
+    (0xFE3A, 'M', u'〕'),
+    (0xFE3B, 'M', u'【'),
+    (0xFE3C, 'M', u'】'),
+    (0xFE3D, 'M', u'《'),
+    (0xFE3E, 'M', u'》'),
+    (0xFE3F, 'M', u'〈'),
+    (0xFE40, 'M', u'〉'),
+    (0xFE41, 'M', u'「'),
+    (0xFE42, 'M', u'」'),
+    (0xFE43, 'M', u'『'),
+    (0xFE44, 'M', u'』'),
+    (0xFE45, 'V'),
+    (0xFE47, '3', u'['),
+    (0xFE48, '3', u']'),
+    (0xFE49, '3', u' ̅'),
+    (0xFE4D, '3', u'_'),
+    (0xFE50, '3', u','),
+    (0xFE51, 'M', u'、'),
+    (0xFE52, 'X'),
+    (0xFE54, '3', u';'),
+    (0xFE55, '3', u':'),
+    (0xFE56, '3', u'?'),
+    (0xFE57, '3', u'!'),
+    (0xFE58, 'M', u'—'),
+    (0xFE59, '3', u'('),
+    (0xFE5A, '3', u')'),
+    (0xFE5B, '3', u'{'),
+    (0xFE5C, '3', u'}'),
+    (0xFE5D, 'M', u'〔'),
+    (0xFE5E, 'M', u'〕'),
+    (0xFE5F, '3', u'#'),
+    (0xFE60, '3', u'&'),
+    (0xFE61, '3', u'*'),
+    (0xFE62, '3', u'+'),
+    (0xFE63, 'M', u'-'),
+    (0xFE64, '3', u'<'),
+    (0xFE65, '3', u'>'),
+    (0xFE66, '3', u'='),
+    (0xFE67, 'X'),
+    (0xFE68, '3', u'\\'),
+    (0xFE69, '3', u'$'),
+    (0xFE6A, '3', u'%'),
+    (0xFE6B, '3', u'@'),
+    (0xFE6C, 'X'),
+    (0xFE70, '3', u' ً'),
+    (0xFE71, 'M', u'ـً'),
+    (0xFE72, '3', u' ٌ'),
+    (0xFE73, 'V'),
+    (0xFE74, '3', u' ٍ'),
+    (0xFE75, 'X'),
+    (0xFE76, '3', u' َ'),
+    (0xFE77, 'M', u'ـَ'),
+    (0xFE78, '3', u' ُ'),
+    (0xFE79, 'M', u'ـُ'),
+    (0xFE7A, '3', u' ِ'),
+    (0xFE7B, 'M', u'ـِ'),
+    (0xFE7C, '3', u' ّ'),
+    (0xFE7D, 'M', u'ـّ'),
+    (0xFE7E, '3', u' ْ'),
+    (0xFE7F, 'M', u'ـْ'),
+    (0xFE80, 'M', u'ء'),
+    (0xFE81, 'M', u'آ'),
+    (0xFE83, 'M', u'أ'),
+    (0xFE85, 'M', u'ؤ'),
+    (0xFE87, 'M', u'إ'),
+    (0xFE89, 'M', u'ئ'),
+    (0xFE8D, 'M', u'ا'),
+    (0xFE8F, 'M', u'ب'),
+    (0xFE93, 'M', u'ة'),
+    (0xFE95, 'M', u'ت'),
+    (0xFE99, 'M', u'ث'),
+    (0xFE9D, 'M', u'ج'),
+    (0xFEA1, 'M', u'ح'),
+    (0xFEA5, 'M', u'خ'),
+    (0xFEA9, 'M', u'د'),
+    (0xFEAB, 'M', u'ذ'),
+    (0xFEAD, 'M', u'ر'),
+    (0xFEAF, 'M', u'ز'),
+    (0xFEB1, 'M', u'س'),
+    (0xFEB5, 'M', u'ش'),
+    (0xFEB9, 'M', u'ص'),
+    ]
+
+def _seg_50():
+    return [
+    (0xFEBD, 'M', u'ض'),
+    (0xFEC1, 'M', u'ط'),
+    (0xFEC5, 'M', u'ظ'),
+    (0xFEC9, 'M', u'ع'),
+    (0xFECD, 'M', u'غ'),
+    (0xFED1, 'M', u'ف'),
+    (0xFED5, 'M', u'ق'),
+    (0xFED9, 'M', u'ك'),
+    (0xFEDD, 'M', u'ل'),
+    (0xFEE1, 'M', u'م'),
+    (0xFEE5, 'M', u'ن'),
+    (0xFEE9, 'M', u'ه'),
+    (0xFEED, 'M', u'و'),
+    (0xFEEF, 'M', u'ى'),
+    (0xFEF1, 'M', u'ي'),
+    (0xFEF5, 'M', u'لآ'),
+    (0xFEF7, 'M', u'لأ'),
+    (0xFEF9, 'M', u'لإ'),
+    (0xFEFB, 'M', u'لا'),
+    (0xFEFD, 'X'),
+    (0xFEFF, 'I'),
+    (0xFF00, 'X'),
+    (0xFF01, '3', u'!'),
+    (0xFF02, '3', u'"'),
+    (0xFF03, '3', u'#'),
+    (0xFF04, '3', u'$'),
+    (0xFF05, '3', u'%'),
+    (0xFF06, '3', u'&'),
+    (0xFF07, '3', u'\''),
+    (0xFF08, '3', u'('),
+    (0xFF09, '3', u')'),
+    (0xFF0A, '3', u'*'),
+    (0xFF0B, '3', u'+'),
+    (0xFF0C, '3', u','),
+    (0xFF0D, 'M', u'-'),
+    (0xFF0E, 'M', u'.'),
+    (0xFF0F, '3', u'/'),
+    (0xFF10, 'M', u'0'),
+    (0xFF11, 'M', u'1'),
+    (0xFF12, 'M', u'2'),
+    (0xFF13, 'M', u'3'),
+    (0xFF14, 'M', u'4'),
+    (0xFF15, 'M', u'5'),
+    (0xFF16, 'M', u'6'),
+    (0xFF17, 'M', u'7'),
+    (0xFF18, 'M', u'8'),
+    (0xFF19, 'M', u'9'),
+    (0xFF1A, '3', u':'),
+    (0xFF1B, '3', u';'),
+    (0xFF1C, '3', u'<'),
+    (0xFF1D, '3', u'='),
+    (0xFF1E, '3', u'>'),
+    (0xFF1F, '3', u'?'),
+    (0xFF20, '3', u'@'),
+    (0xFF21, 'M', u'a'),
+    (0xFF22, 'M', u'b'),
+    (0xFF23, 'M', u'c'),
+    (0xFF24, 'M', u'd'),
+    (0xFF25, 'M', u'e'),
+    (0xFF26, 'M', u'f'),
+    (0xFF27, 'M', u'g'),
+    (0xFF28, 'M', u'h'),
+    (0xFF29, 'M', u'i'),
+    (0xFF2A, 'M', u'j'),
+    (0xFF2B, 'M', u'k'),
+    (0xFF2C, 'M', u'l'),
+    (0xFF2D, 'M', u'm'),
+    (0xFF2E, 'M', u'n'),
+    (0xFF2F, 'M', u'o'),
+    (0xFF30, 'M', u'p'),
+    (0xFF31, 'M', u'q'),
+    (0xFF32, 'M', u'r'),
+    (0xFF33, 'M', u's'),
+    (0xFF34, 'M', u't'),
+    (0xFF35, 'M', u'u'),
+    (0xFF36, 'M', u'v'),
+    (0xFF37, 'M', u'w'),
+    (0xFF38, 'M', u'x'),
+    (0xFF39, 'M', u'y'),
+    (0xFF3A, 'M', u'z'),
+    (0xFF3B, '3', u'['),
+    (0xFF3C, '3', u'\\'),
+    (0xFF3D, '3', u']'),
+    (0xFF3E, '3', u'^'),
+    (0xFF3F, '3', u'_'),
+    (0xFF40, '3', u'`'),
+    (0xFF41, 'M', u'a'),
+    (0xFF42, 'M', u'b'),
+    (0xFF43, 'M', u'c'),
+    (0xFF44, 'M', u'd'),
+    (0xFF45, 'M', u'e'),
+    (0xFF46, 'M', u'f'),
+    (0xFF47, 'M', u'g'),
+    (0xFF48, 'M', u'h'),
+    (0xFF49, 'M', u'i'),
+    (0xFF4A, 'M', u'j'),
+    (0xFF4B, 'M', u'k'),
+    (0xFF4C, 'M', u'l'),
+    (0xFF4D, 'M', u'm'),
+    (0xFF4E, 'M', u'n'),
+    ]
+
+def _seg_51():
+    return [
+    (0xFF4F, 'M', u'o'),
+    (0xFF50, 'M', u'p'),
+    (0xFF51, 'M', u'q'),
+    (0xFF52, 'M', u'r'),
+    (0xFF53, 'M', u's'),
+    (0xFF54, 'M', u't'),
+    (0xFF55, 'M', u'u'),
+    (0xFF56, 'M', u'v'),
+    (0xFF57, 'M', u'w'),
+    (0xFF58, 'M', u'x'),
+    (0xFF59, 'M', u'y'),
+    (0xFF5A, 'M', u'z'),
+    (0xFF5B, '3', u'{'),
+    (0xFF5C, '3', u'|'),
+    (0xFF5D, '3', u'}'),
+    (0xFF5E, '3', u'~'),
+    (0xFF5F, 'M', u'⦅'),
+    (0xFF60, 'M', u'⦆'),
+    (0xFF61, 'M', u'.'),
+    (0xFF62, 'M', u'「'),
+    (0xFF63, 'M', u'」'),
+    (0xFF64, 'M', u'、'),
+    (0xFF65, 'M', u'・'),
+    (0xFF66, 'M', u'ヲ'),
+    (0xFF67, 'M', u'ァ'),
+    (0xFF68, 'M', u'ィ'),
+    (0xFF69, 'M', u'ゥ'),
+    (0xFF6A, 'M', u'ェ'),
+    (0xFF6B, 'M', u'ォ'),
+    (0xFF6C, 'M', u'ャ'),
+    (0xFF6D, 'M', u'ュ'),
+    (0xFF6E, 'M', u'ョ'),
+    (0xFF6F, 'M', u'ッ'),
+    (0xFF70, 'M', u'ー'),
+    (0xFF71, 'M', u'ア'),
+    (0xFF72, 'M', u'イ'),
+    (0xFF73, 'M', u'ウ'),
+    (0xFF74, 'M', u'エ'),
+    (0xFF75, 'M', u'オ'),
+    (0xFF76, 'M', u'カ'),
+    (0xFF77, 'M', u'キ'),
+    (0xFF78, 'M', u'ク'),
+    (0xFF79, 'M', u'ケ'),
+    (0xFF7A, 'M', u'コ'),
+    (0xFF7B, 'M', u'サ'),
+    (0xFF7C, 'M', u'シ'),
+    (0xFF7D, 'M', u'ス'),
+    (0xFF7E, 'M', u'セ'),
+    (0xFF7F, 'M', u'ソ'),
+    (0xFF80, 'M', u'タ'),
+    (0xFF81, 'M', u'チ'),
+    (0xFF82, 'M', u'ツ'),
+    (0xFF83, 'M', u'テ'),
+    (0xFF84, 'M', u'ト'),
+    (0xFF85, 'M', u'ナ'),
+    (0xFF86, 'M', u'ニ'),
+    (0xFF87, 'M', u'ヌ'),
+    (0xFF88, 'M', u'ネ'),
+    (0xFF89, 'M', u'ノ'),
+    (0xFF8A, 'M', u'ハ'),
+    (0xFF8B, 'M', u'ヒ'),
+    (0xFF8C, 'M', u'フ'),
+    (0xFF8D, 'M', u'ヘ'),
+    (0xFF8E, 'M', u'ホ'),
+    (0xFF8F, 'M', u'マ'),
+    (0xFF90, 'M', u'ミ'),
+    (0xFF91, 'M', u'ム'),
+    (0xFF92, 'M', u'メ'),
+    (0xFF93, 'M', u'モ'),
+    (0xFF94, 'M', u'ヤ'),
+    (0xFF95, 'M', u'ユ'),
+    (0xFF96, 'M', u'ヨ'),
+    (0xFF97, 'M', u'ラ'),
+    (0xFF98, 'M', u'リ'),
+    (0xFF99, 'M', u'ル'),
+    (0xFF9A, 'M', u'レ'),
+    (0xFF9B, 'M', u'ロ'),
+    (0xFF9C, 'M', u'ワ'),
+    (0xFF9D, 'M', u'ン'),
+    (0xFF9E, 'M', u'゙'),
+    (0xFF9F, 'M', u'゚'),
+    (0xFFA0, 'X'),
+    (0xFFA1, 'M', u'ᄀ'),
+    (0xFFA2, 'M', u'ᄁ'),
+    (0xFFA3, 'M', u'ᆪ'),
+    (0xFFA4, 'M', u'ᄂ'),
+    (0xFFA5, 'M', u'ᆬ'),
+    (0xFFA6, 'M', u'ᆭ'),
+    (0xFFA7, 'M', u'ᄃ'),
+    (0xFFA8, 'M', u'ᄄ'),
+    (0xFFA9, 'M', u'ᄅ'),
+    (0xFFAA, 'M', u'ᆰ'),
+    (0xFFAB, 'M', u'ᆱ'),
+    (0xFFAC, 'M', u'ᆲ'),
+    (0xFFAD, 'M', u'ᆳ'),
+    (0xFFAE, 'M', u'ᆴ'),
+    (0xFFAF, 'M', u'ᆵ'),
+    (0xFFB0, 'M', u'ᄚ'),
+    (0xFFB1, 'M', u'ᄆ'),
+    (0xFFB2, 'M', u'ᄇ'),
+    ]
+
+def _seg_52():
+    return [
+    (0xFFB3, 'M', u'ᄈ'),
+    (0xFFB4, 'M', u'ᄡ'),
+    (0xFFB5, 'M', u'ᄉ'),
+    (0xFFB6, 'M', u'ᄊ'),
+    (0xFFB7, 'M', u'ᄋ'),
+    (0xFFB8, 'M', u'ᄌ'),
+    (0xFFB9, 'M', u'ᄍ'),
+    (0xFFBA, 'M', u'ᄎ'),
+    (0xFFBB, 'M', u'ᄏ'),
+    (0xFFBC, 'M', u'ᄐ'),
+    (0xFFBD, 'M', u'ᄑ'),
+    (0xFFBE, 'M', u'ᄒ'),
+    (0xFFBF, 'X'),
+    (0xFFC2, 'M', u'ᅡ'),
+    (0xFFC3, 'M', u'ᅢ'),
+    (0xFFC4, 'M', u'ᅣ'),
+    (0xFFC5, 'M', u'ᅤ'),
+    (0xFFC6, 'M', u'ᅥ'),
+    (0xFFC7, 'M', u'ᅦ'),
+    (0xFFC8, 'X'),
+    (0xFFCA, 'M', u'ᅧ'),
+    (0xFFCB, 'M', u'ᅨ'),
+    (0xFFCC, 'M', u'ᅩ'),
+    (0xFFCD, 'M', u'ᅪ'),
+    (0xFFCE, 'M', u'ᅫ'),
+    (0xFFCF, 'M', u'ᅬ'),
+    (0xFFD0, 'X'),
+    (0xFFD2, 'M', u'ᅭ'),
+    (0xFFD3, 'M', u'ᅮ'),
+    (0xFFD4, 'M', u'ᅯ'),
+    (0xFFD5, 'M', u'ᅰ'),
+    (0xFFD6, 'M', u'ᅱ'),
+    (0xFFD7, 'M', u'ᅲ'),
+    (0xFFD8, 'X'),
+    (0xFFDA, 'M', u'ᅳ'),
+    (0xFFDB, 'M', u'ᅴ'),
+    (0xFFDC, 'M', u'ᅵ'),
+    (0xFFDD, 'X'),
+    (0xFFE0, 'M', u'¢'),
+    (0xFFE1, 'M', u'£'),
+    (0xFFE2, 'M', u'¬'),
+    (0xFFE3, '3', u' ̄'),
+    (0xFFE4, 'M', u'¦'),
+    (0xFFE5, 'M', u'¥'),
+    (0xFFE6, 'M', u'₩'),
+    (0xFFE7, 'X'),
+    (0xFFE8, 'M', u'│'),
+    (0xFFE9, 'M', u'←'),
+    (0xFFEA, 'M', u'↑'),
+    (0xFFEB, 'M', u'→'),
+    (0xFFEC, 'M', u'↓'),
+    (0xFFED, 'M', u'■'),
+    (0xFFEE, 'M', u'○'),
+    (0xFFEF, 'X'),
+    (0x10000, 'V'),
+    (0x1000C, 'X'),
+    (0x1000D, 'V'),
+    (0x10027, 'X'),
+    (0x10028, 'V'),
+    (0x1003B, 'X'),
+    (0x1003C, 'V'),
+    (0x1003E, 'X'),
+    (0x1003F, 'V'),
+    (0x1004E, 'X'),
+    (0x10050, 'V'),
+    (0x1005E, 'X'),
+    (0x10080, 'V'),
+    (0x100FB, 'X'),
+    (0x10100, 'V'),
+    (0x10103, 'X'),
+    (0x10107, 'V'),
+    (0x10134, 'X'),
+    (0x10137, 'V'),
+    (0x1018F, 'X'),
+    (0x10190, 'V'),
+    (0x1019C, 'X'),
+    (0x101A0, 'V'),
+    (0x101A1, 'X'),
+    (0x101D0, 'V'),
+    (0x101FE, 'X'),
+    (0x10280, 'V'),
+    (0x1029D, 'X'),
+    (0x102A0, 'V'),
+    (0x102D1, 'X'),
+    (0x102E0, 'V'),
+    (0x102FC, 'X'),
+    (0x10300, 'V'),
+    (0x10324, 'X'),
+    (0x1032D, 'V'),
+    (0x1034B, 'X'),
+    (0x10350, 'V'),
+    (0x1037B, 'X'),
+    (0x10380, 'V'),
+    (0x1039E, 'X'),
+    (0x1039F, 'V'),
+    (0x103C4, 'X'),
+    (0x103C8, 'V'),
+    (0x103D6, 'X'),
+    (0x10400, 'M', u'𐐨'),
+    (0x10401, 'M', u'𐐩'),
+    ]
+
+def _seg_53():
+    return [
+    (0x10402, 'M', u'𐐪'),
+    (0x10403, 'M', u'𐐫'),
+    (0x10404, 'M', u'𐐬'),
+    (0x10405, 'M', u'𐐭'),
+    (0x10406, 'M', u'𐐮'),
+    (0x10407, 'M', u'𐐯'),
+    (0x10408, 'M', u'𐐰'),
+    (0x10409, 'M', u'𐐱'),
+    (0x1040A, 'M', u'𐐲'),
+    (0x1040B, 'M', u'𐐳'),
+    (0x1040C, 'M', u'𐐴'),
+    (0x1040D, 'M', u'𐐵'),
+    (0x1040E, 'M', u'𐐶'),
+    (0x1040F, 'M', u'𐐷'),
+    (0x10410, 'M', u'𐐸'),
+    (0x10411, 'M', u'𐐹'),
+    (0x10412, 'M', u'𐐺'),
+    (0x10413, 'M', u'𐐻'),
+    (0x10414, 'M', u'𐐼'),
+    (0x10415, 'M', u'𐐽'),
+    (0x10416, 'M', u'𐐾'),
+    (0x10417, 'M', u'𐐿'),
+    (0x10418, 'M', u'𐑀'),
+    (0x10419, 'M', u'𐑁'),
+    (0x1041A, 'M', u'𐑂'),
+    (0x1041B, 'M', u'𐑃'),
+    (0x1041C, 'M', u'𐑄'),
+    (0x1041D, 'M', u'𐑅'),
+    (0x1041E, 'M', u'𐑆'),
+    (0x1041F, 'M', u'𐑇'),
+    (0x10420, 'M', u'𐑈'),
+    (0x10421, 'M', u'𐑉'),
+    (0x10422, 'M', u'𐑊'),
+    (0x10423, 'M', u'𐑋'),
+    (0x10424, 'M', u'𐑌'),
+    (0x10425, 'M', u'𐑍'),
+    (0x10426, 'M', u'𐑎'),
+    (0x10427, 'M', u'𐑏'),
+    (0x10428, 'V'),
+    (0x1049E, 'X'),
+    (0x104A0, 'V'),
+    (0x104AA, 'X'),
+    (0x104B0, 'M', u'𐓘'),
+    (0x104B1, 'M', u'𐓙'),
+    (0x104B2, 'M', u'𐓚'),
+    (0x104B3, 'M', u'𐓛'),
+    (0x104B4, 'M', u'𐓜'),
+    (0x104B5, 'M', u'𐓝'),
+    (0x104B6, 'M', u'𐓞'),
+    (0x104B7, 'M', u'𐓟'),
+    (0x104B8, 'M', u'𐓠'),
+    (0x104B9, 'M', u'𐓡'),
+    (0x104BA, 'M', u'𐓢'),
+    (0x104BB, 'M', u'𐓣'),
+    (0x104BC, 'M', u'𐓤'),
+    (0x104BD, 'M', u'𐓥'),
+    (0x104BE, 'M', u'𐓦'),
+    (0x104BF, 'M', u'𐓧'),
+    (0x104C0, 'M', u'𐓨'),
+    (0x104C1, 'M', u'𐓩'),
+    (0x104C2, 'M', u'𐓪'),
+    (0x104C3, 'M', u'𐓫'),
+    (0x104C4, 'M', u'𐓬'),
+    (0x104C5, 'M', u'𐓭'),
+    (0x104C6, 'M', u'𐓮'),
+    (0x104C7, 'M', u'𐓯'),
+    (0x104C8, 'M', u'𐓰'),
+    (0x104C9, 'M', u'𐓱'),
+    (0x104CA, 'M', u'𐓲'),
+    (0x104CB, 'M', u'𐓳'),
+    (0x104CC, 'M', u'𐓴'),
+    (0x104CD, 'M', u'𐓵'),
+    (0x104CE, 'M', u'𐓶'),
+    (0x104CF, 'M', u'𐓷'),
+    (0x104D0, 'M', u'𐓸'),
+    (0x104D1, 'M', u'𐓹'),
+    (0x104D2, 'M', u'𐓺'),
+    (0x104D3, 'M', u'𐓻'),
+    (0x104D4, 'X'),
+    (0x104D8, 'V'),
+    (0x104FC, 'X'),
+    (0x10500, 'V'),
+    (0x10528, 'X'),
+    (0x10530, 'V'),
+    (0x10564, 'X'),
+    (0x1056F, 'V'),
+    (0x10570, 'X'),
+    (0x10600, 'V'),
+    (0x10737, 'X'),
+    (0x10740, 'V'),
+    (0x10756, 'X'),
+    (0x10760, 'V'),
+    (0x10768, 'X'),
+    (0x10800, 'V'),
+    (0x10806, 'X'),
+    (0x10808, 'V'),
+    (0x10809, 'X'),
+    (0x1080A, 'V'),
+    (0x10836, 'X'),
+    (0x10837, 'V'),
+    ]
+
+def _seg_54():
+    return [
+    (0x10839, 'X'),
+    (0x1083C, 'V'),
+    (0x1083D, 'X'),
+    (0x1083F, 'V'),
+    (0x10856, 'X'),
+    (0x10857, 'V'),
+    (0x1089F, 'X'),
+    (0x108A7, 'V'),
+    (0x108B0, 'X'),
+    (0x108E0, 'V'),
+    (0x108F3, 'X'),
+    (0x108F4, 'V'),
+    (0x108F6, 'X'),
+    (0x108FB, 'V'),
+    (0x1091C, 'X'),
+    (0x1091F, 'V'),
+    (0x1093A, 'X'),
+    (0x1093F, 'V'),
+    (0x10940, 'X'),
+    (0x10980, 'V'),
+    (0x109B8, 'X'),
+    (0x109BC, 'V'),
+    (0x109D0, 'X'),
+    (0x109D2, 'V'),
+    (0x10A04, 'X'),
+    (0x10A05, 'V'),
+    (0x10A07, 'X'),
+    (0x10A0C, 'V'),
+    (0x10A14, 'X'),
+    (0x10A15, 'V'),
+    (0x10A18, 'X'),
+    (0x10A19, 'V'),
+    (0x10A36, 'X'),
+    (0x10A38, 'V'),
+    (0x10A3B, 'X'),
+    (0x10A3F, 'V'),
+    (0x10A49, 'X'),
+    (0x10A50, 'V'),
+    (0x10A59, 'X'),
+    (0x10A60, 'V'),
+    (0x10AA0, 'X'),
+    (0x10AC0, 'V'),
+    (0x10AE7, 'X'),
+    (0x10AEB, 'V'),
+    (0x10AF7, 'X'),
+    (0x10B00, 'V'),
+    (0x10B36, 'X'),
+    (0x10B39, 'V'),
+    (0x10B56, 'X'),
+    (0x10B58, 'V'),
+    (0x10B73, 'X'),
+    (0x10B78, 'V'),
+    (0x10B92, 'X'),
+    (0x10B99, 'V'),
+    (0x10B9D, 'X'),
+    (0x10BA9, 'V'),
+    (0x10BB0, 'X'),
+    (0x10C00, 'V'),
+    (0x10C49, 'X'),
+    (0x10C80, 'M', u'𐳀'),
+    (0x10C81, 'M', u'𐳁'),
+    (0x10C82, 'M', u'𐳂'),
+    (0x10C83, 'M', u'𐳃'),
+    (0x10C84, 'M', u'𐳄'),
+    (0x10C85, 'M', u'𐳅'),
+    (0x10C86, 'M', u'𐳆'),
+    (0x10C87, 'M', u'𐳇'),
+    (0x10C88, 'M', u'𐳈'),
+    (0x10C89, 'M', u'𐳉'),
+    (0x10C8A, 'M', u'𐳊'),
+    (0x10C8B, 'M', u'𐳋'),
+    (0x10C8C, 'M', u'𐳌'),
+    (0x10C8D, 'M', u'𐳍'),
+    (0x10C8E, 'M', u'𐳎'),
+    (0x10C8F, 'M', u'𐳏'),
+    (0x10C90, 'M', u'𐳐'),
+    (0x10C91, 'M', u'𐳑'),
+    (0x10C92, 'M', u'𐳒'),
+    (0x10C93, 'M', u'𐳓'),
+    (0x10C94, 'M', u'𐳔'),
+    (0x10C95, 'M', u'𐳕'),
+    (0x10C96, 'M', u'𐳖'),
+    (0x10C97, 'M', u'𐳗'),
+    (0x10C98, 'M', u'𐳘'),
+    (0x10C99, 'M', u'𐳙'),
+    (0x10C9A, 'M', u'𐳚'),
+    (0x10C9B, 'M', u'𐳛'),
+    (0x10C9C, 'M', u'𐳜'),
+    (0x10C9D, 'M', u'𐳝'),
+    (0x10C9E, 'M', u'𐳞'),
+    (0x10C9F, 'M', u'𐳟'),
+    (0x10CA0, 'M', u'𐳠'),
+    (0x10CA1, 'M', u'𐳡'),
+    (0x10CA2, 'M', u'𐳢'),
+    (0x10CA3, 'M', u'𐳣'),
+    (0x10CA4, 'M', u'𐳤'),
+    (0x10CA5, 'M', u'𐳥'),
+    (0x10CA6, 'M', u'𐳦'),
+    (0x10CA7, 'M', u'𐳧'),
+    (0x10CA8, 'M', u'𐳨'),
+    ]
+
+def _seg_55():
+    return [
+    (0x10CA9, 'M', u'𐳩'),
+    (0x10CAA, 'M', u'𐳪'),
+    (0x10CAB, 'M', u'𐳫'),
+    (0x10CAC, 'M', u'𐳬'),
+    (0x10CAD, 'M', u'𐳭'),
+    (0x10CAE, 'M', u'𐳮'),
+    (0x10CAF, 'M', u'𐳯'),
+    (0x10CB0, 'M', u'𐳰'),
+    (0x10CB1, 'M', u'𐳱'),
+    (0x10CB2, 'M', u'𐳲'),
+    (0x10CB3, 'X'),
+    (0x10CC0, 'V'),
+    (0x10CF3, 'X'),
+    (0x10CFA, 'V'),
+    (0x10D28, 'X'),
+    (0x10D30, 'V'),
+    (0x10D3A, 'X'),
+    (0x10E60, 'V'),
+    (0x10E7F, 'X'),
+    (0x10F00, 'V'),
+    (0x10F28, 'X'),
+    (0x10F30, 'V'),
+    (0x10F5A, 'X'),
+    (0x11000, 'V'),
+    (0x1104E, 'X'),
+    (0x11052, 'V'),
+    (0x11070, 'X'),
+    (0x1107F, 'V'),
+    (0x110BD, 'X'),
+    (0x110BE, 'V'),
+    (0x110C2, 'X'),
+    (0x110D0, 'V'),
+    (0x110E9, 'X'),
+    (0x110F0, 'V'),
+    (0x110FA, 'X'),
+    (0x11100, 'V'),
+    (0x11135, 'X'),
+    (0x11136, 'V'),
+    (0x11147, 'X'),
+    (0x11150, 'V'),
+    (0x11177, 'X'),
+    (0x11180, 'V'),
+    (0x111CE, 'X'),
+    (0x111D0, 'V'),
+    (0x111E0, 'X'),
+    (0x111E1, 'V'),
+    (0x111F5, 'X'),
+    (0x11200, 'V'),
+    (0x11212, 'X'),
+    (0x11213, 'V'),
+    (0x1123F, 'X'),
+    (0x11280, 'V'),
+    (0x11287, 'X'),
+    (0x11288, 'V'),
+    (0x11289, 'X'),
+    (0x1128A, 'V'),
+    (0x1128E, 'X'),
+    (0x1128F, 'V'),
+    (0x1129E, 'X'),
+    (0x1129F, 'V'),
+    (0x112AA, 'X'),
+    (0x112B0, 'V'),
+    (0x112EB, 'X'),
+    (0x112F0, 'V'),
+    (0x112FA, 'X'),
+    (0x11300, 'V'),
+    (0x11304, 'X'),
+    (0x11305, 'V'),
+    (0x1130D, 'X'),
+    (0x1130F, 'V'),
+    (0x11311, 'X'),
+    (0x11313, 'V'),
+    (0x11329, 'X'),
+    (0x1132A, 'V'),
+    (0x11331, 'X'),
+    (0x11332, 'V'),
+    (0x11334, 'X'),
+    (0x11335, 'V'),
+    (0x1133A, 'X'),
+    (0x1133B, 'V'),
+    (0x11345, 'X'),
+    (0x11347, 'V'),
+    (0x11349, 'X'),
+    (0x1134B, 'V'),
+    (0x1134E, 'X'),
+    (0x11350, 'V'),
+    (0x11351, 'X'),
+    (0x11357, 'V'),
+    (0x11358, 'X'),
+    (0x1135D, 'V'),
+    (0x11364, 'X'),
+    (0x11366, 'V'),
+    (0x1136D, 'X'),
+    (0x11370, 'V'),
+    (0x11375, 'X'),
+    (0x11400, 'V'),
+    (0x1145A, 'X'),
+    (0x1145B, 'V'),
+    (0x1145C, 'X'),
+    (0x1145D, 'V'),
+    ]
+
+def _seg_56():
+    return [
+    (0x1145F, 'X'),
+    (0x11480, 'V'),
+    (0x114C8, 'X'),
+    (0x114D0, 'V'),
+    (0x114DA, 'X'),
+    (0x11580, 'V'),
+    (0x115B6, 'X'),
+    (0x115B8, 'V'),
+    (0x115DE, 'X'),
+    (0x11600, 'V'),
+    (0x11645, 'X'),
+    (0x11650, 'V'),
+    (0x1165A, 'X'),
+    (0x11660, 'V'),
+    (0x1166D, 'X'),
+    (0x11680, 'V'),
+    (0x116B8, 'X'),
+    (0x116C0, 'V'),
+    (0x116CA, 'X'),
+    (0x11700, 'V'),
+    (0x1171B, 'X'),
+    (0x1171D, 'V'),
+    (0x1172C, 'X'),
+    (0x11730, 'V'),
+    (0x11740, 'X'),
+    (0x11800, 'V'),
+    (0x1183C, 'X'),
+    (0x118A0, 'M', u'𑣀'),
+    (0x118A1, 'M', u'𑣁'),
+    (0x118A2, 'M', u'𑣂'),
+    (0x118A3, 'M', u'𑣃'),
+    (0x118A4, 'M', u'𑣄'),
+    (0x118A5, 'M', u'𑣅'),
+    (0x118A6, 'M', u'𑣆'),
+    (0x118A7, 'M', u'𑣇'),
+    (0x118A8, 'M', u'𑣈'),
+    (0x118A9, 'M', u'𑣉'),
+    (0x118AA, 'M', u'𑣊'),
+    (0x118AB, 'M', u'𑣋'),
+    (0x118AC, 'M', u'𑣌'),
+    (0x118AD, 'M', u'𑣍'),
+    (0x118AE, 'M', u'𑣎'),
+    (0x118AF, 'M', u'𑣏'),
+    (0x118B0, 'M', u'𑣐'),
+    (0x118B1, 'M', u'𑣑'),
+    (0x118B2, 'M', u'𑣒'),
+    (0x118B3, 'M', u'𑣓'),
+    (0x118B4, 'M', u'𑣔'),
+    (0x118B5, 'M', u'𑣕'),
+    (0x118B6, 'M', u'𑣖'),
+    (0x118B7, 'M', u'𑣗'),
+    (0x118B8, 'M', u'𑣘'),
+    (0x118B9, 'M', u'𑣙'),
+    (0x118BA, 'M', u'𑣚'),
+    (0x118BB, 'M', u'𑣛'),
+    (0x118BC, 'M', u'𑣜'),
+    (0x118BD, 'M', u'𑣝'),
+    (0x118BE, 'M', u'𑣞'),
+    (0x118BF, 'M', u'𑣟'),
+    (0x118C0, 'V'),
+    (0x118F3, 'X'),
+    (0x118FF, 'V'),
+    (0x11900, 'X'),
+    (0x11A00, 'V'),
+    (0x11A48, 'X'),
+    (0x11A50, 'V'),
+    (0x11A84, 'X'),
+    (0x11A86, 'V'),
+    (0x11AA3, 'X'),
+    (0x11AC0, 'V'),
+    (0x11AF9, 'X'),
+    (0x11C00, 'V'),
+    (0x11C09, 'X'),
+    (0x11C0A, 'V'),
+    (0x11C37, 'X'),
+    (0x11C38, 'V'),
+    (0x11C46, 'X'),
+    (0x11C50, 'V'),
+    (0x11C6D, 'X'),
+    (0x11C70, 'V'),
+    (0x11C90, 'X'),
+    (0x11C92, 'V'),
+    (0x11CA8, 'X'),
+    (0x11CA9, 'V'),
+    (0x11CB7, 'X'),
+    (0x11D00, 'V'),
+    (0x11D07, 'X'),
+    (0x11D08, 'V'),
+    (0x11D0A, 'X'),
+    (0x11D0B, 'V'),
+    (0x11D37, 'X'),
+    (0x11D3A, 'V'),
+    (0x11D3B, 'X'),
+    (0x11D3C, 'V'),
+    (0x11D3E, 'X'),
+    (0x11D3F, 'V'),
+    (0x11D48, 'X'),
+    (0x11D50, 'V'),
+    (0x11D5A, 'X'),
+    (0x11D60, 'V'),
+    ]
+
+def _seg_57():
+    return [
+    (0x11D66, 'X'),
+    (0x11D67, 'V'),
+    (0x11D69, 'X'),
+    (0x11D6A, 'V'),
+    (0x11D8F, 'X'),
+    (0x11D90, 'V'),
+    (0x11D92, 'X'),
+    (0x11D93, 'V'),
+    (0x11D99, 'X'),
+    (0x11DA0, 'V'),
+    (0x11DAA, 'X'),
+    (0x11EE0, 'V'),
+    (0x11EF9, 'X'),
+    (0x12000, 'V'),
+    (0x1239A, 'X'),
+    (0x12400, 'V'),
+    (0x1246F, 'X'),
+    (0x12470, 'V'),
+    (0x12475, 'X'),
+    (0x12480, 'V'),
+    (0x12544, 'X'),
+    (0x13000, 'V'),
+    (0x1342F, 'X'),
+    (0x14400, 'V'),
+    (0x14647, 'X'),
+    (0x16800, 'V'),
+    (0x16A39, 'X'),
+    (0x16A40, 'V'),
+    (0x16A5F, 'X'),
+    (0x16A60, 'V'),
+    (0x16A6A, 'X'),
+    (0x16A6E, 'V'),
+    (0x16A70, 'X'),
+    (0x16AD0, 'V'),
+    (0x16AEE, 'X'),
+    (0x16AF0, 'V'),
+    (0x16AF6, 'X'),
+    (0x16B00, 'V'),
+    (0x16B46, 'X'),
+    (0x16B50, 'V'),
+    (0x16B5A, 'X'),
+    (0x16B5B, 'V'),
+    (0x16B62, 'X'),
+    (0x16B63, 'V'),
+    (0x16B78, 'X'),
+    (0x16B7D, 'V'),
+    (0x16B90, 'X'),
+    (0x16E60, 'V'),
+    (0x16E9B, 'X'),
+    (0x16F00, 'V'),
+    (0x16F45, 'X'),
+    (0x16F50, 'V'),
+    (0x16F7F, 'X'),
+    (0x16F8F, 'V'),
+    (0x16FA0, 'X'),
+    (0x16FE0, 'V'),
+    (0x16FE2, 'X'),
+    (0x17000, 'V'),
+    (0x187F2, 'X'),
+    (0x18800, 'V'),
+    (0x18AF3, 'X'),
+    (0x1B000, 'V'),
+    (0x1B11F, 'X'),
+    (0x1B170, 'V'),
+    (0x1B2FC, 'X'),
+    (0x1BC00, 'V'),
+    (0x1BC6B, 'X'),
+    (0x1BC70, 'V'),
+    (0x1BC7D, 'X'),
+    (0x1BC80, 'V'),
+    (0x1BC89, 'X'),
+    (0x1BC90, 'V'),
+    (0x1BC9A, 'X'),
+    (0x1BC9C, 'V'),
+    (0x1BCA0, 'I'),
+    (0x1BCA4, 'X'),
+    (0x1D000, 'V'),
+    (0x1D0F6, 'X'),
+    (0x1D100, 'V'),
+    (0x1D127, 'X'),
+    (0x1D129, 'V'),
+    (0x1D15E, 'M', u'𝅗𝅥'),
+    (0x1D15F, 'M', u'𝅘𝅥'),
+    (0x1D160, 'M', u'𝅘𝅥𝅮'),
+    (0x1D161, 'M', u'𝅘𝅥𝅯'),
+    (0x1D162, 'M', u'𝅘𝅥𝅰'),
+    (0x1D163, 'M', u'𝅘𝅥𝅱'),
+    (0x1D164, 'M', u'𝅘𝅥𝅲'),
+    (0x1D165, 'V'),
+    (0x1D173, 'X'),
+    (0x1D17B, 'V'),
+    (0x1D1BB, 'M', u'𝆹𝅥'),
+    (0x1D1BC, 'M', u'𝆺𝅥'),
+    (0x1D1BD, 'M', u'𝆹𝅥𝅮'),
+    (0x1D1BE, 'M', u'𝆺𝅥𝅮'),
+    (0x1D1BF, 'M', u'𝆹𝅥𝅯'),
+    (0x1D1C0, 'M', u'𝆺𝅥𝅯'),
+    (0x1D1C1, 'V'),
+    (0x1D1E9, 'X'),
+    (0x1D200, 'V'),
+    ]
+
+def _seg_58():
+    return [
+    (0x1D246, 'X'),
+    (0x1D2E0, 'V'),
+    (0x1D2F4, 'X'),
+    (0x1D300, 'V'),
+    (0x1D357, 'X'),
+    (0x1D360, 'V'),
+    (0x1D379, 'X'),
+    (0x1D400, 'M', u'a'),
+    (0x1D401, 'M', u'b'),
+    (0x1D402, 'M', u'c'),
+    (0x1D403, 'M', u'd'),
+    (0x1D404, 'M', u'e'),
+    (0x1D405, 'M', u'f'),
+    (0x1D406, 'M', u'g'),
+    (0x1D407, 'M', u'h'),
+    (0x1D408, 'M', u'i'),
+    (0x1D409, 'M', u'j'),
+    (0x1D40A, 'M', u'k'),
+    (0x1D40B, 'M', u'l'),
+    (0x1D40C, 'M', u'm'),
+    (0x1D40D, 'M', u'n'),
+    (0x1D40E, 'M', u'o'),
+    (0x1D40F, 'M', u'p'),
+    (0x1D410, 'M', u'q'),
+    (0x1D411, 'M', u'r'),
+    (0x1D412, 'M', u's'),
+    (0x1D413, 'M', u't'),
+    (0x1D414, 'M', u'u'),
+    (0x1D415, 'M', u'v'),
+    (0x1D416, 'M', u'w'),
+    (0x1D417, 'M', u'x'),
+    (0x1D418, 'M', u'y'),
+    (0x1D419, 'M', u'z'),
+    (0x1D41A, 'M', u'a'),
+    (0x1D41B, 'M', u'b'),
+    (0x1D41C, 'M', u'c'),
+    (0x1D41D, 'M', u'd'),
+    (0x1D41E, 'M', u'e'),
+    (0x1D41F, 'M', u'f'),
+    (0x1D420, 'M', u'g'),
+    (0x1D421, 'M', u'h'),
+    (0x1D422, 'M', u'i'),
+    (0x1D423, 'M', u'j'),
+    (0x1D424, 'M', u'k'),
+    (0x1D425, 'M', u'l'),
+    (0x1D426, 'M', u'm'),
+    (0x1D427, 'M', u'n'),
+    (0x1D428, 'M', u'o'),
+    (0x1D429, 'M', u'p'),
+    (0x1D42A, 'M', u'q'),
+    (0x1D42B, 'M', u'r'),
+    (0x1D42C, 'M', u's'),
+    (0x1D42D, 'M', u't'),
+    (0x1D42E, 'M', u'u'),
+    (0x1D42F, 'M', u'v'),
+    (0x1D430, 'M', u'w'),
+    (0x1D431, 'M', u'x'),
+    (0x1D432, 'M', u'y'),
+    (0x1D433, 'M', u'z'),
+    (0x1D434, 'M', u'a'),
+    (0x1D435, 'M', u'b'),
+    (0x1D436, 'M', u'c'),
+    (0x1D437, 'M', u'd'),
+    (0x1D438, 'M', u'e'),
+    (0x1D439, 'M', u'f'),
+    (0x1D43A, 'M', u'g'),
+    (0x1D43B, 'M', u'h'),
+    (0x1D43C, 'M', u'i'),
+    (0x1D43D, 'M', u'j'),
+    (0x1D43E, 'M', u'k'),
+    (0x1D43F, 'M', u'l'),
+    (0x1D440, 'M', u'm'),
+    (0x1D441, 'M', u'n'),
+    (0x1D442, 'M', u'o'),
+    (0x1D443, 'M', u'p'),
+    (0x1D444, 'M', u'q'),
+    (0x1D445, 'M', u'r'),
+    (0x1D446, 'M', u's'),
+    (0x1D447, 'M', u't'),
+    (0x1D448, 'M', u'u'),
+    (0x1D449, 'M', u'v'),
+    (0x1D44A, 'M', u'w'),
+    (0x1D44B, 'M', u'x'),
+    (0x1D44C, 'M', u'y'),
+    (0x1D44D, 'M', u'z'),
+    (0x1D44E, 'M', u'a'),
+    (0x1D44F, 'M', u'b'),
+    (0x1D450, 'M', u'c'),
+    (0x1D451, 'M', u'd'),
+    (0x1D452, 'M', u'e'),
+    (0x1D453, 'M', u'f'),
+    (0x1D454, 'M', u'g'),
+    (0x1D455, 'X'),
+    (0x1D456, 'M', u'i'),
+    (0x1D457, 'M', u'j'),
+    (0x1D458, 'M', u'k'),
+    (0x1D459, 'M', u'l'),
+    (0x1D45A, 'M', u'm'),
+    (0x1D45B, 'M', u'n'),
+    (0x1D45C, 'M', u'o'),
+    ]
+
+def _seg_59():
+    return [
+    (0x1D45D, 'M', u'p'),
+    (0x1D45E, 'M', u'q'),
+    (0x1D45F, 'M', u'r'),
+    (0x1D460, 'M', u's'),
+    (0x1D461, 'M', u't'),
+    (0x1D462, 'M', u'u'),
+    (0x1D463, 'M', u'v'),
+    (0x1D464, 'M', u'w'),
+    (0x1D465, 'M', u'x'),
+    (0x1D466, 'M', u'y'),
+    (0x1D467, 'M', u'z'),
+    (0x1D468, 'M', u'a'),
+    (0x1D469, 'M', u'b'),
+    (0x1D46A, 'M', u'c'),
+    (0x1D46B, 'M', u'd'),
+    (0x1D46C, 'M', u'e'),
+    (0x1D46D, 'M', u'f'),
+    (0x1D46E, 'M', u'g'),
+    (0x1D46F, 'M', u'h'),
+    (0x1D470, 'M', u'i'),
+    (0x1D471, 'M', u'j'),
+    (0x1D472, 'M', u'k'),
+    (0x1D473, 'M', u'l'),
+    (0x1D474, 'M', u'm'),
+    (0x1D475, 'M', u'n'),
+    (0x1D476, 'M', u'o'),
+    (0x1D477, 'M', u'p'),
+    (0x1D478, 'M', u'q'),
+    (0x1D479, 'M', u'r'),
+    (0x1D47A, 'M', u's'),
+    (0x1D47B, 'M', u't'),
+    (0x1D47C, 'M', u'u'),
+    (0x1D47D, 'M', u'v'),
+    (0x1D47E, 'M', u'w'),
+    (0x1D47F, 'M', u'x'),
+    (0x1D480, 'M', u'y'),
+    (0x1D481, 'M', u'z'),
+    (0x1D482, 'M', u'a'),
+    (0x1D483, 'M', u'b'),
+    (0x1D484, 'M', u'c'),
+    (0x1D485, 'M', u'd'),
+    (0x1D486, 'M', u'e'),
+    (0x1D487, 'M', u'f'),
+    (0x1D488, 'M', u'g'),
+    (0x1D489, 'M', u'h'),
+    (0x1D48A, 'M', u'i'),
+    (0x1D48B, 'M', u'j'),
+    (0x1D48C, 'M', u'k'),
+    (0x1D48D, 'M', u'l'),
+    (0x1D48E, 'M', u'm'),
+    (0x1D48F, 'M', u'n'),
+    (0x1D490, 'M', u'o'),
+    (0x1D491, 'M', u'p'),
+    (0x1D492, 'M', u'q'),
+    (0x1D493, 'M', u'r'),
+    (0x1D494, 'M', u's'),
+    (0x1D495, 'M', u't'),
+    (0x1D496, 'M', u'u'),
+    (0x1D497, 'M', u'v'),
+    (0x1D498, 'M', u'w'),
+    (0x1D499, 'M', u'x'),
+    (0x1D49A, 'M', u'y'),
+    (0x1D49B, 'M', u'z'),
+    (0x1D49C, 'M', u'a'),
+    (0x1D49D, 'X'),
+    (0x1D49E, 'M', u'c'),
+    (0x1D49F, 'M', u'd'),
+    (0x1D4A0, 'X'),
+    (0x1D4A2, 'M', u'g'),
+    (0x1D4A3, 'X'),
+    (0x1D4A5, 'M', u'j'),
+    (0x1D4A6, 'M', u'k'),
+    (0x1D4A7, 'X'),
+    (0x1D4A9, 'M', u'n'),
+    (0x1D4AA, 'M', u'o'),
+    (0x1D4AB, 'M', u'p'),
+    (0x1D4AC, 'M', u'q'),
+    (0x1D4AD, 'X'),
+    (0x1D4AE, 'M', u's'),
+    (0x1D4AF, 'M', u't'),
+    (0x1D4B0, 'M', u'u'),
+    (0x1D4B1, 'M', u'v'),
+    (0x1D4B2, 'M', u'w'),
+    (0x1D4B3, 'M', u'x'),
+    (0x1D4B4, 'M', u'y'),
+    (0x1D4B5, 'M', u'z'),
+    (0x1D4B6, 'M', u'a'),
+    (0x1D4B7, 'M', u'b'),
+    (0x1D4B8, 'M', u'c'),
+    (0x1D4B9, 'M', u'd'),
+    (0x1D4BA, 'X'),
+    (0x1D4BB, 'M', u'f'),
+    (0x1D4BC, 'X'),
+    (0x1D4BD, 'M', u'h'),
+    (0x1D4BE, 'M', u'i'),
+    (0x1D4BF, 'M', u'j'),
+    (0x1D4C0, 'M', u'k'),
+    (0x1D4C1, 'M', u'l'),
+    (0x1D4C2, 'M', u'm'),
+    (0x1D4C3, 'M', u'n'),
+    ]
+
+def _seg_60():
+    return [
+    (0x1D4C4, 'X'),
+    (0x1D4C5, 'M', u'p'),
+    (0x1D4C6, 'M', u'q'),
+    (0x1D4C7, 'M', u'r'),
+    (0x1D4C8, 'M', u's'),
+    (0x1D4C9, 'M', u't'),
+    (0x1D4CA, 'M', u'u'),
+    (0x1D4CB, 'M', u'v'),
+    (0x1D4CC, 'M', u'w'),
+    (0x1D4CD, 'M', u'x'),
+    (0x1D4CE, 'M', u'y'),
+    (0x1D4CF, 'M', u'z'),
+    (0x1D4D0, 'M', u'a'),
+    (0x1D4D1, 'M', u'b'),
+    (0x1D4D2, 'M', u'c'),
+    (0x1D4D3, 'M', u'd'),
+    (0x1D4D4, 'M', u'e'),
+    (0x1D4D5, 'M', u'f'),
+    (0x1D4D6, 'M', u'g'),
+    (0x1D4D7, 'M', u'h'),
+    (0x1D4D8, 'M', u'i'),
+    (0x1D4D9, 'M', u'j'),
+    (0x1D4DA, 'M', u'k'),
+    (0x1D4DB, 'M', u'l'),
+    (0x1D4DC, 'M', u'm'),
+    (0x1D4DD, 'M', u'n'),
+    (0x1D4DE, 'M', u'o'),
+    (0x1D4DF, 'M', u'p'),
+    (0x1D4E0, 'M', u'q'),
+    (0x1D4E1, 'M', u'r'),
+    (0x1D4E2, 'M', u's'),
+    (0x1D4E3, 'M', u't'),
+    (0x1D4E4, 'M', u'u'),
+    (0x1D4E5, 'M', u'v'),
+    (0x1D4E6, 'M', u'w'),
+    (0x1D4E7, 'M', u'x'),
+    (0x1D4E8, 'M', u'y'),
+    (0x1D4E9, 'M', u'z'),
+    (0x1D4EA, 'M', u'a'),
+    (0x1D4EB, 'M', u'b'),
+    (0x1D4EC, 'M', u'c'),
+    (0x1D4ED, 'M', u'd'),
+    (0x1D4EE, 'M', u'e'),
+    (0x1D4EF, 'M', u'f'),
+    (0x1D4F0, 'M', u'g'),
+    (0x1D4F1, 'M', u'h'),
+    (0x1D4F2, 'M', u'i'),
+    (0x1D4F3, 'M', u'j'),
+    (0x1D4F4, 'M', u'k'),
+    (0x1D4F5, 'M', u'l'),
+    (0x1D4F6, 'M', u'm'),
+    (0x1D4F7, 'M', u'n'),
+    (0x1D4F8, 'M', u'o'),
+    (0x1D4F9, 'M', u'p'),
+    (0x1D4FA, 'M', u'q'),
+    (0x1D4FB, 'M', u'r'),
+    (0x1D4FC, 'M', u's'),
+    (0x1D4FD, 'M', u't'),
+    (0x1D4FE, 'M', u'u'),
+    (0x1D4FF, 'M', u'v'),
+    (0x1D500, 'M', u'w'),
+    (0x1D501, 'M', u'x'),
+    (0x1D502, 'M', u'y'),
+    (0x1D503, 'M', u'z'),
+    (0x1D504, 'M', u'a'),
+    (0x1D505, 'M', u'b'),
+    (0x1D506, 'X'),
+    (0x1D507, 'M', u'd'),
+    (0x1D508, 'M', u'e'),
+    (0x1D509, 'M', u'f'),
+    (0x1D50A, 'M', u'g'),
+    (0x1D50B, 'X'),
+    (0x1D50D, 'M', u'j'),
+    (0x1D50E, 'M', u'k'),
+    (0x1D50F, 'M', u'l'),
+    (0x1D510, 'M', u'm'),
+    (0x1D511, 'M', u'n'),
+    (0x1D512, 'M', u'o'),
+    (0x1D513, 'M', u'p'),
+    (0x1D514, 'M', u'q'),
+    (0x1D515, 'X'),
+    (0x1D516, 'M', u's'),
+    (0x1D517, 'M', u't'),
+    (0x1D518, 'M', u'u'),
+    (0x1D519, 'M', u'v'),
+    (0x1D51A, 'M', u'w'),
+    (0x1D51B, 'M', u'x'),
+    (0x1D51C, 'M', u'y'),
+    (0x1D51D, 'X'),
+    (0x1D51E, 'M', u'a'),
+    (0x1D51F, 'M', u'b'),
+    (0x1D520, 'M', u'c'),
+    (0x1D521, 'M', u'd'),
+    (0x1D522, 'M', u'e'),
+    (0x1D523, 'M', u'f'),
+    (0x1D524, 'M', u'g'),
+    (0x1D525, 'M', u'h'),
+    (0x1D526, 'M', u'i'),
+    (0x1D527, 'M', u'j'),
+    (0x1D528, 'M', u'k'),
+    ]
+
+def _seg_61():
+    return [
+    (0x1D529, 'M', u'l'),
+    (0x1D52A, 'M', u'm'),
+    (0x1D52B, 'M', u'n'),
+    (0x1D52C, 'M', u'o'),
+    (0x1D52D, 'M', u'p'),
+    (0x1D52E, 'M', u'q'),
+    (0x1D52F, 'M', u'r'),
+    (0x1D530, 'M', u's'),
+    (0x1D531, 'M', u't'),
+    (0x1D532, 'M', u'u'),
+    (0x1D533, 'M', u'v'),
+    (0x1D534, 'M', u'w'),
+    (0x1D535, 'M', u'x'),
+    (0x1D536, 'M', u'y'),
+    (0x1D537, 'M', u'z'),
+    (0x1D538, 'M', u'a'),
+    (0x1D539, 'M', u'b'),
+    (0x1D53A, 'X'),
+    (0x1D53B, 'M', u'd'),
+    (0x1D53C, 'M', u'e'),
+    (0x1D53D, 'M', u'f'),
+    (0x1D53E, 'M', u'g'),
+    (0x1D53F, 'X'),
+    (0x1D540, 'M', u'i'),
+    (0x1D541, 'M', u'j'),
+    (0x1D542, 'M', u'k'),
+    (0x1D543, 'M', u'l'),
+    (0x1D544, 'M', u'm'),
+    (0x1D545, 'X'),
+    (0x1D546, 'M', u'o'),
+    (0x1D547, 'X'),
+    (0x1D54A, 'M', u's'),
+    (0x1D54B, 'M', u't'),
+    (0x1D54C, 'M', u'u'),
+    (0x1D54D, 'M', u'v'),
+    (0x1D54E, 'M', u'w'),
+    (0x1D54F, 'M', u'x'),
+    (0x1D550, 'M', u'y'),
+    (0x1D551, 'X'),
+    (0x1D552, 'M', u'a'),
+    (0x1D553, 'M', u'b'),
+    (0x1D554, 'M', u'c'),
+    (0x1D555, 'M', u'd'),
+    (0x1D556, 'M', u'e'),
+    (0x1D557, 'M', u'f'),
+    (0x1D558, 'M', u'g'),
+    (0x1D559, 'M', u'h'),
+    (0x1D55A, 'M', u'i'),
+    (0x1D55B, 'M', u'j'),
+    (0x1D55C, 'M', u'k'),
+    (0x1D55D, 'M', u'l'),
+    (0x1D55E, 'M', u'm'),
+    (0x1D55F, 'M', u'n'),
+    (0x1D560, 'M', u'o'),
+    (0x1D561, 'M', u'p'),
+    (0x1D562, 'M', u'q'),
+    (0x1D563, 'M', u'r'),
+    (0x1D564, 'M', u's'),
+    (0x1D565, 'M', u't'),
+    (0x1D566, 'M', u'u'),
+    (0x1D567, 'M', u'v'),
+    (0x1D568, 'M', u'w'),
+    (0x1D569, 'M', u'x'),
+    (0x1D56A, 'M', u'y'),
+    (0x1D56B, 'M', u'z'),
+    (0x1D56C, 'M', u'a'),
+    (0x1D56D, 'M', u'b'),
+    (0x1D56E, 'M', u'c'),
+    (0x1D56F, 'M', u'd'),
+    (0x1D570, 'M', u'e'),
+    (0x1D571, 'M', u'f'),
+    (0x1D572, 'M', u'g'),
+    (0x1D573, 'M', u'h'),
+    (0x1D574, 'M', u'i'),
+    (0x1D575, 'M', u'j'),
+    (0x1D576, 'M', u'k'),
+    (0x1D577, 'M', u'l'),
+    (0x1D578, 'M', u'm'),
+    (0x1D579, 'M', u'n'),
+    (0x1D57A, 'M', u'o'),
+    (0x1D57B, 'M', u'p'),
+    (0x1D57C, 'M', u'q'),
+    (0x1D57D, 'M', u'r'),
+    (0x1D57E, 'M', u's'),
+    (0x1D57F, 'M', u't'),
+    (0x1D580, 'M', u'u'),
+    (0x1D581, 'M', u'v'),
+    (0x1D582, 'M', u'w'),
+    (0x1D583, 'M', u'x'),
+    (0x1D584, 'M', u'y'),
+    (0x1D585, 'M', u'z'),
+    (0x1D586, 'M', u'a'),
+    (0x1D587, 'M', u'b'),
+    (0x1D588, 'M', u'c'),
+    (0x1D589, 'M', u'd'),
+    (0x1D58A, 'M', u'e'),
+    (0x1D58B, 'M', u'f'),
+    (0x1D58C, 'M', u'g'),
+    (0x1D58D, 'M', u'h'),
+    (0x1D58E, 'M', u'i'),
+    ]
+
+def _seg_62():
+    return [
+    (0x1D58F, 'M', u'j'),
+    (0x1D590, 'M', u'k'),
+    (0x1D591, 'M', u'l'),
+    (0x1D592, 'M', u'm'),
+    (0x1D593, 'M', u'n'),
+    (0x1D594, 'M', u'o'),
+    (0x1D595, 'M', u'p'),
+    (0x1D596, 'M', u'q'),
+    (0x1D597, 'M', u'r'),
+    (0x1D598, 'M', u's'),
+    (0x1D599, 'M', u't'),
+    (0x1D59A, 'M', u'u'),
+    (0x1D59B, 'M', u'v'),
+    (0x1D59C, 'M', u'w'),
+    (0x1D59D, 'M', u'x'),
+    (0x1D59E, 'M', u'y'),
+    (0x1D59F, 'M', u'z'),
+    (0x1D5A0, 'M', u'a'),
+    (0x1D5A1, 'M', u'b'),
+    (0x1D5A2, 'M', u'c'),
+    (0x1D5A3, 'M', u'd'),
+    (0x1D5A4, 'M', u'e'),
+    (0x1D5A5, 'M', u'f'),
+    (0x1D5A6, 'M', u'g'),
+    (0x1D5A7, 'M', u'h'),
+    (0x1D5A8, 'M', u'i'),
+    (0x1D5A9, 'M', u'j'),
+    (0x1D5AA, 'M', u'k'),
+    (0x1D5AB, 'M', u'l'),
+    (0x1D5AC, 'M', u'm'),
+    (0x1D5AD, 'M', u'n'),
+    (0x1D5AE, 'M', u'o'),
+    (0x1D5AF, 'M', u'p'),
+    (0x1D5B0, 'M', u'q'),
+    (0x1D5B1, 'M', u'r'),
+    (0x1D5B2, 'M', u's'),
+    (0x1D5B3, 'M', u't'),
+    (0x1D5B4, 'M', u'u'),
+    (0x1D5B5, 'M', u'v'),
+    (0x1D5B6, 'M', u'w'),
+    (0x1D5B7, 'M', u'x'),
+    (0x1D5B8, 'M', u'y'),
+    (0x1D5B9, 'M', u'z'),
+    (0x1D5BA, 'M', u'a'),
+    (0x1D5BB, 'M', u'b'),
+    (0x1D5BC, 'M', u'c'),
+    (0x1D5BD, 'M', u'd'),
+    (0x1D5BE, 'M', u'e'),
+    (0x1D5BF, 'M', u'f'),
+    (0x1D5C0, 'M', u'g'),
+    (0x1D5C1, 'M', u'h'),
+    (0x1D5C2, 'M', u'i'),
+    (0x1D5C3, 'M', u'j'),
+    (0x1D5C4, 'M', u'k'),
+    (0x1D5C5, 'M', u'l'),
+    (0x1D5C6, 'M', u'm'),
+    (0x1D5C7, 'M', u'n'),
+    (0x1D5C8, 'M', u'o'),
+    (0x1D5C9, 'M', u'p'),
+    (0x1D5CA, 'M', u'q'),
+    (0x1D5CB, 'M', u'r'),
+    (0x1D5CC, 'M', u's'),
+    (0x1D5CD, 'M', u't'),
+    (0x1D5CE, 'M', u'u'),
+    (0x1D5CF, 'M', u'v'),
+    (0x1D5D0, 'M', u'w'),
+    (0x1D5D1, 'M', u'x'),
+    (0x1D5D2, 'M', u'y'),
+    (0x1D5D3, 'M', u'z'),
+    (0x1D5D4, 'M', u'a'),
+    (0x1D5D5, 'M', u'b'),
+    (0x1D5D6, 'M', u'c'),
+    (0x1D5D7, 'M', u'd'),
+    (0x1D5D8, 'M', u'e'),
+    (0x1D5D9, 'M', u'f'),
+    (0x1D5DA, 'M', u'g'),
+    (0x1D5DB, 'M', u'h'),
+    (0x1D5DC, 'M', u'i'),
+    (0x1D5DD, 'M', u'j'),
+    (0x1D5DE, 'M', u'k'),
+    (0x1D5DF, 'M', u'l'),
+    (0x1D5E0, 'M', u'm'),
+    (0x1D5E1, 'M', u'n'),
+    (0x1D5E2, 'M', u'o'),
+    (0x1D5E3, 'M', u'p'),
+    (0x1D5E4, 'M', u'q'),
+    (0x1D5E5, 'M', u'r'),
+    (0x1D5E6, 'M', u's'),
+    (0x1D5E7, 'M', u't'),
+    (0x1D5E8, 'M', u'u'),
+    (0x1D5E9, 'M', u'v'),
+    (0x1D5EA, 'M', u'w'),
+    (0x1D5EB, 'M', u'x'),
+    (0x1D5EC, 'M', u'y'),
+    (0x1D5ED, 'M', u'z'),
+    (0x1D5EE, 'M', u'a'),
+    (0x1D5EF, 'M', u'b'),
+    (0x1D5F0, 'M', u'c'),
+    (0x1D5F1, 'M', u'd'),
+    (0x1D5F2, 'M', u'e'),
+    ]
+
+def _seg_63():
+    return [
+    (0x1D5F3, 'M', u'f'),
+    (0x1D5F4, 'M', u'g'),
+    (0x1D5F5, 'M', u'h'),
+    (0x1D5F6, 'M', u'i'),
+    (0x1D5F7, 'M', u'j'),
+    (0x1D5F8, 'M', u'k'),
+    (0x1D5F9, 'M', u'l'),
+    (0x1D5FA, 'M', u'm'),
+    (0x1D5FB, 'M', u'n'),
+    (0x1D5FC, 'M', u'o'),
+    (0x1D5FD, 'M', u'p'),
+    (0x1D5FE, 'M', u'q'),
+    (0x1D5FF, 'M', u'r'),
+    (0x1D600, 'M', u's'),
+    (0x1D601, 'M', u't'),
+    (0x1D602, 'M', u'u'),
+    (0x1D603, 'M', u'v'),
+    (0x1D604, 'M', u'w'),
+    (0x1D605, 'M', u'x'),
+    (0x1D606, 'M', u'y'),
+    (0x1D607, 'M', u'z'),
+    (0x1D608, 'M', u'a'),
+    (0x1D609, 'M', u'b'),
+    (0x1D60A, 'M', u'c'),
+    (0x1D60B, 'M', u'd'),
+    (0x1D60C, 'M', u'e'),
+    (0x1D60D, 'M', u'f'),
+    (0x1D60E, 'M', u'g'),
+    (0x1D60F, 'M', u'h'),
+    (0x1D610, 'M', u'i'),
+    (0x1D611, 'M', u'j'),
+    (0x1D612, 'M', u'k'),
+    (0x1D613, 'M', u'l'),
+    (0x1D614, 'M', u'm'),
+    (0x1D615, 'M', u'n'),
+    (0x1D616, 'M', u'o'),
+    (0x1D617, 'M', u'p'),
+    (0x1D618, 'M', u'q'),
+    (0x1D619, 'M', u'r'),
+    (0x1D61A, 'M', u's'),
+    (0x1D61B, 'M', u't'),
+    (0x1D61C, 'M', u'u'),
+    (0x1D61D, 'M', u'v'),
+    (0x1D61E, 'M', u'w'),
+    (0x1D61F, 'M', u'x'),
+    (0x1D620, 'M', u'y'),
+    (0x1D621, 'M', u'z'),
+    (0x1D622, 'M', u'a'),
+    (0x1D623, 'M', u'b'),
+    (0x1D624, 'M', u'c'),
+    (0x1D625, 'M', u'd'),
+    (0x1D626, 'M', u'e'),
+    (0x1D627, 'M', u'f'),
+    (0x1D628, 'M', u'g'),
+    (0x1D629, 'M', u'h'),
+    (0x1D62A, 'M', u'i'),
+    (0x1D62B, 'M', u'j'),
+    (0x1D62C, 'M', u'k'),
+    (0x1D62D, 'M', u'l'),
+    (0x1D62E, 'M', u'm'),
+    (0x1D62F, 'M', u'n'),
+    (0x1D630, 'M', u'o'),
+    (0x1D631, 'M', u'p'),
+    (0x1D632, 'M', u'q'),
+    (0x1D633, 'M', u'r'),
+    (0x1D634, 'M', u's'),
+    (0x1D635, 'M', u't'),
+    (0x1D636, 'M', u'u'),
+    (0x1D637, 'M', u'v'),
+    (0x1D638, 'M', u'w'),
+    (0x1D639, 'M', u'x'),
+    (0x1D63A, 'M', u'y'),
+    (0x1D63B, 'M', u'z'),
+    (0x1D63C, 'M', u'a'),
+    (0x1D63D, 'M', u'b'),
+    (0x1D63E, 'M', u'c'),
+    (0x1D63F, 'M', u'd'),
+    (0x1D640, 'M', u'e'),
+    (0x1D641, 'M', u'f'),
+    (0x1D642, 'M', u'g'),
+    (0x1D643, 'M', u'h'),
+    (0x1D644, 'M', u'i'),
+    (0x1D645, 'M', u'j'),
+    (0x1D646, 'M', u'k'),
+    (0x1D647, 'M', u'l'),
+    (0x1D648, 'M', u'm'),
+    (0x1D649, 'M', u'n'),
+    (0x1D64A, 'M', u'o'),
+    (0x1D64B, 'M', u'p'),
+    (0x1D64C, 'M', u'q'),
+    (0x1D64D, 'M', u'r'),
+    (0x1D64E, 'M', u's'),
+    (0x1D64F, 'M', u't'),
+    (0x1D650, 'M', u'u'),
+    (0x1D651, 'M', u'v'),
+    (0x1D652, 'M', u'w'),
+    (0x1D653, 'M', u'x'),
+    (0x1D654, 'M', u'y'),
+    (0x1D655, 'M', u'z'),
+    (0x1D656, 'M', u'a'),
+    ]
+
+def _seg_64():
+    return [
+    (0x1D657, 'M', u'b'),
+    (0x1D658, 'M', u'c'),
+    (0x1D659, 'M', u'd'),
+    (0x1D65A, 'M', u'e'),
+    (0x1D65B, 'M', u'f'),
+    (0x1D65C, 'M', u'g'),
+    (0x1D65D, 'M', u'h'),
+    (0x1D65E, 'M', u'i'),
+    (0x1D65F, 'M', u'j'),
+    (0x1D660, 'M', u'k'),
+    (0x1D661, 'M', u'l'),
+    (0x1D662, 'M', u'm'),
+    (0x1D663, 'M', u'n'),
+    (0x1D664, 'M', u'o'),
+    (0x1D665, 'M', u'p'),
+    (0x1D666, 'M', u'q'),
+    (0x1D667, 'M', u'r'),
+    (0x1D668, 'M', u's'),
+    (0x1D669, 'M', u't'),
+    (0x1D66A, 'M', u'u'),
+    (0x1D66B, 'M', u'v'),
+    (0x1D66C, 'M', u'w'),
+    (0x1D66D, 'M', u'x'),
+    (0x1D66E, 'M', u'y'),
+    (0x1D66F, 'M', u'z'),
+    (0x1D670, 'M', u'a'),
+    (0x1D671, 'M', u'b'),
+    (0x1D672, 'M', u'c'),
+    (0x1D673, 'M', u'd'),
+    (0x1D674, 'M', u'e'),
+    (0x1D675, 'M', u'f'),
+    (0x1D676, 'M', u'g'),
+    (0x1D677, 'M', u'h'),
+    (0x1D678, 'M', u'i'),
+    (0x1D679, 'M', u'j'),
+    (0x1D67A, 'M', u'k'),
+    (0x1D67B, 'M', u'l'),
+    (0x1D67C, 'M', u'm'),
+    (0x1D67D, 'M', u'n'),
+    (0x1D67E, 'M', u'o'),
+    (0x1D67F, 'M', u'p'),
+    (0x1D680, 'M', u'q'),
+    (0x1D681, 'M', u'r'),
+    (0x1D682, 'M', u's'),
+    (0x1D683, 'M', u't'),
+    (0x1D684, 'M', u'u'),
+    (0x1D685, 'M', u'v'),
+    (0x1D686, 'M', u'w'),
+    (0x1D687, 'M', u'x'),
+    (0x1D688, 'M', u'y'),
+    (0x1D689, 'M', u'z'),
+    (0x1D68A, 'M', u'a'),
+    (0x1D68B, 'M', u'b'),
+    (0x1D68C, 'M', u'c'),
+    (0x1D68D, 'M', u'd'),
+    (0x1D68E, 'M', u'e'),
+    (0x1D68F, 'M', u'f'),
+    (0x1D690, 'M', u'g'),
+    (0x1D691, 'M', u'h'),
+    (0x1D692, 'M', u'i'),
+    (0x1D693, 'M', u'j'),
+    (0x1D694, 'M', u'k'),
+    (0x1D695, 'M', u'l'),
+    (0x1D696, 'M', u'm'),
+    (0x1D697, 'M', u'n'),
+    (0x1D698, 'M', u'o'),
+    (0x1D699, 'M', u'p'),
+    (0x1D69A, 'M', u'q'),
+    (0x1D69B, 'M', u'r'),
+    (0x1D69C, 'M', u's'),
+    (0x1D69D, 'M', u't'),
+    (0x1D69E, 'M', u'u'),
+    (0x1D69F, 'M', u'v'),
+    (0x1D6A0, 'M', u'w'),
+    (0x1D6A1, 'M', u'x'),
+    (0x1D6A2, 'M', u'y'),
+    (0x1D6A3, 'M', u'z'),
+    (0x1D6A4, 'M', u'ı'),
+    (0x1D6A5, 'M', u'ȷ'),
+    (0x1D6A6, 'X'),
+    (0x1D6A8, 'M', u'α'),
+    (0x1D6A9, 'M', u'β'),
+    (0x1D6AA, 'M', u'γ'),
+    (0x1D6AB, 'M', u'δ'),
+    (0x1D6AC, 'M', u'ε'),
+    (0x1D6AD, 'M', u'ζ'),
+    (0x1D6AE, 'M', u'η'),
+    (0x1D6AF, 'M', u'θ'),
+    (0x1D6B0, 'M', u'ι'),
+    (0x1D6B1, 'M', u'κ'),
+    (0x1D6B2, 'M', u'λ'),
+    (0x1D6B3, 'M', u'μ'),
+    (0x1D6B4, 'M', u'ν'),
+    (0x1D6B5, 'M', u'ξ'),
+    (0x1D6B6, 'M', u'ο'),
+    (0x1D6B7, 'M', u'π'),
+    (0x1D6B8, 'M', u'ρ'),
+    (0x1D6B9, 'M', u'θ'),
+    (0x1D6BA, 'M', u'σ'),
+    (0x1D6BB, 'M', u'τ'),
+    ]
+
+def _seg_65():
+    return [
+    (0x1D6BC, 'M', u'υ'),
+    (0x1D6BD, 'M', u'φ'),
+    (0x1D6BE, 'M', u'χ'),
+    (0x1D6BF, 'M', u'ψ'),
+    (0x1D6C0, 'M', u'ω'),
+    (0x1D6C1, 'M', u'∇'),
+    (0x1D6C2, 'M', u'α'),
+    (0x1D6C3, 'M', u'β'),
+    (0x1D6C4, 'M', u'γ'),
+    (0x1D6C5, 'M', u'δ'),
+    (0x1D6C6, 'M', u'ε'),
+    (0x1D6C7, 'M', u'ζ'),
+    (0x1D6C8, 'M', u'η'),
+    (0x1D6C9, 'M', u'θ'),
+    (0x1D6CA, 'M', u'ι'),
+    (0x1D6CB, 'M', u'κ'),
+    (0x1D6CC, 'M', u'λ'),
+    (0x1D6CD, 'M', u'μ'),
+    (0x1D6CE, 'M', u'ν'),
+    (0x1D6CF, 'M', u'ξ'),
+    (0x1D6D0, 'M', u'ο'),
+    (0x1D6D1, 'M', u'π'),
+    (0x1D6D2, 'M', u'ρ'),
+    (0x1D6D3, 'M', u'σ'),
+    (0x1D6D5, 'M', u'τ'),
+    (0x1D6D6, 'M', u'υ'),
+    (0x1D6D7, 'M', u'φ'),
+    (0x1D6D8, 'M', u'χ'),
+    (0x1D6D9, 'M', u'ψ'),
+    (0x1D6DA, 'M', u'ω'),
+    (0x1D6DB, 'M', u'∂'),
+    (0x1D6DC, 'M', u'ε'),
+    (0x1D6DD, 'M', u'θ'),
+    (0x1D6DE, 'M', u'κ'),
+    (0x1D6DF, 'M', u'φ'),
+    (0x1D6E0, 'M', u'ρ'),
+    (0x1D6E1, 'M', u'π'),
+    (0x1D6E2, 'M', u'α'),
+    (0x1D6E3, 'M', u'β'),
+    (0x1D6E4, 'M', u'γ'),
+    (0x1D6E5, 'M', u'δ'),
+    (0x1D6E6, 'M', u'ε'),
+    (0x1D6E7, 'M', u'ζ'),
+    (0x1D6E8, 'M', u'η'),
+    (0x1D6E9, 'M', u'θ'),
+    (0x1D6EA, 'M', u'ι'),
+    (0x1D6EB, 'M', u'κ'),
+    (0x1D6EC, 'M', u'λ'),
+    (0x1D6ED, 'M', u'μ'),
+    (0x1D6EE, 'M', u'ν'),
+    (0x1D6EF, 'M', u'ξ'),
+    (0x1D6F0, 'M', u'ο'),
+    (0x1D6F1, 'M', u'π'),
+    (0x1D6F2, 'M', u'ρ'),
+    (0x1D6F3, 'M', u'θ'),
+    (0x1D6F4, 'M', u'σ'),
+    (0x1D6F5, 'M', u'τ'),
+    (0x1D6F6, 'M', u'υ'),
+    (0x1D6F7, 'M', u'φ'),
+    (0x1D6F8, 'M', u'χ'),
+    (0x1D6F9, 'M', u'ψ'),
+    (0x1D6FA, 'M', u'ω'),
+    (0x1D6FB, 'M', u'∇'),
+    (0x1D6FC, 'M', u'α'),
+    (0x1D6FD, 'M', u'β'),
+    (0x1D6FE, 'M', u'γ'),
+    (0x1D6FF, 'M', u'δ'),
+    (0x1D700, 'M', u'ε'),
+    (0x1D701, 'M', u'ζ'),
+    (0x1D702, 'M', u'η'),
+    (0x1D703, 'M', u'θ'),
+    (0x1D704, 'M', u'ι'),
+    (0x1D705, 'M', u'κ'),
+    (0x1D706, 'M', u'λ'),
+    (0x1D707, 'M', u'μ'),
+    (0x1D708, 'M', u'ν'),
+    (0x1D709, 'M', u'ξ'),
+    (0x1D70A, 'M', u'ο'),
+    (0x1D70B, 'M', u'π'),
+    (0x1D70C, 'M', u'ρ'),
+    (0x1D70D, 'M', u'σ'),
+    (0x1D70F, 'M', u'τ'),
+    (0x1D710, 'M', u'υ'),
+    (0x1D711, 'M', u'φ'),
+    (0x1D712, 'M', u'χ'),
+    (0x1D713, 'M', u'ψ'),
+    (0x1D714, 'M', u'ω'),
+    (0x1D715, 'M', u'∂'),
+    (0x1D716, 'M', u'ε'),
+    (0x1D717, 'M', u'θ'),
+    (0x1D718, 'M', u'κ'),
+    (0x1D719, 'M', u'φ'),
+    (0x1D71A, 'M', u'ρ'),
+    (0x1D71B, 'M', u'π'),
+    (0x1D71C, 'M', u'α'),
+    (0x1D71D, 'M', u'β'),
+    (0x1D71E, 'M', u'γ'),
+    (0x1D71F, 'M', u'δ'),
+    (0x1D720, 'M', u'ε'),
+    (0x1D721, 'M', u'ζ'),
+    ]
+
+def _seg_66():
+    return [
+    (0x1D722, 'M', u'η'),
+    (0x1D723, 'M', u'θ'),
+    (0x1D724, 'M', u'ι'),
+    (0x1D725, 'M', u'κ'),
+    (0x1D726, 'M', u'λ'),
+    (0x1D727, 'M', u'μ'),
+    (0x1D728, 'M', u'ν'),
+    (0x1D729, 'M', u'ξ'),
+    (0x1D72A, 'M', u'ο'),
+    (0x1D72B, 'M', u'π'),
+    (0x1D72C, 'M', u'ρ'),
+    (0x1D72D, 'M', u'θ'),
+    (0x1D72E, 'M', u'σ'),
+    (0x1D72F, 'M', u'τ'),
+    (0x1D730, 'M', u'υ'),
+    (0x1D731, 'M', u'φ'),
+    (0x1D732, 'M', u'χ'),
+    (0x1D733, 'M', u'ψ'),
+    (0x1D734, 'M', u'ω'),
+    (0x1D735, 'M', u'∇'),
+    (0x1D736, 'M', u'α'),
+    (0x1D737, 'M', u'β'),
+    (0x1D738, 'M', u'γ'),
+    (0x1D739, 'M', u'δ'),
+    (0x1D73A, 'M', u'ε'),
+    (0x1D73B, 'M', u'ζ'),
+    (0x1D73C, 'M', u'η'),
+    (0x1D73D, 'M', u'θ'),
+    (0x1D73E, 'M', u'ι'),
+    (0x1D73F, 'M', u'κ'),
+    (0x1D740, 'M', u'λ'),
+    (0x1D741, 'M', u'μ'),
+    (0x1D742, 'M', u'ν'),
+    (0x1D743, 'M', u'ξ'),
+    (0x1D744, 'M', u'ο'),
+    (0x1D745, 'M', u'π'),
+    (0x1D746, 'M', u'ρ'),
+    (0x1D747, 'M', u'σ'),
+    (0x1D749, 'M', u'τ'),
+    (0x1D74A, 'M', u'υ'),
+    (0x1D74B, 'M', u'φ'),
+    (0x1D74C, 'M', u'χ'),
+    (0x1D74D, 'M', u'ψ'),
+    (0x1D74E, 'M', u'ω'),
+    (0x1D74F, 'M', u'∂'),
+    (0x1D750, 'M', u'ε'),
+    (0x1D751, 'M', u'θ'),
+    (0x1D752, 'M', u'κ'),
+    (0x1D753, 'M', u'φ'),
+    (0x1D754, 'M', u'ρ'),
+    (0x1D755, 'M', u'π'),
+    (0x1D756, 'M', u'α'),
+    (0x1D757, 'M', u'β'),
+    (0x1D758, 'M', u'γ'),
+    (0x1D759, 'M', u'δ'),
+    (0x1D75A, 'M', u'ε'),
+    (0x1D75B, 'M', u'ζ'),
+    (0x1D75C, 'M', u'η'),
+    (0x1D75D, 'M', u'θ'),
+    (0x1D75E, 'M', u'ι'),
+    (0x1D75F, 'M', u'κ'),
+    (0x1D760, 'M', u'λ'),
+    (0x1D761, 'M', u'μ'),
+    (0x1D762, 'M', u'ν'),
+    (0x1D763, 'M', u'ξ'),
+    (0x1D764, 'M', u'ο'),
+    (0x1D765, 'M', u'π'),
+    (0x1D766, 'M', u'ρ'),
+    (0x1D767, 'M', u'θ'),
+    (0x1D768, 'M', u'σ'),
+    (0x1D769, 'M', u'τ'),
+    (0x1D76A, 'M', u'υ'),
+    (0x1D76B, 'M', u'φ'),
+    (0x1D76C, 'M', u'χ'),
+    (0x1D76D, 'M', u'ψ'),
+    (0x1D76E, 'M', u'ω'),
+    (0x1D76F, 'M', u'∇'),
+    (0x1D770, 'M', u'α'),
+    (0x1D771, 'M', u'β'),
+    (0x1D772, 'M', u'γ'),
+    (0x1D773, 'M', u'δ'),
+    (0x1D774, 'M', u'ε'),
+    (0x1D775, 'M', u'ζ'),
+    (0x1D776, 'M', u'η'),
+    (0x1D777, 'M', u'θ'),
+    (0x1D778, 'M', u'ι'),
+    (0x1D779, 'M', u'κ'),
+    (0x1D77A, 'M', u'λ'),
+    (0x1D77B, 'M', u'μ'),
+    (0x1D77C, 'M', u'ν'),
+    (0x1D77D, 'M', u'ξ'),
+    (0x1D77E, 'M', u'ο'),
+    (0x1D77F, 'M', u'π'),
+    (0x1D780, 'M', u'ρ'),
+    (0x1D781, 'M', u'σ'),
+    (0x1D783, 'M', u'τ'),
+    (0x1D784, 'M', u'υ'),
+    (0x1D785, 'M', u'φ'),
+    (0x1D786, 'M', u'χ'),
+    (0x1D787, 'M', u'ψ'),
+    ]
+
+def _seg_67():
+    return [
+    (0x1D788, 'M', u'ω'),
+    (0x1D789, 'M', u'∂'),
+    (0x1D78A, 'M', u'ε'),
+    (0x1D78B, 'M', u'θ'),
+    (0x1D78C, 'M', u'κ'),
+    (0x1D78D, 'M', u'φ'),
+    (0x1D78E, 'M', u'ρ'),
+    (0x1D78F, 'M', u'π'),
+    (0x1D790, 'M', u'α'),
+    (0x1D791, 'M', u'β'),
+    (0x1D792, 'M', u'γ'),
+    (0x1D793, 'M', u'δ'),
+    (0x1D794, 'M', u'ε'),
+    (0x1D795, 'M', u'ζ'),
+    (0x1D796, 'M', u'η'),
+    (0x1D797, 'M', u'θ'),
+    (0x1D798, 'M', u'ι'),
+    (0x1D799, 'M', u'κ'),
+    (0x1D79A, 'M', u'λ'),
+    (0x1D79B, 'M', u'μ'),
+    (0x1D79C, 'M', u'ν'),
+    (0x1D79D, 'M', u'ξ'),
+    (0x1D79E, 'M', u'ο'),
+    (0x1D79F, 'M', u'π'),
+    (0x1D7A0, 'M', u'ρ'),
+    (0x1D7A1, 'M', u'θ'),
+    (0x1D7A2, 'M', u'σ'),
+    (0x1D7A3, 'M', u'τ'),
+    (0x1D7A4, 'M', u'υ'),
+    (0x1D7A5, 'M', u'φ'),
+    (0x1D7A6, 'M', u'χ'),
+    (0x1D7A7, 'M', u'ψ'),
+    (0x1D7A8, 'M', u'ω'),
+    (0x1D7A9, 'M', u'∇'),
+    (0x1D7AA, 'M', u'α'),
+    (0x1D7AB, 'M', u'β'),
+    (0x1D7AC, 'M', u'γ'),
+    (0x1D7AD, 'M', u'δ'),
+    (0x1D7AE, 'M', u'ε'),
+    (0x1D7AF, 'M', u'ζ'),
+    (0x1D7B0, 'M', u'η'),
+    (0x1D7B1, 'M', u'θ'),
+    (0x1D7B2, 'M', u'ι'),
+    (0x1D7B3, 'M', u'κ'),
+    (0x1D7B4, 'M', u'λ'),
+    (0x1D7B5, 'M', u'μ'),
+    (0x1D7B6, 'M', u'ν'),
+    (0x1D7B7, 'M', u'ξ'),
+    (0x1D7B8, 'M', u'ο'),
+    (0x1D7B9, 'M', u'π'),
+    (0x1D7BA, 'M', u'ρ'),
+    (0x1D7BB, 'M', u'σ'),
+    (0x1D7BD, 'M', u'τ'),
+    (0x1D7BE, 'M', u'υ'),
+    (0x1D7BF, 'M', u'φ'),
+    (0x1D7C0, 'M', u'χ'),
+    (0x1D7C1, 'M', u'ψ'),
+    (0x1D7C2, 'M', u'ω'),
+    (0x1D7C3, 'M', u'∂'),
+    (0x1D7C4, 'M', u'ε'),
+    (0x1D7C5, 'M', u'θ'),
+    (0x1D7C6, 'M', u'κ'),
+    (0x1D7C7, 'M', u'φ'),
+    (0x1D7C8, 'M', u'ρ'),
+    (0x1D7C9, 'M', u'π'),
+    (0x1D7CA, 'M', u'ϝ'),
+    (0x1D7CC, 'X'),
+    (0x1D7CE, 'M', u'0'),
+    (0x1D7CF, 'M', u'1'),
+    (0x1D7D0, 'M', u'2'),
+    (0x1D7D1, 'M', u'3'),
+    (0x1D7D2, 'M', u'4'),
+    (0x1D7D3, 'M', u'5'),
+    (0x1D7D4, 'M', u'6'),
+    (0x1D7D5, 'M', u'7'),
+    (0x1D7D6, 'M', u'8'),
+    (0x1D7D7, 'M', u'9'),
+    (0x1D7D8, 'M', u'0'),
+    (0x1D7D9, 'M', u'1'),
+    (0x1D7DA, 'M', u'2'),
+    (0x1D7DB, 'M', u'3'),
+    (0x1D7DC, 'M', u'4'),
+    (0x1D7DD, 'M', u'5'),
+    (0x1D7DE, 'M', u'6'),
+    (0x1D7DF, 'M', u'7'),
+    (0x1D7E0, 'M', u'8'),
+    (0x1D7E1, 'M', u'9'),
+    (0x1D7E2, 'M', u'0'),
+    (0x1D7E3, 'M', u'1'),
+    (0x1D7E4, 'M', u'2'),
+    (0x1D7E5, 'M', u'3'),
+    (0x1D7E6, 'M', u'4'),
+    (0x1D7E7, 'M', u'5'),
+    (0x1D7E8, 'M', u'6'),
+    (0x1D7E9, 'M', u'7'),
+    (0x1D7EA, 'M', u'8'),
+    (0x1D7EB, 'M', u'9'),
+    (0x1D7EC, 'M', u'0'),
+    (0x1D7ED, 'M', u'1'),
+    (0x1D7EE, 'M', u'2'),
+    ]
+
+def _seg_68():
+    return [
+    (0x1D7EF, 'M', u'3'),
+    (0x1D7F0, 'M', u'4'),
+    (0x1D7F1, 'M', u'5'),
+    (0x1D7F2, 'M', u'6'),
+    (0x1D7F3, 'M', u'7'),
+    (0x1D7F4, 'M', u'8'),
+    (0x1D7F5, 'M', u'9'),
+    (0x1D7F6, 'M', u'0'),
+    (0x1D7F7, 'M', u'1'),
+    (0x1D7F8, 'M', u'2'),
+    (0x1D7F9, 'M', u'3'),
+    (0x1D7FA, 'M', u'4'),
+    (0x1D7FB, 'M', u'5'),
+    (0x1D7FC, 'M', u'6'),
+    (0x1D7FD, 'M', u'7'),
+    (0x1D7FE, 'M', u'8'),
+    (0x1D7FF, 'M', u'9'),
+    (0x1D800, 'V'),
+    (0x1DA8C, 'X'),
+    (0x1DA9B, 'V'),
+    (0x1DAA0, 'X'),
+    (0x1DAA1, 'V'),
+    (0x1DAB0, 'X'),
+    (0x1E000, 'V'),
+    (0x1E007, 'X'),
+    (0x1E008, 'V'),
+    (0x1E019, 'X'),
+    (0x1E01B, 'V'),
+    (0x1E022, 'X'),
+    (0x1E023, 'V'),
+    (0x1E025, 'X'),
+    (0x1E026, 'V'),
+    (0x1E02B, 'X'),
+    (0x1E800, 'V'),
+    (0x1E8C5, 'X'),
+    (0x1E8C7, 'V'),
+    (0x1E8D7, 'X'),
+    (0x1E900, 'M', u'𞤢'),
+    (0x1E901, 'M', u'𞤣'),
+    (0x1E902, 'M', u'𞤤'),
+    (0x1E903, 'M', u'𞤥'),
+    (0x1E904, 'M', u'𞤦'),
+    (0x1E905, 'M', u'𞤧'),
+    (0x1E906, 'M', u'𞤨'),
+    (0x1E907, 'M', u'𞤩'),
+    (0x1E908, 'M', u'𞤪'),
+    (0x1E909, 'M', u'𞤫'),
+    (0x1E90A, 'M', u'𞤬'),
+    (0x1E90B, 'M', u'𞤭'),
+    (0x1E90C, 'M', u'𞤮'),
+    (0x1E90D, 'M', u'𞤯'),
+    (0x1E90E, 'M', u'𞤰'),
+    (0x1E90F, 'M', u'𞤱'),
+    (0x1E910, 'M', u'𞤲'),
+    (0x1E911, 'M', u'𞤳'),
+    (0x1E912, 'M', u'𞤴'),
+    (0x1E913, 'M', u'𞤵'),
+    (0x1E914, 'M', u'𞤶'),
+    (0x1E915, 'M', u'𞤷'),
+    (0x1E916, 'M', u'𞤸'),
+    (0x1E917, 'M', u'𞤹'),
+    (0x1E918, 'M', u'𞤺'),
+    (0x1E919, 'M', u'𞤻'),
+    (0x1E91A, 'M', u'𞤼'),
+    (0x1E91B, 'M', u'𞤽'),
+    (0x1E91C, 'M', u'𞤾'),
+    (0x1E91D, 'M', u'𞤿'),
+    (0x1E91E, 'M', u'𞥀'),
+    (0x1E91F, 'M', u'𞥁'),
+    (0x1E920, 'M', u'𞥂'),
+    (0x1E921, 'M', u'𞥃'),
+    (0x1E922, 'V'),
+    (0x1E94B, 'X'),
+    (0x1E950, 'V'),
+    (0x1E95A, 'X'),
+    (0x1E95E, 'V'),
+    (0x1E960, 'X'),
+    (0x1EC71, 'V'),
+    (0x1ECB5, 'X'),
+    (0x1EE00, 'M', u'ا'),
+    (0x1EE01, 'M', u'ب'),
+    (0x1EE02, 'M', u'ج'),
+    (0x1EE03, 'M', u'د'),
+    (0x1EE04, 'X'),
+    (0x1EE05, 'M', u'و'),
+    (0x1EE06, 'M', u'ز'),
+    (0x1EE07, 'M', u'ح'),
+    (0x1EE08, 'M', u'ط'),
+    (0x1EE09, 'M', u'ي'),
+    (0x1EE0A, 'M', u'ك'),
+    (0x1EE0B, 'M', u'ل'),
+    (0x1EE0C, 'M', u'م'),
+    (0x1EE0D, 'M', u'ن'),
+    (0x1EE0E, 'M', u'س'),
+    (0x1EE0F, 'M', u'ع'),
+    (0x1EE10, 'M', u'ف'),
+    (0x1EE11, 'M', u'ص'),
+    (0x1EE12, 'M', u'ق'),
+    (0x1EE13, 'M', u'ر'),
+    (0x1EE14, 'M', u'ش'),
+    ]
+
+def _seg_69():
+    return [
+    (0x1EE15, 'M', u'ت'),
+    (0x1EE16, 'M', u'ث'),
+    (0x1EE17, 'M', u'خ'),
+    (0x1EE18, 'M', u'ذ'),
+    (0x1EE19, 'M', u'ض'),
+    (0x1EE1A, 'M', u'ظ'),
+    (0x1EE1B, 'M', u'غ'),
+    (0x1EE1C, 'M', u'ٮ'),
+    (0x1EE1D, 'M', u'ں'),
+    (0x1EE1E, 'M', u'ڡ'),
+    (0x1EE1F, 'M', u'ٯ'),
+    (0x1EE20, 'X'),
+    (0x1EE21, 'M', u'ب'),
+    (0x1EE22, 'M', u'ج'),
+    (0x1EE23, 'X'),
+    (0x1EE24, 'M', u'ه'),
+    (0x1EE25, 'X'),
+    (0x1EE27, 'M', u'ح'),
+    (0x1EE28, 'X'),
+    (0x1EE29, 'M', u'ي'),
+    (0x1EE2A, 'M', u'ك'),
+    (0x1EE2B, 'M', u'ل'),
+    (0x1EE2C, 'M', u'م'),
+    (0x1EE2D, 'M', u'ن'),
+    (0x1EE2E, 'M', u'س'),
+    (0x1EE2F, 'M', u'ع'),
+    (0x1EE30, 'M', u'ف'),
+    (0x1EE31, 'M', u'ص'),
+    (0x1EE32, 'M', u'ق'),
+    (0x1EE33, 'X'),
+    (0x1EE34, 'M', u'ش'),
+    (0x1EE35, 'M', u'ت'),
+    (0x1EE36, 'M', u'ث'),
+    (0x1EE37, 'M', u'خ'),
+    (0x1EE38, 'X'),
+    (0x1EE39, 'M', u'ض'),
+    (0x1EE3A, 'X'),
+    (0x1EE3B, 'M', u'غ'),
+    (0x1EE3C, 'X'),
+    (0x1EE42, 'M', u'ج'),
+    (0x1EE43, 'X'),
+    (0x1EE47, 'M', u'ح'),
+    (0x1EE48, 'X'),
+    (0x1EE49, 'M', u'ي'),
+    (0x1EE4A, 'X'),
+    (0x1EE4B, 'M', u'ل'),
+    (0x1EE4C, 'X'),
+    (0x1EE4D, 'M', u'ن'),
+    (0x1EE4E, 'M', u'س'),
+    (0x1EE4F, 'M', u'ع'),
+    (0x1EE50, 'X'),
+    (0x1EE51, 'M', u'ص'),
+    (0x1EE52, 'M', u'ق'),
+    (0x1EE53, 'X'),
+    (0x1EE54, 'M', u'ش'),
+    (0x1EE55, 'X'),
+    (0x1EE57, 'M', u'خ'),
+    (0x1EE58, 'X'),
+    (0x1EE59, 'M', u'ض'),
+    (0x1EE5A, 'X'),
+    (0x1EE5B, 'M', u'غ'),
+    (0x1EE5C, 'X'),
+    (0x1EE5D, 'M', u'ں'),
+    (0x1EE5E, 'X'),
+    (0x1EE5F, 'M', u'ٯ'),
+    (0x1EE60, 'X'),
+    (0x1EE61, 'M', u'ب'),
+    (0x1EE62, 'M', u'ج'),
+    (0x1EE63, 'X'),
+    (0x1EE64, 'M', u'ه'),
+    (0x1EE65, 'X'),
+    (0x1EE67, 'M', u'ح'),
+    (0x1EE68, 'M', u'ط'),
+    (0x1EE69, 'M', u'ي'),
+    (0x1EE6A, 'M', u'ك'),
+    (0x1EE6B, 'X'),
+    (0x1EE6C, 'M', u'م'),
+    (0x1EE6D, 'M', u'ن'),
+    (0x1EE6E, 'M', u'س'),
+    (0x1EE6F, 'M', u'ع'),
+    (0x1EE70, 'M', u'ف'),
+    (0x1EE71, 'M', u'ص'),
+    (0x1EE72, 'M', u'ق'),
+    (0x1EE73, 'X'),
+    (0x1EE74, 'M', u'ش'),
+    (0x1EE75, 'M', u'ت'),
+    (0x1EE76, 'M', u'ث'),
+    (0x1EE77, 'M', u'خ'),
+    (0x1EE78, 'X'),
+    (0x1EE79, 'M', u'ض'),
+    (0x1EE7A, 'M', u'ظ'),
+    (0x1EE7B, 'M', u'غ'),
+    (0x1EE7C, 'M', u'ٮ'),
+    (0x1EE7D, 'X'),
+    (0x1EE7E, 'M', u'ڡ'),
+    (0x1EE7F, 'X'),
+    (0x1EE80, 'M', u'ا'),
+    (0x1EE81, 'M', u'ب'),
+    (0x1EE82, 'M', u'ج'),
+    (0x1EE83, 'M', u'د'),
+    ]
+
+def _seg_70():
+    return [
+    (0x1EE84, 'M', u'ه'),
+    (0x1EE85, 'M', u'و'),
+    (0x1EE86, 'M', u'ز'),
+    (0x1EE87, 'M', u'ح'),
+    (0x1EE88, 'M', u'ط'),
+    (0x1EE89, 'M', u'ي'),
+    (0x1EE8A, 'X'),
+    (0x1EE8B, 'M', u'ل'),
+    (0x1EE8C, 'M', u'م'),
+    (0x1EE8D, 'M', u'ن'),
+    (0x1EE8E, 'M', u'س'),
+    (0x1EE8F, 'M', u'ع'),
+    (0x1EE90, 'M', u'ف'),
+    (0x1EE91, 'M', u'ص'),
+    (0x1EE92, 'M', u'ق'),
+    (0x1EE93, 'M', u'ر'),
+    (0x1EE94, 'M', u'ش'),
+    (0x1EE95, 'M', u'ت'),
+    (0x1EE96, 'M', u'ث'),
+    (0x1EE97, 'M', u'خ'),
+    (0x1EE98, 'M', u'ذ'),
+    (0x1EE99, 'M', u'ض'),
+    (0x1EE9A, 'M', u'ظ'),
+    (0x1EE9B, 'M', u'غ'),
+    (0x1EE9C, 'X'),
+    (0x1EEA1, 'M', u'ب'),
+    (0x1EEA2, 'M', u'ج'),
+    (0x1EEA3, 'M', u'د'),
+    (0x1EEA4, 'X'),
+    (0x1EEA5, 'M', u'و'),
+    (0x1EEA6, 'M', u'ز'),
+    (0x1EEA7, 'M', u'ح'),
+    (0x1EEA8, 'M', u'ط'),
+    (0x1EEA9, 'M', u'ي'),
+    (0x1EEAA, 'X'),
+    (0x1EEAB, 'M', u'ل'),
+    (0x1EEAC, 'M', u'م'),
+    (0x1EEAD, 'M', u'ن'),
+    (0x1EEAE, 'M', u'س'),
+    (0x1EEAF, 'M', u'ع'),
+    (0x1EEB0, 'M', u'ف'),
+    (0x1EEB1, 'M', u'ص'),
+    (0x1EEB2, 'M', u'ق'),
+    (0x1EEB3, 'M', u'ر'),
+    (0x1EEB4, 'M', u'ش'),
+    (0x1EEB5, 'M', u'ت'),
+    (0x1EEB6, 'M', u'ث'),
+    (0x1EEB7, 'M', u'خ'),
+    (0x1EEB8, 'M', u'ذ'),
+    (0x1EEB9, 'M', u'ض'),
+    (0x1EEBA, 'M', u'ظ'),
+    (0x1EEBB, 'M', u'غ'),
+    (0x1EEBC, 'X'),
+    (0x1EEF0, 'V'),
+    (0x1EEF2, 'X'),
+    (0x1F000, 'V'),
+    (0x1F02C, 'X'),
+    (0x1F030, 'V'),
+    (0x1F094, 'X'),
+    (0x1F0A0, 'V'),
+    (0x1F0AF, 'X'),
+    (0x1F0B1, 'V'),
+    (0x1F0C0, 'X'),
+    (0x1F0C1, 'V'),
+    (0x1F0D0, 'X'),
+    (0x1F0D1, 'V'),
+    (0x1F0F6, 'X'),
+    (0x1F101, '3', u'0,'),
+    (0x1F102, '3', u'1,'),
+    (0x1F103, '3', u'2,'),
+    (0x1F104, '3', u'3,'),
+    (0x1F105, '3', u'4,'),
+    (0x1F106, '3', u'5,'),
+    (0x1F107, '3', u'6,'),
+    (0x1F108, '3', u'7,'),
+    (0x1F109, '3', u'8,'),
+    (0x1F10A, '3', u'9,'),
+    (0x1F10B, 'V'),
+    (0x1F10D, 'X'),
+    (0x1F110, '3', u'(a)'),
+    (0x1F111, '3', u'(b)'),
+    (0x1F112, '3', u'(c)'),
+    (0x1F113, '3', u'(d)'),
+    (0x1F114, '3', u'(e)'),
+    (0x1F115, '3', u'(f)'),
+    (0x1F116, '3', u'(g)'),
+    (0x1F117, '3', u'(h)'),
+    (0x1F118, '3', u'(i)'),
+    (0x1F119, '3', u'(j)'),
+    (0x1F11A, '3', u'(k)'),
+    (0x1F11B, '3', u'(l)'),
+    (0x1F11C, '3', u'(m)'),
+    (0x1F11D, '3', u'(n)'),
+    (0x1F11E, '3', u'(o)'),
+    (0x1F11F, '3', u'(p)'),
+    (0x1F120, '3', u'(q)'),
+    (0x1F121, '3', u'(r)'),
+    (0x1F122, '3', u'(s)'),
+    (0x1F123, '3', u'(t)'),
+    (0x1F124, '3', u'(u)'),
+    ]
+
+def _seg_71():
+    return [
+    (0x1F125, '3', u'(v)'),
+    (0x1F126, '3', u'(w)'),
+    (0x1F127, '3', u'(x)'),
+    (0x1F128, '3', u'(y)'),
+    (0x1F129, '3', u'(z)'),
+    (0x1F12A, 'M', u'〔s〕'),
+    (0x1F12B, 'M', u'c'),
+    (0x1F12C, 'M', u'r'),
+    (0x1F12D, 'M', u'cd'),
+    (0x1F12E, 'M', u'wz'),
+    (0x1F12F, 'V'),
+    (0x1F130, 'M', u'a'),
+    (0x1F131, 'M', u'b'),
+    (0x1F132, 'M', u'c'),
+    (0x1F133, 'M', u'd'),
+    (0x1F134, 'M', u'e'),
+    (0x1F135, 'M', u'f'),
+    (0x1F136, 'M', u'g'),
+    (0x1F137, 'M', u'h'),
+    (0x1F138, 'M', u'i'),
+    (0x1F139, 'M', u'j'),
+    (0x1F13A, 'M', u'k'),
+    (0x1F13B, 'M', u'l'),
+    (0x1F13C, 'M', u'm'),
+    (0x1F13D, 'M', u'n'),
+    (0x1F13E, 'M', u'o'),
+    (0x1F13F, 'M', u'p'),
+    (0x1F140, 'M', u'q'),
+    (0x1F141, 'M', u'r'),
+    (0x1F142, 'M', u's'),
+    (0x1F143, 'M', u't'),
+    (0x1F144, 'M', u'u'),
+    (0x1F145, 'M', u'v'),
+    (0x1F146, 'M', u'w'),
+    (0x1F147, 'M', u'x'),
+    (0x1F148, 'M', u'y'),
+    (0x1F149, 'M', u'z'),
+    (0x1F14A, 'M', u'hv'),
+    (0x1F14B, 'M', u'mv'),
+    (0x1F14C, 'M', u'sd'),
+    (0x1F14D, 'M', u'ss'),
+    (0x1F14E, 'M', u'ppv'),
+    (0x1F14F, 'M', u'wc'),
+    (0x1F150, 'V'),
+    (0x1F16A, 'M', u'mc'),
+    (0x1F16B, 'M', u'md'),
+    (0x1F16C, 'X'),
+    (0x1F170, 'V'),
+    (0x1F190, 'M', u'dj'),
+    (0x1F191, 'V'),
+    (0x1F1AD, 'X'),
+    (0x1F1E6, 'V'),
+    (0x1F200, 'M', u'ほか'),
+    (0x1F201, 'M', u'ココ'),
+    (0x1F202, 'M', u'サ'),
+    (0x1F203, 'X'),
+    (0x1F210, 'M', u'手'),
+    (0x1F211, 'M', u'字'),
+    (0x1F212, 'M', u'双'),
+    (0x1F213, 'M', u'デ'),
+    (0x1F214, 'M', u'二'),
+    (0x1F215, 'M', u'多'),
+    (0x1F216, 'M', u'解'),
+    (0x1F217, 'M', u'天'),
+    (0x1F218, 'M', u'交'),
+    (0x1F219, 'M', u'映'),
+    (0x1F21A, 'M', u'無'),
+    (0x1F21B, 'M', u'料'),
+    (0x1F21C, 'M', u'前'),
+    (0x1F21D, 'M', u'後'),
+    (0x1F21E, 'M', u'再'),
+    (0x1F21F, 'M', u'新'),
+    (0x1F220, 'M', u'初'),
+    (0x1F221, 'M', u'終'),
+    (0x1F222, 'M', u'生'),
+    (0x1F223, 'M', u'販'),
+    (0x1F224, 'M', u'声'),
+    (0x1F225, 'M', u'吹'),
+    (0x1F226, 'M', u'演'),
+    (0x1F227, 'M', u'投'),
+    (0x1F228, 'M', u'捕'),
+    (0x1F229, 'M', u'一'),
+    (0x1F22A, 'M', u'三'),
+    (0x1F22B, 'M', u'遊'),
+    (0x1F22C, 'M', u'左'),
+    (0x1F22D, 'M', u'中'),
+    (0x1F22E, 'M', u'右'),
+    (0x1F22F, 'M', u'指'),
+    (0x1F230, 'M', u'走'),
+    (0x1F231, 'M', u'打'),
+    (0x1F232, 'M', u'禁'),
+    (0x1F233, 'M', u'空'),
+    (0x1F234, 'M', u'合'),
+    (0x1F235, 'M', u'満'),
+    (0x1F236, 'M', u'有'),
+    (0x1F237, 'M', u'月'),
+    (0x1F238, 'M', u'申'),
+    (0x1F239, 'M', u'割'),
+    (0x1F23A, 'M', u'営'),
+    (0x1F23B, 'M', u'配'),
+    ]
+
+def _seg_72():
+    return [
+    (0x1F23C, 'X'),
+    (0x1F240, 'M', u'〔本〕'),
+    (0x1F241, 'M', u'〔三〕'),
+    (0x1F242, 'M', u'〔二〕'),
+    (0x1F243, 'M', u'〔安〕'),
+    (0x1F244, 'M', u'〔点〕'),
+    (0x1F245, 'M', u'〔打〕'),
+    (0x1F246, 'M', u'〔盗〕'),
+    (0x1F247, 'M', u'〔勝〕'),
+    (0x1F248, 'M', u'〔敗〕'),
+    (0x1F249, 'X'),
+    (0x1F250, 'M', u'得'),
+    (0x1F251, 'M', u'可'),
+    (0x1F252, 'X'),
+    (0x1F260, 'V'),
+    (0x1F266, 'X'),
+    (0x1F300, 'V'),
+    (0x1F6D5, 'X'),
+    (0x1F6E0, 'V'),
+    (0x1F6ED, 'X'),
+    (0x1F6F0, 'V'),
+    (0x1F6FA, 'X'),
+    (0x1F700, 'V'),
+    (0x1F774, 'X'),
+    (0x1F780, 'V'),
+    (0x1F7D9, 'X'),
+    (0x1F800, 'V'),
+    (0x1F80C, 'X'),
+    (0x1F810, 'V'),
+    (0x1F848, 'X'),
+    (0x1F850, 'V'),
+    (0x1F85A, 'X'),
+    (0x1F860, 'V'),
+    (0x1F888, 'X'),
+    (0x1F890, 'V'),
+    (0x1F8AE, 'X'),
+    (0x1F900, 'V'),
+    (0x1F90C, 'X'),
+    (0x1F910, 'V'),
+    (0x1F93F, 'X'),
+    (0x1F940, 'V'),
+    (0x1F971, 'X'),
+    (0x1F973, 'V'),
+    (0x1F977, 'X'),
+    (0x1F97A, 'V'),
+    (0x1F97B, 'X'),
+    (0x1F97C, 'V'),
+    (0x1F9A3, 'X'),
+    (0x1F9B0, 'V'),
+    (0x1F9BA, 'X'),
+    (0x1F9C0, 'V'),
+    (0x1F9C3, 'X'),
+    (0x1F9D0, 'V'),
+    (0x1FA00, 'X'),
+    (0x1FA60, 'V'),
+    (0x1FA6E, 'X'),
+    (0x20000, 'V'),
+    (0x2A6D7, 'X'),
+    (0x2A700, 'V'),
+    (0x2B735, 'X'),
+    (0x2B740, 'V'),
+    (0x2B81E, 'X'),
+    (0x2B820, 'V'),
+    (0x2CEA2, 'X'),
+    (0x2CEB0, 'V'),
+    (0x2EBE1, 'X'),
+    (0x2F800, 'M', u'丽'),
+    (0x2F801, 'M', u'丸'),
+    (0x2F802, 'M', u'乁'),
+    (0x2F803, 'M', u'𠄢'),
+    (0x2F804, 'M', u'你'),
+    (0x2F805, 'M', u'侮'),
+    (0x2F806, 'M', u'侻'),
+    (0x2F807, 'M', u'倂'),
+    (0x2F808, 'M', u'偺'),
+    (0x2F809, 'M', u'備'),
+    (0x2F80A, 'M', u'僧'),
+    (0x2F80B, 'M', u'像'),
+    (0x2F80C, 'M', u'㒞'),
+    (0x2F80D, 'M', u'𠘺'),
+    (0x2F80E, 'M', u'免'),
+    (0x2F80F, 'M', u'兔'),
+    (0x2F810, 'M', u'兤'),
+    (0x2F811, 'M', u'具'),
+    (0x2F812, 'M', u'𠔜'),
+    (0x2F813, 'M', u'㒹'),
+    (0x2F814, 'M', u'內'),
+    (0x2F815, 'M', u'再'),
+    (0x2F816, 'M', u'𠕋'),
+    (0x2F817, 'M', u'冗'),
+    (0x2F818, 'M', u'冤'),
+    (0x2F819, 'M', u'仌'),
+    (0x2F81A, 'M', u'冬'),
+    (0x2F81B, 'M', u'况'),
+    (0x2F81C, 'M', u'𩇟'),
+    (0x2F81D, 'M', u'凵'),
+    (0x2F81E, 'M', u'刃'),
+    (0x2F81F, 'M', u'㓟'),
+    (0x2F820, 'M', u'刻'),
+    (0x2F821, 'M', u'剆'),
+    ]
+
+def _seg_73():
+    return [
+    (0x2F822, 'M', u'割'),
+    (0x2F823, 'M', u'剷'),
+    (0x2F824, 'M', u'㔕'),
+    (0x2F825, 'M', u'勇'),
+    (0x2F826, 'M', u'勉'),
+    (0x2F827, 'M', u'勤'),
+    (0x2F828, 'M', u'勺'),
+    (0x2F829, 'M', u'包'),
+    (0x2F82A, 'M', u'匆'),
+    (0x2F82B, 'M', u'北'),
+    (0x2F82C, 'M', u'卉'),
+    (0x2F82D, 'M', u'卑'),
+    (0x2F82E, 'M', u'博'),
+    (0x2F82F, 'M', u'即'),
+    (0x2F830, 'M', u'卽'),
+    (0x2F831, 'M', u'卿'),
+    (0x2F834, 'M', u'𠨬'),
+    (0x2F835, 'M', u'灰'),
+    (0x2F836, 'M', u'及'),
+    (0x2F837, 'M', u'叟'),
+    (0x2F838, 'M', u'𠭣'),
+    (0x2F839, 'M', u'叫'),
+    (0x2F83A, 'M', u'叱'),
+    (0x2F83B, 'M', u'吆'),
+    (0x2F83C, 'M', u'咞'),
+    (0x2F83D, 'M', u'吸'),
+    (0x2F83E, 'M', u'呈'),
+    (0x2F83F, 'M', u'周'),
+    (0x2F840, 'M', u'咢'),
+    (0x2F841, 'M', u'哶'),
+    (0x2F842, 'M', u'唐'),
+    (0x2F843, 'M', u'啓'),
+    (0x2F844, 'M', u'啣'),
+    (0x2F845, 'M', u'善'),
+    (0x2F847, 'M', u'喙'),
+    (0x2F848, 'M', u'喫'),
+    (0x2F849, 'M', u'喳'),
+    (0x2F84A, 'M', u'嗂'),
+    (0x2F84B, 'M', u'圖'),
+    (0x2F84C, 'M', u'嘆'),
+    (0x2F84D, 'M', u'圗'),
+    (0x2F84E, 'M', u'噑'),
+    (0x2F84F, 'M', u'噴'),
+    (0x2F850, 'M', u'切'),
+    (0x2F851, 'M', u'壮'),
+    (0x2F852, 'M', u'城'),
+    (0x2F853, 'M', u'埴'),
+    (0x2F854, 'M', u'堍'),
+    (0x2F855, 'M', u'型'),
+    (0x2F856, 'M', u'堲'),
+    (0x2F857, 'M', u'報'),
+    (0x2F858, 'M', u'墬'),
+    (0x2F859, 'M', u'𡓤'),
+    (0x2F85A, 'M', u'売'),
+    (0x2F85B, 'M', u'壷'),
+    (0x2F85C, 'M', u'夆'),
+    (0x2F85D, 'M', u'多'),
+    (0x2F85E, 'M', u'夢'),
+    (0x2F85F, 'M', u'奢'),
+    (0x2F860, 'M', u'𡚨'),
+    (0x2F861, 'M', u'𡛪'),
+    (0x2F862, 'M', u'姬'),
+    (0x2F863, 'M', u'娛'),
+    (0x2F864, 'M', u'娧'),
+    (0x2F865, 'M', u'姘'),
+    (0x2F866, 'M', u'婦'),
+    (0x2F867, 'M', u'㛮'),
+    (0x2F868, 'X'),
+    (0x2F869, 'M', u'嬈'),
+    (0x2F86A, 'M', u'嬾'),
+    (0x2F86C, 'M', u'𡧈'),
+    (0x2F86D, 'M', u'寃'),
+    (0x2F86E, 'M', u'寘'),
+    (0x2F86F, 'M', u'寧'),
+    (0x2F870, 'M', u'寳'),
+    (0x2F871, 'M', u'𡬘'),
+    (0x2F872, 'M', u'寿'),
+    (0x2F873, 'M', u'将'),
+    (0x2F874, 'X'),
+    (0x2F875, 'M', u'尢'),
+    (0x2F876, 'M', u'㞁'),
+    (0x2F877, 'M', u'屠'),
+    (0x2F878, 'M', u'屮'),
+    (0x2F879, 'M', u'峀'),
+    (0x2F87A, 'M', u'岍'),
+    (0x2F87B, 'M', u'𡷤'),
+    (0x2F87C, 'M', u'嵃'),
+    (0x2F87D, 'M', u'𡷦'),
+    (0x2F87E, 'M', u'嵮'),
+    (0x2F87F, 'M', u'嵫'),
+    (0x2F880, 'M', u'嵼'),
+    (0x2F881, 'M', u'巡'),
+    (0x2F882, 'M', u'巢'),
+    (0x2F883, 'M', u'㠯'),
+    (0x2F884, 'M', u'巽'),
+    (0x2F885, 'M', u'帨'),
+    (0x2F886, 'M', u'帽'),
+    (0x2F887, 'M', u'幩'),
+    (0x2F888, 'M', u'㡢'),
+    (0x2F889, 'M', u'𢆃'),
+    ]
+
+def _seg_74():
+    return [
+    (0x2F88A, 'M', u'㡼'),
+    (0x2F88B, 'M', u'庰'),
+    (0x2F88C, 'M', u'庳'),
+    (0x2F88D, 'M', u'庶'),
+    (0x2F88E, 'M', u'廊'),
+    (0x2F88F, 'M', u'𪎒'),
+    (0x2F890, 'M', u'廾'),
+    (0x2F891, 'M', u'𢌱'),
+    (0x2F893, 'M', u'舁'),
+    (0x2F894, 'M', u'弢'),
+    (0x2F896, 'M', u'㣇'),
+    (0x2F897, 'M', u'𣊸'),
+    (0x2F898, 'M', u'𦇚'),
+    (0x2F899, 'M', u'形'),
+    (0x2F89A, 'M', u'彫'),
+    (0x2F89B, 'M', u'㣣'),
+    (0x2F89C, 'M', u'徚'),
+    (0x2F89D, 'M', u'忍'),
+    (0x2F89E, 'M', u'志'),
+    (0x2F89F, 'M', u'忹'),
+    (0x2F8A0, 'M', u'悁'),
+    (0x2F8A1, 'M', u'㤺'),
+    (0x2F8A2, 'M', u'㤜'),
+    (0x2F8A3, 'M', u'悔'),
+    (0x2F8A4, 'M', u'𢛔'),
+    (0x2F8A5, 'M', u'惇'),
+    (0x2F8A6, 'M', u'慈'),
+    (0x2F8A7, 'M', u'慌'),
+    (0x2F8A8, 'M', u'慎'),
+    (0x2F8A9, 'M', u'慌'),
+    (0x2F8AA, 'M', u'慺'),
+    (0x2F8AB, 'M', u'憎'),
+    (0x2F8AC, 'M', u'憲'),
+    (0x2F8AD, 'M', u'憤'),
+    (0x2F8AE, 'M', u'憯'),
+    (0x2F8AF, 'M', u'懞'),
+    (0x2F8B0, 'M', u'懲'),
+    (0x2F8B1, 'M', u'懶'),
+    (0x2F8B2, 'M', u'成'),
+    (0x2F8B3, 'M', u'戛'),
+    (0x2F8B4, 'M', u'扝'),
+    (0x2F8B5, 'M', u'抱'),
+    (0x2F8B6, 'M', u'拔'),
+    (0x2F8B7, 'M', u'捐'),
+    (0x2F8B8, 'M', u'𢬌'),
+    (0x2F8B9, 'M', u'挽'),
+    (0x2F8BA, 'M', u'拼'),
+    (0x2F8BB, 'M', u'捨'),
+    (0x2F8BC, 'M', u'掃'),
+    (0x2F8BD, 'M', u'揤'),
+    (0x2F8BE, 'M', u'𢯱'),
+    (0x2F8BF, 'M', u'搢'),
+    (0x2F8C0, 'M', u'揅'),
+    (0x2F8C1, 'M', u'掩'),
+    (0x2F8C2, 'M', u'㨮'),
+    (0x2F8C3, 'M', u'摩'),
+    (0x2F8C4, 'M', u'摾'),
+    (0x2F8C5, 'M', u'撝'),
+    (0x2F8C6, 'M', u'摷'),
+    (0x2F8C7, 'M', u'㩬'),
+    (0x2F8C8, 'M', u'敏'),
+    (0x2F8C9, 'M', u'敬'),
+    (0x2F8CA, 'M', u'𣀊'),
+    (0x2F8CB, 'M', u'旣'),
+    (0x2F8CC, 'M', u'書'),
+    (0x2F8CD, 'M', u'晉'),
+    (0x2F8CE, 'M', u'㬙'),
+    (0x2F8CF, 'M', u'暑'),
+    (0x2F8D0, 'M', u'㬈'),
+    (0x2F8D1, 'M', u'㫤'),
+    (0x2F8D2, 'M', u'冒'),
+    (0x2F8D3, 'M', u'冕'),
+    (0x2F8D4, 'M', u'最'),
+    (0x2F8D5, 'M', u'暜'),
+    (0x2F8D6, 'M', u'肭'),
+    (0x2F8D7, 'M', u'䏙'),
+    (0x2F8D8, 'M', u'朗'),
+    (0x2F8D9, 'M', u'望'),
+    (0x2F8DA, 'M', u'朡'),
+    (0x2F8DB, 'M', u'杞'),
+    (0x2F8DC, 'M', u'杓'),
+    (0x2F8DD, 'M', u'𣏃'),
+    (0x2F8DE, 'M', u'㭉'),
+    (0x2F8DF, 'M', u'柺'),
+    (0x2F8E0, 'M', u'枅'),
+    (0x2F8E1, 'M', u'桒'),
+    (0x2F8E2, 'M', u'梅'),
+    (0x2F8E3, 'M', u'𣑭'),
+    (0x2F8E4, 'M', u'梎'),
+    (0x2F8E5, 'M', u'栟'),
+    (0x2F8E6, 'M', u'椔'),
+    (0x2F8E7, 'M', u'㮝'),
+    (0x2F8E8, 'M', u'楂'),
+    (0x2F8E9, 'M', u'榣'),
+    (0x2F8EA, 'M', u'槪'),
+    (0x2F8EB, 'M', u'檨'),
+    (0x2F8EC, 'M', u'𣚣'),
+    (0x2F8ED, 'M', u'櫛'),
+    (0x2F8EE, 'M', u'㰘'),
+    (0x2F8EF, 'M', u'次'),
+    ]
+
+def _seg_75():
+    return [
+    (0x2F8F0, 'M', u'𣢧'),
+    (0x2F8F1, 'M', u'歔'),
+    (0x2F8F2, 'M', u'㱎'),
+    (0x2F8F3, 'M', u'歲'),
+    (0x2F8F4, 'M', u'殟'),
+    (0x2F8F5, 'M', u'殺'),
+    (0x2F8F6, 'M', u'殻'),
+    (0x2F8F7, 'M', u'𣪍'),
+    (0x2F8F8, 'M', u'𡴋'),
+    (0x2F8F9, 'M', u'𣫺'),
+    (0x2F8FA, 'M', u'汎'),
+    (0x2F8FB, 'M', u'𣲼'),
+    (0x2F8FC, 'M', u'沿'),
+    (0x2F8FD, 'M', u'泍'),
+    (0x2F8FE, 'M', u'汧'),
+    (0x2F8FF, 'M', u'洖'),
+    (0x2F900, 'M', u'派'),
+    (0x2F901, 'M', u'海'),
+    (0x2F902, 'M', u'流'),
+    (0x2F903, 'M', u'浩'),
+    (0x2F904, 'M', u'浸'),
+    (0x2F905, 'M', u'涅'),
+    (0x2F906, 'M', u'𣴞'),
+    (0x2F907, 'M', u'洴'),
+    (0x2F908, 'M', u'港'),
+    (0x2F909, 'M', u'湮'),
+    (0x2F90A, 'M', u'㴳'),
+    (0x2F90B, 'M', u'滋'),
+    (0x2F90C, 'M', u'滇'),
+    (0x2F90D, 'M', u'𣻑'),
+    (0x2F90E, 'M', u'淹'),
+    (0x2F90F, 'M', u'潮'),
+    (0x2F910, 'M', u'𣽞'),
+    (0x2F911, 'M', u'𣾎'),
+    (0x2F912, 'M', u'濆'),
+    (0x2F913, 'M', u'瀹'),
+    (0x2F914, 'M', u'瀞'),
+    (0x2F915, 'M', u'瀛'),
+    (0x2F916, 'M', u'㶖'),
+    (0x2F917, 'M', u'灊'),
+    (0x2F918, 'M', u'災'),
+    (0x2F919, 'M', u'灷'),
+    (0x2F91A, 'M', u'炭'),
+    (0x2F91B, 'M', u'𠔥'),
+    (0x2F91C, 'M', u'煅'),
+    (0x2F91D, 'M', u'𤉣'),
+    (0x2F91E, 'M', u'熜'),
+    (0x2F91F, 'X'),
+    (0x2F920, 'M', u'爨'),
+    (0x2F921, 'M', u'爵'),
+    (0x2F922, 'M', u'牐'),
+    (0x2F923, 'M', u'𤘈'),
+    (0x2F924, 'M', u'犀'),
+    (0x2F925, 'M', u'犕'),
+    (0x2F926, 'M', u'𤜵'),
+    (0x2F927, 'M', u'𤠔'),
+    (0x2F928, 'M', u'獺'),
+    (0x2F929, 'M', u'王'),
+    (0x2F92A, 'M', u'㺬'),
+    (0x2F92B, 'M', u'玥'),
+    (0x2F92C, 'M', u'㺸'),
+    (0x2F92E, 'M', u'瑇'),
+    (0x2F92F, 'M', u'瑜'),
+    (0x2F930, 'M', u'瑱'),
+    (0x2F931, 'M', u'璅'),
+    (0x2F932, 'M', u'瓊'),
+    (0x2F933, 'M', u'㼛'),
+    (0x2F934, 'M', u'甤'),
+    (0x2F935, 'M', u'𤰶'),
+    (0x2F936, 'M', u'甾'),
+    (0x2F937, 'M', u'𤲒'),
+    (0x2F938, 'M', u'異'),
+    (0x2F939, 'M', u'𢆟'),
+    (0x2F93A, 'M', u'瘐'),
+    (0x2F93B, 'M', u'𤾡'),
+    (0x2F93C, 'M', u'𤾸'),
+    (0x2F93D, 'M', u'𥁄'),
+    (0x2F93E, 'M', u'㿼'),
+    (0x2F93F, 'M', u'䀈'),
+    (0x2F940, 'M', u'直'),
+    (0x2F941, 'M', u'𥃳'),
+    (0x2F942, 'M', u'𥃲'),
+    (0x2F943, 'M', u'𥄙'),
+    (0x2F944, 'M', u'𥄳'),
+    (0x2F945, 'M', u'眞'),
+    (0x2F946, 'M', u'真'),
+    (0x2F948, 'M', u'睊'),
+    (0x2F949, 'M', u'䀹'),
+    (0x2F94A, 'M', u'瞋'),
+    (0x2F94B, 'M', u'䁆'),
+    (0x2F94C, 'M', u'䂖'),
+    (0x2F94D, 'M', u'𥐝'),
+    (0x2F94E, 'M', u'硎'),
+    (0x2F94F, 'M', u'碌'),
+    (0x2F950, 'M', u'磌'),
+    (0x2F951, 'M', u'䃣'),
+    (0x2F952, 'M', u'𥘦'),
+    (0x2F953, 'M', u'祖'),
+    (0x2F954, 'M', u'𥚚'),
+    (0x2F955, 'M', u'𥛅'),
+    ]
+
+def _seg_76():
+    return [
+    (0x2F956, 'M', u'福'),
+    (0x2F957, 'M', u'秫'),
+    (0x2F958, 'M', u'䄯'),
+    (0x2F959, 'M', u'穀'),
+    (0x2F95A, 'M', u'穊'),
+    (0x2F95B, 'M', u'穏'),
+    (0x2F95C, 'M', u'𥥼'),
+    (0x2F95D, 'M', u'𥪧'),
+    (0x2F95F, 'X'),
+    (0x2F960, 'M', u'䈂'),
+    (0x2F961, 'M', u'𥮫'),
+    (0x2F962, 'M', u'篆'),
+    (0x2F963, 'M', u'築'),
+    (0x2F964, 'M', u'䈧'),
+    (0x2F965, 'M', u'𥲀'),
+    (0x2F966, 'M', u'糒'),
+    (0x2F967, 'M', u'䊠'),
+    (0x2F968, 'M', u'糨'),
+    (0x2F969, 'M', u'糣'),
+    (0x2F96A, 'M', u'紀'),
+    (0x2F96B, 'M', u'𥾆'),
+    (0x2F96C, 'M', u'絣'),
+    (0x2F96D, 'M', u'䌁'),
+    (0x2F96E, 'M', u'緇'),
+    (0x2F96F, 'M', u'縂'),
+    (0x2F970, 'M', u'繅'),
+    (0x2F971, 'M', u'䌴'),
+    (0x2F972, 'M', u'𦈨'),
+    (0x2F973, 'M', u'𦉇'),
+    (0x2F974, 'M', u'䍙'),
+    (0x2F975, 'M', u'𦋙'),
+    (0x2F976, 'M', u'罺'),
+    (0x2F977, 'M', u'𦌾'),
+    (0x2F978, 'M', u'羕'),
+    (0x2F979, 'M', u'翺'),
+    (0x2F97A, 'M', u'者'),
+    (0x2F97B, 'M', u'𦓚'),
+    (0x2F97C, 'M', u'𦔣'),
+    (0x2F97D, 'M', u'聠'),
+    (0x2F97E, 'M', u'𦖨'),
+    (0x2F97F, 'M', u'聰'),
+    (0x2F980, 'M', u'𣍟'),
+    (0x2F981, 'M', u'䏕'),
+    (0x2F982, 'M', u'育'),
+    (0x2F983, 'M', u'脃'),
+    (0x2F984, 'M', u'䐋'),
+    (0x2F985, 'M', u'脾'),
+    (0x2F986, 'M', u'媵'),
+    (0x2F987, 'M', u'𦞧'),
+    (0x2F988, 'M', u'𦞵'),
+    (0x2F989, 'M', u'𣎓'),
+    (0x2F98A, 'M', u'𣎜'),
+    (0x2F98B, 'M', u'舁'),
+    (0x2F98C, 'M', u'舄'),
+    (0x2F98D, 'M', u'辞'),
+    (0x2F98E, 'M', u'䑫'),
+    (0x2F98F, 'M', u'芑'),
+    (0x2F990, 'M', u'芋'),
+    (0x2F991, 'M', u'芝'),
+    (0x2F992, 'M', u'劳'),
+    (0x2F993, 'M', u'花'),
+    (0x2F994, 'M', u'芳'),
+    (0x2F995, 'M', u'芽'),
+    (0x2F996, 'M', u'苦'),
+    (0x2F997, 'M', u'𦬼'),
+    (0x2F998, 'M', u'若'),
+    (0x2F999, 'M', u'茝'),
+    (0x2F99A, 'M', u'荣'),
+    (0x2F99B, 'M', u'莭'),
+    (0x2F99C, 'M', u'茣'),
+    (0x2F99D, 'M', u'莽'),
+    (0x2F99E, 'M', u'菧'),
+    (0x2F99F, 'M', u'著'),
+    (0x2F9A0, 'M', u'荓'),
+    (0x2F9A1, 'M', u'菊'),
+    (0x2F9A2, 'M', u'菌'),
+    (0x2F9A3, 'M', u'菜'),
+    (0x2F9A4, 'M', u'𦰶'),
+    (0x2F9A5, 'M', u'𦵫'),
+    (0x2F9A6, 'M', u'𦳕'),
+    (0x2F9A7, 'M', u'䔫'),
+    (0x2F9A8, 'M', u'蓱'),
+    (0x2F9A9, 'M', u'蓳'),
+    (0x2F9AA, 'M', u'蔖'),
+    (0x2F9AB, 'M', u'𧏊'),
+    (0x2F9AC, 'M', u'蕤'),
+    (0x2F9AD, 'M', u'𦼬'),
+    (0x2F9AE, 'M', u'䕝'),
+    (0x2F9AF, 'M', u'䕡'),
+    (0x2F9B0, 'M', u'𦾱'),
+    (0x2F9B1, 'M', u'𧃒'),
+    (0x2F9B2, 'M', u'䕫'),
+    (0x2F9B3, 'M', u'虐'),
+    (0x2F9B4, 'M', u'虜'),
+    (0x2F9B5, 'M', u'虧'),
+    (0x2F9B6, 'M', u'虩'),
+    (0x2F9B7, 'M', u'蚩'),
+    (0x2F9B8, 'M', u'蚈'),
+    (0x2F9B9, 'M', u'蜎'),
+    (0x2F9BA, 'M', u'蛢'),
+    ]
+
+def _seg_77():
+    return [
+    (0x2F9BB, 'M', u'蝹'),
+    (0x2F9BC, 'M', u'蜨'),
+    (0x2F9BD, 'M', u'蝫'),
+    (0x2F9BE, 'M', u'螆'),
+    (0x2F9BF, 'X'),
+    (0x2F9C0, 'M', u'蟡'),
+    (0x2F9C1, 'M', u'蠁'),
+    (0x2F9C2, 'M', u'䗹'),
+    (0x2F9C3, 'M', u'衠'),
+    (0x2F9C4, 'M', u'衣'),
+    (0x2F9C5, 'M', u'𧙧'),
+    (0x2F9C6, 'M', u'裗'),
+    (0x2F9C7, 'M', u'裞'),
+    (0x2F9C8, 'M', u'䘵'),
+    (0x2F9C9, 'M', u'裺'),
+    (0x2F9CA, 'M', u'ã’»'),
+    (0x2F9CB, 'M', u'ð§¢®'),
+    (0x2F9CC, 'M', u'𧥦'),
+    (0x2F9CD, 'M', u'äš¾'),
+    (0x2F9CE, 'M', u'䛇'),
+    (0x2F9CF, 'M', u'誠'),
+    (0x2F9D0, 'M', u'諭'),
+    (0x2F9D1, 'M', u'變'),
+    (0x2F9D2, 'M', u'豕'),
+    (0x2F9D3, 'M', u'𧲨'),
+    (0x2F9D4, 'M', u'貫'),
+    (0x2F9D5, 'M', u'賁'),
+    (0x2F9D6, 'M', u'贛'),
+    (0x2F9D7, 'M', u'起'),
+    (0x2F9D8, 'M', u'𧼯'),
+    (0x2F9D9, 'M', u'𠠄'),
+    (0x2F9DA, 'M', u'跋'),
+    (0x2F9DB, 'M', u'趼'),
+    (0x2F9DC, 'M', u'跰'),
+    (0x2F9DD, 'M', u'𠣞'),
+    (0x2F9DE, 'M', u'軔'),
+    (0x2F9DF, 'M', u'輸'),
+    (0x2F9E0, 'M', u'𨗒'),
+    (0x2F9E1, 'M', u'𨗭'),
+    (0x2F9E2, 'M', u'邔'),
+    (0x2F9E3, 'M', u'郱'),
+    (0x2F9E4, 'M', u'鄑'),
+    (0x2F9E5, 'M', u'𨜮'),
+    (0x2F9E6, 'M', u'鄛'),
+    (0x2F9E7, 'M', u'鈸'),
+    (0x2F9E8, 'M', u'鋗'),
+    (0x2F9E9, 'M', u'鋘'),
+    (0x2F9EA, 'M', u'鉼'),
+    (0x2F9EB, 'M', u'鏹'),
+    (0x2F9EC, 'M', u'鐕'),
+    (0x2F9ED, 'M', u'𨯺'),
+    (0x2F9EE, 'M', u'開'),
+    (0x2F9EF, 'M', u'䦕'),
+    (0x2F9F0, 'M', u'閷'),
+    (0x2F9F1, 'M', u'𨵷'),
+    (0x2F9F2, 'M', u'䧦'),
+    (0x2F9F3, 'M', u'雃'),
+    (0x2F9F4, 'M', u'嶲'),
+    (0x2F9F5, 'M', u'霣'),
+    (0x2F9F6, 'M', u'𩅅'),
+    (0x2F9F7, 'M', u'𩈚'),
+    (0x2F9F8, 'M', u'䩮'),
+    (0x2F9F9, 'M', u'䩶'),
+    (0x2F9FA, 'M', u'韠'),
+    (0x2F9FB, 'M', u'𩐊'),
+    (0x2F9FC, 'M', u'䪲'),
+    (0x2F9FD, 'M', u'𩒖'),
+    (0x2F9FE, 'M', u'頋'),
+    (0x2FA00, 'M', u'頩'),
+    (0x2FA01, 'M', u'𩖶'),
+    (0x2FA02, 'M', u'飢'),
+    (0x2FA03, 'M', u'䬳'),
+    (0x2FA04, 'M', u'餩'),
+    (0x2FA05, 'M', u'馧'),
+    (0x2FA06, 'M', u'駂'),
+    (0x2FA07, 'M', u'駾'),
+    (0x2FA08, 'M', u'䯎'),
+    (0x2FA09, 'M', u'𩬰'),
+    (0x2FA0A, 'M', u'鬒'),
+    (0x2FA0B, 'M', u'鱀'),
+    (0x2FA0C, 'M', u'鳽'),
+    (0x2FA0D, 'M', u'䳎'),
+    (0x2FA0E, 'M', u'䳭'),
+    (0x2FA0F, 'M', u'鵧'),
+    (0x2FA10, 'M', u'𪃎'),
+    (0x2FA11, 'M', u'䳸'),
+    (0x2FA12, 'M', u'𪄅'),
+    (0x2FA13, 'M', u'𪈎'),
+    (0x2FA14, 'M', u'𪊑'),
+    (0x2FA15, 'M', u'麻'),
+    (0x2FA16, 'M', u'䵖'),
+    (0x2FA17, 'M', u'黹'),
+    (0x2FA18, 'M', u'黾'),
+    (0x2FA19, 'M', u'鼅'),
+    (0x2FA1A, 'M', u'鼏'),
+    (0x2FA1B, 'M', u'鼖'),
+    (0x2FA1C, 'M', u'鼻'),
+    (0x2FA1D, 'M', u'𪘀'),
+    (0x2FA1E, 'X'),
+    (0xE0100, 'I'),
+    ]
+
+def _seg_78():
+    return [
+    (0xE01F0, 'X'),
+    ]
+
+uts46data = tuple(
+    _seg_0()
+    + _seg_1()
+    + _seg_2()
+    + _seg_3()
+    + _seg_4()
+    + _seg_5()
+    + _seg_6()
+    + _seg_7()
+    + _seg_8()
+    + _seg_9()
+    + _seg_10()
+    + _seg_11()
+    + _seg_12()
+    + _seg_13()
+    + _seg_14()
+    + _seg_15()
+    + _seg_16()
+    + _seg_17()
+    + _seg_18()
+    + _seg_19()
+    + _seg_20()
+    + _seg_21()
+    + _seg_22()
+    + _seg_23()
+    + _seg_24()
+    + _seg_25()
+    + _seg_26()
+    + _seg_27()
+    + _seg_28()
+    + _seg_29()
+    + _seg_30()
+    + _seg_31()
+    + _seg_32()
+    + _seg_33()
+    + _seg_34()
+    + _seg_35()
+    + _seg_36()
+    + _seg_37()
+    + _seg_38()
+    + _seg_39()
+    + _seg_40()
+    + _seg_41()
+    + _seg_42()
+    + _seg_43()
+    + _seg_44()
+    + _seg_45()
+    + _seg_46()
+    + _seg_47()
+    + _seg_48()
+    + _seg_49()
+    + _seg_50()
+    + _seg_51()
+    + _seg_52()
+    + _seg_53()
+    + _seg_54()
+    + _seg_55()
+    + _seg_56()
+    + _seg_57()
+    + _seg_58()
+    + _seg_59()
+    + _seg_60()
+    + _seg_61()
+    + _seg_62()
+    + _seg_63()
+    + _seg_64()
+    + _seg_65()
+    + _seg_66()
+    + _seg_67()
+    + _seg_68()
+    + _seg_69()
+    + _seg_70()
+    + _seg_71()
+    + _seg_72()
+    + _seg_73()
+    + _seg_74()
+    + _seg_75()
+    + _seg_76()
+    + _seg_77()
+    + _seg_78()
+)
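+
+# For illustration (the real lookup lives in idna's core module, which
+# bisects this table): each tuple is (start_codepoint, status[, mapping])
+# and applies up to the next entry's codepoint; 'M' means mapped to the
+# given replacement, 'X' disallowed and 'I' ignored under UTS #46.  A rough
+# sketch of the lookup:
+#
+#     import bisect
+#     def _status(cp):
+#         idx = bisect.bisect_right(uts46data, (cp, 'Z')) - 1
+#         return uts46data[idx]  # e.g. _status(0x2FA1C) -> (0x2FA1C, 'M', u'鼻')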
diff --git a/lib/python3.7/site-packages/pip/_vendor/ipaddress.py b/lib/python3.7/site-packages/pip/_vendor/ipaddress.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2d0766842a1b76a28b77c623be05737e0e8bdb8
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/ipaddress.py
@@ -0,0 +1,2419 @@
+# Copyright 2007 Google Inc.
+#  Licensed to PSF under a Contributor Agreement.
+
+"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
+
+This library is used to create/poke/manipulate IPv4 and IPv6 addresses
+and networks.
+
+"""
+
+from __future__ import unicode_literals
+
+
+import itertools
+import struct
+
+__version__ = '1.0.22'
+
+# Compatibility functions
+_compat_int_types = (int,)
+try:
+    _compat_int_types = (int, long)
+except NameError:
+    pass
+try:
+    _compat_str = unicode
+except NameError:
+    _compat_str = str
+    assert bytes != str
+if b'\0'[0] == 0:  # Python 3 semantics
+    def _compat_bytes_to_byte_vals(byt):
+        return byt
+else:
+    def _compat_bytes_to_byte_vals(byt):
+        return [struct.unpack(b'!B', b)[0] for b in byt]
+try:
+    _compat_int_from_byte_vals = int.from_bytes
+except AttributeError:
+    def _compat_int_from_byte_vals(bytvals, endianess):
+        assert endianess == 'big'
+        res = 0
+        for bv in bytvals:
+            assert isinstance(bv, _compat_int_types)
+            res = (res << 8) + bv
+        return res
+
+
+def _compat_to_bytes(intval, length, endianess):
+    assert isinstance(intval, _compat_int_types)
+    assert endianess == 'big'
+    if length == 4:
+        if intval < 0 or intval >= 2 ** 32:
+            raise struct.error("integer out of range for 'I' format code")
+        return struct.pack(b'!I', intval)
+    elif length == 16:
+        if intval < 0 or intval >= 2 ** 128:
+            raise struct.error("integer out of range for 'QQ' format code")
+        return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
+    else:
+        raise NotImplementedError()
+
+
+if hasattr(int, 'bit_length'):
+    # Not int.bit_length, since that won't work in 2.7 where long exists
+    def _compat_bit_length(i):
+        return i.bit_length()
+else:
+    def _compat_bit_length(i):
+        for res in itertools.count():
+            if i >> res == 0:
+                return res
+
+
+def _compat_range(start, end, step=1):
+    assert step > 0
+    i = start
+    while i < end:
+        yield i
+        i += step
+
+
+class _TotalOrderingMixin(object):
+    __slots__ = ()
+
+    # Helper that derives the other comparison operations from
+    # __lt__ and __eq__
+    # We avoid functools.total_ordering because it doesn't handle
+    # NotImplemented correctly yet (http://bugs.python.org/issue10042)
+    def __eq__(self, other):
+        raise NotImplementedError
+
+    def __ne__(self, other):
+        equal = self.__eq__(other)
+        if equal is NotImplemented:
+            return NotImplemented
+        return not equal
+
+    def __lt__(self, other):
+        raise NotImplementedError
+
+    def __le__(self, other):
+        less = self.__lt__(other)
+        if less is NotImplemented or not less:
+            return self.__eq__(other)
+        return less
+
+    def __gt__(self, other):
+        less = self.__lt__(other)
+        if less is NotImplemented:
+            return NotImplemented
+        equal = self.__eq__(other)
+        if equal is NotImplemented:
+            return NotImplemented
+        return not (less or equal)
+
+    def __ge__(self, other):
+        less = self.__lt__(other)
+        if less is NotImplemented:
+            return NotImplemented
+        return not less
+
+
+IPV4LENGTH = 32
+IPV6LENGTH = 128
+
+
+class AddressValueError(ValueError):
+    """A Value Error related to the address."""
+
+
+class NetmaskValueError(ValueError):
+    """A Value Error related to the netmask."""
+
+
+def ip_address(address):
+    """Take an IP string/int and return an object of the correct type.
+
+    Args:
+        address: A string or integer, the IP address.  Either IPv4 or
+          IPv6 addresses may be supplied; integers less than 2**32 will
+          be considered to be IPv4 by default.
+
+    Returns:
+        An IPv4Address or IPv6Address object.
+
+    Raises:
+        ValueError: if the *address* passed isn't either a v4 or a v6
+          address
+
+    """
+    try:
+        return IPv4Address(address)
+    except (AddressValueError, NetmaskValueError):
+        pass
+
+    try:
+        return IPv6Address(address)
+    except (AddressValueError, NetmaskValueError):
+        pass
+
+    if isinstance(address, bytes):
+        raise AddressValueError(
+            '%r does not appear to be an IPv4 or IPv6 address. '
+            'Did you pass in a bytes (str in Python 2) instead of'
+            ' a unicode object?' % address)
+
+    raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
+                     address)
+
+
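+# For illustration (not part of the vendored code): IPv4 is tried first, so
+# integers below 2**32 come back as IPv4 addresses.
+#
+#     ip_address(u'192.0.2.1')   == IPv4Address(u'192.0.2.1')
+#     ip_address(3221225985)     == IPv4Address(u'192.0.2.1')  # same address
+#     ip_address(u'2001:db8::1') == IPv6Address(u'2001:db8::1')
+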
+def ip_network(address, strict=True):
+    """Take an IP string/int and return an object of the correct type.
+
+    Args:
+        address: A string or integer, the IP network.  Either IPv4 or
+          IPv6 networks may be supplied; integers less than 2**32 will
+          be considered to be IPv4 by default.
+
+    Returns:
+        An IPv4Network or IPv6Network object.
+
+    Raises:
+        ValueError: if the string passed isn't either a v4 or a v6
+          address. Or if the network has host bits set.
+
+    """
+    try:
+        return IPv4Network(address, strict)
+    except (AddressValueError, NetmaskValueError):
+        pass
+
+    try:
+        return IPv6Network(address, strict)
+    except (AddressValueError, NetmaskValueError):
+        pass
+
+    if isinstance(address, bytes):
+        raise AddressValueError(
+            '%r does not appear to be an IPv4 or IPv6 network. '
+            'Did you pass in a bytes (str in Python 2) instead of'
+            ' a unicode object?' % address)
+
+    raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
+                     address)
+
+
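+# For illustration: strict=True (the default) rejects host bits, while
+# strict=False masks them away.
+#
+#     ip_network(u'192.0.2.0/24')                # IPv4Network(u'192.0.2.0/24')
+#     ip_network(u'192.0.2.1/24')                # raises ValueError
+#     ip_network(u'192.0.2.1/24', strict=False)  # IPv4Network(u'192.0.2.0/24')
+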
+def ip_interface(address):
+    """Take an IP string/int and return an object of the correct type.
+
+    Args:
+        address: A string or integer, the IP address.  Either IPv4 or
+          IPv6 addresses may be supplied; integers less than 2**32 will
+          be considered to be IPv4 by default.
+
+    Returns:
+        An IPv4Interface or IPv6Interface object.
+
+    Raises:
+        ValueError: if the string passed isn't either a v4 or a v6
+          address.
+
+    Notes:
+        The IPv?Interface classes describe an Address on a particular
+        Network, so they're basically a combination of both the Address
+        and Network classes.
+
+    """
+    try:
+        return IPv4Interface(address)
+    except (AddressValueError, NetmaskValueError):
+        pass
+
+    try:
+        return IPv6Interface(address)
+    except (AddressValueError, NetmaskValueError):
+        pass
+
+    raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
+                     address)
+
+
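+# For illustration: an interface keeps both the host address and its
+# network, unlike ip_address() and ip_network().
+#
+#     iface = ip_interface(u'192.0.2.1/24')
+#     iface.ip      == IPv4Address(u'192.0.2.1')
+#     iface.network == IPv4Network(u'192.0.2.0/24')
+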
+def v4_int_to_packed(address):
+    """Represent an address as 4 packed bytes in network (big-endian) order.
+
+    Args:
+        address: An integer representation of an IPv4 IP address.
+
+    Returns:
+        The integer address packed as 4 bytes in network (big-endian) order.
+
+    Raises:
+        ValueError: If the integer is negative or too large to be an
+          IPv4 IP address.
+
+    """
+    try:
+        return _compat_to_bytes(address, 4, 'big')
+    except (struct.error, OverflowError):
+        raise ValueError("Address negative or too large for IPv4")
+
+
+def v6_int_to_packed(address):
+    """Represent an address as 16 packed bytes in network (big-endian) order.
+
+    Args:
+        address: An integer representation of an IPv6 IP address.
+
+    Returns:
+        The integer address packed as 16 bytes in network (big-endian) order.
+
+    """
+    try:
+        return _compat_to_bytes(address, 16, 'big')
+    except (struct.error, OverflowError):
+        raise ValueError("Address negative or too large for IPv6")
+
+
+def _split_optional_netmask(address):
+    """Helper to split the netmask and raise AddressValueError if needed"""
+    addr = _compat_str(address).split('/')
+    if len(addr) > 2:
+        raise AddressValueError("Only one '/' permitted in %r" % address)
+    return addr
+
+
+def _find_address_range(addresses):
+    """Find a sequence of sorted deduplicated IPv#Address.
+
+    Args:
+        addresses: a list of IPv#Address objects.
+
+    Yields:
+        A tuple containing the first and last IP addresses in the sequence.
+
+    """
+    it = iter(addresses)
+    first = last = next(it)
+    for ip in it:
+        if ip._ip != last._ip + 1:
+            yield first, last
+            first = ip
+        last = ip
+    yield first, last
+
+
+def _count_righthand_zero_bits(number, bits):
+    """Count the number of zero bits on the right hand side.
+
+    Args:
+        number: an integer.
+        bits: maximum number of bits to count.
+
+    Returns:
+        The number of zero bits on the right hand side of the number.
+
+    """
+    if number == 0:
+        return bits
+    return min(bits, _compat_bit_length(~number & (number - 1)))
+
+
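+# A worked example of the bit trick above: for number = 0b101000 (40),
+# number - 1 = 0b100111 flips the trailing zeros and the lowest set bit, and
+# ~number & (number - 1) keeps exactly the flipped trailing zeros, 0b111,
+# whose bit length (3) is the trailing-zero count.
+#
+#     _count_righthand_zero_bits(0b101000, 32)  # -> 3
+#     _count_righthand_zero_bits(0, 32)         # -> 32 (capped at bits)
+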
+def summarize_address_range(first, last):
+    """Summarize a network range given the first and last IP addresses.
+
+    Example:
+        >>> list(summarize_address_range(IPv4Address('192.0.2.0'),
+        ...                              IPv4Address('192.0.2.130')))
+        ...                                #doctest: +NORMALIZE_WHITESPACE
+        [IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
+         IPv4Network('192.0.2.130/32')]
+
+    Args:
+        first: the first IPv4Address or IPv6Address in the range.
+        last: the last IPv4Address or IPv6Address in the range.
+
+    Returns:
+        An iterator of the summarized IPv(4|6) network objects.
+
+    Raises:
+        TypeError:
+            If the first and last objects are not IP addresses.
+            If the first and last objects are not the same version.
+        ValueError:
+            If the last object is not greater than the first.
+            If the version of the first address is not 4 or 6.
+
+    """
+    if (not (isinstance(first, _BaseAddress) and
+             isinstance(last, _BaseAddress))):
+        raise TypeError('first and last must be IP addresses, not networks')
+    if first.version != last.version:
+        raise TypeError("%s and %s are not of the same version" % (
+                        first, last))
+    if first > last:
+        raise ValueError('last IP address must be greater than first')
+
+    if first.version == 4:
+        ip = IPv4Network
+    elif first.version == 6:
+        ip = IPv6Network
+    else:
+        raise ValueError('unknown IP version')
+
+    ip_bits = first._max_prefixlen
+    first_int = first._ip
+    last_int = last._ip
+    while first_int <= last_int:
+        nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
+                    _compat_bit_length(last_int - first_int + 1) - 1)
+        net = ip((first_int, ip_bits - nbits))
+        yield net
+        first_int += 1 << nbits
+        if first_int - 1 == ip._ALL_ONES:
+            break
+
+
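+# Tracing the loop above for the docstring example: 192.0.2.0 has 9 trailing
+# zero bits but only 131 addresses remain, so nbits = bit_length(131) - 1 = 7
+# and a /25 is emitted; the 3 addresses left at 192.0.2.128 then yield a /31
+# (nbits = 1) and a /32 (nbits = 0), matching the [/25, /31, /32] result.
+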
+def _collapse_addresses_internal(addresses):
+    """Loops through the addresses, collapsing concurrent netblocks.
+
+    Example:
+
+        ip1 = IPv4Network('192.0.2.0/26')
+        ip2 = IPv4Network('192.0.2.64/26')
+        ip3 = IPv4Network('192.0.2.128/26')
+        ip4 = IPv4Network('192.0.2.192/26')
+
+        _collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
+          [IPv4Network('192.0.2.0/24')]
+
+        This shouldn't be called directly; it is called via
+          collapse_addresses([]).
+
+    Args:
+        addresses: A list of IPv4Network's or IPv6Network's
+
+    Returns:
+        A list of IPv4Network's or IPv6Network's depending on what we were
+        passed.
+
+    """
+    # First merge
+    to_merge = list(addresses)
+    subnets = {}
+    while to_merge:
+        net = to_merge.pop()
+        supernet = net.supernet()
+        existing = subnets.get(supernet)
+        if existing is None:
+            subnets[supernet] = net
+        elif existing != net:
+            # Merge consecutive subnets
+            del subnets[supernet]
+            to_merge.append(supernet)
+    # Then iterate over resulting networks, skipping subsumed subnets
+    last = None
+    for net in sorted(subnets.values()):
+        if last is not None:
+            # Since they are sorted,
+            # last.network_address <= net.network_address is a given.
+            if last.broadcast_address >= net.broadcast_address:
+                continue
+        yield net
+        last = net
+
+
+def collapse_addresses(addresses):
+    """Collapse a list of IP objects.
+
+    Example:
+        collapse_addresses([IPv4Network('192.0.2.0/25'),
+                            IPv4Network('192.0.2.128/25')]) ->
+                           [IPv4Network('192.0.2.0/24')]
+
+    Args:
+        addresses: An iterator of IPv4Network or IPv6Network objects.
+
+    Returns:
+        An iterator of the collapsed IPv(4|6)Network objects.
+
+    Raises:
+        TypeError: If passed a list of mixed version objects.
+
+    """
+    addrs = []
+    ips = []
+    nets = []
+
+    # split IP addresses and networks
+    for ip in addresses:
+        if isinstance(ip, _BaseAddress):
+            if ips and ips[-1]._version != ip._version:
+                raise TypeError("%s and %s are not of the same version" % (
+                                ip, ips[-1]))
+            ips.append(ip)
+        elif ip._prefixlen == ip._max_prefixlen:
+            if ips and ips[-1]._version != ip._version:
+                raise TypeError("%s and %s are not of the same version" % (
+                                ip, ips[-1]))
+            try:
+                ips.append(ip.ip)
+            except AttributeError:
+                ips.append(ip.network_address)
+        else:
+            if nets and nets[-1]._version != ip._version:
+                raise TypeError("%s and %s are not of the same version" % (
+                                ip, nets[-1]))
+            nets.append(ip)
+
+    # sort and dedup
+    ips = sorted(set(ips))
+
+    # find consecutive address ranges in the sorted sequence and summarize them
+    if ips:
+        for first, last in _find_address_range(ips):
+            addrs.extend(summarize_address_range(first, last))
+
+    return _collapse_addresses_internal(addrs + nets)
+
+
+def get_mixed_type_key(obj):
+    """Return a key suitable for sorting between networks and addresses.
+
+    Address and Network objects are not sortable by default; they're
+    fundamentally different so the expression
+
+        IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
+
+    doesn't make any sense.  There are times, however, when you may wish to
+    have ipaddress sort these for you anyway. If you need to do this, you
+    can use this function as the key= argument to sorted().
+
+    Args:
+      obj: either a Network or Address object.
+    Returns:
+      appropriate key.
+
+    """
+    if isinstance(obj, _BaseNetwork):
+        return obj._get_networks_key()
+    elif isinstance(obj, _BaseAddress):
+        return obj._get_address_key()
+    return NotImplemented
+
+
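+# For illustration (a sketch): a mixed list sorts cleanly once this key is
+# supplied, since the keys are plain tuples.
+#
+#     sorted([IPv4Network(u'192.0.2.0/24'), IPv4Address(u'192.0.2.1')],
+#            key=get_mixed_type_key)
+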
+class _IPAddressBase(_TotalOrderingMixin):
+
+    """The mother class."""
+
+    __slots__ = ()
+
+    @property
+    def exploded(self):
+        """Return the longhand version of the IP address as a string."""
+        return self._explode_shorthand_ip_string()
+
+    @property
+    def compressed(self):
+        """Return the shorthand version of the IP address as a string."""
+        return _compat_str(self)
+
+    @property
+    def reverse_pointer(self):
+        """The name of the reverse DNS pointer for the IP address, e.g.:
+            >>> ipaddress.ip_address("127.0.0.1").reverse_pointer
+            '1.0.0.127.in-addr.arpa'
+            >>> ipaddress.ip_address("2001:db8::1").reverse_pointer
+            '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
+
+        """
+        return self._reverse_pointer()
+
+    @property
+    def version(self):
+        msg = '%200s has no version specified' % (type(self),)
+        raise NotImplementedError(msg)
+
+    def _check_int_address(self, address):
+        if address < 0:
+            msg = "%d (< 0) is not permitted as an IPv%d address"
+            raise AddressValueError(msg % (address, self._version))
+        if address > self._ALL_ONES:
+            msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
+            raise AddressValueError(msg % (address, self._max_prefixlen,
+                                           self._version))
+
+    def _check_packed_address(self, address, expected_len):
+        address_len = len(address)
+        if address_len != expected_len:
+            msg = (
+                '%r (len %d != %d) is not permitted as an IPv%d address. '
+                'Did you pass in a bytes (str in Python 2) instead of'
+                ' a unicode object?')
+            raise AddressValueError(msg % (address, address_len,
+                                           expected_len, self._version))
+
+    @classmethod
+    def _ip_int_from_prefix(cls, prefixlen):
+        """Turn the prefix length into a bitwise netmask
+
+        Args:
+            prefixlen: An integer, the prefix length.
+
+        Returns:
+            An integer.
+
+        """
+        return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
+
+    @classmethod
+    def _prefix_from_ip_int(cls, ip_int):
+        """Return prefix length from the bitwise netmask.
+
+        Args:
+            ip_int: An integer, the netmask in expanded bitwise format
+
+        Returns:
+            An integer, the prefix length.
+
+        Raises:
+            ValueError: If the input intermingles zeroes & ones
+        """
+        trailing_zeroes = _count_righthand_zero_bits(ip_int,
+                                                     cls._max_prefixlen)
+        prefixlen = cls._max_prefixlen - trailing_zeroes
+        leading_ones = ip_int >> trailing_zeroes
+        all_ones = (1 << prefixlen) - 1
+        if leading_ones != all_ones:
+            byteslen = cls._max_prefixlen // 8
+            details = _compat_to_bytes(ip_int, byteslen, 'big')
+            msg = 'Netmask pattern %r mixes zeroes & ones'
+            raise ValueError(msg % details)
+        return prefixlen
+
+    @classmethod
+    def _report_invalid_netmask(cls, netmask_str):
+        msg = '%r is not a valid netmask' % netmask_str
+        raise NetmaskValueError(msg)
+
+    @classmethod
+    def _prefix_from_prefix_string(cls, prefixlen_str):
+        """Return prefix length from a numeric string
+
+        Args:
+            prefixlen_str: The string to be converted
+
+        Returns:
+            An integer, the prefix length.
+
+        Raises:
+            NetmaskValueError: If the input is not a valid netmask
+        """
+        # int allows a leading +/- as well as surrounding whitespace,
+        # so we ensure that isn't the case
+        if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
+            cls._report_invalid_netmask(prefixlen_str)
+        try:
+            prefixlen = int(prefixlen_str)
+        except ValueError:
+            cls._report_invalid_netmask(prefixlen_str)
+        if not (0 <= prefixlen <= cls._max_prefixlen):
+            cls._report_invalid_netmask(prefixlen_str)
+        return prefixlen
+
+    @classmethod
+    def _prefix_from_ip_string(cls, ip_str):
+        """Turn a netmask/hostmask string into a prefix length
+
+        Args:
+            ip_str: The netmask/hostmask to be converted
+
+        Returns:
+            An integer, the prefix length.
+
+        Raises:
+            NetmaskValueError: If the input is not a valid netmask/hostmask
+        """
+        # Parse the netmask/hostmask like an IP address.
+        try:
+            ip_int = cls._ip_int_from_string(ip_str)
+        except AddressValueError:
+            cls._report_invalid_netmask(ip_str)
+
+        # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
+        # Note that the two ambiguous cases (all-ones and all-zeroes) are
+        # treated as netmasks.
+        try:
+            return cls._prefix_from_ip_int(ip_int)
+        except ValueError:
+            pass
+
+        # Invert the bits, and try matching a /0+1+/ hostmask instead.
+        ip_int ^= cls._ALL_ONES
+        try:
+            return cls._prefix_from_ip_int(ip_int)
+        except ValueError:
+            cls._report_invalid_netmask(ip_str)
+
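+    # For illustration, the two attempts above accept both mask spellings:
+    #
+    #     IPv4Network(u'0.0.0.0/255.255.255.0').prefixlen  # netmask  -> 24
+    #     IPv4Network(u'0.0.0.0/0.0.0.255').prefixlen      # hostmask -> 24
+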
+    def __reduce__(self):
+        return self.__class__, (_compat_str(self),)
+
+
+class _BaseAddress(_IPAddressBase):
+
+    """A generic IP object.
+
+    This IP class contains the version independent methods which are
+    used by single IP addresses.
+    """
+
+    __slots__ = ()
+
+    def __int__(self):
+        return self._ip
+
+    def __eq__(self, other):
+        try:
+            return (self._ip == other._ip and
+                    self._version == other._version)
+        except AttributeError:
+            return NotImplemented
+
+    def __lt__(self, other):
+        if not isinstance(other, _IPAddressBase):
+            return NotImplemented
+        if not isinstance(other, _BaseAddress):
+            raise TypeError('%s and %s are not of the same type' % (
+                self, other))
+        if self._version != other._version:
+            raise TypeError('%s and %s are not of the same version' % (
+                self, other))
+        if self._ip != other._ip:
+            return self._ip < other._ip
+        return False
+
+    # Shorthand for integer addition and subtraction. This is not
+    # meant to ever support addition/subtraction of addresses.
+    def __add__(self, other):
+        if not isinstance(other, _compat_int_types):
+            return NotImplemented
+        return self.__class__(int(self) + other)
+
+    def __sub__(self, other):
+        if not isinstance(other, _compat_int_types):
+            return NotImplemented
+        return self.__class__(int(self) - other)
+
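+    # For illustration: the integer shorthand steps through neighbouring
+    # addresses without any netmask arithmetic.
+    #
+    #     IPv4Address(u'192.0.2.1') + 1   == IPv4Address(u'192.0.2.2')
+    #     IPv4Address(u'192.0.2.10') - 9  == IPv4Address(u'192.0.2.1')
+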
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
+
+    def __str__(self):
+        return _compat_str(self._string_from_ip_int(self._ip))
+
+    def __hash__(self):
+        return hash(hex(int(self._ip)))
+
+    def _get_address_key(self):
+        return (self._version, self)
+
+    def __reduce__(self):
+        return self.__class__, (self._ip,)
+
+
+class _BaseNetwork(_IPAddressBase):
+
+    """A generic IP network object.
+
+    This IP class contains the version independent methods which are
+    used by networks.
+
+    """
+    def __init__(self, address):
+        self._cache = {}
+
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
+
+    def __str__(self):
+        return '%s/%d' % (self.network_address, self.prefixlen)
+
+    def hosts(self):
+        """Generate Iterator over usable hosts in a network.
+
+        This is like __iter__ except it doesn't return the network
+        or broadcast addresses.
+
+        """
+        network = int(self.network_address)
+        broadcast = int(self.broadcast_address)
+        for x in _compat_range(network + 1, broadcast):
+            yield self._address_class(x)
+
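+    # For illustration: on 192.0.2.0/30 this skips the network (.0) and
+    # broadcast (.3) addresses.
+    #
+    #     list(IPv4Network(u'192.0.2.0/30').hosts())
+    #     # [IPv4Address(u'192.0.2.1'), IPv4Address(u'192.0.2.2')]
+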
+    def __iter__(self):
+        network = int(self.network_address)
+        broadcast = int(self.broadcast_address)
+        for x in _compat_range(network, broadcast + 1):
+            yield self._address_class(x)
+
+    def __getitem__(self, n):
+        network = int(self.network_address)
+        broadcast = int(self.broadcast_address)
+        if n >= 0:
+            if network + n > broadcast:
+                raise IndexError('address out of range')
+            return self._address_class(network + n)
+        else:
+            n += 1
+            if broadcast + n < network:
+                raise IndexError('address out of range')
+            return self._address_class(broadcast + n)
+
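+    # For illustration: indexing counts up from the network address, and
+    # negative indices count back from the broadcast address.
+    #
+    #     IPv4Network(u'192.0.2.0/24')[0]   == IPv4Address(u'192.0.2.0')
+    #     IPv4Network(u'192.0.2.0/24')[-1]  == IPv4Address(u'192.0.2.255')
+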
+    def __lt__(self, other):
+        if not isinstance(other, _IPAddressBase):
+            return NotImplemented
+        if not isinstance(other, _BaseNetwork):
+            raise TypeError('%s and %s are not of the same type' % (
+                            self, other))
+        if self._version != other._version:
+            raise TypeError('%s and %s are not of the same version' % (
+                            self, other))
+        if self.network_address != other.network_address:
+            return self.network_address < other.network_address
+        if self.netmask != other.netmask:
+            return self.netmask < other.netmask
+        return False
+
+    def __eq__(self, other):
+        try:
+            return (self._version == other._version and
+                    self.network_address == other.network_address and
+                    int(self.netmask) == int(other.netmask))
+        except AttributeError:
+            return NotImplemented
+
+    def __hash__(self):
+        return hash(int(self.network_address) ^ int(self.netmask))
+
+    def __contains__(self, other):
+        # always false if one is v4 and the other is v6.
+        if self._version != other._version:
+            return False
+        # dealing with another network.
+        if isinstance(other, _BaseNetwork):
+            return False
+        # dealing with another address
+        else:
+            # address
+            return (int(self.network_address) <= int(other._ip) <=
+                    int(self.broadcast_address))
+
+    def overlaps(self, other):
+        """Tell if self is partly contained in other."""
+        return self.network_address in other or (
+            self.broadcast_address in other or (
+                other.network_address in self or (
+                    other.broadcast_address in self)))
+
+    @property
+    def broadcast_address(self):
+        x = self._cache.get('broadcast_address')
+        if x is None:
+            x = self._address_class(int(self.network_address) |
+                                    int(self.hostmask))
+            self._cache['broadcast_address'] = x
+        return x
+
+    @property
+    def hostmask(self):
+        x = self._cache.get('hostmask')
+        if x is None:
+            x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
+            self._cache['hostmask'] = x
+        return x
+
+    @property
+    def with_prefixlen(self):
+        return '%s/%d' % (self.network_address, self._prefixlen)
+
+    @property
+    def with_netmask(self):
+        return '%s/%s' % (self.network_address, self.netmask)
+
+    @property
+    def with_hostmask(self):
+        return '%s/%s' % (self.network_address, self.hostmask)
+
+    @property
+    def num_addresses(self):
+        """Number of hosts in the current subnet."""
+        return int(self.broadcast_address) - int(self.network_address) + 1
+
+    @property
+    def _address_class(self):
+        # Returning bare address objects (rather than interfaces) allows for
+        # more consistent behaviour across the network address, broadcast
+        # address and individual host addresses.
+        msg = '%200s has no associated address class' % (type(self),)
+        raise NotImplementedError(msg)
+
+    @property
+    def prefixlen(self):
+        return self._prefixlen
+
+    def address_exclude(self, other):
+        """Remove an address from a larger block.
+
+        For example:
+
+            addr1 = ip_network('192.0.2.0/28')
+            addr2 = ip_network('192.0.2.1/32')
+            list(addr1.address_exclude(addr2)) =
+                [IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
+                 IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
+
+        or IPv6:
+
+            addr1 = ip_network('2001:db8::1/32')
+            addr2 = ip_network('2001:db8::1/128')
+            list(addr1.address_exclude(addr2)) =
+                [ip_network('2001:db8::1/128'),
+                 ip_network('2001:db8::2/127'),
+                 ip_network('2001:db8::4/126'),
+                 ip_network('2001:db8::8/125'),
+                 ...
+                 ip_network('2001:db8:8000::/33')]
+
+        Args:
+            other: An IPv4Network or IPv6Network object of the same type.
+
+        Returns:
+            An iterator of the IPv(4|6)Network objects which is self
+            minus other.
+
+        Raises:
+            TypeError: If self and other are of differing address
+              versions, or if other is not a network object.
+            ValueError: If other is not completely contained by self.
+
+        """
+        if not self._version == other._version:
+            raise TypeError("%s and %s are not of the same version" % (
+                            self, other))
+
+        if not isinstance(other, _BaseNetwork):
+            raise TypeError("%s is not a network object" % other)
+
+        if not other.subnet_of(self):
+            raise ValueError('%s not contained in %s' % (other, self))
+        if other == self:
+            return
+
+        # Make sure we're comparing the network of other.
+        other = other.__class__('%s/%s' % (other.network_address,
+                                           other.prefixlen))
+
+        s1, s2 = self.subnets()
+        while s1 != other and s2 != other:
+            if other.subnet_of(s1):
+                yield s2
+                s1, s2 = s1.subnets()
+            elif other.subnet_of(s2):
+                yield s1
+                s1, s2 = s2.subnets()
+            else:
+                # If we got here, there's a bug somewhere.
+                raise AssertionError('Error performing exclusion: '
+                                     's1: %s s2: %s other: %s' %
+                                     (s1, s2, other))
+        if s1 == other:
+            yield s2
+        elif s2 == other:
+            yield s1
+        else:
+            # If we got here, there's a bug somewhere.
+            raise AssertionError('Error performing exclusion: '
+                                 's1: %s s2: %s other: %s' %
+                                 (s1, s2, other))
+
+    def compare_networks(self, other):
+        """Compare two IP objects.
+
+        This is only concerned about the comparison of the integer
+        representation of the network addresses.  This means that the
+        host bits aren't considered at all in this method.  If you want
+        to compare host bits, you can easily enough do a
+        'HostA._ip < HostB._ip'
+
+        Args:
+            other: An IP object.
+
+        Returns:
+            If the IP versions of self and other are the same, returns:
+
+            -1 if self < other:
+              eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
+              IPv6Network('2001:db8::1000/124') <
+                  IPv6Network('2001:db8::2000/124')
+            0 if self == other
+              eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
+              IPv6Network('2001:db8::1000/124') ==
+                  IPv6Network('2001:db8::1000/124')
+            1 if self > other
+              eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
+                  IPv6Network('2001:db8::2000/124') >
+                      IPv6Network('2001:db8::1000/124')
+
+        Raises:
+            TypeError if the IP versions are different.
+
+        """
+        # does this need to raise a ValueError?
+        if self._version != other._version:
+            raise TypeError('%s and %s are not of the same type' % (
+                            self, other))
+        # self._version == other._version below here:
+        if self.network_address < other.network_address:
+            return -1
+        if self.network_address > other.network_address:
+            return 1
+        # self.network_address == other.network_address below here:
+        if self.netmask < other.netmask:
+            return -1
+        if self.netmask > other.netmask:
+            return 1
+        return 0
+
+    def _get_networks_key(self):
+        """Network-only key function.
+
+        Returns an object that identifies this address' network and
+        netmask. This function is a suitable "key" argument for sorted()
+        and list.sort().
+
+        """
+        return (self._version, self.network_address, self.netmask)
+
+    def subnets(self, prefixlen_diff=1, new_prefix=None):
+        """The subnets which join to make the current subnet.
+
+        In the case that self contains only one IP
+        (self._prefixlen == 32 for IPv4 or self._prefixlen == 128
+        for IPv6), yield an iterator with just ourself.
+
+        Args:
+            prefixlen_diff: An integer, the amount the prefix length
+              should be increased by. This should not be set if
+              new_prefix is also set.
+            new_prefix: The desired new prefix length. This must be a
+              larger number (smaller prefix) than the existing prefix.
+              This should not be set if prefixlen_diff is also set.
+
+        Returns:
+            An iterator of IPv(4|6) objects.
+
+        Raises:
+            ValueError: The prefixlen_diff is too small or too large.
+                OR
+            prefixlen_diff and new_prefix are both set or new_prefix
+              is a smaller number than the current prefix (smaller
+              number means a larger network)
+
+        """
+        if self._prefixlen == self._max_prefixlen:
+            yield self
+            return
+
+        if new_prefix is not None:
+            if new_prefix < self._prefixlen:
+                raise ValueError('new prefix must be longer')
+            if prefixlen_diff != 1:
+                raise ValueError('cannot set prefixlen_diff and new_prefix')
+            prefixlen_diff = new_prefix - self._prefixlen
+
+        if prefixlen_diff < 0:
+            raise ValueError('prefix length diff must be > 0')
+        new_prefixlen = self._prefixlen + prefixlen_diff
+
+        if new_prefixlen > self._max_prefixlen:
+            raise ValueError(
+                'prefix length diff %d is invalid for netblock %s' % (
+                    new_prefixlen, self))
+
+        start = int(self.network_address)
+        end = int(self.broadcast_address) + 1
+        step = (int(self.hostmask) + 1) >> prefixlen_diff
+        for new_addr in _compat_range(start, end, step):
+            current = self.__class__((new_addr, new_prefixlen))
+            yield current
+
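+    # For illustration, both spellings split a /24 into four /26 blocks:
+    #
+    #     list(IPv4Network(u'192.0.2.0/24').subnets(prefixlen_diff=2))
+    #     list(IPv4Network(u'192.0.2.0/24').subnets(new_prefix=26))
+    #     # [IPv4Network(u'192.0.2.0/26'), IPv4Network(u'192.0.2.64/26'),
+    #     #  IPv4Network(u'192.0.2.128/26'), IPv4Network(u'192.0.2.192/26')]
+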
+    def supernet(self, prefixlen_diff=1, new_prefix=None):
+        """The supernet containing the current network.
+
+        Args:
+            prefixlen_diff: An integer, the amount the prefix length of
+              the network should be decreased by.  For example, given a
+              /24 network and a prefixlen_diff of 3, a supernet with a
+              /21 netmask is returned.
+
+        Returns:
+            An IPv4 network object.
+
+        Raises:
+            ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
+              a negative prefix length.
+                OR
+            If prefixlen_diff and new_prefix are both set or new_prefix is a
+              larger number than the current prefix (larger number means a
+              smaller network)
+
+        """
+        if self._prefixlen == 0:
+            return self
+
+        if new_prefix is not None:
+            if new_prefix > self._prefixlen:
+                raise ValueError('new prefix must be shorter')
+            if prefixlen_diff != 1:
+                raise ValueError('cannot set prefixlen_diff and new_prefix')
+            prefixlen_diff = self._prefixlen - new_prefix
+
+        new_prefixlen = self.prefixlen - prefixlen_diff
+        if new_prefixlen < 0:
+            raise ValueError(
+                'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
+                (self.prefixlen, prefixlen_diff))
+        return self.__class__((
+            int(self.network_address) & (int(self.netmask) << prefixlen_diff),
+            new_prefixlen))
+
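+    # For illustration: the network address is simply re-masked to the
+    # shorter prefix.
+    #
+    #     IPv4Network(u'192.0.2.0/24').supernet(prefixlen_diff=3)
+    #     # IPv4Network(u'192.0.0.0/21')
+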
+    @property
+    def is_multicast(self):
+        """Test if the address is reserved for multicast use.
+
+        Returns:
+            A boolean, True if the address is a multicast address.
+            See RFC 2373 2.7 for details.
+
+        """
+        return (self.network_address.is_multicast and
+                self.broadcast_address.is_multicast)
+
+    @staticmethod
+    def _is_subnet_of(a, b):
+        try:
+            # Always false if one is v4 and the other is v6.
+            if a._version != b._version:
+                raise TypeError("%s and %s are not of the same version" (a, b))
+            return (b.network_address <= a.network_address and
+                    b.broadcast_address >= a.broadcast_address)
+        except AttributeError:
+            raise TypeError("Unable to test subnet containment "
+                            "between %s and %s" % (a, b))
+
+    def subnet_of(self, other):
+        """Return True if this network is a subnet of other."""
+        return self._is_subnet_of(self, other)
+
+    def supernet_of(self, other):
+        """Return True if this network is a supernet of other."""
+        return self._is_subnet_of(other, self)
+
+    @property
+    def is_reserved(self):
+        """Test if the address is otherwise IETF reserved.
+
+        Returns:
+            A boolean, True if the address is within one of the
+            reserved IPv6 Network ranges.
+
+        """
+        return (self.network_address.is_reserved and
+                self.broadcast_address.is_reserved)
+
+    @property
+    def is_link_local(self):
+        """Test if the address is reserved for link-local.
+
+        Returns:
+            A boolean, True if the address is reserved per RFC 4291.
+
+        """
+        return (self.network_address.is_link_local and
+                self.broadcast_address.is_link_local)
+
+    @property
+    def is_private(self):
+        """Test if this address is allocated for private networks.
+
+        Returns:
+            A boolean, True if the address is reserved per
+            iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+        """
+        return (self.network_address.is_private and
+                self.broadcast_address.is_private)
+
+    @property
+    def is_global(self):
+        """Test if this address is allocated for public networks.
+
+        Returns:
+            A boolean, True if the address is not reserved per
+            iana-ipv4-special-registry or iana-ipv6-special-registry.
+
+        """
+        return not self.is_private
+
+    @property
+    def is_unspecified(self):
+        """Test if the address is unspecified.
+
+        Returns:
+            A boolean, True if this is the unspecified address as defined in
+            RFC 2373 2.5.2.
+
+        """
+        return (self.network_address.is_unspecified and
+                self.broadcast_address.is_unspecified)
+
+    @property
+    def is_loopback(self):
+        """Test if the address is a loopback address.
+
+        Returns:
+            A boolean, True if the address is a loopback address as defined in
+            RFC 2373 2.5.3.
+
+        """
+        return (self.network_address.is_loopback and
+                self.broadcast_address.is_loopback)
+
+
+class _BaseV4(object):
+
+    """Base IPv4 object.
+
+    The following methods are used by IPv4 objects in both single IP
+    addresses and networks.
+
+    """
+
+    __slots__ = ()
+    _version = 4
+    # Equivalent to 255.255.255.255 or 32 bits of 1's.
+    _ALL_ONES = (2 ** IPV4LENGTH) - 1
+    _DECIMAL_DIGITS = frozenset('0123456789')
+
+    # the valid octets for host and netmasks. only useful for IPv4.
+    _valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
+
+    _max_prefixlen = IPV4LENGTH
+    # There are only a handful of valid v4 netmasks, so we cache them all
+    # when constructed (see _make_netmask()).
+    _netmask_cache = {}
+
+    def _explode_shorthand_ip_string(self):
+        return _compat_str(self)
+
+    @classmethod
+    def _make_netmask(cls, arg):
+        """Make a (netmask, prefix_len) tuple from the given argument.
+
+        Argument can be:
+        - an integer (the prefix length)
+        - a string representing the prefix length (e.g. "24")
+        - a string representing the prefix netmask (e.g. "255.255.255.0")
+        """
+        if arg not in cls._netmask_cache:
+            if isinstance(arg, _compat_int_types):
+                prefixlen = arg
+            else:
+                try:
+                    # Check for a netmask in prefix length form
+                    prefixlen = cls._prefix_from_prefix_string(arg)
+                except NetmaskValueError:
+                    # Check for a netmask or hostmask in dotted-quad form.
+                    # This may raise NetmaskValueError.
+                    prefixlen = cls._prefix_from_ip_string(arg)
+            netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
+            cls._netmask_cache[arg] = netmask, prefixlen
+        return cls._netmask_cache[arg]
+
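+    # For illustration, all of these resolve to the same cached pair:
+    #
+    #     IPv4Network._make_netmask(24)
+    #     IPv4Network._make_netmask(u'24')
+    #     IPv4Network._make_netmask(u'255.255.255.0')
+    #     # (IPv4Address(u'255.255.255.0'), 24)
+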
+    @classmethod
+    def _ip_int_from_string(cls, ip_str):
+        """Turn the given IP string into an integer for comparison.
+
+        Args:
+            ip_str: A string, the IP ip_str.
+
+        Returns:
+            The IP ip_str as an integer.
+
+        Raises:
+            AddressValueError: if ip_str isn't a valid IPv4 Address.
+
+        """
+        if not ip_str:
+            raise AddressValueError('Address cannot be empty')
+
+        octets = ip_str.split('.')
+        if len(octets) != 4:
+            raise AddressValueError("Expected 4 octets in %r" % ip_str)
+
+        try:
+            return _compat_int_from_byte_vals(
+                map(cls._parse_octet, octets), 'big')
+        except ValueError as exc:
+            raise AddressValueError("%s in %r" % (exc, ip_str))
+
+    @classmethod
+    def _parse_octet(cls, octet_str):
+        """Convert a decimal octet into an integer.
+
+        Args:
+            octet_str: A string, the number to parse.
+
+        Returns:
+            The octet as an integer.
+
+        Raises:
+            ValueError: if the octet isn't strictly a decimal from [0..255].
+
+        """
+        if not octet_str:
+            raise ValueError("Empty octet not permitted")
+        # Whitelist the characters, since int() allows a lot of bizarre stuff.
+        if not cls._DECIMAL_DIGITS.issuperset(octet_str):
+            msg = "Only decimal digits permitted in %r"
+            raise ValueError(msg % octet_str)
+        # We do the length check second, since the invalid character error
+        # is likely to be more informative for the user
+        if len(octet_str) > 3:
+            msg = "At most 3 characters permitted in %r"
+            raise ValueError(msg % octet_str)
+        # Convert to integer (we know digits are legal)
+        octet_int = int(octet_str, 10)
+        # Any octets that look like they *might* be written in octal,
+        # and which don't look exactly the same in both octal and
+        # decimal are rejected as ambiguous
+        if octet_int > 7 and octet_str[0] == '0':
+            msg = "Ambiguous (octal/decimal) value in %r not permitted"
+            raise ValueError(msg % octet_str)
+        if octet_int > 255:
+            raise ValueError("Octet %d (> 255) not permitted" % octet_int)
+        return octet_int
+
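+    # For illustration, each of these is rejected by the checks above and
+    # surfaces as AddressValueError:
+    #
+    #     IPv4Address(u'192.0.2.256')  # octet > 255
+    #     IPv4Address(u'192.0.2.08')   # ambiguous octal/decimal
+    #     IPv4Address(u'192.0.2.1e0')  # non-decimal characters
+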
+    @classmethod
+    def _string_from_ip_int(cls, ip_int):
+        """Turns a 32-bit integer into dotted decimal notation.
+
+        Args:
+            ip_int: An integer, the IP address.
+
+        Returns:
+            The IP address as a string in dotted decimal notation.
+
+        """
+        return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
+                                    if isinstance(b, bytes)
+                                    else b)
+                        for b in _compat_to_bytes(ip_int, 4, 'big'))
+
+    def _is_hostmask(self, ip_str):
+        """Test if the IP string is a hostmask (rather than a netmask).
+
+        Args:
+            ip_str: A string, the potential hostmask.
+
+        Returns:
+            A boolean, True if the IP string is a hostmask.
+
+        """
+        bits = ip_str.split('.')
+        try:
+            parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
+        except ValueError:
+            return False
+        if len(parts) != len(bits):
+            return False
+        if parts[0] < parts[-1]:
+            return True
+        return False
+
+    def _reverse_pointer(self):
+        """Return the reverse DNS pointer name for the IPv4 address.
+
+        This implements the method described in RFC1035 3.5.
+
+        """
+        reverse_octets = _compat_str(self).split('.')[::-1]
+        return '.'.join(reverse_octets) + '.in-addr.arpa'
+
+    @property
+    def max_prefixlen(self):
+        return self._max_prefixlen
+
+    @property
+    def version(self):
+        return self._version
+
+
+class IPv4Address(_BaseV4, _BaseAddress):
+
+    """Represent and manipulate single IPv4 Addresses."""
+
+    __slots__ = ('_ip', '__weakref__')
+
+    def __init__(self, address):
+
+        """
+        Args:
+            address: A string or integer representing the IP
+
+              Additionally, an integer can be passed, so
+              IPv4Address('192.0.2.1') == IPv4Address(3221225985),
+              or, more generally,
+              IPv4Address(int(IPv4Address('192.0.2.1'))) ==
+                IPv4Address('192.0.2.1')
+
+        Raises:
+            AddressValueError: If the address isn't a valid IPv4 address.
+
+        """
+        # Efficient constructor from integer.
+        if isinstance(address, _compat_int_types):
+            self._check_int_address(address)
+            self._ip = address
+            return
+
+        # Constructing from a packed address
+        if isinstance(address, bytes):
+            self._check_packed_address(address, 4)
+            bvs = _compat_bytes_to_byte_vals(address)
+            self._ip = _compat_int_from_byte_vals(bvs, 'big')
+            return
+
+        # Assume input argument to be string or any object representation
+        # which converts into a formatted IP string.
+        addr_str = _compat_str(address)
+        if '/' in addr_str:
+            raise AddressValueError("Unexpected '/' in %r" % address)
+        self._ip = self._ip_int_from_string(addr_str)
+
+    @property
+    def packed(self):
+        """The binary representation of this address."""
+        return v4_int_to_packed(self._ip)
+
+    @property
+    def is_reserved(self):
+        """Test if the address is otherwise IETF reserved.
+
+         Returns:
+             A boolean, True if the address is within the
+             reserved IPv4 Network range.
+
+        """
+        return self in self._constants._reserved_network
+
+    @property
+    def is_private(self):
+        """Test if this address is allocated for private networks.
+
+        Returns:
+            A boolean, True if the address is reserved per
+            iana-ipv4-special-registry.
+
+        """
+        return any(self in net for net in self._constants._private_networks)
+
+    @property
+    def is_global(self):
+        return (
+            self not in self._constants._public_network and
+            not self.is_private)
+
+    @property
+    def is_multicast(self):
+        """Test if the address is reserved for multicast use.
+
+        Returns:
+            A boolean, True if the address is multicast.
+            See RFC 3171 for details.
+
+        """
+        return self in self._constants._multicast_network
+
+    @property
+    def is_unspecified(self):
+        """Test if the address is unspecified.
+
+        Returns:
+            A boolean, True if this is the unspecified address as defined in
+            RFC 5735 3.
+
+        """
+        return self == self._constants._unspecified_address
+
+    @property
+    def is_loopback(self):
+        """Test if the address is a loopback address.
+
+        Returns:
+            A boolean, True if the address is a loopback per RFC 3330.
+
+        """
+        return self in self._constants._loopback_network
+
+    @property
+    def is_link_local(self):
+        """Test if the address is reserved for link-local.
+
+        Returns:
+            A boolean, True if the address is link-local per RFC 3927.
+
+        """
+        return self in self._constants._linklocal_network
+
+
+class IPv4Interface(IPv4Address):
+
+    def __init__(self, address):
+        if isinstance(address, (bytes, _compat_int_types)):
+            IPv4Address.__init__(self, address)
+            self.network = IPv4Network(self._ip)
+            self._prefixlen = self._max_prefixlen
+            return
+
+        if isinstance(address, tuple):
+            IPv4Address.__init__(self, address[0])
+            if len(address) > 1:
+                self._prefixlen = int(address[1])
+            else:
+                self._prefixlen = self._max_prefixlen
+
+            self.network = IPv4Network(address, strict=False)
+            self.netmask = self.network.netmask
+            self.hostmask = self.network.hostmask
+            return
+
+        addr = _split_optional_netmask(address)
+        IPv4Address.__init__(self, addr[0])
+
+        self.network = IPv4Network(address, strict=False)
+        self._prefixlen = self.network._prefixlen
+
+        self.netmask = self.network.netmask
+        self.hostmask = self.network.hostmask
+
+    def __str__(self):
+        return '%s/%d' % (self._string_from_ip_int(self._ip),
+                          self.network.prefixlen)
+
+    def __eq__(self, other):
+        address_equal = IPv4Address.__eq__(self, other)
+        if not address_equal or address_equal is NotImplemented:
+            return address_equal
+        try:
+            return self.network == other.network
+        except AttributeError:
+            # An interface with an associated network is NOT the
+            # same as an unassociated address. That's why the hash
+            # takes the extra info into account.
+            return False
+
+    def __lt__(self, other):
+        address_less = IPv4Address.__lt__(self, other)
+        if address_less is NotImplemented:
+            return NotImplemented
+        try:
+            return (self.network < other.network or
+                    self.network == other.network and address_less)
+        except AttributeError:
+            # We *do* allow addresses and interfaces to be sorted. The
+            # unassociated address is considered less than all interfaces.
+            return False
+
+    def __hash__(self):
+        return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+    __reduce__ = _IPAddressBase.__reduce__
+
+    @property
+    def ip(self):
+        return IPv4Address(self._ip)
+
+    @property
+    def with_prefixlen(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self._prefixlen)
+
+    @property
+    def with_netmask(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self.netmask)
+
+    @property
+    def with_hostmask(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self.hostmask)
+
+
+class IPv4Network(_BaseV4, _BaseNetwork):
+
+    """This class represents and manipulates 32-bit IPv4 network + addresses..
+
+    Attributes: [examples for IPv4Network('192.0.2.0/27')]
+        .network_address: IPv4Address('192.0.2.0')
+        .hostmask: IPv4Address('0.0.0.31')
+        .broadcast_address: IPv4Address('192.0.2.31')
+        .netmask: IPv4Address('255.255.255.224')
+        .prefixlen: 27
+
+    """
+    # Class to use when creating address objects
+    _address_class = IPv4Address
+
+    def __init__(self, address, strict=True):
+
+        """Instantiate a new IPv4 network object.
+
+        Args:
+            address: A string or integer representing the IP [& network].
+              '192.0.2.0/24'
+              '192.0.2.0/255.255.255.0'
+              '192.0.0.2/0.0.0.255'
+              are all functionally the same in IPv4. Similarly,
+              '192.0.2.1'
+              '192.0.2.1/255.255.255.255'
+              '192.0.2.1/32'
+              are also functionally equivalent. That is to say, failing to
+              provide a subnet mask will create an object with a mask of /32.
+
+              If the mask (portion after the / in the argument) is given in
+              dotted quad form, it is treated as a netmask if it starts with a
+              non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
+              starts with a zero field (e.g. 0.255.255.255 == /8), with the
+              single exception of an all-zero mask which is treated as a
+              netmask == /0. If no mask is given, a default of /32 is used.
+
+              Additionally, an integer can be passed, so
+              IPv4Network('192.0.2.1') == IPv4Network(3221225985)
+              or, more generally
+              IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
+                IPv4Interface('192.0.2.1')
+
+        Raises:
+            AddressValueError: If address isn't a valid IPv4 address.
+            NetmaskValueError: If the netmask isn't valid for
+              an IPv4 address.
+            ValueError: If strict is True and a network address is not
+              supplied.
+
+        """
+        _BaseNetwork.__init__(self, address)
+
+        # Constructing from a packed address or integer
+        if isinstance(address, (_compat_int_types, bytes)):
+            self.network_address = IPv4Address(address)
+            self.netmask, self._prefixlen = self._make_netmask(
+                self._max_prefixlen)
+            # fixme: address/network test here.
+            return
+
+        if isinstance(address, tuple):
+            if len(address) > 1:
+                arg = address[1]
+            else:
+                # We weren't given an address[1]
+                arg = self._max_prefixlen
+            self.network_address = IPv4Address(address[0])
+            self.netmask, self._prefixlen = self._make_netmask(arg)
+            packed = int(self.network_address)
+            if packed & int(self.netmask) != packed:
+                if strict:
+                    raise ValueError('%s has host bits set' % self)
+                else:
+                    self.network_address = IPv4Address(packed &
+                                                       int(self.netmask))
+            return
+
+        # Assume input argument to be string or any object representation
+        # which converts into a formatted IP prefix string.
+        addr = _split_optional_netmask(address)
+        self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
+
+        if len(addr) == 2:
+            arg = addr[1]
+        else:
+            arg = self._max_prefixlen
+        self.netmask, self._prefixlen = self._make_netmask(arg)
+
+        if strict:
+            if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
+                    self.network_address):
+                raise ValueError('%s has host bits set' % self)
+        self.network_address = IPv4Address(int(self.network_address) &
+                                           int(self.netmask))
+
+        if self._prefixlen == (self._max_prefixlen - 1):
+            self.hosts = self.__iter__
+
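+    # Illustrative only: the three mask notations documented above are
+    # interchangeable, and strict=False masks off any host bits:
+    #     >>> IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/255.255.255.0')
+    #     True
+    #     >>> IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/0.0.0.255')
+    #     True
+    #     >>> IPv4Network('192.0.2.1/24', strict=False)
+    #     IPv4Network('192.0.2.0/24')
+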
+    @property
+    def is_global(self):
+        """Test if this address is allocated for public networks.
+
+        Returns:
+            A boolean, True if the address is not reserved per
+            iana-ipv4-special-registry.
+
+        """
+        return (not (self.network_address in IPv4Network('100.64.0.0/10') and
+                self.broadcast_address in IPv4Network('100.64.0.0/10')) and
+                not self.is_private)
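+
+    # Illustrative: per the logic above, the shared address space
+    # 100.64.0.0/10 (RFC 6598) is excluded from is_global:
+    #     >>> IPv4Network('100.64.0.0/10').is_global
+    #     False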
+
+
+class _IPv4Constants(object):
+
+    _linklocal_network = IPv4Network('169.254.0.0/16')
+
+    _loopback_network = IPv4Network('127.0.0.0/8')
+
+    _multicast_network = IPv4Network('224.0.0.0/4')
+
+    _public_network = IPv4Network('100.64.0.0/10')
+
+    _private_networks = [
+        IPv4Network('0.0.0.0/8'),
+        IPv4Network('10.0.0.0/8'),
+        IPv4Network('127.0.0.0/8'),
+        IPv4Network('169.254.0.0/16'),
+        IPv4Network('172.16.0.0/12'),
+        IPv4Network('192.0.0.0/29'),
+        IPv4Network('192.0.0.170/31'),
+        IPv4Network('192.0.2.0/24'),
+        IPv4Network('192.168.0.0/16'),
+        IPv4Network('198.18.0.0/15'),
+        IPv4Network('198.51.100.0/24'),
+        IPv4Network('203.0.113.0/24'),
+        IPv4Network('240.0.0.0/4'),
+        IPv4Network('255.255.255.255/32'),
+    ]
+
+    _reserved_network = IPv4Network('240.0.0.0/4')
+
+    _unspecified_address = IPv4Address('0.0.0.0')
+
+
+IPv4Address._constants = _IPv4Constants
+
+
+class _BaseV6(object):
+
+    """Base IPv6 object.
+
+    The following methods are used by IPv6 objects in both single IP
+    addresses and networks.
+
+    """
+
+    __slots__ = ()
+    _version = 6
+    _ALL_ONES = (2 ** IPV6LENGTH) - 1
+    _HEXTET_COUNT = 8
+    _HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
+    _max_prefixlen = IPV6LENGTH
+
+    # There are only a bunch of valid v6 netmasks, so we cache them all
+    # when constructed (see _make_netmask()).
+    _netmask_cache = {}
+
+    @classmethod
+    def _make_netmask(cls, arg):
+        """Make a (netmask, prefix_len) tuple from the given argument.
+
+        Argument can be:
+        - an integer (the prefix length)
+        - a string representing the prefix length (e.g. "24")
+        - a string representing the prefix netmask (e.g. "255.255.255.0")
+        """
+        if arg not in cls._netmask_cache:
+            if isinstance(arg, _compat_int_types):
+                prefixlen = arg
+            else:
+                prefixlen = cls._prefix_from_prefix_string(arg)
+            netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
+            cls._netmask_cache[arg] = netmask, prefixlen
+        return cls._netmask_cache[arg]
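+
+    # For illustration (not in the vendored source): because netmasks are
+    # cached per argument, two networks with the same prefix share one
+    # netmask object:
+    #     >>> IPv6Network('2001:db8::/32').netmask is IPv6Network('::/32').netmask
+    #     True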
+
+    @classmethod
+    def _ip_int_from_string(cls, ip_str):
+        """Turn an IPv6 ip_str into an integer.
+
+        Args:
+            ip_str: A string, the IPv6 ip_str.
+
+        Returns:
+            An int, the IPv6 address
+
+        Raises:
+            AddressValueError: if ip_str isn't a valid IPv6 Address.
+
+        """
+        if not ip_str:
+            raise AddressValueError('Address cannot be empty')
+
+        parts = ip_str.split(':')
+
+        # An IPv6 address needs at least 2 colons (3 parts).
+        _min_parts = 3
+        if len(parts) < _min_parts:
+            msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
+            raise AddressValueError(msg)
+
+        # If the address has an IPv4-style suffix, convert it to hexadecimal.
+        if '.' in parts[-1]:
+            try:
+                ipv4_int = IPv4Address(parts.pop())._ip
+            except AddressValueError as exc:
+                raise AddressValueError("%s in %r" % (exc, ip_str))
+            parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
+            parts.append('%x' % (ipv4_int & 0xFFFF))
+
+        # An IPv6 address can't have more than 8 colons (9 parts).
+        # The extra colon comes from using the "::" notation for a single
+        # leading or trailing zero part.
+        _max_parts = cls._HEXTET_COUNT + 1
+        if len(parts) > _max_parts:
+            msg = "At most %d colons permitted in %r" % (
+                _max_parts - 1, ip_str)
+            raise AddressValueError(msg)
+
+        # Disregarding the endpoints, find '::' with nothing in between.
+        # This indicates that a run of zeroes has been skipped.
+        skip_index = None
+        for i in _compat_range(1, len(parts) - 1):
+            if not parts[i]:
+                if skip_index is not None:
+                    # Can't have more than one '::'
+                    msg = "At most one '::' permitted in %r" % ip_str
+                    raise AddressValueError(msg)
+                skip_index = i
+
+        # parts_hi is the number of parts to copy from above/before the '::'
+        # parts_lo is the number of parts to copy from below/after the '::'
+        if skip_index is not None:
+            # If we found a '::', then check if it also covers the endpoints.
+            parts_hi = skip_index
+            parts_lo = len(parts) - skip_index - 1
+            if not parts[0]:
+                parts_hi -= 1
+                if parts_hi:
+                    msg = "Leading ':' only permitted as part of '::' in %r"
+                    raise AddressValueError(msg % ip_str)  # ^: requires ^::
+            if not parts[-1]:
+                parts_lo -= 1
+                if parts_lo:
+                    msg = "Trailing ':' only permitted as part of '::' in %r"
+                    raise AddressValueError(msg % ip_str)  # :$ requires ::$
+            parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
+            if parts_skipped < 1:
+                msg = "Expected at most %d other parts with '::' in %r"
+                raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
+        else:
+            # Otherwise, allocate the entire address to parts_hi.  The
+            # endpoints could still be empty, but _parse_hextet() will check
+            # for that.
+            if len(parts) != cls._HEXTET_COUNT:
+                msg = "Exactly %d parts expected without '::' in %r"
+                raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
+            if not parts[0]:
+                msg = "Leading ':' only permitted as part of '::' in %r"
+                raise AddressValueError(msg % ip_str)  # ^: requires ^::
+            if not parts[-1]:
+                msg = "Trailing ':' only permitted as part of '::' in %r"
+                raise AddressValueError(msg % ip_str)  # :$ requires ::$
+            parts_hi = len(parts)
+            parts_lo = 0
+            parts_skipped = 0
+
+        try:
+            # Now, parse the hextets into a 128-bit integer.
+            ip_int = 0
+            for i in range(parts_hi):
+                ip_int <<= 16
+                ip_int |= cls._parse_hextet(parts[i])
+            ip_int <<= 16 * parts_skipped
+            for i in range(-parts_lo, 0):
+                ip_int <<= 16
+                ip_int |= cls._parse_hextet(parts[i])
+            return ip_int
+        except ValueError as exc:
+            raise AddressValueError("%s in %r" % (exc, ip_str))
+
+    @classmethod
+    def _parse_hextet(cls, hextet_str):
+        """Convert an IPv6 hextet string into an integer.
+
+        Args:
+            hextet_str: A string, the number to parse.
+
+        Returns:
+            The hextet as an integer.
+
+        Raises:
+            ValueError: if the input isn't strictly a hex number from
+              [0..FFFF].
+
+        """
+        # Whitelist the characters, since int() allows a lot of bizarre stuff.
+        if not cls._HEX_DIGITS.issuperset(hextet_str):
+            raise ValueError("Only hex digits permitted in %r" % hextet_str)
+        # We do the length check second, since the invalid character error
+        # is likely to be more informative for the user
+        if len(hextet_str) > 4:
+            msg = "At most 4 characters permitted in %r"
+            raise ValueError(msg % hextet_str)
+        # Length check means we can skip checking the integer value
+        return int(hextet_str, 16)
+
+    @classmethod
+    def _compress_hextets(cls, hextets):
+        """Compresses a list of hextets.
+
+        Compresses a list of strings, replacing the longest continuous
+        sequence of "0" in the list with "" and adding empty strings at
+        the beginning or at the end of the string such that subsequently
+        calling ":".join(hextets) will produce the compressed version of
+        the IPv6 address.
+
+        Args:
+            hextets: A list of strings, the hextets to compress.
+
+        Returns:
+            A list of strings.
+
+        """
+        best_doublecolon_start = -1
+        best_doublecolon_len = 0
+        doublecolon_start = -1
+        doublecolon_len = 0
+        for index, hextet in enumerate(hextets):
+            if hextet == '0':
+                doublecolon_len += 1
+                if doublecolon_start == -1:
+                    # Start of a sequence of zeros.
+                    doublecolon_start = index
+                if doublecolon_len > best_doublecolon_len:
+                    # This is the longest sequence of zeros so far.
+                    best_doublecolon_len = doublecolon_len
+                    best_doublecolon_start = doublecolon_start
+            else:
+                doublecolon_len = 0
+                doublecolon_start = -1
+
+        if best_doublecolon_len > 1:
+            best_doublecolon_end = (best_doublecolon_start +
+                                    best_doublecolon_len)
+            # For zeros at the end of the address.
+            if best_doublecolon_end == len(hextets):
+                hextets += ['']
+            hextets[best_doublecolon_start:best_doublecolon_end] = ['']
+            # For zeros at the beginning of the address.
+            if best_doublecolon_start == 0:
+                hextets = [''] + hextets
+
+        return hextets
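+
+    # For illustration: the longest run of zero hextets collapses to one
+    # empty string, so the later ':'.join() renders it as '::':
+    #     >>> _BaseV6._compress_hextets(['2001', 'db8', '0', '0', '0', '0', '0', '1'])
+    #     ['2001', 'db8', '', '1']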
+
+    @classmethod
+    def _string_from_ip_int(cls, ip_int=None):
+        """Turns a 128-bit integer into hexadecimal notation.
+
+        Args:
+            ip_int: An integer, the IP address.
+
+        Returns:
+            A string, the hexadecimal representation of the address.
+
+        Raises:
+            ValueError: The address is bigger than 128 bits of all ones.
+
+        """
+        if ip_int is None:
+            ip_int = int(cls._ip)
+
+        if ip_int > cls._ALL_ONES:
+            raise ValueError('IPv6 address is too large')
+
+        hex_str = '%032x' % ip_int
+        hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
+
+        hextets = cls._compress_hextets(hextets)
+        return ':'.join(hextets)
+
+    def _explode_shorthand_ip_string(self):
+        """Expand a shortened IPv6 address.
+
+        Returns:
+            A string, the expanded IPv6 address.
+
+        """
+        if isinstance(self, IPv6Network):
+            ip_str = _compat_str(self.network_address)
+        elif isinstance(self, IPv6Interface):
+            ip_str = _compat_str(self.ip)
+        else:
+            ip_str = _compat_str(self)
+
+        ip_int = self._ip_int_from_string(ip_str)
+        hex_str = '%032x' % ip_int
+        parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
+        if isinstance(self, (_BaseNetwork, IPv6Interface)):
+            return '%s/%d' % (':'.join(parts), self._prefixlen)
+        return ':'.join(parts)
+
+    def _reverse_pointer(self):
+        """Return the reverse DNS pointer name for the IPv6 address.
+
+        This implements the method described in RFC3596 2.5.
+
+        """
+        reverse_chars = self.exploded[::-1].replace(':', '')
+        return '.'.join(reverse_chars) + '.ip6.arpa'
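+
+    # Illustrative, assuming the `exploded` property from the shared base
+    # class earlier in this module:
+    #     >>> IPv6Address('2001:db8::1')._reverse_pointer()
+    #     '1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'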
+
+    @property
+    def max_prefixlen(self):
+        return self._max_prefixlen
+
+    @property
+    def version(self):
+        return self._version
+
+
+class IPv6Address(_BaseV6, _BaseAddress):
+
+    """Represent and manipulate single IPv6 Addresses."""
+
+    __slots__ = ('_ip', '__weakref__')
+
+    def __init__(self, address):
+        """Instantiate a new IPv6 address object.
+
+        Args:
+            address: A string or integer representing the IP
+
+              Additionally, an integer can be passed, so
+              IPv6Address('2001:db8::') ==
+                IPv6Address(42540766411282592856903984951653826560)
+              or, more generally
+              IPv6Address(int(IPv6Address('2001:db8::'))) ==
+                IPv6Address('2001:db8::')
+
+        Raises:
+            AddressValueError: If address isn't a valid IPv6 address.
+
+        """
+        # Efficient constructor from integer.
+        if isinstance(address, _compat_int_types):
+            self._check_int_address(address)
+            self._ip = address
+            return
+
+        # Constructing from a packed address
+        if isinstance(address, bytes):
+            self._check_packed_address(address, 16)
+            bvs = _compat_bytes_to_byte_vals(address)
+            self._ip = _compat_int_from_byte_vals(bvs, 'big')
+            return
+
+        # Assume input argument to be string or any object representation
+        # which converts into a formatted IP string.
+        addr_str = _compat_str(address)
+        if '/' in addr_str:
+            raise AddressValueError("Unexpected '/' in %r" % address)
+        self._ip = self._ip_int_from_string(addr_str)
+
+    @property
+    def packed(self):
+        """The binary representation of this address."""
+        return v6_int_to_packed(self._ip)
+
+    @property
+    def is_multicast(self):
+        """Test if the address is reserved for multicast use.
+
+        Returns:
+            A boolean, True if the address is a multicast address.
+            See RFC 2373 2.7 for details.
+
+        """
+        return self in self._constants._multicast_network
+
+    @property
+    def is_reserved(self):
+        """Test if the address is otherwise IETF reserved.
+
+        Returns:
+            A boolean, True if the address is within one of the
+            reserved IPv6 Network ranges.
+
+        """
+        return any(self in x for x in self._constants._reserved_networks)
+
+    @property
+    def is_link_local(self):
+        """Test if the address is reserved for link-local.
+
+        Returns:
+            A boolean, True if the address is reserved per RFC 4291.
+
+        """
+        return self in self._constants._linklocal_network
+
+    @property
+    def is_site_local(self):
+        """Test if the address is reserved for site-local.
+
+        Note that the site-local address space has been deprecated by RFC 3879.
+        Use is_private to test if this address is in the space of unique local
+        addresses as defined by RFC 4193.
+
+        Returns:
+            A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+        """
+        return self in self._constants._sitelocal_network
+
+    @property
+    def is_private(self):
+        """Test if this address is allocated for private networks.
+
+        Returns:
+            A boolean, True if the address is reserved per
+            iana-ipv6-special-registry.
+
+        """
+        return any(self in net for net in self._constants._private_networks)
+
+    @property
+    def is_global(self):
+        """Test if this address is allocated for public networks.
+
+        Returns:
+            A boolean, true if the address is not reserved per
+            iana-ipv6-special-registry.
+
+        """
+        return not self.is_private
+
+    @property
+    def is_unspecified(self):
+        """Test if the address is unspecified.
+
+        Returns:
+            A boolean, True if this is the unspecified address as defined in
+            RFC 2373 2.5.2.
+
+        """
+        return self._ip == 0
+
+    @property
+    def is_loopback(self):
+        """Test if the address is a loopback address.
+
+        Returns:
+            A boolean, True if the address is a loopback address as defined in
+            RFC 2373 2.5.3.
+
+        """
+        return self._ip == 1
+
+    @property
+    def ipv4_mapped(self):
+        """Return the IPv4 mapped address.
+
+        Returns:
+            If the IPv6 address is a v4 mapped address, return the
+            IPv4 mapped address. Return None otherwise.
+
+        """
+        if (self._ip >> 32) != 0xFFFF:
+            return None
+        return IPv4Address(self._ip & 0xFFFFFFFF)
+
+    @property
+    def teredo(self):
+        """Tuple of embedded teredo IPs.
+
+        Returns:
+            Tuple of the (server, client) IPs or None if the address
+            doesn't appear to be a teredo address (doesn't start with
+            2001::/32)
+
+        """
+        if (self._ip >> 96) != 0x20010000:
+            return None
+        return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
+                IPv4Address(~self._ip & 0xFFFFFFFF))
+
+    @property
+    def sixtofour(self):
+        """Return the IPv4 6to4 embedded address.
+
+        Returns:
+            The IPv4 6to4-embedded address if present or None if the
+            address doesn't appear to contain a 6to4 embedded address.
+
+        """
+        if (self._ip >> 112) != 0x2002:
+            return None
+        return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
+
+
+class IPv6Interface(IPv6Address):
+
+    def __init__(self, address):
+        if isinstance(address, (bytes, _compat_int_types)):
+            IPv6Address.__init__(self, address)
+            self.network = IPv6Network(self._ip)
+            self._prefixlen = self._max_prefixlen
+            return
+        if isinstance(address, tuple):
+            IPv6Address.__init__(self, address[0])
+            if len(address) > 1:
+                self._prefixlen = int(address[1])
+            else:
+                self._prefixlen = self._max_prefixlen
+            self.network = IPv6Network(address, strict=False)
+            self.netmask = self.network.netmask
+            self.hostmask = self.network.hostmask
+            return
+
+        addr = _split_optional_netmask(address)
+        IPv6Address.__init__(self, addr[0])
+        self.network = IPv6Network(address, strict=False)
+        self.netmask = self.network.netmask
+        self._prefixlen = self.network._prefixlen
+        self.hostmask = self.network.hostmask
+
+    def __str__(self):
+        return '%s/%d' % (self._string_from_ip_int(self._ip),
+                          self.network.prefixlen)
+
+    def __eq__(self, other):
+        address_equal = IPv6Address.__eq__(self, other)
+        if not address_equal or address_equal is NotImplemented:
+            return address_equal
+        try:
+            return self.network == other.network
+        except AttributeError:
+            # An interface with an associated network is NOT the
+            # same as an unassociated address. That's why the hash
+            # takes the extra info into account.
+            return False
+
+    def __lt__(self, other):
+        address_less = IPv6Address.__lt__(self, other)
+        if address_less is NotImplemented:
+            return NotImplemented
+        try:
+            return (self.network < other.network or
+                    self.network == other.network and address_less)
+        except AttributeError:
+            # We *do* allow addresses and interfaces to be sorted. The
+            # unassociated address is considered less than all interfaces.
+            return False
+
+    def __hash__(self):
+        return self._ip ^ self._prefixlen ^ int(self.network.network_address)
+
+    __reduce__ = _IPAddressBase.__reduce__
+
+    @property
+    def ip(self):
+        return IPv6Address(self._ip)
+
+    @property
+    def with_prefixlen(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self._prefixlen)
+
+    @property
+    def with_netmask(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self.netmask)
+
+    @property
+    def with_hostmask(self):
+        return '%s/%s' % (self._string_from_ip_int(self._ip),
+                          self.hostmask)
+
+    @property
+    def is_unspecified(self):
+        return self._ip == 0 and self.network.is_unspecified
+
+    @property
+    def is_loopback(self):
+        return self._ip == 1 and self.network.is_loopback
+
+
+class IPv6Network(_BaseV6, _BaseNetwork):
+
+    """This class represents and manipulates 128-bit IPv6 networks.
+
+    Attributes: [examples for IPv6Network('2001:db8::1000/124')]
+        .network_address: IPv6Address('2001:db8::1000')
+        .hostmask: IPv6Address('::f')
+        .broadcast_address: IPv6Address('2001:db8::100f')
+        .netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
+        .prefixlen: 124
+
+    """
+
+    # Class to use when creating address objects
+    _address_class = IPv6Address
+
+    def __init__(self, address, strict=True):
+        """Instantiate a new IPv6 Network object.
+
+        Args:
+            address: A string or integer representing the IPv6 network or the
+              IP and prefix/netmask.
+              '2001:db8::/128'
+              '2001:db8:0000:0000:0000:0000:0000:0000/128'
+              '2001:db8::'
+              are all functionally the same in IPv6.  That is to say,
+              failing to provide a subnet mask will create an object with
+              a mask of /128.
+
+              Additionally, an integer can be passed, so
+              IPv6Network('2001:db8::') ==
+                IPv6Network(42540766411282592856903984951653826560)
+              or, more generally
+              IPv6Network(int(IPv6Network('2001:db8::'))) ==
+                IPv6Network('2001:db8::')
+
+            strict: A boolean. If true, ensure that we have been passed
+              a true network address, e.g. 2001:db8::1000/124, and not an
+              IP address on a network, e.g. 2001:db8::1/124.
+
+        Raises:
+            AddressValueError: If address isn't a valid IPv6 address.
+            NetmaskValueError: If the netmask isn't valid for
+              an IPv6 address.
+            ValueError: If strict was True and a network address was not
+              supplied.
+
+        """
+        _BaseNetwork.__init__(self, address)
+
+        # Efficient constructor from integer or packed address
+        if isinstance(address, (bytes, _compat_int_types)):
+            self.network_address = IPv6Address(address)
+            self.netmask, self._prefixlen = self._make_netmask(
+                self._max_prefixlen)
+            return
+
+        if isinstance(address, tuple):
+            if len(address) > 1:
+                arg = address[1]
+            else:
+                arg = self._max_prefixlen
+            self.netmask, self._prefixlen = self._make_netmask(arg)
+            self.network_address = IPv6Address(address[0])
+            packed = int(self.network_address)
+            if packed & int(self.netmask) != packed:
+                if strict:
+                    raise ValueError('%s has host bits set' % self)
+                else:
+                    self.network_address = IPv6Address(packed &
+                                                       int(self.netmask))
+            return
+
+        # Assume input argument to be string or any object representation
+        # which converts into a formatted IP prefix string.
+        addr = _split_optional_netmask(address)
+
+        self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
+
+        if len(addr) == 2:
+            arg = addr[1]
+        else:
+            arg = self._max_prefixlen
+        self.netmask, self._prefixlen = self._make_netmask(arg)
+
+        if strict:
+            if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
+                    self.network_address):
+                raise ValueError('%s has host bits set' % self)
+        self.network_address = IPv6Address(int(self.network_address) &
+                                           int(self.netmask))
+
+        if self._prefixlen == (self._max_prefixlen - 1):
+            self.hosts = self.__iter__
+
+    def hosts(self):
+        """Generate Iterator over usable hosts in a network.
+
+          This is like __iter__ except it doesn't return the
+          Subnet-Router anycast address.
+
+        """
+        network = int(self.network_address)
+        broadcast = int(self.broadcast_address)
+        for x in _compat_range(network + 1, broadcast + 1):
+            yield self._address_class(x)
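+
+    # Illustrative: unlike __iter__, hosts() starts one past the network
+    # address, skipping the Subnet-Router anycast address:
+    #     >>> list(IPv6Network('2001:db8::/126').hosts())
+    #     [IPv6Address('2001:db8::1'), IPv6Address('2001:db8::2'), IPv6Address('2001:db8::3')]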
+
+    @property
+    def is_site_local(self):
+        """Test if the address is reserved for site-local.
+
+        Note that the site-local address space has been deprecated by RFC 3879.
+        Use is_private to test if this address is in the space of unique local
+        addresses as defined by RFC 4193.
+
+        Returns:
+            A boolean, True if the address is reserved per RFC 3513 2.5.6.
+
+        """
+        return (self.network_address.is_site_local and
+                self.broadcast_address.is_site_local)
+
+
+class _IPv6Constants(object):
+
+    _linklocal_network = IPv6Network('fe80::/10')
+
+    _multicast_network = IPv6Network('ff00::/8')
+
+    _private_networks = [
+        IPv6Network('::1/128'),
+        IPv6Network('::/128'),
+        IPv6Network('::ffff:0:0/96'),
+        IPv6Network('100::/64'),
+        IPv6Network('2001::/23'),
+        IPv6Network('2001:2::/48'),
+        IPv6Network('2001:db8::/32'),
+        IPv6Network('2001:10::/28'),
+        IPv6Network('fc00::/7'),
+        IPv6Network('fe80::/10'),
+    ]
+
+    _reserved_networks = [
+        IPv6Network('::/8'), IPv6Network('100::/8'),
+        IPv6Network('200::/7'), IPv6Network('400::/6'),
+        IPv6Network('800::/5'), IPv6Network('1000::/4'),
+        IPv6Network('4000::/3'), IPv6Network('6000::/3'),
+        IPv6Network('8000::/3'), IPv6Network('A000::/3'),
+        IPv6Network('C000::/3'), IPv6Network('E000::/4'),
+        IPv6Network('F000::/5'), IPv6Network('F800::/6'),
+        IPv6Network('FE00::/9'),
+    ]
+
+    _sitelocal_network = IPv6Network('fec0::/10')
+
+
+IPv6Address._constants = _IPv6Constants
diff --git a/lib/python3.7/site-packages/pip/_vendor/lockfile/__init__.py b/lib/python3.7/site-packages/pip/_vendor/lockfile/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6f44a55c6377c746ec8ca831999fdde5e401aa6
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/lockfile/__init__.py
@@ -0,0 +1,347 @@
+# -*- coding: utf-8 -*-
+
+"""
+lockfile.py - Platform-independent advisory file locks.
+
+Requires Python 2.5 unless you apply 2.4.diff
+Locking is done on a per-thread basis instead of a per-process basis.
+
+Usage:
+
+>>> lock = LockFile('somefile')
+>>> try:
+...     lock.acquire()
+... except AlreadyLocked:
+...     print('somefile', 'is locked already.')
+... except LockFailed:
+...     print('somefile', "can't be locked.")
+... else:
+...     print('got lock')
+got lock
+>>> print lock.is_locked()
+True
+>>> lock.release()
+
+>>> lock = LockFile('somefile')
+>>> print lock.is_locked()
+False
+>>> with lock:
+...    lock.is_locked()
+True
+>>> print lock.is_locked()
+False
+
+>>> lock = LockFile('somefile')
+>>> # It is okay to lock twice from the same thread...
+>>> with lock:
+...     lock.acquire()
+...
+>>> # Though no counter is kept, so you can't unlock multiple times...
+>>> print lock.is_locked()
+False
+
+Exceptions:
+
+    Error - base class for other exceptions
+        LockError - base class for all locking exceptions
+            AlreadyLocked - Another thread or process already holds the lock
+            LockFailed - Lock failed for some other reason
+        UnlockError - base class for all unlocking exceptions
+            AlreadyUnlocked - File was not locked.
+            NotMyLock - File was locked but not by the current thread/process
+"""
+
+from __future__ import absolute_import
+
+import functools
+import os
+import socket
+import threading
+import warnings
+
+# Work with PEP8 and non-PEP8 versions of threading module.
+if not hasattr(threading, "current_thread"):
+    threading.current_thread = threading.currentThread
+if not hasattr(threading.Thread, "get_name"):
+    threading.Thread.get_name = threading.Thread.getName
+
+__all__ = ['Error', 'LockError', 'LockTimeout', 'AlreadyLocked',
+           'LockFailed', 'UnlockError', 'NotLocked', 'NotMyLock',
+           'LinkFileLock', 'MkdirFileLock', 'SQLiteFileLock',
+           'LockBase', 'locked']
+
+
+class Error(Exception):
+    """
+    Base class for other exceptions.
+
+    >>> try:
+    ...   raise Error
+    ... except Exception:
+    ...   pass
+    """
+    pass
+
+
+class LockError(Error):
+    """
+    Base class for error arising from attempts to acquire the lock.
+
+    >>> try:
+    ...   raise LockError
+    ... except Error:
+    ...   pass
+    """
+    pass
+
+
+class LockTimeout(LockError):
+    """Raised when lock creation fails within a user-defined period of time.
+
+    >>> try:
+    ...   raise LockTimeout
+    ... except LockError:
+    ...   pass
+    """
+    pass
+
+
+class AlreadyLocked(LockError):
+    """Some other thread/process is locking the file.
+
+    >>> try:
+    ...   raise AlreadyLocked
+    ... except LockError:
+    ...   pass
+    """
+    pass
+
+
+class LockFailed(LockError):
+    """Lock file creation failed for some other reason.
+
+    >>> try:
+    ...   raise LockFailed
+    ... except LockError:
+    ...   pass
+    """
+    pass
+
+
+class UnlockError(Error):
+    """
+    Base class for errors arising from attempts to release the lock.
+
+    >>> try:
+    ...   raise UnlockError
+    ... except Error:
+    ...   pass
+    """
+    pass
+
+
+class NotLocked(UnlockError):
+    """Raised when an attempt is made to unlock an unlocked file.
+
+    >>> try:
+    ...   raise NotLocked
+    ... except UnlockError:
+    ...   pass
+    """
+    pass
+
+
+class NotMyLock(UnlockError):
+    """Raised when an attempt is made to unlock a file someone else locked.
+
+    >>> try:
+    ...   raise NotMyLock
+    ... except UnlockError:
+    ...   pass
+    """
+    pass
+
+
+class _SharedBase(object):
+    def __init__(self, path):
+        self.path = path
+
+    def acquire(self, timeout=None):
+        """
+        Acquire the lock.
+
+        * If timeout is omitted (or None), wait forever trying to lock the
+          file.
+
+        * If timeout > 0, try to acquire the lock for that many seconds.  If
+          the lock period expires and the file is still locked, raise
+          LockTimeout.
+
+        * If timeout <= 0, raise AlreadyLocked immediately if the file is
+          already locked.
+        """
+        raise NotImplementedError("implement in subclass")
+
+    def release(self):
+        """
+        Release the lock.
+
+        If the file is not locked, raise NotLocked.
+        """
+        raise NotImplementedError("implement in subclass")
+
+    def __enter__(self):
+        """
+        Context manager support.
+        """
+        self.acquire()
+        return self
+
+    def __exit__(self, *_exc):
+        """
+        Context manager support.
+        """
+        self.release()
+
+    def __repr__(self):
+        return "<%s: %r>" % (self.__class__.__name__, self.path)
+
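+# Usage sketch (illustrative, not part of the vendored module): the timeout
+# semantics documented in acquire() above, with the concrete LockFile class
+# chosen at the bottom of this module:
+#
+#     >>> lock = LockFile('somefile')
+#     >>> try:
+#     ...     lock.acquire(timeout=0)   # raise AlreadyLocked if already held
+#     ... except AlreadyLocked:
+#     ...     pass
+#     ... else:
+#     ...     lock.release()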
+
+class LockBase(_SharedBase):
+    """Base class for platform-specific lock classes."""
+    def __init__(self, path, threaded=True, timeout=None):
+        """
+        >>> lock = LockBase('somefile')
+        >>> lock = LockBase('somefile', threaded=False)
+        """
+        super(LockBase, self).__init__(path)
+        self.lock_file = os.path.abspath(path) + ".lock"
+        self.hostname = socket.gethostname()
+        self.pid = os.getpid()
+        if threaded:
+            t = threading.current_thread()
+            # Thread objects in Python 2.4 and earlier do not have ident
+            # attrs.  Work around that.
+            ident = getattr(t, "ident", hash(t))
+            self.tname = "-%x" % (ident & 0xffffffff)
+        else:
+            self.tname = ""
+        dirname = os.path.dirname(self.lock_file)
+
+        # unique name is mostly about the current process, but must
+        # also contain the path -- otherwise, two adjacent locked
+        # files conflict (one file gets locked, creating lock-file and
+        # unique file, the other one gets locked, creating lock-file
+        # and overwriting the already existing lock-file, then one
+        # gets unlocked, deleting both lock-file and unique file,
+        # finally the last lock errors out upon releasing).
+        self.unique_name = os.path.join(dirname,
+                                        "%s%s.%s%s" % (self.hostname,
+                                                       self.tname,
+                                                       self.pid,
+                                                       hash(self.path)))
+        self.timeout = timeout
+
+    def is_locked(self):
+        """
+        Tell whether or not the file is locked.
+        """
+        raise NotImplementedError("implement in subclass")
+
+    def i_am_locking(self):
+        """
+        Return True if this object is locking the file.
+        """
+        raise NotImplementedError("implement in subclass")
+
+    def break_lock(self):
+        """
+        Remove a lock.  Useful if a locking thread failed to unlock.
+        """
+        raise NotImplementedError("implement in subclass")
+
+    def __repr__(self):
+        return "<%s: %r -- %r>" % (self.__class__.__name__, self.unique_name,
+                                   self.path)
+
+
+def _fl_helper(cls, mod, *args, **kwds):
+    warnings.warn("Import from %s module instead of lockfile package" % mod,
+                  DeprecationWarning, stacklevel=2)
+    # This is a bit funky, but it's only for a while.  The way the unit tests
+    # are constructed this function winds up as an unbound method, so it
+    # actually takes three args, not two.  We want to toss out self.
+    if not isinstance(args[0], str):
+        # We are testing, avoid the first arg
+        args = args[1:]
+    if len(args) == 1 and not kwds:
+        kwds["threaded"] = True
+    return cls(*args, **kwds)
+
+
+def LinkFileLock(*args, **kwds):
+    """Factory function provided for backwards compatibility.
+
+    Do not use in new code.  Instead, import LinkLockFile from the
+    lockfile.linklockfile module.
+    """
+    from . import linklockfile
+    return _fl_helper(linklockfile.LinkLockFile, "lockfile.linklockfile",
+                      *args, **kwds)
+
+
+def MkdirFileLock(*args, **kwds):
+    """Factory function provided for backwards compatibility.
+
+    Do not use in new code.  Instead, import MkdirLockFile from the
+    lockfile.mkdirlockfile module.
+    """
+    from . import mkdirlockfile
+    return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile",
+                      *args, **kwds)
+
+
+def SQLiteFileLock(*args, **kwds):
+    """Factory function provided for backwards compatibility.
+
+    Do not use in new code.  Instead, import SQLiteLockFile from the
+    lockfile.mkdirlockfile module.
+    """
+    from . import sqlitelockfile
+    return _fl_helper(sqlitelockfile.SQLiteLockFile, "lockfile.sqlitelockfile",
+                      *args, **kwds)
+
+
+def locked(path, timeout=None):
+    """Decorator which enables locks for decorated function.
+
+    Arguments:
+     - path: path for lockfile.
+     - timeout (optional): Timeout for acquiring lock.
+
+     Usage:
+         @locked('/var/run/myname', timeout=0)
+         def myname(...):
+             ...
+    """
+    def decor(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            lock = FileLock(path, timeout=timeout)
+            lock.acquire()
+            try:
+                return func(*args, **kwargs)
+            finally:
+                lock.release()
+        return wrapper
+    return decor
+
+
+if hasattr(os, "link"):
+    from . import linklockfile as _llf
+    LockFile = _llf.LinkLockFile
+else:
+    from . import mkdirlockfile as _mlf
+    LockFile = _mlf.MkdirLockFile
+
+FileLock = LockFile
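+
+# Illustrative: on platforms with os.link() the default is the hard-link
+# implementation; otherwise the mkdir-based one.  FileLock is a plain alias:
+#     >>> LockFile is FileLock
+#     True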
diff --git a/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..319ca7bf405fc4aadc07b7ffb7985372c5e8655e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/linklockfile.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/linklockfile.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..58cd317a6a2c78405f9e727677c80ebbd9779843
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/linklockfile.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/mkdirlockfile.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/mkdirlockfile.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8efd740b2ff6395ea9a9d01819b07f5c213976a
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/mkdirlockfile.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/pidlockfile.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/pidlockfile.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21a71ad9bf4cbb7a59803398f984044c22a8ab8f
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/pidlockfile.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/sqlitelockfile.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/sqlitelockfile.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18dd7243056408a01d1917aa93c7dfda9794b255
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/sqlitelockfile.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/symlinklockfile.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/symlinklockfile.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..320666b3557fcc049e0f7d83b63ce1e0ad9c8ee8
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/lockfile/__pycache__/symlinklockfile.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/lockfile/linklockfile.py b/lib/python3.7/site-packages/pip/_vendor/lockfile/linklockfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ca9be0423591e61b3382b559473fcc56363cc46
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/lockfile/linklockfile.py
@@ -0,0 +1,73 @@
+from __future__ import absolute_import
+
+import time
+import os
+
+from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
+               AlreadyLocked)
+
+
+class LinkLockFile(LockBase):
+    """Lock access to a file using atomic property of link(2).
+
+    >>> lock = LinkLockFile('somefile')
+    >>> lock = LinkLockFile('somefile', threaded=False)
+    """
+
+    def acquire(self, timeout=None):
+        try:
+            open(self.unique_name, "wb").close()
+        except IOError:
+            raise LockFailed("failed to create %s" % self.unique_name)
+
+        timeout = timeout if timeout is not None else self.timeout
+        end_time = time.time()
+        if timeout is not None and timeout > 0:
+            end_time += timeout
+
+        while True:
+            # Try and create a hard link to it.
+            try:
+                os.link(self.unique_name, self.lock_file)
+            except OSError:
+                # Link creation failed.  Maybe we've double-locked?
+                nlinks = os.stat(self.unique_name).st_nlink
+                if nlinks == 2:
+                    # The original link plus the one I created == 2.  We're
+                    # good to go.
+                    return
+                else:
+                    # Otherwise the lock creation failed.
+                    if timeout is not None and time.time() > end_time:
+                        os.unlink(self.unique_name)
+                        if timeout > 0:
+                            raise LockTimeout("Timeout waiting to acquire"
+                                              " lock for %s" %
+                                              self.path)
+                        else:
+                            raise AlreadyLocked("%s is already locked" %
+                                                self.path)
+                    time.sleep(timeout is not None and timeout / 10 or 0.1)
+            else:
+                # Link creation succeeded.  We're good to go.
+                return
+
+    def release(self):
+        if not self.is_locked():
+            raise NotLocked("%s is not locked" % self.path)
+        elif not os.path.exists(self.unique_name):
+            raise NotMyLock("%s is locked, but not by me" % self.path)
+        os.unlink(self.unique_name)
+        os.unlink(self.lock_file)
+
+    def is_locked(self):
+        return os.path.exists(self.lock_file)
+
+    def i_am_locking(self):
+        return (self.is_locked() and
+                os.path.exists(self.unique_name) and
+                os.stat(self.unique_name).st_nlink == 2)
+
+    def break_lock(self):
+        if os.path.exists(self.lock_file):
+            os.unlink(self.lock_file)
diff --git a/lib/python3.7/site-packages/pip/_vendor/lockfile/mkdirlockfile.py b/lib/python3.7/site-packages/pip/_vendor/lockfile/mkdirlockfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..05a8c96ca517271bbd4953d0cf83dc4173521b2e
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/lockfile/mkdirlockfile.py
@@ -0,0 +1,84 @@
+from __future__ import absolute_import, division
+
+import time
+import os
+import sys
+import errno
+
+from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
+               AlreadyLocked)
+
+
+class MkdirLockFile(LockBase):
+    """Lock file by creating a directory."""
+    def __init__(self, path, threaded=True, timeout=None):
+        """
+        >>> lock = MkdirLockFile('somefile')
+        >>> lock = MkdirLockFile('somefile', threaded=False)
+        """
+        LockBase.__init__(self, path, threaded, timeout)
+        # Lock file itself is a directory.  Place the unique file name into
+        # it.
+        self.unique_name = os.path.join(self.lock_file,
+                                        "%s.%s%s" % (self.hostname,
+                                                     self.tname,
+                                                     self.pid))
+
+    def acquire(self, timeout=None):
+        timeout = timeout if timeout is not None else self.timeout
+        end_time = time.time()
+        if timeout is not None and timeout > 0:
+            end_time += timeout
+
+        if timeout is None:
+            wait = 0.1
+        else:
+            wait = max(0, timeout / 10)
+
+        while True:
+            try:
+                os.mkdir(self.lock_file)
+            except OSError:
+                err = sys.exc_info()[1]
+                if err.errno == errno.EEXIST:
+                    # Already locked.
+                    if os.path.exists(self.unique_name):
+                        # Already locked by me.
+                        return
+                    if timeout is not None and time.time() > end_time:
+                        if timeout > 0:
+                            raise LockTimeout("Timeout waiting to acquire"
+                                              " lock for %s" %
+                                              self.path)
+                        else:
+                            # Someone else has the lock.
+                            raise AlreadyLocked("%s is already locked" %
+                                                self.path)
+                    time.sleep(wait)
+                else:
+                    # Couldn't create the lock for some other reason
+                    raise LockFailed("failed to create %s" % self.lock_file)
+            else:
+                open(self.unique_name, "wb").close()
+                return
+
+    def release(self):
+        if not self.is_locked():
+            raise NotLocked("%s is not locked" % self.path)
+        elif not os.path.exists(self.unique_name):
+            raise NotMyLock("%s is locked, but not by me" % self.path)
+        os.unlink(self.unique_name)
+        os.rmdir(self.lock_file)
+
+    def is_locked(self):
+        return os.path.exists(self.lock_file)
+
+    def i_am_locking(self):
+        return (self.is_locked() and
+                os.path.exists(self.unique_name))
+
+    def break_lock(self):
+        if os.path.exists(self.lock_file):
+            for name in os.listdir(self.lock_file):
+                os.unlink(os.path.join(self.lock_file, name))
+            os.rmdir(self.lock_file)
diff --git a/lib/python3.7/site-packages/pip/_vendor/lockfile/pidlockfile.py b/lib/python3.7/site-packages/pip/_vendor/lockfile/pidlockfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..069e85b15bdfcb4fd7042222516fbfb046db06c9
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/lockfile/pidlockfile.py
@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+
+# pidlockfile.py
+#
+# Copyright © 2008–2009 Ben Finney <ben+python@benfinney.id.au>
+#
+# This is free software: you may copy, modify, and/or distribute this work
+# under the terms of the Python Software Foundation License, version 2 or
+# later as published by the Python Software Foundation.
+# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
+
+""" Lockfile behaviour implemented via Unix PID files.
+    """
+
+from __future__ import absolute_import
+
+import errno
+import os
+import time
+
+from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
+               LockTimeout)
+
+
+class PIDLockFile(LockBase):
+    """ Lockfile implemented as a Unix PID file.
+
+    The lock file is a normal file named by the attribute `path`.
+    A lock's PID file contains a single line of text, containing
+    the process ID (PID) of the process that acquired the lock.
+
+    >>> lock = PIDLockFile('somefile')
+    >>> lock = PIDLockFile('somefile')
+    """
+
+    def __init__(self, path, threaded=False, timeout=None):
+        # pid lockfiles don't support threaded operation, so always force
+        # False as the threaded arg.
+        LockBase.__init__(self, path, False, timeout)
+        self.unique_name = self.path
+
+    def read_pid(self):
+        """ Get the PID from the lock file.
+            """
+        return read_pid_from_pidfile(self.path)
+
+    def is_locked(self):
+        """ Test if the lock is currently held.
+
+            The lock is held if the PID file for this lock exists.
+
+            """
+        return os.path.exists(self.path)
+
+    def i_am_locking(self):
+        """ Test if the lock is held by the current process.
+
+        Returns ``True`` if the current process ID matches the
+        number stored in the PID file.
+        """
+        return self.is_locked() and os.getpid() == self.read_pid()
+
+    def acquire(self, timeout=None):
+        """ Acquire the lock.
+
+        Creates the PID file for this lock, or raises an error if
+        the lock could not be acquired.
+        """
+
+        timeout = timeout if timeout is not None else self.timeout
+        end_time = time.time()
+        if timeout is not None and timeout > 0:
+            end_time += timeout
+
+        while True:
+            try:
+                write_pid_to_pidfile(self.path)
+            except OSError as exc:
+                if exc.errno == errno.EEXIST:
+                    # The lock creation failed.  Maybe sleep a bit.
+                    if time.time() > end_time:
+                        if timeout is not None and timeout > 0:
+                            raise LockTimeout("Timeout waiting to acquire"
+                                              " lock for %s" %
+                                              self.path)
+                        else:
+                            raise AlreadyLocked("%s is already locked" %
+                                                self.path)
+                    time.sleep(timeout is not None and timeout / 10 or 0.1)
+                else:
+                    raise LockFailed("failed to create %s" % self.path)
+            else:
+                return
+
+    def release(self):
+        """ Release the lock.
+
+            Removes the PID file to release the lock, or raises an
+            error if the current process does not hold the lock.
+
+            """
+        if not self.is_locked():
+            raise NotLocked("%s is not locked" % self.path)
+        if not self.i_am_locking():
+            raise NotMyLock("%s is locked, but not by me" % self.path)
+        remove_existing_pidfile(self.path)
+
+    def break_lock(self):
+        """ Break an existing lock.
+
+            Removes the PID file if it already exists, otherwise does
+            nothing.
+
+            """
+        remove_existing_pidfile(self.path)
+
+
+def read_pid_from_pidfile(pidfile_path):
+    """ Read the PID recorded in the named PID file.
+
+        Read and return the numeric PID recorded as text in the named
+        PID file. If the PID file cannot be read, or if the content is
+        not a valid PID, return ``None``.
+
+        """
+    pid = None
+    try:
+        pidfile = open(pidfile_path, 'r')
+    except IOError:
+        pass
+    else:
+        # According to the FHS 2.3 section on PID files in /var/run:
+        #
+        #   The file must consist of the process identifier in
+        #   ASCII-encoded decimal, followed by a newline character.
+        #
+        #   Programs that read PID files should be somewhat flexible
+        #   in what they accept; i.e., they should ignore extra
+        #   whitespace, leading zeroes, absence of the trailing
+        #   newline, or additional lines in the PID file.
+
+        line = pidfile.readline().strip()
+        try:
+            pid = int(line)
+        except ValueError:
+            pass
+        pidfile.close()
+
+    return pid
+
+
+def write_pid_to_pidfile(pidfile_path):
+    """ Write the PID in the named PID file.
+
+        Get the numeric process ID (“PID”) of the current process
+        and write it to the named file as a line of text.
+
+        """
+    open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
+    open_mode = 0o644
+    pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
+    pidfile = os.fdopen(pidfile_fd, 'w')
+
+    # According to the FHS 2.3 section on PID files in /var/run:
+    #
+    #   The file must consist of the process identifier in
+    #   ASCII-encoded decimal, followed by a newline character. For
+    #   example, if crond was process number 25, /var/run/crond.pid
+    #   would contain three characters: two, five, and newline.
+
+    pid = os.getpid()
+    pidfile.write("%s\n" % pid)
+    pidfile.close()
+
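+# Illustrative round trip for the two helpers above (the path is
+# hypothetical):
+#     >>> write_pid_to_pidfile('/tmp/example.pid')                  # doctest: +SKIP
+#     >>> read_pid_from_pidfile('/tmp/example.pid') == os.getpid()  # doctest: +SKIP
+#     True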
+
+def remove_existing_pidfile(pidfile_path):
+    """ Remove the named PID file if it exists.
+
+        Removing a PID file that doesn't already exist puts us in the
+        desired state, so we ignore the condition if the file does not
+        exist.
+
+        """
+    try:
+        os.remove(pidfile_path)
+    except OSError as exc:
+        if exc.errno == errno.ENOENT:
+            pass
+        else:
+            raise
diff --git a/lib/python3.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.py b/lib/python3.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..f997e2444e75b176761965bbf3e1f82c5c3ad0e2
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/lockfile/sqlitelockfile.py
@@ -0,0 +1,156 @@
+from __future__ import absolute_import, division
+
+import time
+import os
+
+try:
+    unicode
+except NameError:
+    unicode = str
+
+from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
+
+
+class SQLiteLockFile(LockBase):
+    "Demonstrate SQL-based locking."
+
+    testdb = None
+
+    def __init__(self, path, threaded=True, timeout=None):
+        """
+        >>> lock = SQLiteLockFile('somefile')
+        >>> lock = SQLiteLockFile('somefile', threaded=False)
+        """
+        LockBase.__init__(self, path, threaded, timeout)
+        self.lock_file = unicode(self.lock_file)
+        self.unique_name = unicode(self.unique_name)
+
+        if SQLiteLockFile.testdb is None:
+            import tempfile
+            _fd, testdb = tempfile.mkstemp()
+            os.close(_fd)
+            os.unlink(testdb)
+            del _fd, tempfile
+            SQLiteLockFile.testdb = testdb
+
+        import sqlite3
+        self.connection = sqlite3.connect(SQLiteLockFile.testdb)
+
+        c = self.connection.cursor()
+        try:
+            c.execute("create table locks"
+                      "("
+                      "   lock_file varchar(32),"
+                      "   unique_name varchar(32)"
+                      ")")
+        except sqlite3.OperationalError:
+            pass
+        else:
+            self.connection.commit()
+            import atexit
+            atexit.register(os.unlink, SQLiteLockFile.testdb)
+
+    def acquire(self, timeout=None):
+        timeout = timeout if timeout is not None else self.timeout
+        end_time = time.time()
+        if timeout is not None and timeout > 0:
+            end_time += timeout
+
+        if timeout is None:
+            wait = 0.1
+        elif timeout <= 0:
+            wait = 0
+        else:
+            wait = timeout / 10
+
+        cursor = self.connection.cursor()
+
+        while True:
+            if not self.is_locked():
+                # Not locked.  Try to lock it.
+                cursor.execute("insert into locks"
+                               "  (lock_file, unique_name)"
+                               "  values"
+                               "  (?, ?)",
+                               (self.lock_file, self.unique_name))
+                self.connection.commit()
+
+                # Check to see if we are the only lock holder.
+                cursor.execute("select * from locks"
+                               "  where unique_name = ?",
+                               (self.unique_name,))
+                rows = cursor.fetchall()
+                if len(rows) > 1:
+                    # Nope.  Someone else got there.  Remove our lock.
+                    cursor.execute("delete from locks"
+                                   "  where unique_name = ?",
+                                   (self.unique_name,))
+                    self.connection.commit()
+                else:
+                    # Yup.  We're done, so go home.
+                    return
+            else:
+                # Check to see if we are the only lock holder.
+                cursor.execute("select * from locks"
+                               "  where unique_name = ?",
+                               (self.unique_name,))
+                rows = cursor.fetchall()
+                if len(rows) == 1:
+                    # We're the locker, so go home.
+                    return
+
+            # Maybe we should wait a bit longer.
+            if timeout is not None and time.time() > end_time:
+                if timeout > 0:
+                    # No more waiting.
+                    raise LockTimeout("Timeout waiting to acquire"
+                                      " lock for %s" %
+                                      self.path)
+                else:
+                    # Someone else has the lock and we are impatient.
+                    raise AlreadyLocked("%s is already locked" % self.path)
+
+            # Well, okay.  We'll give it a bit longer.
+            time.sleep(wait)
+
+    def release(self):
+        if not self.is_locked():
+            raise NotLocked("%s is not locked" % self.path)
+        if not self.i_am_locking():
+            raise NotMyLock("%s is locked, but not by me (by %s)" %
+                            (self.unique_name, self._who_is_locking()))
+        cursor = self.connection.cursor()
+        cursor.execute("delete from locks"
+                       "  where unique_name = ?",
+                       (self.unique_name,))
+        self.connection.commit()
+
+    def _who_is_locking(self):
+        cursor = self.connection.cursor()
+        cursor.execute("select unique_name from locks"
+                       "  where lock_file = ?",
+                       (self.lock_file,))
+        return cursor.fetchone()[0]
+
+    def is_locked(self):
+        cursor = self.connection.cursor()
+        cursor.execute("select * from locks"
+                       "  where lock_file = ?",
+                       (self.lock_file,))
+        rows = cursor.fetchall()
+        return bool(rows)
+
+    def i_am_locking(self):
+        cursor = self.connection.cursor()
+        cursor.execute("select * from locks"
+                       "  where lock_file = ?"
+                       "    and unique_name = ?",
+                       (self.lock_file, self.unique_name))
+        return bool(cursor.fetchall())
+
+    def break_lock(self):
+        cursor = self.connection.cursor()
+        cursor.execute("delete from locks"
+                       "  where lock_file = ?",
+                       (self.lock_file,))
+        self.connection.commit()
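
A minimal sketch of taking and releasing a SQL-based lock with the class
above; the file name and timeout are illustrative::

    from pip._vendor.lockfile.sqlitelockfile import SQLiteLockFile

    lock = SQLiteLockFile('somefile', threaded=False)
    lock.acquire(timeout=5)    # raises LockTimeout if not acquired within 5 seconds
    try:
        pass                   # ... work with 'somefile' while holding the lock ...
    finally:
        lock.release()         # raises NotMyLock if another unique_name holds it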
diff --git a/lib/python3.7/site-packages/pip/_vendor/lockfile/symlinklockfile.py b/lib/python3.7/site-packages/pip/_vendor/lockfile/symlinklockfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..23b41f582b9f8d40a1375b47ed1fb25da12d0c3b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/lockfile/symlinklockfile.py
@@ -0,0 +1,70 @@
+from __future__ import absolute_import
+
+import os
+import time
+
+from . import (LockBase, NotLocked, NotMyLock, LockTimeout,
+               AlreadyLocked)
+
+
+class SymlinkLockFile(LockBase):
+    """Lock access to a file using symlink(2)."""
+
+    def __init__(self, path, threaded=True, timeout=None):
+        # super(SymlinkLockFile, self).__init__(...)
+        LockBase.__init__(self, path, threaded, timeout)
+        # split it back!
+        self.unique_name = os.path.split(self.unique_name)[1]
+
+    def acquire(self, timeout=None):
+        # Hopefully unnecessary for symlink.
+        # try:
+        #     open(self.unique_name, "wb").close()
+        # except IOError:
+        #     raise LockFailed("failed to create %s" % self.unique_name)
+        timeout = timeout if timeout is not None else self.timeout
+        end_time = time.time()
+        if timeout is not None and timeout > 0:
+            end_time += timeout
+
+        while True:
+            # Try to create a symbolic link to it.
+            try:
+                os.symlink(self.unique_name, self.lock_file)
+            except OSError:
+                # Link creation failed.  Maybe we've double-locked?
+                if self.i_am_locking():
+                    # Linked to our unique name. Proceed.
+                    return
+                else:
+                    # Otherwise the lock creation failed.
+                    if timeout is not None and time.time() > end_time:
+                        if timeout > 0:
+                            raise LockTimeout("Timeout waiting to acquire"
+                                              " lock for %s" %
+                                              self.path)
+                        else:
+                            raise AlreadyLocked("%s is already locked" %
+                                                self.path)
+                    time.sleep(timeout / 10 if timeout is not None else 0.1)
+            else:
+                # Link creation succeeded.  We're good to go.
+                return
+
+    def release(self):
+        if not self.is_locked():
+            raise NotLocked("%s is not locked" % self.path)
+        elif not self.i_am_locking():
+            raise NotMyLock("%s is locked, but not by me" % self.path)
+        os.unlink(self.lock_file)
+
+    def is_locked(self):
+        return os.path.islink(self.lock_file)
+
+    def i_am_locking(self):
+        return (os.path.islink(self.lock_file)
+                and os.readlink(self.lock_file) == self.unique_name)
+
+    def break_lock(self):
+        if os.path.islink(self.lock_file):  # exists && link
+            os.unlink(self.lock_file)
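
A minimal sketch of the symlink-based lock above, including clearing a stale
lock left behind by a dead process; names are illustrative::

    from pip._vendor.lockfile.symlinklockfile import SymlinkLockFile

    lock = SymlinkLockFile('somefile', threaded=False)
    lock.break_lock()          # remove any stale symlink from a previous run
    lock.acquire(timeout=0)    # raises AlreadyLocked immediately if contended
    assert lock.is_locked() and lock.i_am_locking()
    lock.release()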
diff --git a/lib/python3.7/site-packages/pip/_vendor/msgpack/__init__.py b/lib/python3.7/site-packages/pip/_vendor/msgpack/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2afca5ad50faf2516bc0151eceb327c58e9bdcad
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/msgpack/__init__.py
@@ -0,0 +1,66 @@
+# coding: utf-8
+from pip._vendor.msgpack._version import version
+from pip._vendor.msgpack.exceptions import *
+
+from collections import namedtuple
+
+
+class ExtType(namedtuple('ExtType', 'code data')):
+    """ExtType represents ext type in msgpack."""
+    def __new__(cls, code, data):
+        if not isinstance(code, int):
+            raise TypeError("code must be int")
+        if not isinstance(data, bytes):
+            raise TypeError("data must be bytes")
+        if not 0 <= code <= 127:
+            raise ValueError("code must be 0~127")
+        return super(ExtType, cls).__new__(cls, code, data)
+
+
+import os
+if os.environ.get('MSGPACK_PUREPYTHON'):
+    from pip._vendor.msgpack.fallback import Packer, unpackb, Unpacker
+else:
+    try:
+        from pip._vendor.msgpack._packer import Packer
+        from pip._vendor.msgpack._unpacker import unpackb, Unpacker
+    except ImportError:
+        from pip._vendor.msgpack.fallback import Packer, unpackb, Unpacker
+
+
+def pack(o, stream, **kwargs):
+    """
+    Pack object `o` and write it to `stream`
+
+    See :class:`Packer` for options.
+    """
+    packer = Packer(**kwargs)
+    stream.write(packer.pack(o))
+
+
+def packb(o, **kwargs):
+    """
+    Pack object `o` and return packed bytes
+
+    See :class:`Packer` for options.
+    """
+    return Packer(**kwargs).pack(o)
+
+
+def unpack(stream, **kwargs):
+    """
+    Unpack an object from `stream`.
+
+    Raises `ExtraData` when `stream` contains extra bytes.
+    See :class:`Unpacker` for options.
+    """
+    data = stream.read()
+    return unpackb(data, **kwargs)
+
+
+# alias for compatibility to simplejson/marshal/pickle.
+load = unpack
+loads = unpackb
+
+dump = pack
+dumps = packb
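
A round-trip sketch against the vendored copy (with a standalone msgpack
install, import `msgpack` directly)::

    from pip._vendor import msgpack

    payload = {'id': 7, 'tags': ['a', 'b'], 'blob': b'\x00\x01'}
    packed = msgpack.packb(payload, use_bin_type=True)
    assert msgpack.unpackb(packed, raw=False) == payload

    ext = msgpack.ExtType(42, b'opaque')
    assert msgpack.unpackb(msgpack.packb(ext)) == ext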
diff --git a/lib/python3.7/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..abe87478df49963b5712c1c698b9c78128de5d97
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/msgpack/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/msgpack/__pycache__/_version.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/msgpack/__pycache__/_version.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c5a85ee98812aff3c97512ec0200e51705f142b
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/msgpack/__pycache__/_version.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84e668fa1bc92788d80355c87031ad1c5cc223e4
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/msgpack/__pycache__/exceptions.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ea3ec2a2c82f465d4dc4c24d183892885884d668
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/msgpack/__pycache__/fallback.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/msgpack/_version.py b/lib/python3.7/site-packages/pip/_vendor/msgpack/_version.py
new file mode 100644
index 0000000000000000000000000000000000000000..d28f0deb86cf2216f269b5ca7a8f45479076b43d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/msgpack/_version.py
@@ -0,0 +1 @@
+version = (0, 5, 6)
diff --git a/lib/python3.7/site-packages/pip/_vendor/msgpack/exceptions.py b/lib/python3.7/site-packages/pip/_vendor/msgpack/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..97668814f2767c3c642132d312b76a9ded1bca2e
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/msgpack/exceptions.py
@@ -0,0 +1,41 @@
+class UnpackException(Exception):
+    """Deprecated.  Use Exception instead to catch all exception during unpacking."""
+
+
+class BufferFull(UnpackException):
+    pass
+
+
+class OutOfData(UnpackException):
+    pass
+
+
+class UnpackValueError(UnpackException, ValueError):
+    """Deprecated.  Use ValueError instead."""
+
+
+class ExtraData(UnpackValueError):
+    def __init__(self, unpacked, extra):
+        self.unpacked = unpacked
+        self.extra = extra
+
+    def __str__(self):
+        return "unpack(b) received extra data."
+
+
+class PackException(Exception):
+    """Deprecated.  Use Exception instead to catch all exception during packing."""
+
+
+class PackValueError(PackException, ValueError):
+    """PackValueError is raised when type of input data is supported but it's value is unsupported.
+
+    Deprecated.  Use ValueError instead.
+    """
+
+
+class PackOverflowError(PackValueError, OverflowError):
+    """PackOverflowError is raised when integer value is out of range of msgpack support [-2**31, 2**32).
+
+    Deprecated.  Use ValueError instead.
+    """
diff --git a/lib/python3.7/site-packages/pip/_vendor/msgpack/fallback.py b/lib/python3.7/site-packages/pip/_vendor/msgpack/fallback.py
new file mode 100644
index 0000000000000000000000000000000000000000..94184218aff5013f9cbf3086e37fd08c682582b4
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/msgpack/fallback.py
@@ -0,0 +1,977 @@
+"""Fallback pure Python implementation of msgpack"""
+
+import sys
+import struct
+import warnings
+
+if sys.version_info[0] == 3:
+    PY3 = True
+    int_types = int
+    Unicode = str
+    xrange = range
+    def dict_iteritems(d):
+        return d.items()
+else:
+    PY3 = False
+    int_types = (int, long)
+    Unicode = unicode
+    def dict_iteritems(d):
+        return d.iteritems()
+
+
+if hasattr(sys, 'pypy_version_info'):
+    # cStringIO is slow on PyPy, StringIO is faster.  However: PyPy's own
+    # StringBuilder is fastest.
+    from __pypy__ import newlist_hint
+    try:
+        from __pypy__.builders import BytesBuilder as StringBuilder
+    except ImportError:
+        from __pypy__.builders import StringBuilder
+    USING_STRINGBUILDER = True
+    class StringIO(object):
+        def __init__(self, s=b''):
+            if s:
+                self.builder = StringBuilder(len(s))
+                self.builder.append(s)
+            else:
+                self.builder = StringBuilder()
+        def write(self, s):
+            if isinstance(s, memoryview):
+                s = s.tobytes()
+            elif isinstance(s, bytearray):
+                s = bytes(s)
+            self.builder.append(s)
+        def getvalue(self):
+            return self.builder.build()
+else:
+    USING_STRINGBUILDER = False
+    from io import BytesIO as StringIO
+    newlist_hint = lambda size: []
+
+
+from pip._vendor.msgpack.exceptions import (
+    BufferFull,
+    OutOfData,
+    UnpackValueError,
+    PackValueError,
+    PackOverflowError,
+    ExtraData)
+
+from pip._vendor.msgpack import ExtType
+
+
+EX_SKIP                 = 0
+EX_CONSTRUCT            = 1
+EX_READ_ARRAY_HEADER    = 2
+EX_READ_MAP_HEADER      = 3
+
+TYPE_IMMEDIATE          = 0
+TYPE_ARRAY              = 1
+TYPE_MAP                = 2
+TYPE_RAW                = 3
+TYPE_BIN                = 4
+TYPE_EXT                = 5
+
+DEFAULT_RECURSE_LIMIT = 511
+
+
+def _check_type_strict(obj, t, type=type, tuple=tuple):
+    if type(t) is tuple:
+        return type(obj) in t
+    else:
+        return type(obj) is t
+
+
+def _get_data_from_buffer(obj):
+    try:
+        view = memoryview(obj)
+    except TypeError:
+        # try to use legacy buffer protocol if 2.7, otherwise re-raise
+        if not PY3:
+            view = memoryview(buffer(obj))
+            warnings.warn("using old buffer interface to unpack %s; "
+                          "this leads to unpacking errors if slicing is used and "
+                          "will be removed in a future version" % type(obj),
+                          RuntimeWarning)
+        else:
+            raise
+    if view.itemsize != 1:
+        raise ValueError("cannot unpack from multi-byte object")
+    return view
+
+
+def unpack(stream, **kwargs):
+    warnings.warn(
+        "Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead.",
+        PendingDeprecationWarning)
+    data = stream.read()
+    return unpackb(data, **kwargs)
+
+
+def unpackb(packed, **kwargs):
+    """
+    Unpack an object from `packed`.
+
+    Raises `ExtraData` when `packed` contains extra bytes.
+    See :class:`Unpacker` for options.
+    """
+    unpacker = Unpacker(None, **kwargs)
+    unpacker.feed(packed)
+    try:
+        ret = unpacker._unpack()
+    except OutOfData:
+        raise UnpackValueError("Data is not enough.")
+    if unpacker._got_extradata():
+        raise ExtraData(ret, unpacker._get_extradata())
+    return ret
+
+
+class Unpacker(object):
+    """Streaming unpacker.
+
+    arguments:
+
+    :param file_like:
+        File-like object having `.read(n)` method.
+        If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable.
+
+    :param int read_size:
+        Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`)
+
+    :param bool use_list:
+        If true, unpack msgpack array to Python list.
+        Otherwise, unpack to Python tuple. (default: True)
+
+    :param bool raw:
+        If true, unpack msgpack raw to Python bytes (default).
+        Otherwise, unpack to Python str (or unicode on Python 2) by decoding
+        with UTF-8 encoding (recommended).
+        Currently the default is true, but it will be changed to false in
+        the near future, so you should specify it explicitly to keep
+        backward compatibility.
+
+        The deprecated *encoding* option overrides this option.
+
+    :param callable object_hook:
+        When specified, it should be callable.
+        Unpacker calls it with a dict argument after unpacking msgpack map.
+        (See also simplejson)
+
+    :param callable object_pairs_hook:
+        When specified, it should be callable.
+        Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
+        (See also simplejson)
+
+    :param str encoding:
+        Encoding used for decoding msgpack raw.
+        If it is None (default), msgpack raw is deserialized to Python bytes.
+
+    :param str unicode_errors:
+        (deprecated) Used for decoding msgpack raw with *encoding*.
+        (default: `'strict'`)
+
+    :param int max_buffer_size:
+        Limits the size of data waiting to be unpacked.  0 means the
+        system's INT_MAX (default).  Raises a `BufferFull` exception when
+        the buffer is insufficient.  Set this parameter when unpacking
+        data from an untrusted source.
+
+    :param int max_str_len:
+        Limits max length of str. (default: 2**31-1)
+
+    :param int max_bin_len:
+        Limits max length of bin. (default: 2**31-1)
+
+    :param int max_array_len:
+        Limits max length of array. (default: 2**31-1)
+
+    :param int max_map_len:
+        Limits max length of map. (default: 2**31-1)
+
+
+    Example of streaming deserialization from a file-like object::
+
+        unpacker = Unpacker(file_like, raw=False)
+        for o in unpacker:
+            process(o)
+
+    Example of streaming deserialization from a socket::
+
+        unpacker = Unpacker(raw=False)
+        while True:
+            buf = sock.recv(1024**2)
+            if not buf:
+                break
+            unpacker.feed(buf)
+            for o in unpacker:
+                process(o)
+    """
+
+    def __init__(self, file_like=None, read_size=0, use_list=True, raw=True,
+                 object_hook=None, object_pairs_hook=None, list_hook=None,
+                 encoding=None, unicode_errors=None, max_buffer_size=0,
+                 ext_hook=ExtType,
+                 max_str_len=2147483647, # 2**31-1
+                 max_bin_len=2147483647,
+                 max_array_len=2147483647,
+                 max_map_len=2147483647,
+                 max_ext_len=2147483647):
+
+        if encoding is not None:
+            warnings.warn(
+                "encoding is deprecated, Use raw=False instead.",
+                PendingDeprecationWarning)
+
+        if unicode_errors is None:
+            unicode_errors = 'strict'
+
+        if file_like is None:
+            self._feeding = True
+        else:
+            if not callable(file_like.read):
+                raise TypeError("`file_like.read` must be callable")
+            self.file_like = file_like
+            self._feeding = False
+
+        #: array of bytes fed.
+        self._buffer = bytearray()
+        # Some very old pythons don't support `struct.unpack_from()` with a
+        # `bytearray`. So we wrap it in a `buffer()` there.
+        if sys.version_info < (2, 7, 6):
+            self._buffer_view = buffer(self._buffer)
+        else:
+            self._buffer_view = self._buffer
+        #: The position we are currently reading from.
+        self._buff_i = 0
+
+        # When Unpacker is used as an iterable, the buffer is not "consumed"
+        # completely between calls to next(), for efficiency's sake.
+        # Instead, it is done sloppily.  To make sure we raise BufferFull at
+        # the correct moments, we have to keep track of how sloppy we were.
+        # Furthermore, when the buffer is incomplete (that is, when we raise
+        # OutOfData) we need to roll back the buffer to the correct state,
+        # which _buf_checkpoint records.
+        self._buf_checkpoint = 0
+
+        self._max_buffer_size = max_buffer_size or 2**31-1
+        if read_size > self._max_buffer_size:
+            raise ValueError("read_size must be smaller than max_buffer_size")
+        self._read_size = read_size or min(self._max_buffer_size, 16*1024)
+        self._raw = bool(raw)
+        self._encoding = encoding
+        self._unicode_errors = unicode_errors
+        self._use_list = use_list
+        self._list_hook = list_hook
+        self._object_hook = object_hook
+        self._object_pairs_hook = object_pairs_hook
+        self._ext_hook = ext_hook
+        self._max_str_len = max_str_len
+        self._max_bin_len = max_bin_len
+        self._max_array_len = max_array_len
+        self._max_map_len = max_map_len
+        self._max_ext_len = max_ext_len
+        self._stream_offset = 0
+
+        if list_hook is not None and not callable(list_hook):
+            raise TypeError('`list_hook` is not callable')
+        if object_hook is not None and not callable(object_hook):
+            raise TypeError('`object_hook` is not callable')
+        if object_pairs_hook is not None and not callable(object_pairs_hook):
+            raise TypeError('`object_pairs_hook` is not callable')
+        if object_hook is not None and object_pairs_hook is not None:
+            raise TypeError("object_pairs_hook and object_hook are mutually "
+                            "exclusive")
+        if not callable(ext_hook):
+            raise TypeError("`ext_hook` is not callable")
+
+    def feed(self, next_bytes):
+        assert self._feeding
+        view = _get_data_from_buffer(next_bytes)
+        if (len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size):
+            raise BufferFull
+
+        # Strip buffer before checkpoint before reading file.
+        if self._buf_checkpoint > 0:
+            del self._buffer[:self._buf_checkpoint]
+            self._buff_i -= self._buf_checkpoint
+            self._buf_checkpoint = 0
+
+        self._buffer += view
+
+    def _consume(self):
+        """ Gets rid of the used parts of the buffer. """
+        self._stream_offset += self._buff_i - self._buf_checkpoint
+        self._buf_checkpoint = self._buff_i
+
+    def _got_extradata(self):
+        return self._buff_i < len(self._buffer)
+
+    def _get_extradata(self):
+        return self._buffer[self._buff_i:]
+
+    def read_bytes(self, n):
+        return self._read(n)
+
+    def _read(self, n):
+        # (int) -> bytearray
+        self._reserve(n)
+        i = self._buff_i
+        self._buff_i = i+n
+        return self._buffer[i:i+n]
+
+    def _reserve(self, n):
+        remain_bytes = len(self._buffer) - self._buff_i - n
+
+        # Fast path: buffer has n bytes already
+        if remain_bytes >= 0:
+            return
+
+        if self._feeding:
+            self._buff_i = self._buf_checkpoint
+            raise OutOfData
+
+        # Strip buffer before checkpoint before reading file.
+        if self._buf_checkpoint > 0:
+            del self._buffer[:self._buf_checkpoint]
+            self._buff_i -= self._buf_checkpoint
+            self._buf_checkpoint = 0
+
+        # Read from file
+        remain_bytes = -remain_bytes
+        while remain_bytes > 0:
+            to_read_bytes = max(self._read_size, remain_bytes)
+            read_data = self.file_like.read(to_read_bytes)
+            if not read_data:
+                break
+            assert isinstance(read_data, bytes)
+            self._buffer += read_data
+            remain_bytes -= len(read_data)
+
+        if len(self._buffer) < n + self._buff_i:
+            self._buff_i = 0  # rollback
+            raise OutOfData
+
+    def _read_header(self, execute=EX_CONSTRUCT):
+        typ = TYPE_IMMEDIATE
+        n = 0
+        obj = None
+        self._reserve(1)
+        b = self._buffer[self._buff_i]
+        self._buff_i += 1
+        if b & 0b10000000 == 0:
+            obj = b
+        elif b & 0b11100000 == 0b11100000:
+            obj = -1 - (b ^ 0xff)
+        elif b & 0b11100000 == 0b10100000:
+            n = b & 0b00011111
+            typ = TYPE_RAW
+            if n > self._max_str_len:
+                raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len)
+            obj = self._read(n)
+        elif b & 0b11110000 == 0b10010000:
+            n = b & 0b00001111
+            typ = TYPE_ARRAY
+            if n > self._max_array_len:
+                raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len)
+        elif b & 0b11110000 == 0b10000000:
+            n = b & 0b00001111
+            typ = TYPE_MAP
+            if n > self._max_map_len:
+                raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len)
+        elif b == 0xc0:
+            obj = None
+        elif b == 0xc2:
+            obj = False
+        elif b == 0xc3:
+            obj = True
+        elif b == 0xc4:
+            typ = TYPE_BIN
+            self._reserve(1)
+            n = self._buffer[self._buff_i]
+            self._buff_i += 1
+            if n > self._max_bin_len:
+                raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
+            obj = self._read(n)
+        elif b == 0xc5:
+            typ = TYPE_BIN
+            self._reserve(2)
+            n = struct.unpack_from(">H", self._buffer_view, self._buff_i)[0]
+            self._buff_i += 2
+            if n > self._max_bin_len:
+                raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
+            obj = self._read(n)
+        elif b == 0xc6:
+            typ = TYPE_BIN
+            self._reserve(4)
+            n = struct.unpack_from(">I", self._buffer_view, self._buff_i)[0]
+            self._buff_i += 4
+            if n > self._max_bin_len:
+                raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
+            obj = self._read(n)
+        elif b == 0xc7:  # ext 8
+            typ = TYPE_EXT
+            self._reserve(2)
+            L, n = struct.unpack_from('Bb', self._buffer_view, self._buff_i)
+            self._buff_i += 2
+            if L > self._max_ext_len:
+                raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
+            obj = self._read(L)
+        elif b == 0xc8:  # ext 16
+            typ = TYPE_EXT
+            self._reserve(3)
+            L, n = struct.unpack_from('>Hb', self._buffer_view, self._buff_i)
+            self._buff_i += 3
+            if L > self._max_ext_len:
+                raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
+            obj = self._read(L)
+        elif b == 0xc9:  # ext 32
+            typ = TYPE_EXT
+            self._reserve(5)
+            L, n = struct.unpack_from('>Ib', self._buffer_view, self._buff_i)
+            self._buff_i += 5
+            if L > self._max_ext_len:
+                raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
+            obj = self._read(L)
+        elif b == 0xca:
+            self._reserve(4)
+            obj = struct.unpack_from(">f", self._buffer_view, self._buff_i)[0]
+            self._buff_i += 4
+        elif b == 0xcb:
+            self._reserve(8)
+            obj = struct.unpack_from(">d", self._buffer_view, self._buff_i)[0]
+            self._buff_i += 8
+        elif b == 0xcc:
+            self._reserve(1)
+            obj = self._buffer[self._buff_i]
+            self._buff_i += 1
+        elif b == 0xcd:
+            self._reserve(2)
+            obj = struct.unpack_from(">H", self._buffer_view, self._buff_i)[0]
+            self._buff_i += 2
+        elif b == 0xce:
+            self._reserve(4)
+            obj = struct.unpack_from(">I", self._buffer_view, self._buff_i)[0]
+            self._buff_i += 4
+        elif b == 0xcf:
+            self._reserve(8)
+            obj = struct.unpack_from(">Q", self._buffer_view, self._buff_i)[0]
+            self._buff_i += 8
+        elif b == 0xd0:
+            self._reserve(1)
+            obj = struct.unpack_from("b", self._buffer_view, self._buff_i)[0]
+            self._buff_i += 1
+        elif b == 0xd1:
+            self._reserve(2)
+            obj = struct.unpack_from(">h", self._buffer_view, self._buff_i)[0]
+            self._buff_i += 2
+        elif b == 0xd2:
+            self._reserve(4)
+            obj = struct.unpack_from(">i", self._buffer_view, self._buff_i)[0]
+            self._buff_i += 4
+        elif b == 0xd3:
+            self._reserve(8)
+            obj = struct.unpack_from(">q", self._buffer_view, self._buff_i)[0]
+            self._buff_i += 8
+        elif b == 0xd4:  # fixext 1
+            typ = TYPE_EXT
+            if self._max_ext_len < 1:
+                raise UnpackValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len))
+            self._reserve(2)
+            n, obj = struct.unpack_from("b1s", self._buffer_view, self._buff_i)
+            self._buff_i += 2
+        elif b == 0xd5:  # fixext 2
+            typ = TYPE_EXT
+            if self._max_ext_len < 2:
+                raise UnpackValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len))
+            self._reserve(3)
+            n, obj = struct.unpack_from("b2s", self._buffer_view, self._buff_i)
+            self._buff_i += 3
+        elif b == 0xd6:  # fixext 4
+            typ = TYPE_EXT
+            if self._max_ext_len < 4:
+                raise UnpackValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len))
+            self._reserve(5)
+            n, obj = struct.unpack_from("b4s", self._buffer_view, self._buff_i)
+            self._buff_i += 5
+        elif b == 0xd7:  # fixext 8
+            typ = TYPE_EXT
+            if self._max_ext_len < 8:
+                raise UnpackValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len))
+            self._reserve(9)
+            n, obj = struct.unpack_from("b8s", self._buffer_view, self._buff_i)
+            self._buff_i += 9
+        elif b == 0xd8:  # fixext 16
+            typ = TYPE_EXT
+            if self._max_ext_len < 16:
+                raise UnpackValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len))
+            self._reserve(17)
+            n, obj = struct.unpack_from("b16s", self._buffer_view, self._buff_i)
+            self._buff_i += 17
+        elif b == 0xd9:
+            typ = TYPE_RAW
+            self._reserve(1)
+            n = self._buffer[self._buff_i]
+            self._buff_i += 1
+            if n > self._max_str_len:
+                raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len)
+            obj = self._read(n)
+        elif b == 0xda:
+            typ = TYPE_RAW
+            self._reserve(2)
+            n, = struct.unpack_from(">H", self._buffer_view, self._buff_i)
+            self._buff_i += 2
+            if n > self._max_str_len:
+                raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len)
+            obj = self._read(n)
+        elif b == 0xdb:
+            typ = TYPE_RAW
+            self._reserve(4)
+            n, = struct.unpack_from(">I", self._buffer_view, self._buff_i)
+            self._buff_i += 4
+            if n > self._max_str_len:
+                raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len)
+            obj = self._read(n)
+        elif b == 0xdc:
+            typ = TYPE_ARRAY
+            self._reserve(2)
+            n, = struct.unpack_from(">H", self._buffer_view, self._buff_i)
+            self._buff_i += 2
+            if n > self._max_array_len:
+                raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len)
+        elif b == 0xdd:
+            typ = TYPE_ARRAY
+            self._reserve(4)
+            n, = struct.unpack_from(">I", self._buffer_view, self._buff_i)
+            self._buff_i += 4
+            if n > self._max_array_len:
+                raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len)
+        elif b == 0xde:
+            self._reserve(2)
+            n, = struct.unpack_from(">H", self._buffer_view, self._buff_i)
+            self._buff_i += 2
+            if n > self._max_map_len:
+                raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len)
+            typ = TYPE_MAP
+        elif b == 0xdf:
+            self._reserve(4)
+            n, = struct.unpack_from(">I", self._buffer_view, self._buff_i)
+            self._buff_i += 4
+            if n > self._max_map_len:
+                raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len)
+            typ = TYPE_MAP
+        else:
+            raise UnpackValueError("Unknown header: 0x%x" % b)
+        return typ, n, obj
+
+    def _unpack(self, execute=EX_CONSTRUCT):
+        typ, n, obj = self._read_header(execute)
+
+        if execute == EX_READ_ARRAY_HEADER:
+            if typ != TYPE_ARRAY:
+                raise UnpackValueError("Expected array")
+            return n
+        if execute == EX_READ_MAP_HEADER:
+            if typ != TYPE_MAP:
+                raise UnpackValueError("Expected map")
+            return n
+        # TODO should we eliminate the recursion?
+        if typ == TYPE_ARRAY:
+            if execute == EX_SKIP:
+                for i in xrange(n):
+                    # TODO check whether we need to call `list_hook`
+                    self._unpack(EX_SKIP)
+                return
+            ret = newlist_hint(n)
+            for i in xrange(n):
+                ret.append(self._unpack(EX_CONSTRUCT))
+            if self._list_hook is not None:
+                ret = self._list_hook(ret)
+            # TODO is the interaction between `list_hook` and `use_list` ok?
+            return ret if self._use_list else tuple(ret)
+        if typ == TYPE_MAP:
+            if execute == EX_SKIP:
+                for i in xrange(n):
+                    # TODO check whether we need to call hooks
+                    self._unpack(EX_SKIP)
+                    self._unpack(EX_SKIP)
+                return
+            if self._object_pairs_hook is not None:
+                ret = self._object_pairs_hook(
+                    (self._unpack(EX_CONSTRUCT),
+                     self._unpack(EX_CONSTRUCT))
+                    for _ in xrange(n))
+            else:
+                ret = {}
+                for _ in xrange(n):
+                    key = self._unpack(EX_CONSTRUCT)
+                    ret[key] = self._unpack(EX_CONSTRUCT)
+                if self._object_hook is not None:
+                    ret = self._object_hook(ret)
+            return ret
+        if execute == EX_SKIP:
+            return
+        if typ == TYPE_RAW:
+            if self._encoding is not None:
+                obj = obj.decode(self._encoding, self._unicode_errors)
+            elif self._raw:
+                obj = bytes(obj)
+            else:
+                obj = obj.decode('utf_8')
+            return obj
+        if typ == TYPE_EXT:
+            return self._ext_hook(n, bytes(obj))
+        if typ == TYPE_BIN:
+            return bytes(obj)
+        assert typ == TYPE_IMMEDIATE
+        return obj
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        try:
+            ret = self._unpack(EX_CONSTRUCT)
+            self._consume()
+            return ret
+        except OutOfData:
+            self._consume()
+            raise StopIteration
+
+    next = __next__
+
+    def skip(self, write_bytes=None):
+        self._unpack(EX_SKIP)
+        if write_bytes is not None:
+            warnings.warn("`write_bytes` option is deprecated.  Use `.tell()` instead.", DeprecationWarning)
+            write_bytes(self._buffer[self._buf_checkpoint:self._buff_i])
+        self._consume()
+
+    def unpack(self, write_bytes=None):
+        ret = self._unpack(EX_CONSTRUCT)
+        if write_bytes is not None:
+            warnings.warn("`write_bytes` option is deprecated.  Use `.tell()` instead.", DeprecationWarning)
+            write_bytes(self._buffer[self._buf_checkpoint:self._buff_i])
+        self._consume()
+        return ret
+
+    def read_array_header(self, write_bytes=None):
+        ret = self._unpack(EX_READ_ARRAY_HEADER)
+        if write_bytes is not None:
+            warnings.warn("`write_bytes` option is deprecated.  Use `.tell()` instead.", DeprecationWarning)
+            write_bytes(self._buffer[self._buf_checkpoint:self._buff_i])
+        self._consume()
+        return ret
+
+    def read_map_header(self, write_bytes=None):
+        ret = self._unpack(EX_READ_MAP_HEADER)
+        if write_bytes is not None:
+            warnings.warn("`write_bytes` option is deprecated.  Use `.tell()` instead.", DeprecationWarning)
+            write_bytes(self._buffer[self._buf_checkpoint:self._buff_i])
+        self._consume()
+        return ret
+
+    def tell(self):
+        return self._stream_offset
+
+
+class Packer(object):
+    """
+    MessagePack Packer
+
+    usage:
+
+        packer = Packer()
+        astream.write(packer.pack(a))
+        astream.write(packer.pack(b))
+
+    Packer's constructor has some keyword arguments:
+
+    :param callable default:
+        Convert user type to builtin type that Packer supports.
+        See also simplejson's document.
+
+    :param bool use_single_float:
+        Use single precision float type for float. (default: False)
+
+    :param bool autoreset:
+        Reset the buffer after each pack and return its content as `bytes`. (default: True)
+        If set to false, use `bytes()` to get the content and `.reset()` to clear the buffer.
+
+    :param bool use_bin_type:
+        Use bin type introduced in msgpack spec 2.0 for bytes.
+        It also enables str8 type for unicode.
+
+    :param bool strict_types:
+        If set to true, types will be checked to be exact.  Classes derived
+        from serializable types will not be serialized; they will be
+        treated as an unsupported type and forwarded to `default`.
+        Additionally, tuples will not be serialized as lists.
+        This is useful when trying to implement accurate serialization
+        for Python types.
+
+    :param str encoding:
+        (deprecated) Convert unicode to bytes with this encoding. (default: 'utf-8')
+
+    :param str unicode_errors:
+        Error handler for encoding unicode. (default: 'strict')
+    """
+    def __init__(self, default=None, encoding=None, unicode_errors=None,
+                 use_single_float=False, autoreset=True, use_bin_type=False,
+                 strict_types=False):
+        if encoding is None:
+            encoding = 'utf_8'
+        else:
+            warnings.warn(
+                "encoding is deprecated, Use raw=False instead.",
+                PendingDeprecationWarning)
+
+        if unicode_errors is None:
+            unicode_errors = 'strict'
+
+        self._strict_types = strict_types
+        self._use_float = use_single_float
+        self._autoreset = autoreset
+        self._use_bin_type = use_bin_type
+        self._encoding = encoding
+        self._unicode_errors = unicode_errors
+        self._buffer = StringIO()
+        if default is not None:
+            if not callable(default):
+                raise TypeError("default must be callable")
+        self._default = default
+
+    def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT,
+              check=isinstance, check_type_strict=_check_type_strict):
+        default_used = False
+        if self._strict_types:
+            check = check_type_strict
+            list_types = list
+        else:
+            list_types = (list, tuple)
+        while True:
+            if nest_limit < 0:
+                raise PackValueError("recursion limit exceeded")
+            if obj is None:
+                return self._buffer.write(b"\xc0")
+            if check(obj, bool):
+                if obj:
+                    return self._buffer.write(b"\xc3")
+                return self._buffer.write(b"\xc2")
+            if check(obj, int_types):
+                if 0 <= obj < 0x80:
+                    return self._buffer.write(struct.pack("B", obj))
+                if -0x20 <= obj < 0:
+                    return self._buffer.write(struct.pack("b", obj))
+                if 0x80 <= obj <= 0xff:
+                    return self._buffer.write(struct.pack("BB", 0xcc, obj))
+                if -0x80 <= obj < 0:
+                    return self._buffer.write(struct.pack(">Bb", 0xd0, obj))
+                if 0xff < obj <= 0xffff:
+                    return self._buffer.write(struct.pack(">BH", 0xcd, obj))
+                if -0x8000 <= obj < -0x80:
+                    return self._buffer.write(struct.pack(">Bh", 0xd1, obj))
+                if 0xffff < obj <= 0xffffffff:
+                    return self._buffer.write(struct.pack(">BI", 0xce, obj))
+                if -0x80000000 <= obj < -0x8000:
+                    return self._buffer.write(struct.pack(">Bi", 0xd2, obj))
+                if 0xffffffff < obj <= 0xffffffffffffffff:
+                    return self._buffer.write(struct.pack(">BQ", 0xcf, obj))
+                if -0x8000000000000000 <= obj < -0x80000000:
+                    return self._buffer.write(struct.pack(">Bq", 0xd3, obj))
+                if not default_used and self._default is not None:
+                    obj = self._default(obj)
+                    default_used = True
+                    continue
+                raise PackOverflowError("Integer value out of range")
+            if check(obj, (bytes, bytearray)):
+                n = len(obj)
+                if n >= 2**32:
+                    raise PackValueError("%s is too large" % type(obj).__name__)
+                self._pack_bin_header(n)
+                return self._buffer.write(obj)
+            if check(obj, Unicode):
+                if self._encoding is None:
+                    raise TypeError(
+                        "Can't encode unicode string: "
+                        "no encoding is specified")
+                obj = obj.encode(self._encoding, self._unicode_errors)
+                n = len(obj)
+                if n >= 2**32:
+                    raise PackValueError("String is too large")
+                self._pack_raw_header(n)
+                return self._buffer.write(obj)
+            if check(obj, memoryview):
+                n = len(obj) * obj.itemsize
+                if n >= 2**32:
+                    raise PackValueError("Memoryview is too large")
+                self._pack_bin_header(n)
+                return self._buffer.write(obj)
+            if check(obj, float):
+                if self._use_float:
+                    return self._buffer.write(struct.pack(">Bf", 0xca, obj))
+                return self._buffer.write(struct.pack(">Bd", 0xcb, obj))
+            if check(obj, ExtType):
+                code = obj.code
+                data = obj.data
+                assert isinstance(code, int)
+                assert isinstance(data, bytes)
+                L = len(data)
+                if L == 1:
+                    self._buffer.write(b'\xd4')
+                elif L == 2:
+                    self._buffer.write(b'\xd5')
+                elif L == 4:
+                    self._buffer.write(b'\xd6')
+                elif L == 8:
+                    self._buffer.write(b'\xd7')
+                elif L == 16:
+                    self._buffer.write(b'\xd8')
+                elif L <= 0xff:
+                    self._buffer.write(struct.pack(">BB", 0xc7, L))
+                elif L <= 0xffff:
+                    self._buffer.write(struct.pack(">BH", 0xc8, L))
+                else:
+                    self._buffer.write(struct.pack(">BI", 0xc9, L))
+                self._buffer.write(struct.pack("b", code))
+                self._buffer.write(data)
+                return
+            if check(obj, list_types):
+                n = len(obj)
+                self._pack_array_header(n)
+                for i in xrange(n):
+                    self._pack(obj[i], nest_limit - 1)
+                return
+            if check(obj, dict):
+                return self._pack_map_pairs(len(obj), dict_iteritems(obj),
+                                               nest_limit - 1)
+            if not default_used and self._default is not None:
+                obj = self._default(obj)
+                default_used = True
+                continue
+            raise TypeError("Cannot serialize %r" % (obj, ))
+
+    def pack(self, obj):
+        try:
+            self._pack(obj)
+        except:
+            self._buffer = StringIO()  # force reset
+            raise
+        ret = self._buffer.getvalue()
+        if self._autoreset:
+            self._buffer = StringIO()
+        elif USING_STRINGBUILDER:
+            self._buffer = StringIO(ret)
+        return ret
+
+    def pack_map_pairs(self, pairs):
+        self._pack_map_pairs(len(pairs), pairs)
+        ret = self._buffer.getvalue()
+        if self._autoreset:
+            self._buffer = StringIO()
+        elif USING_STRINGBUILDER:
+            self._buffer = StringIO(ret)
+        return ret
+
+    def pack_array_header(self, n):
+        if n >= 2**32:
+            raise PackValueError
+        self._pack_array_header(n)
+        ret = self._buffer.getvalue()
+        if self._autoreset:
+            self._buffer = StringIO()
+        elif USING_STRINGBUILDER:
+            self._buffer = StringIO(ret)
+        return ret
+
+    def pack_map_header(self, n):
+        if n >= 2**32:
+            raise PackValueError
+        self._pack_map_header(n)
+        ret = self._buffer.getvalue()
+        if self._autoreset:
+            self._buffer = StringIO()
+        elif USING_STRINGBUILDER:
+            self._buffer = StringIO(ret)
+        return ret
+
+    def pack_ext_type(self, typecode, data):
+        if not isinstance(typecode, int):
+            raise TypeError("typecode must have int type.")
+        if not 0 <= typecode <= 127:
+            raise ValueError("typecode should be 0-127")
+        if not isinstance(data, bytes):
+            raise TypeError("data must have bytes type")
+        L = len(data)
+        if L > 0xffffffff:
+            raise PackValueError("Too large data")
+        if L == 1:
+            self._buffer.write(b'\xd4')
+        elif L == 2:
+            self._buffer.write(b'\xd5')
+        elif L == 4:
+            self._buffer.write(b'\xd6')
+        elif L == 8:
+            self._buffer.write(b'\xd7')
+        elif L == 16:
+            self._buffer.write(b'\xd8')
+        elif L <= 0xff:
+            self._buffer.write(b'\xc7' + struct.pack('B', L))
+        elif L <= 0xffff:
+            self._buffer.write(b'\xc8' + struct.pack('>H', L))
+        else:
+            self._buffer.write(b'\xc9' + struct.pack('>I', L))
+        self._buffer.write(struct.pack('B', typecode))
+        self._buffer.write(data)
+
+    def _pack_array_header(self, n):
+        if n <= 0x0f:
+            return self._buffer.write(struct.pack('B', 0x90 + n))
+        if n <= 0xffff:
+            return self._buffer.write(struct.pack(">BH", 0xdc, n))
+        if n <= 0xffffffff:
+            return self._buffer.write(struct.pack(">BI", 0xdd, n))
+        raise PackValueError("Array is too large")
+
+    def _pack_map_header(self, n):
+        if n <= 0x0f:
+            return self._buffer.write(struct.pack('B', 0x80 + n))
+        if n <= 0xffff:
+            return self._buffer.write(struct.pack(">BH", 0xde, n))
+        if n <= 0xffffffff:
+            return self._buffer.write(struct.pack(">BI", 0xdf, n))
+        raise PackValueError("Dict is too large")
+
+    def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
+        self._pack_map_header(n)
+        for (k, v) in pairs:
+            self._pack(k, nest_limit - 1)
+            self._pack(v, nest_limit - 1)
+
+    def _pack_raw_header(self, n):
+        if n <= 0x1f:
+            self._buffer.write(struct.pack('B', 0xa0 + n))
+        elif self._use_bin_type and n <= 0xff:
+            self._buffer.write(struct.pack('>BB', 0xd9, n))
+        elif n <= 0xffff:
+            self._buffer.write(struct.pack(">BH", 0xda, n))
+        elif n <= 0xffffffff:
+            self._buffer.write(struct.pack(">BI", 0xdb, n))
+        else:
+            raise PackValueError('Raw is too large')
+
+    def _pack_bin_header(self, n):
+        if not self._use_bin_type:
+            return self._pack_raw_header(n)
+        elif n <= 0xff:
+            return self._buffer.write(struct.pack('>BB', 0xc4, n))
+        elif n <= 0xffff:
+            return self._buffer.write(struct.pack(">BH", 0xc5, n))
+        elif n <= 0xffffffff:
+            return self._buffer.write(struct.pack(">BI", 0xc6, n))
+        else:
+            raise PackValueError('Bin is too large')
+
+    def bytes(self):
+        return self._buffer.getvalue()
+
+    def reset(self):
+        self._buffer = StringIO()
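
A short sketch of the streaming path through the pure-Python Packer and
Unpacker above, simulating data that arrives in pieces::

    from pip._vendor.msgpack import fallback

    packer = fallback.Packer(use_bin_type=True)
    stream = b''.join(packer.pack(i) for i in range(3))   # b'\x00\x01\x02'

    unpacker = fallback.Unpacker(raw=False)
    for chunk in (stream[:2], stream[2:]):                # feed partial buffers
        unpacker.feed(chunk)
    assert list(unpacker) == [0, 1, 2]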
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/__about__.py b/lib/python3.7/site-packages/pip/_vendor/packaging/__about__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7481c9e298370f034602faff6c045cb1fecf504b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/packaging/__about__.py
@@ -0,0 +1,27 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+__all__ = [
+    "__title__",
+    "__summary__",
+    "__uri__",
+    "__version__",
+    "__author__",
+    "__email__",
+    "__license__",
+    "__copyright__",
+]
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "19.0"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD or Apache License, Version 2.0"
+__copyright__ = "Copyright 2014-2019 %s" % __author__
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/__init__.py b/lib/python3.7/site-packages/pip/_vendor/packaging/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0cf67df5245be16a020ca048832e180f7ce8661
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/packaging/__init__.py
@@ -0,0 +1,26 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+from .__about__ import (
+    __author__,
+    __copyright__,
+    __email__,
+    __license__,
+    __summary__,
+    __title__,
+    __uri__,
+    __version__,
+)
+
+__all__ = [
+    "__title__",
+    "__summary__",
+    "__uri__",
+    "__version__",
+    "__author__",
+    "__email__",
+    "__license__",
+    "__copyright__",
+]
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/__about__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/__about__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a3f3957ddb70cecffec6a753566db4471b18813
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/__about__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f480292da5a0958711726a2078ebb0ea219f52c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/_compat.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/_compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8b6f867df6afa110060d0cf2cd23f4a4b74f01b9
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/_compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..73fef27c5a01931a4e0aea08b5f3a4e23890f61e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/_structures.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5587d183b0a1fa357fd49f7c5b4b066e00e91a86
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/markers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2cafdf4b980ff09e97c5ba3df03b631dba8b2226
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/requirements.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d39f3ee14b9b18cf5a7bb2f3b455aa769bbedd51
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e385beb37a0d044920bef31e407a76275a9af9d
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e965a068dbd83f160c165bd01d7ce2233a00f59
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/packaging/__pycache__/version.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/_compat.py b/lib/python3.7/site-packages/pip/_vendor/packaging/_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..25da473c196855ad59a6d2d785ef1ddef49795be
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/packaging/_compat.py
@@ -0,0 +1,31 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+# flake8: noqa
+
+if PY3:
+    string_types = (str,)
+else:
+    string_types = (basestring,)
+
+
+def with_metaclass(meta, *bases):
+    """
+    Create a base class with a metaclass.
+    """
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(meta):
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+
+    return type.__new__(metaclass, "temporary_class", (), {})
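+
+
+if __name__ == "__main__":  # pragma: no cover
+    # A hedged demo, not part of the vendored module: show that
+    # with_metaclass() really applies the metaclass on this interpreter.
+    # ``_Demo`` is a hypothetical class used only for illustration; this is
+    # the same pattern specifiers.BaseSpecifier uses below.
+    import abc
+
+    class _Demo(with_metaclass(abc.ABCMeta, object)):
+        @abc.abstractmethod
+        def run(self):
+            pass
+
+    print(type(_Demo))  # -> <class 'abc.ABCMeta'> on both Python 2 and 3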
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/_structures.py b/lib/python3.7/site-packages/pip/_vendor/packaging/_structures.py
new file mode 100644
index 0000000000000000000000000000000000000000..68dcca634d8e3f0081bad2f9ae5e653a2942db68
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/packaging/_structures.py
@@ -0,0 +1,68 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+
+class Infinity(object):
+    def __repr__(self):
+        return "Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return False
+
+    def __le__(self, other):
+        return False
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return True
+
+    def __ge__(self, other):
+        return True
+
+    def __neg__(self):
+        return NegativeInfinity
+
+
+Infinity = Infinity()
+
+
+class NegativeInfinity(object):
+    def __repr__(self):
+        return "-Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return True
+
+    def __le__(self, other):
+        return True
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return False
+
+    def __ge__(self, other):
+        return False
+
+    def __neg__(self):
+        return Infinity
+
+
+NegativeInfinity = NegativeInfinity()
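+
+
+if __name__ == "__main__":  # pragma: no cover
+    # A hedged demo, not part of the vendored module: the two singletons act
+    # as extreme sentinel values in comparisons, which version._cmpkey uses
+    # to make absent version segments sort first or last.
+    print(Infinity > 10 ** 9)              # True: greater than everything
+    print(NegativeInfinity < -(10 ** 9))   # True: less than everything
+    print(-Infinity is NegativeInfinity)   # True: negation swaps sentinels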
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/markers.py b/lib/python3.7/site-packages/pip/_vendor/packaging/markers.py
new file mode 100644
index 0000000000000000000000000000000000000000..548247681bc180e8c2e7981952864ccd0dd70a28
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/packaging/markers.py
@@ -0,0 +1,296 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import operator
+import os
+import platform
+import sys
+
+from pip._vendor.pyparsing import ParseException, ParseResults, stringStart, stringEnd
+from pip._vendor.pyparsing import ZeroOrMore, Group, Forward, QuotedString
+from pip._vendor.pyparsing import Literal as L  # noqa
+
+from ._compat import string_types
+from .specifiers import Specifier, InvalidSpecifier
+
+
+__all__ = [
+    "InvalidMarker",
+    "UndefinedComparison",
+    "UndefinedEnvironmentName",
+    "Marker",
+    "default_environment",
+]
+
+
+class InvalidMarker(ValueError):
+    """
+    An invalid marker was found; users should refer to PEP 508.
+    """
+
+
+class UndefinedComparison(ValueError):
+    """
+    An invalid operation was attempted on a value that doesn't support it.
+    """
+
+
+class UndefinedEnvironmentName(ValueError):
+    """
+    An attempt was made to use a name that does not exist in the
+    evaluation environment.
+    """
+
+
+class Node(object):
+    def __init__(self, value):
+        self.value = value
+
+    def __str__(self):
+        return str(self.value)
+
+    def __repr__(self):
+        return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
+
+    def serialize(self):
+        raise NotImplementedError
+
+
+class Variable(Node):
+    def serialize(self):
+        return str(self)
+
+
+class Value(Node):
+    def serialize(self):
+        return '"{0}"'.format(self)
+
+
+class Op(Node):
+    def serialize(self):
+        return str(self)
+
+
+VARIABLE = (
+    L("implementation_version")
+    | L("platform_python_implementation")
+    | L("implementation_name")
+    | L("python_full_version")
+    | L("platform_release")
+    | L("platform_version")
+    | L("platform_machine")
+    | L("platform_system")
+    | L("python_version")
+    | L("sys_platform")
+    | L("os_name")
+    | L("os.name")
+    | L("sys.platform")  # PEP-345
+    | L("platform.version")  # PEP-345
+    | L("platform.machine")  # PEP-345
+    | L("platform.python_implementation")  # PEP-345
+    | L("python_implementation")  # PEP-345
+    | L("extra")  # undocumented setuptools legacy
+)
+ALIASES = {
+    "os.name": "os_name",
+    "sys.platform": "sys_platform",
+    "platform.version": "platform_version",
+    "platform.machine": "platform_machine",
+    "platform.python_implementation": "platform_python_implementation",
+    "python_implementation": "platform_python_implementation",
+}
+VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
+
+VERSION_CMP = (
+    L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
+)
+
+MARKER_OP = VERSION_CMP | L("not in") | L("in")
+MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
+
+MARKER_VALUE = QuotedString("'") | QuotedString('"')
+MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
+
+BOOLOP = L("and") | L("or")
+
+MARKER_VAR = VARIABLE | MARKER_VALUE
+
+MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
+MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
+
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+
+MARKER_EXPR = Forward()
+MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
+MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
+
+MARKER = stringStart + MARKER_EXPR + stringEnd
+
+
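+# For example (a sketch, not part of the vendored module), parsing
+# "os_name == 'posix' or python_version >= '3'" and coercing the result
+# with _coerce_parse_result() below yields roughly:
+#     [(Variable('os_name'), Op('=='), Value('posix')),
+#      'or',
+#      (Variable('python_version'), Op('>='), Value('3'))]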
+def _coerce_parse_result(results):
+    if isinstance(results, ParseResults):
+        return [_coerce_parse_result(i) for i in results]
+    else:
+        return results
+
+
+def _format_marker(marker, first=True):
+    assert isinstance(marker, (list, tuple, string_types))
+
+    # Sometimes we have a structure like [[...]] which is a single item list
+    # where the single item is itself its own list. In that case we want to
+    # skip the rest of this function so that we don't get extraneous () on the
+    # outside.
+    if (
+        isinstance(marker, list)
+        and len(marker) == 1
+        and isinstance(marker[0], (list, tuple))
+    ):
+        return _format_marker(marker[0])
+
+    if isinstance(marker, list):
+        inner = (_format_marker(m, first=False) for m in marker)
+        if first:
+            return " ".join(inner)
+        else:
+            return "(" + " ".join(inner) + ")"
+    elif isinstance(marker, tuple):
+        return " ".join([m.serialize() for m in marker])
+    else:
+        return marker
+
+
+_operators = {
+    "in": lambda lhs, rhs: lhs in rhs,
+    "not in": lambda lhs, rhs: lhs not in rhs,
+    "<": operator.lt,
+    "<=": operator.le,
+    "==": operator.eq,
+    "!=": operator.ne,
+    ">=": operator.ge,
+    ">": operator.gt,
+}
+
+
+def _eval_op(lhs, op, rhs):
+    try:
+        spec = Specifier("".join([op.serialize(), rhs]))
+    except InvalidSpecifier:
+        pass
+    else:
+        return spec.contains(lhs)
+
+    oper = _operators.get(op.serialize())
+    if oper is None:
+        raise UndefinedComparison(
+            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
+        )
+
+    return oper(lhs, rhs)
+
+
+_undefined = object()
+
+
+def _get_env(environment, name):
+    value = environment.get(name, _undefined)
+
+    if value is _undefined:
+        raise UndefinedEnvironmentName(
+            "{0!r} does not exist in evaluation environment.".format(name)
+        )
+
+    return value
+
+
+def _evaluate_markers(markers, environment):
+    groups = [[]]
+
+    for marker in markers:
+        assert isinstance(marker, (list, tuple, string_types))
+
+        if isinstance(marker, list):
+            groups[-1].append(_evaluate_markers(marker, environment))
+        elif isinstance(marker, tuple):
+            lhs, op, rhs = marker
+
+            if isinstance(lhs, Variable):
+                lhs_value = _get_env(environment, lhs.value)
+                rhs_value = rhs.value
+            else:
+                lhs_value = lhs.value
+                rhs_value = _get_env(environment, rhs.value)
+
+            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+        else:
+            assert marker in ["and", "or"]
+            if marker == "or":
+                groups.append([])
+
+    return any(all(item) for item in groups)
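+
+
+# A sketch of the grouping in _evaluate_markers, not part of the vendored
+# module: for markers parsed from "A and B or C", ``groups`` ends up as
+# [[eval(A), eval(B)], [eval(C)]], so the result is
+# any(all(g) for g in groups), i.e. (A and B) or C.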
+
+
+def format_full_version(info):
+    version = "{0.major}.{0.minor}.{0.micro}".format(info)
+    kind = info.releaselevel
+    if kind != "final":
+        version += kind[0] + str(info.serial)
+    return version
+
+
+def default_environment():
+    if hasattr(sys, "implementation"):
+        iver = format_full_version(sys.implementation.version)
+        implementation_name = sys.implementation.name
+    else:
+        iver = "0"
+        implementation_name = ""
+
+    return {
+        "implementation_name": implementation_name,
+        "implementation_version": iver,
+        "os_name": os.name,
+        "platform_machine": platform.machine(),
+        "platform_release": platform.release(),
+        "platform_system": platform.system(),
+        "platform_version": platform.version(),
+        "python_full_version": platform.python_version(),
+        "platform_python_implementation": platform.python_implementation(),
+        "python_version": platform.python_version()[:3],
+        "sys_platform": sys.platform,
+    }
+
+
+class Marker(object):
+    def __init__(self, marker):
+        try:
+            self._markers = _coerce_parse_result(MARKER.parseString(marker))
+        except ParseException as e:
+            err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
+                marker, marker[e.loc : e.loc + 8]
+            )
+            raise InvalidMarker(err_str)
+
+    def __str__(self):
+        return _format_marker(self._markers)
+
+    def __repr__(self):
+        return "<Marker({0!r})>".format(str(self))
+
+    def evaluate(self, environment=None):
+        """Evaluate a marker.
+
+        Return the boolean from evaluating the given marker against the
+        environment. environment is an optional argument to override all or
+        part of the determined environment.
+
+        The environment is determined from the current Python process.
+        """
+        current_environment = default_environment()
+        if environment is not None:
+            current_environment.update(environment)
+
+        return _evaluate_markers(self._markers, current_environment)
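+
+
+if __name__ == "__main__":  # pragma: no cover
+    # A hedged demo, not part of the vendored module: evaluate a PEP 508
+    # marker against the running interpreter, then against an override.
+    m = Marker("python_version >= '2.7' and os_name == 'posix'")
+    print(m)                              # python_version >= "2.7" and os_name == "posix"
+    print(m.evaluate())                   # True on a POSIX Python >= 2.7
+    print(m.evaluate({"os_name": "nt"}))  # False: the override wins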
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/requirements.py b/lib/python3.7/site-packages/pip/_vendor/packaging/requirements.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbc5f11db264f183b24002193f81e3045b939a81
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/packaging/requirements.py
@@ -0,0 +1,138 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import string
+import re
+
+from pip._vendor.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
+from pip._vendor.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
+from pip._vendor.pyparsing import Literal as L  # noqa
+from pip._vendor.six.moves.urllib import parse as urlparse
+
+from .markers import MARKER_EXPR, Marker
+from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+
+class InvalidRequirement(ValueError):
+    """
+    An invalid requirement was found; users should refer to PEP 508.
+    """
+
+
+ALPHANUM = Word(string.ascii_letters + string.digits)
+
+LBRACKET = L("[").suppress()
+RBRACKET = L("]").suppress()
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+COMMA = L(",").suppress()
+SEMICOLON = L(";").suppress()
+AT = L("@").suppress()
+
+PUNCTUATION = Word("-_.")
+IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
+IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
+
+NAME = IDENTIFIER("name")
+EXTRA = IDENTIFIER
+
+URI = Regex(r"[^ ]+")("url")
+URL = AT + URI
+
+EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
+EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
+
+VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
+VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
+
+VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
+VERSION_MANY = Combine(
+    VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
+)("_raw_spec")
+_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
+_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
+
+VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
+VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
+
+MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
+MARKER_EXPR.setParseAction(
+    lambda s, l, t: Marker(s[t._original_start : t._original_end])
+)
+MARKER_SEPARATOR = SEMICOLON
+MARKER = MARKER_SEPARATOR + MARKER_EXPR
+
+VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
+URL_AND_MARKER = URL + Optional(MARKER)
+
+NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
+
+REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+# pyparsing isn't thread-safe during initialization, so we do it eagerly; see
+# issue #104
+REQUIREMENT.parseString("x[]")
+
+
+class Requirement(object):
+    """Parse a requirement.
+
+    Parse a given requirement string into its parts, such as name, specifier,
+    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+    string.
+    """
+
+    # TODO: Can we test whether something is contained within a requirement?
+    #       If so how do we do that? Do we need to test against the _name_ of
+    #       the thing as well as the version? What about the markers?
+    # TODO: Can we normalize the name and extra name?
+
+    def __init__(self, requirement_string):
+        try:
+            req = REQUIREMENT.parseString(requirement_string)
+        except ParseException as e:
+            raise InvalidRequirement(
+                'Parse error at "{0!r}": {1}'.format(
+                    requirement_string[e.loc : e.loc + 8], e.msg
+                )
+            )
+
+        self.name = req.name
+        if req.url:
+            parsed_url = urlparse.urlparse(req.url)
+            if parsed_url.scheme == "file":
+                if urlparse.urlunparse(parsed_url) != req.url:
+                    raise InvalidRequirement("Invalid URL given")
+            elif not (parsed_url.scheme and parsed_url.netloc) or (
+                not parsed_url.scheme and not parsed_url.netloc
+            ):
+                raise InvalidRequirement("Invalid URL: {0}".format(req.url))
+            self.url = req.url
+        else:
+            self.url = None
+        self.extras = set(req.extras.asList() if req.extras else [])
+        self.specifier = SpecifierSet(req.specifier)
+        self.marker = req.marker if req.marker else None
+
+    def __str__(self):
+        parts = [self.name]
+
+        if self.extras:
+            parts.append("[{0}]".format(",".join(sorted(self.extras))))
+
+        if self.specifier:
+            parts.append(str(self.specifier))
+
+        if self.url:
+            parts.append("@ {0}".format(self.url))
+            if self.marker:
+                parts.append(" ")
+
+        if self.marker:
+            parts.append("; {0}".format(self.marker))
+
+        return "".join(parts)
+
+    def __repr__(self):
+        return "<Requirement({0!r})>".format(str(self))
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/specifiers.py b/lib/python3.7/site-packages/pip/_vendor/packaging/specifiers.py
new file mode 100644
index 0000000000000000000000000000000000000000..743576a080a0af8d0995f307ea6afc645b13ca61
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/packaging/specifiers.py
@@ -0,0 +1,749 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import abc
+import functools
+import itertools
+import re
+
+from ._compat import string_types, with_metaclass
+from .version import Version, LegacyVersion, parse
+
+
+class InvalidSpecifier(ValueError):
+    """
+    An invalid specifier was found; users should refer to PEP 440.
+    """
+
+
+class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
+    @abc.abstractmethod
+    def __str__(self):
+        """
+        Returns the str representation of this Specifier-like object. This
+        should be representative of the Specifier itself.
+        """
+
+    @abc.abstractmethod
+    def __hash__(self):
+        """
+        Returns a hash value for this Specifier-like object.
+        """
+
+    @abc.abstractmethod
+    def __eq__(self, other):
+        """
+        Returns a boolean representing whether or not the two Specifier-like
+        objects are equal.
+        """
+
+    @abc.abstractmethod
+    def __ne__(self, other):
+        """
+        Returns a boolean representing whether or not the two Specifier-like
+        objects are not equal.
+        """
+
+    @abc.abstractproperty
+    def prereleases(self):
+        """
+        Returns whether or not pre-releases as a whole are allowed by this
+        specifier.
+        """
+
+    @prereleases.setter
+    def prereleases(self, value):
+        """
+        Sets whether or not pre-releases as a whole are allowed by this
+        specifier.
+        """
+
+    @abc.abstractmethod
+    def contains(self, item, prereleases=None):
+        """
+        Determines if the given item is contained within this specifier.
+        """
+
+    @abc.abstractmethod
+    def filter(self, iterable, prereleases=None):
+        """
+        Takes an iterable of items and filters them so that only items which
+        are contained within this specifier are kept.
+        """
+
+
+class _IndividualSpecifier(BaseSpecifier):
+
+    _operators = {}
+
+    def __init__(self, spec="", prereleases=None):
+        match = self._regex.search(spec)
+        if not match:
+            raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
+
+        self._spec = (match.group("operator").strip(), match.group("version").strip())
+
+        # Store whether or not this Specifier should accept prereleases
+        self._prereleases = prereleases
+
+    def __repr__(self):
+        pre = (
+            ", prereleases={0!r}".format(self.prereleases)
+            if self._prereleases is not None
+            else ""
+        )
+
+        return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre)
+
+    def __str__(self):
+        return "{0}{1}".format(*self._spec)
+
+    def __hash__(self):
+        return hash(self._spec)
+
+    def __eq__(self, other):
+        if isinstance(other, string_types):
+            try:
+                other = self.__class__(other)
+            except InvalidSpecifier:
+                return NotImplemented
+        elif not isinstance(other, self.__class__):
+            return NotImplemented
+
+        return self._spec == other._spec
+
+    def __ne__(self, other):
+        if isinstance(other, string_types):
+            try:
+                other = self.__class__(other)
+            except InvalidSpecifier:
+                return NotImplemented
+        elif not isinstance(other, self.__class__):
+            return NotImplemented
+
+        return self._spec != other._spec
+
+    def _get_operator(self, op):
+        return getattr(self, "_compare_{0}".format(self._operators[op]))
+
+    def _coerce_version(self, version):
+        if not isinstance(version, (LegacyVersion, Version)):
+            version = parse(version)
+        return version
+
+    @property
+    def operator(self):
+        return self._spec[0]
+
+    @property
+    def version(self):
+        return self._spec[1]
+
+    @property
+    def prereleases(self):
+        return self._prereleases
+
+    @prereleases.setter
+    def prereleases(self, value):
+        self._prereleases = value
+
+    def __contains__(self, item):
+        return self.contains(item)
+
+    def contains(self, item, prereleases=None):
+        # Determine if prereleases are to be allowed or not.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # Normalize item to a Version or LegacyVersion; this allows us to have
+        # a shortcut for ``"2.0" in Specifier(">=2")``.
+        item = self._coerce_version(item)
+
+        # Determine if we should be supporting prereleases in this specifier
+        # or not; if we do not support prereleases, then we can short-circuit
+        # the logic if this version is a prerelease.
+        if item.is_prerelease and not prereleases:
+            return False
+
+        # Actually do the comparison to determine if this item is contained
+        # within this Specifier or not.
+        return self._get_operator(self.operator)(item, self.version)
+
+    def filter(self, iterable, prereleases=None):
+        yielded = False
+        found_prereleases = []
+
+        kw = {"prereleases": prereleases if prereleases is not None else True}
+
+        # Attempt to iterate over all the values in the iterable and if any of
+        # them match, yield them.
+        for version in iterable:
+            parsed_version = self._coerce_version(version)
+
+            if self.contains(parsed_version, **kw):
+                # If our version is a prerelease, and we were not set to allow
+                # prereleases, then we'll store it for later in case nothing
+                # else matches this specifier.
+                if parsed_version.is_prerelease and not (
+                    prereleases or self.prereleases
+                ):
+                    found_prereleases.append(version)
+                # Either this is not a prerelease, or we should have been
+                # accepting prereleases from the beginning.
+                else:
+                    yielded = True
+                    yield version
+
+        # Now that we've iterated over everything, determine if we've yielded
+        # any values, and if we have not and we have any prereleases stored up
+        # then we will go ahead and yield the prereleases.
+        if not yielded and found_prereleases:
+            for version in found_prereleases:
+                yield version
+
+
+class LegacySpecifier(_IndividualSpecifier):
+
+    _regex_str = r"""
+        (?P<operator>(==|!=|<=|>=|<|>))
+        \s*
+        (?P<version>
+            [^,;\s)]* # Since this is a "legacy" specifier, and the version
+                      # string can be just about anything, we match everything
+                      # except for whitespace, a semi-colon for marker support,
+                      # a closing paren since versions can be enclosed in
+                      # them, and a comma since it's a version separator.
+        )
+        """
+
+    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    _operators = {
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+    }
+
+    def _coerce_version(self, version):
+        if not isinstance(version, LegacyVersion):
+            version = LegacyVersion(str(version))
+        return version
+
+    def _compare_equal(self, prospective, spec):
+        return prospective == self._coerce_version(spec)
+
+    def _compare_not_equal(self, prospective, spec):
+        return prospective != self._coerce_version(spec)
+
+    def _compare_less_than_equal(self, prospective, spec):
+        return prospective <= self._coerce_version(spec)
+
+    def _compare_greater_than_equal(self, prospective, spec):
+        return prospective >= self._coerce_version(spec)
+
+    def _compare_less_than(self, prospective, spec):
+        return prospective < self._coerce_version(spec)
+
+    def _compare_greater_than(self, prospective, spec):
+        return prospective > self._coerce_version(spec)
+
+
+def _require_version_compare(fn):
+    @functools.wraps(fn)
+    def wrapped(self, prospective, spec):
+        if not isinstance(prospective, Version):
+            return False
+        return fn(self, prospective, spec)
+
+    return wrapped
+
+
+class Specifier(_IndividualSpecifier):
+
+    _regex_str = r"""
+        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+        (?P<version>
+            (?:
+                # The identity operators allow for an escape hatch that will
+                # do an exact string match of the version you wish to install.
+                # This will not be parsed by PEP 440 and we cannot determine
+                # any semantic meaning from it. This operator is discouraged
+                # but included entirely as an escape hatch.
+                (?<====)  # Only match for the identity operator
+                \s*
+                [^\s]*    # We just match everything, except for whitespace
+                          # since we are only testing for strict identity.
+            )
+            |
+            (?:
+                # The (non)equality operators allow for wild card and local
+                # versions to be specified so we have to define these two
+                # operators separately to enable that.
+                (?<===|!=)            # Only match for equals and not equals
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+
+                # You cannot use a wild card and a dev or local version
+                # together so group them with a | and make them optional.
+                (?:
+                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
+                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+                    |
+                    \.\*  # Wild card syntax of .*
+                )?
+            )
+            |
+            (?:
+                # The compatible operator requires at least two digits in the
+                # release segment.
+                (?<=~=)               # Only match for the compatible operator
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+            |
+            (?:
+                # All other operators only allow a subset of what the
+                # (non)equality operators do. Specifically they do not allow
+                # local versions to be specified nor do they allow the prefix
+                # matching wild cards.
+                (?<!==|!=|~=)         # We have special cases for these
+                                      # operators so we want to make sure they
+                                      # don't match here.
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+        )
+        """
+
+    _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    _operators = {
+        "~=": "compatible",
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+        "===": "arbitrary",
+    }
+
+    @_require_version_compare
+    def _compare_compatible(self, prospective, spec):
+        # Compatible releases have an equivalent combination of >= and ==.
+        # That is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+        # implement this in terms of the other specifiers instead of
+        # implementing it ourselves. The only thing we need to do is construct
+        # the other specifiers.
+
+        # We want everything but the last item in the version, but we want to
+        # ignore post and dev releases and we want to treat the pre-release as
+        # its own separate segment.
+        prefix = ".".join(
+            list(
+                itertools.takewhile(
+                    lambda x: (not x.startswith("post") and not x.startswith("dev")),
+                    _version_split(spec),
+                )
+            )[:-1]
+        )
+
+        # Add the prefix notation to the end of our string
+        prefix += ".*"
+
+        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+            prospective, prefix
+        )
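+    # A sketch, not part of the vendored module: for Specifier("~=2.2"),
+    # ``prefix`` above becomes "2.*", so the check is ``>=2.2`` and
+    # ``==2.*``; the takewhile() ensures that "~=2.2.post3" still expands
+    # to ``>=2.2.post3`` and ``==2.*``.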
+
+    @_require_version_compare
+    def _compare_equal(self, prospective, spec):
+        # We need special logic to handle prefix matching
+        if spec.endswith(".*"):
+            # In the case of prefix matching we want to ignore the local segment.
+            prospective = Version(prospective.public)
+            # Split the spec out by dots, and pretend that there is an implicit
+            # dot in between a release segment and a pre-release segment.
+            spec = _version_split(spec[:-2])  # Remove the trailing .*
+
+            # Split the prospective version out by dots, and pretend that there
+            # is an implicit dot in between a release segment and a pre-release
+            # segment.
+            prospective = _version_split(str(prospective))
+
+            # Shorten the prospective version to be the same length as the spec
+            # so that we can determine if the specifier is a prefix of the
+            # prospective version or not.
+            prospective = prospective[: len(spec)]
+
+            # Pad out our two sides with zeros so that they both equal the same
+            # length.
+            spec, prospective = _pad_version(spec, prospective)
+        else:
+            # Convert our spec string into a Version
+            spec = Version(spec)
+
+            # If the specifier does not have a local segment, then we want to
+            # act as if the prospective version also does not have a local
+            # segment.
+            if not spec.local:
+                prospective = Version(prospective.public)
+
+        return prospective == spec
+
+    @_require_version_compare
+    def _compare_not_equal(self, prospective, spec):
+        return not self._compare_equal(prospective, spec)
+
+    @_require_version_compare
+    def _compare_less_than_equal(self, prospective, spec):
+        return prospective <= Version(spec)
+
+    @_require_version_compare
+    def _compare_greater_than_equal(self, prospective, spec):
+        return prospective >= Version(spec)
+
+    @_require_version_compare
+    def _compare_less_than(self, prospective, spec):
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec)
+
+        # Check to see if the prospective version is less than the spec
+        # version. If it's not we can short circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective < spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a pre-release version, we do not accept pre-release
+        # versions for the version mentioned in the specifier (e.g. <3.1 should
+        # not match 3.1.dev0, but should match 3.0.dev0).
+        if not spec.is_prerelease and prospective.is_prerelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that prospective version is both
+        # less than the spec version *and* it's not a pre-release of the same
+        # version in the spec.
+        return True
+
+    @_require_version_compare
+    def _compare_greater_than(self, prospective, spec):
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec)
+
+        # Check to see if the prospective version is greater than the spec
+        # version. If it's not we can short circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective > spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a post-release version, we do not accept
+        # post-release versions for the version mentioned in the specifier
+        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+        if not spec.is_postrelease and prospective.is_postrelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # Ensure that we do not allow a local version of the version mentioned
+        # in the specifier, which is technically greater than, to match.
+        if prospective.local is not None:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that prospective version is both
+        # greater than the spec version *and* it's not a post-release or local
+        # version of the same version in the spec.
+        return True
+
+    def _compare_arbitrary(self, prospective, spec):
+        return str(prospective).lower() == str(spec).lower()
+
+    @property
+    def prereleases(self):
+        # If there is an explicit prereleases set for this, then we'll just
+        # blindly use that.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # Look at our specifier and determine if it uses an inclusive
+        # operator and, if it does, whether it includes an explicit
+        # prerelease.
+        operator, version = self._spec
+        if operator in ["==", ">=", "<=", "~=", "==="]:
+            # The == specifier can include a trailing .*; if it does, we
+            # want to remove it before parsing.
+            if operator == "==" and version.endswith(".*"):
+                version = version[:-2]
+
+            # Parse the version, and if it is a pre-release then this
+            # specifier allows pre-releases.
+            if parse(version).is_prerelease:
+                return True
+
+        return False
+
+    @prereleases.setter
+    def prereleases(self, value):
+        self._prereleases = value
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version):
+    result = []
+    for item in version.split("."):
+        match = _prefix_regex.search(item)
+        if match:
+            result.extend(match.groups())
+        else:
+            result.append(item)
+    return result
+
+
+def _pad_version(left, right):
+    left_split, right_split = [], []
+
+    # Get the release segment of our versions
+    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+    # Get the rest of our versions
+    left_split.append(left[len(left_split[0]) :])
+    right_split.append(right[len(right_split[0]) :])
+
+    # Insert our padding
+    left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+    right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+    return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
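+
+# A sketch, not part of the vendored module: the padding only equalizes the
+# numeric release prefixes, e.g.
+#     _pad_version(["1", "2"], ["1", "2", "0", "dev1"])
+#     -> (["1", "2", "0"], ["1", "2", "0", "dev1"])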
+
+
+class SpecifierSet(BaseSpecifier):
+    def __init__(self, specifiers="", prereleases=None):
+        # Split on , to break each individual specifier into its own item, and
+        # strip each item to remove leading/trailing whitespace.
+        specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+        # Parse each individual specifier, attempting first to make it a
+        # Specifier and falling back to a LegacySpecifier.
+        parsed = set()
+        for specifier in specifiers:
+            try:
+                parsed.add(Specifier(specifier))
+            except InvalidSpecifier:
+                parsed.add(LegacySpecifier(specifier))
+
+        # Turn our parsed specifiers into a frozen set and save them for later.
+        self._specs = frozenset(parsed)
+
+        # Store our prereleases value so we can use it later to determine if
+        # we accept prereleases or not.
+        self._prereleases = prereleases
+
+    def __repr__(self):
+        pre = (
+            ", prereleases={0!r}".format(self.prereleases)
+            if self._prereleases is not None
+            else ""
+        )
+
+        return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
+
+    def __str__(self):
+        return ",".join(sorted(str(s) for s in self._specs))
+
+    def __hash__(self):
+        return hash(self._specs)
+
+    def __and__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        specifier = SpecifierSet()
+        specifier._specs = frozenset(self._specs | other._specs)
+
+        if self._prereleases is None and other._prereleases is not None:
+            specifier._prereleases = other._prereleases
+        elif self._prereleases is not None and other._prereleases is None:
+            specifier._prereleases = self._prereleases
+        elif self._prereleases == other._prereleases:
+            specifier._prereleases = self._prereleases
+        else:
+            raise ValueError(
+                "Cannot combine SpecifierSets with True and False prerelease "
+                "overrides."
+            )
+
+        return specifier
+
+    def __eq__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif isinstance(other, _IndividualSpecifier):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs == other._specs
+
+    def __ne__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif isinstance(other, _IndividualSpecifier):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs != other._specs
+
+    def __len__(self):
+        return len(self._specs)
+
+    def __iter__(self):
+        return iter(self._specs)
+
+    @property
+    def prereleases(self):
+        # If we have been given an explicit prerelease modifier, then we'll
+        # pass that through here.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # If we don't have any specifiers, and we don't have a forced value,
+        # then we'll just return None since we don't know if this should have
+        # pre-releases or not.
+        if not self._specs:
+            return None
+
+        # Otherwise we'll see if any of the given specifiers accept
+        # prereleases, if any of them do we'll return True, otherwise False.
+        return any(s.prereleases for s in self._specs)
+
+    @prereleases.setter
+    def prereleases(self, value):
+        self._prereleases = value
+
+    def __contains__(self, item):
+        return self.contains(item)
+
+    def contains(self, item, prereleases=None):
+        # Ensure that our item is a Version or LegacyVersion instance.
+        if not isinstance(item, (LegacyVersion, Version)):
+            item = parse(item)
+
+        # Determine if we're forcing a prerelease or not; if we're not forcing
+        # one for this particular contains call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # We can determine if we're going to allow pre-releases by looking to
+        # see if any of the underlying items supports them. If none of them do
+        # and this item is a pre-release then we do not allow it and we can
+        # short circuit that here.
+        # Note: This means that 1.0.dev1 would not be contained in something
+        #       like >=1.0; however, it would be in >=1.0.dev1,>0.0.dev0,
+        #       since those specifiers themselves include a prerelease.
+        if not prereleases and item.is_prerelease:
+            return False
+
+        # We simply dispatch to the underlying specs here to make sure that the
+        # given version is contained within all of them.
+        # Note: This use of all() here means that an empty set of specifiers
+        #       will always return True, this is an explicit design decision.
+        return all(s.contains(item, prereleases=prereleases) for s in self._specs)
+
+    def filter(self, iterable, prereleases=None):
+        # Determine if we're forcing a prerelease or not; if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # If we have any specifiers, then we want to wrap our iterable in the
+        # filter method for each one, this will act as a logical AND amongst
+        # each specifier.
+        if self._specs:
+            for spec in self._specs:
+                iterable = spec.filter(iterable, prereleases=bool(prereleases))
+            return iterable
+        # If we do not have any specifiers, then we need to have a rough filter
+        # which will filter out any pre-releases, unless there are no final
+        # releases, and which will filter out LegacyVersion in general.
+        else:
+            filtered = []
+            found_prereleases = []
+
+            for item in iterable:
+                # Ensure that we have some kind of Version class for this item.
+                if not isinstance(item, (LegacyVersion, Version)):
+                    parsed_version = parse(item)
+                else:
+                    parsed_version = item
+
+                # Filter out any item which is parsed as a LegacyVersion
+                if isinstance(parsed_version, LegacyVersion):
+                    continue
+
+                # Store any item which is a pre-release for later unless we've
+                # already found a final version or we are accepting prereleases
+                if parsed_version.is_prerelease and not prereleases:
+                    if not filtered:
+                        found_prereleases.append(item)
+                else:
+                    filtered.append(item)
+
+            # If we've found no items except for pre-releases, then we'll go
+            # ahead and use the pre-releases
+            if not filtered and found_prereleases and prereleases is None:
+                return found_prereleases
+
+            return filtered
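+
+
+if __name__ == "__main__":  # pragma: no cover
+    # A hedged demo, not part of the vendored module: membership and
+    # filtering with a SpecifierSet; prereleases are excluded unless a
+    # specifier (or the caller) opts in.
+    s = SpecifierSet(">=1.0,!=1.1")
+    print("1.2" in s)  # True
+    print("1.1" in s)  # False
+    print(list(s.filter(["1.0", "1.1", "1.2", "1.3.dev1"])))  # ['1.0', '1.2']
+    print(list(s.filter(["1.3.dev1"], prereleases=True)))     # ['1.3.dev1']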
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/utils.py b/lib/python3.7/site-packages/pip/_vendor/packaging/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..88418786933b8bc5f6179b8e191f60f79efd7074
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/packaging/utils.py
@@ -0,0 +1,57 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import re
+
+from .version import InvalidVersion, Version
+
+
+_canonicalize_regex = re.compile(r"[-_.]+")
+
+
+def canonicalize_name(name):
+    # This is taken from PEP 503.
+    return _canonicalize_regex.sub("-", name).lower()
+
+
+def canonicalize_version(version):
+    """
+    This is very similar to Version.__str__, but has one subtle difference
+    in the way it handles the release segment.
+    """
+
+    try:
+        version = Version(version)
+    except InvalidVersion:
+        # Legacy versions cannot be normalized
+        return version
+
+    parts = []
+
+    # Epoch
+    if version.epoch != 0:
+        parts.append("{0}!".format(version.epoch))
+
+    # Release segment
+    # NB: This strips trailing '.0's to normalize
+    parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release)))
+
+    # Pre-release
+    if version.pre is not None:
+        parts.append("".join(str(x) for x in version.pre))
+
+    # Post-release
+    if version.post is not None:
+        parts.append(".post{0}".format(version.post))
+
+    # Development release
+    if version.dev is not None:
+        parts.append(".dev{0}".format(version.dev))
+
+    # Local version segment
+    if version.local is not None:
+        parts.append("+{0}".format(version.local))
+
+    return "".join(parts)
diff --git a/lib/python3.7/site-packages/pip/_vendor/packaging/version.py b/lib/python3.7/site-packages/pip/_vendor/packaging/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..95157a1f78c26829ffbe1bd2463f7735b636d16f
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/packaging/version.py
@@ -0,0 +1,420 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import collections
+import itertools
+import re
+
+from ._structures import Infinity
+
+
+__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+
+
+_Version = collections.namedtuple(
+    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+
+
+def parse(version):
+    """
+    Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object, depending on whether the given version
+    is a valid PEP 440 version or a legacy version.
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
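+
+
+# For example (a sketch, not part of the vendored module):
+#     parse("1.0.post1")    -> Version('1.0.post1')            (valid PEP 440)
+#     parse("french toast") -> LegacyVersion('french toast')   (fallback)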
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found; users should refer to PEP 440.
+    """
+
+
+class _BaseVersion(object):
+    def __hash__(self):
+        return hash(self._key)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda s, o: s < o)
+
+    def __le__(self, other):
+        return self._compare(other, lambda s, o: s <= o)
+
+    def __eq__(self, other):
+        return self._compare(other, lambda s, o: s == o)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda s, o: s >= o)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda s, o: s > o)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda s, o: s != o)
+
+    def _compare(self, other, method):
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+    def __init__(self, version):
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+    def __str__(self):
+        return self._version
+
+    def __repr__(self):
+        return "<LegacyVersion({0})>".format(repr(str(self)))
+
+    @property
+    def public(self):
+        return self._version
+
+    @property
+    def base_version(self):
+        return self._version
+
+    @property
+    def epoch(self):
+        return -1
+
+    @property
+    def release(self):
+        return None
+
+    @property
+    def pre(self):
+        return None
+
+    @property
+    def post(self):
+        return None
+
+    @property
+    def dev(self):
+        return None
+
+    @property
+    def local(self):
+        return None
+
+    @property
+    def is_prerelease(self):
+        return False
+
+    @property
+    def is_postrelease(self):
+        return False
+
+    @property
+    def is_devrelease(self):
+        return False
+
+
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+_legacy_version_replacement_map = {
+    "pre": "c",
+    "preview": "c",
+    "-": "final-",
+    "rc": "c",
+    "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version):
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+    # greater than or equal to 0. This will effectively sort the LegacyVersion,
+    # which uses the de facto standard originally implemented by setuptools,
+    # before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from setuptools' pkg_resources.parse_version, prior
+    # to its adoption of the packaging library.
+    parts = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+    parts = tuple(parts)
+
+    return epoch, parts
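+
+
+# A sketch, not part of the vendored module: under this key,
+# "1.0" > "1.0rc1" > "1.0.dev1", and every legacy key (epoch -1) sorts
+# before any PEP 440 key (epoch >= 0).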
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return "<Version({0})>".format(repr(str(self)))
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append("{0}!".format(self.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(".post{0}".format(self.post))
+
+        # Development release
+        if self.dev is not None:
+            parts.append(".dev{0}".format(self.dev))
+
+        # Local version segment
+        if self.local is not None:
+            parts.append("+{0}".format(self.local))
+
+        return "".join(parts)
+
+    @property
+    def epoch(self):
+        return self._version.epoch
+
+    @property
+    def release(self):
+        return self._version.release
+
+    @property
+    def pre(self):
+        return self._version.pre
+
+    @property
+    def post(self):
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self):
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self):
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append("{0}!".format(self.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self):
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self):
+        return self.post is not None
+
+    @property
+    def is_devrelease(self):
+        return self.dev is not None
+
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but no letter, the version
+        # is using the implicit post release syntax (e.g. 1.0-1).
+        letter = "post"
+
+        return letter, int(number)
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now
+    # leading zeros until we come to something non-zero, then re-reverse the
+    # rest back into the correct order and use that tuple as our sorting key.
+    release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP 440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple((i, "") if isinstance(i, int) else (-Infinity, i) for i in local)
+
+    return epoch, release, pre, post, dev, local
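
A minimal sketch (commentary, not part of the patch) of how the Version class
above sorts per PEP 440: dev releases before pre-releases, pre-releases before
finals, finals before post releases and local versions. The import path assumes
this vendored layout:

    from pip._vendor.packaging.version import Version

    assert Version("1.0.dev0") < Version("1.0a1") < Version("1.0rc1") < Version("1.0")
    assert Version("1.0") < Version("1.0.post1") < Version("1.1")
    assert Version("1.0") < Version("1.0+local.1")  # local segment sorts after
    assert Version("1.0") == Version("1.0.0")       # trailing zeros are stripped
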
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/__init__.py b/lib/python3.7/site-packages/pip/_vendor/pep517/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c1a098f78cedaa99049c71c96c778d8a84ac3a1
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pep517/__init__.py
@@ -0,0 +1,4 @@
+"""Wrappers to build Python packages using PEP 517 hooks
+"""
+
+__version__ = '0.5.0'
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37bd539ad6bba9f6e78f72d13e544f51c27b9840
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/_in_process.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/_in_process.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8978f514d203c259abfa4b71f780ba54e18937ec
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/_in_process.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/build.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/build.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62e623bd54939fb88cc5c0a4d93c30b894e56722
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/build.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/check.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/check.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c8d58906f21fed6094b0bedf79db3dd3efe8cd1
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/check.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/colorlog.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/colorlog.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..af7855d971be32a9038b161da802e79ee30213f3
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/colorlog.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/compat.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..063e6ecc1b454abce5d49706f876ef4c97e766da
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/envbuild.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/envbuild.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..42f532e37345671d7fe652013aac1b8e525d9a93
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/envbuild.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/wrappers.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/wrappers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a9bdad8857eb36f19abbc5828d6fb242b305fcd
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pep517/__pycache__/wrappers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/_in_process.py b/lib/python3.7/site-packages/pip/_vendor/pep517/_in_process.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6524b660a89853ac078bb853bedaf61d5eadb77
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pep517/_in_process.py
@@ -0,0 +1,207 @@
+"""This is invoked in a subprocess to call the build backend hooks.
+
+It expects:
+- Command line args: hook_name, control_dir
+- Environment variable: PEP517_BUILD_BACKEND=entry.point:spec
+- control_dir/input.json:
+  - {"kwargs": {...}}
+
+Results:
+- control_dir/output.json
+  - {"return_val": ...}
+"""
+from glob import glob
+from importlib import import_module
+import os
+from os.path import join as pjoin
+import re
+import shutil
+import sys
+
+# This is run as a script, not a module, so it can't do a relative import
+import compat
+
+
+class BackendUnavailable(Exception):
+    """Raised if we cannot import the backend"""
+
+
+def _build_backend():
+    """Find and load the build backend"""
+    ep = os.environ['PEP517_BUILD_BACKEND']
+    mod_path, _, obj_path = ep.partition(':')
+    try:
+        obj = import_module(mod_path)
+    except ImportError:
+        raise BackendUnavailable
+    if obj_path:
+        for path_part in obj_path.split('.'):
+            obj = getattr(obj, path_part)
+    return obj
+
+
+def get_requires_for_build_wheel(config_settings):
+    """Invoke the optional get_requires_for_build_wheel hook
+
+    Returns [] if the hook is not defined.
+    """
+    backend = _build_backend()
+    try:
+        hook = backend.get_requires_for_build_wheel
+    except AttributeError:
+        return []
+    else:
+        return hook(config_settings)
+
+
+def prepare_metadata_for_build_wheel(metadata_directory, config_settings):
+    """Invoke optional prepare_metadata_for_build_wheel
+
+    Implements a fallback by building a wheel if the hook isn't defined.
+    """
+    backend = _build_backend()
+    try:
+        hook = backend.prepare_metadata_for_build_wheel
+    except AttributeError:
+        return _get_wheel_metadata_from_wheel(backend, metadata_directory,
+                                              config_settings)
+    else:
+        return hook(metadata_directory, config_settings)
+
+
+WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL'
+
+
+def _dist_info_files(whl_zip):
+    """Identify the .dist-info folder inside a wheel ZipFile."""
+    res = []
+    for path in whl_zip.namelist():
+        m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path)
+        if m:
+            res.append(path)
+    if res:
+        return res
+    raise Exception("No .dist-info folder found in wheel")
+
+
+def _get_wheel_metadata_from_wheel(
+        backend, metadata_directory, config_settings):
+    """Build a wheel and extract the metadata from it.
+
+    Fallback for when the build backend does not
+    define the 'prepare_metadata_for_build_wheel' hook.
+    """
+    from zipfile import ZipFile
+    whl_basename = backend.build_wheel(metadata_directory, config_settings)
+    with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'):
+        pass  # Touch marker file
+
+    whl_file = os.path.join(metadata_directory, whl_basename)
+    with ZipFile(whl_file) as zipf:
+        dist_info = _dist_info_files(zipf)
+        zipf.extractall(path=metadata_directory, members=dist_info)
+    return dist_info[0].split('/')[0]
+
+
+def _find_already_built_wheel(metadata_directory):
+    """Check for a wheel already built during the get_wheel_metadata hook.
+    """
+    if not metadata_directory:
+        return None
+    metadata_parent = os.path.dirname(metadata_directory)
+    if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)):
+        return None
+
+    whl_files = glob(os.path.join(metadata_parent, '*.whl'))
+    if not whl_files:
+        print('Found wheel built marker, but no .whl files')
+        return None
+    if len(whl_files) > 1:
+        print('Found multiple .whl files; unspecified behaviour. '
+              'Will call build_wheel.')
+        return None
+
+    # Exactly one .whl file
+    return whl_files[0]
+
+
+def build_wheel(wheel_directory, config_settings, metadata_directory=None):
+    """Invoke the mandatory build_wheel hook.
+
+    If a wheel was already built in the
+    prepare_metadata_for_build_wheel fallback, this
+    will copy it rather than rebuilding the wheel.
+    """
+    prebuilt_whl = _find_already_built_wheel(metadata_directory)
+    if prebuilt_whl:
+        shutil.copy2(prebuilt_whl, wheel_directory)
+        return os.path.basename(prebuilt_whl)
+
+    return _build_backend().build_wheel(wheel_directory, config_settings,
+                                        metadata_directory)
+
+
+def get_requires_for_build_sdist(config_settings):
+    """Invoke the optional get_requires_for_build_wheel hook
+
+    Returns [] if the hook is not defined.
+    """
+    backend = _build_backend()
+    try:
+        hook = backend.get_requires_for_build_sdist
+    except AttributeError:
+        return []
+    else:
+        return hook(config_settings)
+
+
+class _DummyException(Exception):
+    """Nothing should ever raise this exception"""
+
+
+class GotUnsupportedOperation(Exception):
+    """For internal use when backend raises UnsupportedOperation"""
+
+
+def build_sdist(sdist_directory, config_settings):
+    """Invoke the mandatory build_sdist hook."""
+    backend = _build_backend()
+    try:
+        return backend.build_sdist(sdist_directory, config_settings)
+    except getattr(backend, 'UnsupportedOperation', _DummyException):
+        raise GotUnsupportedOperation
+
+
+HOOK_NAMES = {
+    'get_requires_for_build_wheel',
+    'prepare_metadata_for_build_wheel',
+    'build_wheel',
+    'get_requires_for_build_sdist',
+    'build_sdist',
+}
+
+
+def main():
+    if len(sys.argv) < 3:
+        sys.exit("Needs args: hook_name, control_dir")
+    hook_name = sys.argv[1]
+    control_dir = sys.argv[2]
+    if hook_name not in HOOK_NAMES:
+        sys.exit("Unknown hook: %s" % hook_name)
+    hook = globals()[hook_name]
+
+    hook_input = compat.read_json(pjoin(control_dir, 'input.json'))
+
+    json_out = {'unsupported': False, 'return_val': None}
+    try:
+        json_out['return_val'] = hook(**hook_input['kwargs'])
+    except BackendUnavailable:
+        json_out['no_backend'] = True
+    except GotUnsupportedOperation:
+        json_out['unsupported'] = True
+
+    compat.write_json(json_out, pjoin(control_dir, 'output.json'), indent=2)
+
+
+if __name__ == '__main__':
+    main()
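
A hedged sketch of the control-directory protocol this script implements; the
backend name and paths are illustrative, and it assumes it is run from a
project checkout with _in_process.py alongside:

    import json, os, subprocess, sys, tempfile

    control_dir = tempfile.mkdtemp()
    with open(os.path.join(control_dir, 'input.json'), 'w') as f:
        json.dump({'kwargs': {'config_settings': {}}}, f)

    subprocess.check_call(
        [sys.executable, '_in_process.py',
         'get_requires_for_build_wheel', control_dir],
        env=dict(os.environ, PEP517_BUILD_BACKEND='setuptools.build_meta'))

    with open(os.path.join(control_dir, 'output.json')) as f:
        print(json.load(f)['return_val'])
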
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/build.py b/lib/python3.7/site-packages/pip/_vendor/pep517/build.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac6c9495caa9385386a3b3c84b32e15c5a1dba28
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pep517/build.py
@@ -0,0 +1,108 @@
+"""Build a project using PEP 517 hooks.
+"""
+import argparse
+import logging
+import os
+import contextlib
+from pip._vendor import pytoml
+import shutil
+import errno
+import tempfile
+
+from .envbuild import BuildEnvironment
+from .wrappers import Pep517HookCaller
+
+log = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def tempdir():
+    td = tempfile.mkdtemp()
+    try:
+        yield td
+    finally:
+        shutil.rmtree(td)
+
+
+def _do_build(hooks, env, dist, dest):
+    get_requires_name = 'get_requires_for_build_{dist}'.format(**locals())
+    get_requires = getattr(hooks, get_requires_name)
+    reqs = get_requires({})
+    log.info('Got build requires: %s', reqs)
+
+    env.pip_install(reqs)
+    log.info('Installed dynamic build dependencies')
+
+    with tempdir() as td:
+        log.info('Trying to build %s in %s', dist, td)
+        build_name = 'build_{dist}'.format(**locals())
+        build = getattr(hooks, build_name)
+        filename = build(td, {})
+        source = os.path.join(td, filename)
+        shutil.move(source, os.path.join(dest, os.path.basename(filename)))
+
+
+def mkdir_p(*args, **kwargs):
+    """Like `mkdir`, but does not raise an exception if the
+    directory already exists.
+    """
+    try:
+        return os.mkdir(*args, **kwargs)
+    except OSError as exc:
+        if exc.errno != errno.EEXIST:
+            raise
+
+
+def build(source_dir, dist, dest=None):
+    pyproject = os.path.join(source_dir, 'pyproject.toml')
+    dest = os.path.join(source_dir, dest or 'dist')
+    mkdir_p(dest)
+
+    with open(pyproject) as f:
+        pyproject_data = pytoml.load(f)
+    # Ensure the mandatory data can be loaded
+    buildsys = pyproject_data['build-system']
+    requires = buildsys['requires']
+    backend = buildsys['build-backend']
+
+    hooks = Pep517HookCaller(source_dir, backend)
+
+    with BuildEnvironment() as env:
+        env.pip_install(requires)
+        _do_build(hooks, env, dist, dest)
+
+
+parser = argparse.ArgumentParser()
+parser.add_argument(
+    'source_dir',
+    help="A directory containing pyproject.toml",
+)
+parser.add_argument(
+    '--binary', '-b',
+    action='store_true',
+    default=False,
+    help="Build a wheel (binary distribution)",
+)
+parser.add_argument(
+    '--source', '-s',
+    action='store_true',
+    default=False,
+    help="Build an sdist (source distribution)",
+)
+parser.add_argument(
+    '--out-dir', '-o',
+    help="Destination in which to save the builds relative to source dir",
+)
+
+
+def main(args):
+    # determine which dists to build
+    dists = list(filter(None, (
+        'sdist' if args.source or not args.binary else None,
+        'wheel' if args.binary or not args.source else None,
+    )))
+
+    for dist in dists:
+        build(args.source_dir, dist, args.out_dir)
+
+
+if __name__ == '__main__':
+    main(parser.parse_args())
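
For illustration (the project path is hypothetical), this module can be driven
from the command line in the standalone pep517 distribution, or called
directly:

    # CLI: build both an sdist and a wheel into <project>/dist
    #   python -m pep517.build /path/to/project
    from pep517.build import build

    build('/path/to/project', 'wheel')         # writes into /path/to/project/dist
    build('/path/to/project', 'sdist', 'out')  # writes into /path/to/project/out
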
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/check.py b/lib/python3.7/site-packages/pip/_vendor/pep517/check.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4cdc6bec979fc28acba9d2f7c41923548fe253f
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pep517/check.py
@@ -0,0 +1,202 @@
+"""Check a project and backend by attempting to build using PEP 517 hooks.
+"""
+import argparse
+import logging
+import os
+from os.path import isfile, join as pjoin
+from pip._vendor.pytoml import TomlError, load as toml_load
+import shutil
+from subprocess import CalledProcessError
+import sys
+import tarfile
+from tempfile import mkdtemp
+import zipfile
+
+from .colorlog import enable_colourful_output
+from .envbuild import BuildEnvironment
+from .wrappers import Pep517HookCaller
+
+log = logging.getLogger(__name__)
+
+
+def check_build_sdist(hooks, build_sys_requires):
+    with BuildEnvironment() as env:
+        try:
+            env.pip_install(build_sys_requires)
+            log.info('Installed static build dependencies')
+        except CalledProcessError:
+            log.error('Failed to install static build dependencies')
+            return False
+
+        try:
+            reqs = hooks.get_requires_for_build_sdist({})
+            log.info('Got build requires: %s', reqs)
+        except Exception:
+            log.error('Failure in get_requires_for_build_sdist', exc_info=True)
+            return False
+
+        try:
+            env.pip_install(reqs)
+            log.info('Installed dynamic build dependencies')
+        except CalledProcessError:
+            log.error('Failed to install dynamic build dependencies')
+            return False
+
+        td = mkdtemp()
+        log.info('Trying to build sdist in %s', td)
+        try:
+            try:
+                filename = hooks.build_sdist(td, {})
+                log.info('build_sdist returned %r', filename)
+            except Exception:
+                log.info('Failure in build_sdist', exc_info=True)
+                return False
+
+            if not filename.endswith('.tar.gz'):
+                log.error(
+                    "Filename %s doesn't have .tar.gz extension", filename)
+                return False
+
+            path = pjoin(td, filename)
+            if isfile(path):
+                log.info("Output file %s exists", path)
+            else:
+                log.error("Output file %s does not exist", path)
+                return False
+
+            if tarfile.is_tarfile(path):
+                log.info("Output file is a tar file")
+            else:
+                log.error("Output file is not a tar file")
+                return False
+
+        finally:
+            shutil.rmtree(td)
+
+        return True
+
+
+def check_build_wheel(hooks, build_sys_requires):
+    with BuildEnvironment() as env:
+        try:
+            env.pip_install(build_sys_requires)
+            log.info('Installed static build dependencies')
+        except CalledProcessError:
+            log.error('Failed to install static build dependencies')
+            return False
+
+        try:
+            reqs = hooks.get_requires_for_build_wheel({})
+            log.info('Got build requires: %s', reqs)
+        except Exception:
+            log.error('Failure in get_requires_for_build_wheel', exc_info=True)
+            return False
+
+        try:
+            env.pip_install(reqs)
+            log.info('Installed dynamic build dependencies')
+        except CalledProcessError:
+            log.error('Failed to install dynamic build dependencies')
+            return False
+
+        td = mkdtemp()
+        log.info('Trying to build wheel in %s', td)
+        try:
+            try:
+                filename = hooks.build_wheel(td, {})
+                log.info('build_wheel returned %r', filename)
+            except Exception:
+                log.info('Failure in build_wheel', exc_info=True)
+                return False
+
+            if not filename.endswith('.whl'):
+                log.error("Filename %s doesn't have .whl extension", filename)
+                return False
+
+            path = pjoin(td, filename)
+            if isfile(path):
+                log.info("Output file %s exists", path)
+            else:
+                log.error("Output file %s does not exist", path)
+                return False
+
+            if zipfile.is_zipfile(path):
+                log.info("Output file is a zip file")
+            else:
+                log.error("Output file is not a zip file")
+                return False
+
+        finally:
+            shutil.rmtree(td)
+
+        return True
+
+
+def check(source_dir):
+    pyproject = pjoin(source_dir, 'pyproject.toml')
+    if isfile(pyproject):
+        log.info('Found pyproject.toml')
+    else:
+        log.error('Missing pyproject.toml')
+        return False
+
+    try:
+        with open(pyproject) as f:
+            pyproject_data = toml_load(f)
+        # Ensure the mandatory data can be loaded
+        buildsys = pyproject_data['build-system']
+        requires = buildsys['requires']
+        backend = buildsys['build-backend']
+        log.info('Loaded pyproject.toml')
+    except (TomlError, KeyError):
+        log.error("Invalid pyproject.toml", exc_info=True)
+        return False
+
+    hooks = Pep517HookCaller(source_dir, backend)
+
+    sdist_ok = check_build_sdist(hooks, requires)
+    wheel_ok = check_build_wheel(hooks, requires)
+
+    if not sdist_ok:
+        log.warning('Sdist checks failed; scroll up to see')
+    if not wheel_ok:
+        log.warning('Wheel checks failed')
+
+    return sdist_ok and wheel_ok
+
+
+def main(argv=None):
+    ap = argparse.ArgumentParser()
+    ap.add_argument(
+        'source_dir',
+        help="A directory containing pyproject.toml")
+    args = ap.parse_args(argv)
+
+    enable_colourful_output()
+
+    ok = check(args.source_dir)
+
+    if ok:
+        print(ansi('Checks passed', 'green'))
+    else:
+        print(ansi('Checks failed', 'red'))
+        sys.exit(1)
+
+
+ansi_codes = {
+    'reset': '\x1b[0m',
+    'bold': '\x1b[1m',
+    'red': '\x1b[31m',
+    'green': '\x1b[32m',
+}
+
+
+def ansi(s, attr):
+    if os.name != 'nt' and sys.stdout.isatty():
+        return ansi_codes[attr] + str(s) + ansi_codes['reset']
+    else:
+        return str(s)
+
+
+if __name__ == '__main__':
+    main()
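
Usage sketch (standalone pep517 distribution, hypothetical project path); the
command prints "Checks failed" in red and exits non-zero when a build fails:

    python -m pep517.check /path/to/project
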
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/colorlog.py b/lib/python3.7/site-packages/pip/_vendor/pep517/colorlog.py
new file mode 100644
index 0000000000000000000000000000000000000000..69c8a59d3d4e038450aa37ec5b801914b817e675
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pep517/colorlog.py
@@ -0,0 +1,115 @@
+"""Nicer log formatting with colours.
+
+Code copied from Tornado, Apache licensed.
+"""
+# Copyright 2012 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import sys
+
+try:
+    import curses
+except ImportError:
+    curses = None
+
+
+def _stderr_supports_color():
+    color = False
+    if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
+        try:
+            curses.setupterm()
+            if curses.tigetnum("colors") > 0:
+                color = True
+        except Exception:
+            pass
+    return color
+
+
+class LogFormatter(logging.Formatter):
+    """Log formatter with colour support
+    """
+    DEFAULT_COLORS = {
+        logging.INFO: 2,  # Green
+        logging.WARNING: 3,  # Yellow
+        logging.ERROR: 1,  # Red
+        logging.CRITICAL: 1,
+    }
+
+    def __init__(self, color=True, datefmt=None):
+        r"""
+        :arg bool color: Enables color support.
+        :arg string fmt: Log message format.
+        It will be applied to the attributes dict of log records. The
+        text between ``%(color)s`` and ``%(end_color)s`` will be colored
+        depending on the level if color support is on.
+        :arg dict colors: color mappings from logging level to terminal color
+        code
+        :arg string datefmt: Datetime format.
+        Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
+        .. versionchanged:: 3.2
+        Added ``fmt`` and ``datefmt`` arguments.
+        """
+        logging.Formatter.__init__(self, datefmt=datefmt)
+        self._colors = {}
+        if color and _stderr_supports_color():
+            # The curses module has some str/bytes confusion in
+            # python3. Until version 3.2.3, most methods return
+            # bytes, but only accept strings. In addition, we want to
+            # output these strings with the logging module, which
+            # works with unicode strings. The explicit calls to
+            # unicode() below are harmless in python2 but will do the
+            # right conversion in python 3.
+            fg_color = (curses.tigetstr("setaf") or
+                        curses.tigetstr("setf") or "")
+            if (3, 0) < sys.version_info < (3, 2, 3):
+                fg_color = str(fg_color, "ascii")
+
+            for levelno, code in self.DEFAULT_COLORS.items():
+                self._colors[levelno] = str(
+                    curses.tparm(fg_color, code), "ascii")
+            self._normal = str(curses.tigetstr("sgr0"), "ascii")
+
+            scr = curses.initscr()
+            self.termwidth = scr.getmaxyx()[1]
+            curses.endwin()
+        else:
+            self._normal = ''
+            # Default width is usually 80, but too wide is
+            # worse than too narrow
+            self.termwidth = 70
+
+    def formatMessage(self, record):
+        mlen = len(record.message)
+        right_text = '{initial}-{name}'.format(initial=record.levelname[0],
+                                               name=record.name)
+        if mlen + len(right_text) < self.termwidth:
+            space = ' ' * (self.termwidth - (mlen + len(right_text)))
+        else:
+            space = '  '
+
+        if record.levelno in self._colors:
+            start_color = self._colors[record.levelno]
+            end_color = self._normal
+        else:
+            start_color = end_color = ''
+
+        return record.message + space + start_color + right_text + end_color
+
+
+def enable_colourful_output(level=logging.INFO):
+    handler = logging.StreamHandler()
+    handler.setFormatter(LogFormatter())
+    logging.root.addHandler(handler)
+    logging.root.setLevel(level)
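
A small usage sketch (the import path assumes the standalone pep517 package):

    import logging
    from pep517.colorlog import enable_colourful_output

    enable_colourful_output(logging.DEBUG)
    # On a colour terminal this prints the message with a right-aligned,
    # green "I-demo" tag produced by LogFormatter.formatMessage.
    logging.getLogger('demo').info('building wheel')
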
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/compat.py b/lib/python3.7/site-packages/pip/_vendor/pep517/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..01c66fc7e41fa1a2ac2afaddecb5b17d6cadfc93
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pep517/compat.py
@@ -0,0 +1,23 @@
+"""Handle reading and writing JSON in UTF-8, on Python 3 and 2."""
+import json
+import sys
+
+if sys.version_info[0] >= 3:
+    # Python 3
+    def write_json(obj, path, **kwargs):
+        with open(path, 'w', encoding='utf-8') as f:
+            json.dump(obj, f, **kwargs)
+
+    def read_json(path):
+        with open(path, 'r', encoding='utf-8') as f:
+            return json.load(f)
+
+else:
+    # Python 2
+    def write_json(obj, path, **kwargs):
+        with open(path, 'wb') as f:
+            json.dump(obj, f, encoding='utf-8', **kwargs)
+
+    def read_json(path):
+        with open(path, 'rb') as f:
+            return json.load(f)
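
Round-trip sketch (the file name is illustrative):

    from pip._vendor.pep517 import compat

    compat.write_json({'kwargs': {}}, 'input.json', indent=2)
    assert compat.read_json('input.json') == {'kwargs': {}}
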
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/envbuild.py b/lib/python3.7/site-packages/pip/_vendor/pep517/envbuild.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7ac5f46f7c16d645d4573ab84dbceba5c33b920
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pep517/envbuild.py
@@ -0,0 +1,158 @@
+"""Build wheels/sdists by installing build deps to a temporary environment.
+"""
+
+import os
+import logging
+from pip._vendor import pytoml
+import shutil
+from subprocess import check_call
+import sys
+from sysconfig import get_paths
+from tempfile import mkdtemp
+
+from .wrappers import Pep517HookCaller
+
+log = logging.getLogger(__name__)
+
+
+def _load_pyproject(source_dir):
+    with open(os.path.join(source_dir, 'pyproject.toml')) as f:
+        pyproject_data = pytoml.load(f)
+    buildsys = pyproject_data['build-system']
+    return buildsys['requires'], buildsys['build-backend']
+
+
+class BuildEnvironment(object):
+    """Context manager to install build deps in a simple temporary environment
+
+    Based on code I wrote for pip, which is MIT licensed.
+    """
+    # Copyright (c) 2008-2016 The pip developers (see AUTHORS.txt file)
+    #
+    # Permission is hereby granted, free of charge, to any person obtaining
+    # a copy of this software and associated documentation files (the
+    # "Software"), to deal in the Software without restriction, including
+    # without limitation the rights to use, copy, modify, merge, publish,
+    # distribute, sublicense, and/or sell copies of the Software, and to
+    # permit persons to whom the Software is furnished to do so, subject to
+    # the following conditions:
+    #
+    # The above copyright notice and this permission notice shall be
+    # included in all copies or substantial portions of the Software.
+    #
+    # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+    # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+    # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+    # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+    # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+    # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+    # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+    path = None
+
+    def __init__(self, cleanup=True):
+        self._cleanup = cleanup
+
+    def __enter__(self):
+        self.path = mkdtemp(prefix='pep517-build-env-')
+        log.info('Temporary build environment: %s', self.path)
+
+        self.save_path = os.environ.get('PATH', None)
+        self.save_pythonpath = os.environ.get('PYTHONPATH', None)
+
+        install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix'
+        install_dirs = get_paths(install_scheme, vars={
+            'base': self.path,
+            'platbase': self.path,
+        })
+
+        scripts = install_dirs['scripts']
+        if self.save_path:
+            os.environ['PATH'] = scripts + os.pathsep + self.save_path
+        else:
+            os.environ['PATH'] = scripts + os.pathsep + os.defpath
+
+        if install_dirs['purelib'] == install_dirs['platlib']:
+            lib_dirs = install_dirs['purelib']
+        else:
+            lib_dirs = install_dirs['purelib'] + os.pathsep + \
+                install_dirs['platlib']
+        if self.save_pythonpath:
+            os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \
+                self.save_pythonpath
+        else:
+            os.environ['PYTHONPATH'] = lib_dirs
+
+        return self
+
+    def pip_install(self, reqs):
+        """Install dependencies into this env by calling pip in a subprocess"""
+        if not reqs:
+            return
+        log.info('Calling pip to install %s', reqs)
+        check_call([
+            sys.executable, '-m', 'pip', 'install', '--ignore-installed',
+            '--prefix', self.path] + list(reqs))
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        needs_cleanup = (
+            self._cleanup and
+            self.path is not None and
+            os.path.isdir(self.path)
+        )
+        if needs_cleanup:
+            shutil.rmtree(self.path)
+
+        if self.save_path is None:
+            os.environ.pop('PATH', None)
+        else:
+            os.environ['PATH'] = self.save_path
+
+        if self.save_pythonpath is None:
+            os.environ.pop('PYTHONPATH', None)
+        else:
+            os.environ['PYTHONPATH'] = self.save_pythonpath
+
+
+def build_wheel(source_dir, wheel_dir, config_settings=None):
+    """Build a wheel from a source directory using PEP 517 hooks.
+
+    :param str source_dir: Source directory containing pyproject.toml
+    :param str wheel_dir: Target directory to create wheel in
+    :param dict config_settings: Options to pass to build backend
+
+    This is a blocking function which will run pip in a subprocess to install
+    build requirements.
+    """
+    if config_settings is None:
+        config_settings = {}
+    requires, backend = _load_pyproject(source_dir)
+    hooks = Pep517HookCaller(source_dir, backend)
+
+    with BuildEnvironment() as env:
+        env.pip_install(requires)
+        reqs = hooks.get_requires_for_build_wheel(config_settings)
+        env.pip_install(reqs)
+        return hooks.build_wheel(wheel_dir, config_settings)
+
+
+def build_sdist(source_dir, sdist_dir, config_settings=None):
+    """Build an sdist from a source directory using PEP 517 hooks.
+
+    :param str source_dir: Source directory containing pyproject.toml
+    :param str sdist_dir: Target directory to place sdist in
+    :param dict config_settings: Options to pass to build backend
+
+    This is a blocking function which will run pip in a subprocess to install
+    build requirements.
+    """
+    if config_settings is None:
+        config_settings = {}
+    requires, backend = _load_pyproject(source_dir)
+    hooks = Pep517HookCaller(source_dir, backend)
+
+    with BuildEnvironment() as env:
+        env.pip_install(requires)
+        reqs = hooks.get_requires_for_build_sdist(config_settings)
+        env.pip_install(reqs)
+        return hooks.build_sdist(sdist_dir, config_settings)
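
Usage sketch for the two module-level helpers (paths are hypothetical); each
call creates a temporary environment, pip-installs the build requirements, and
returns the basename of the built artifact:

    from pip._vendor.pep517.envbuild import build_wheel, build_sdist

    whl_name = build_wheel('/path/to/project', '/tmp/wheels')
    sdist_name = build_sdist('/path/to/project', '/tmp/sdists')
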
diff --git a/lib/python3.7/site-packages/pip/_vendor/pep517/wrappers.py b/lib/python3.7/site-packages/pip/_vendor/pep517/wrappers.py
new file mode 100644
index 0000000000000000000000000000000000000000..b14b899150a07d9bf1d9551a739a52f2795ffbd2
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pep517/wrappers.py
@@ -0,0 +1,163 @@
+from contextlib import contextmanager
+import os
+from os.path import dirname, abspath, join as pjoin
+import shutil
+from subprocess import check_call
+import sys
+from tempfile import mkdtemp
+
+from . import compat
+
+_in_proc_script = pjoin(dirname(abspath(__file__)), '_in_process.py')
+
+
+@contextmanager
+def tempdir():
+    td = mkdtemp()
+    try:
+        yield td
+    finally:
+        shutil.rmtree(td)
+
+
+class BackendUnavailable(Exception):
+    """Will be raised if the backend cannot be imported in the hook process."""
+
+
+class UnsupportedOperation(Exception):
+    """May be raised by build_sdist if the backend indicates that it can't."""
+
+
+def default_subprocess_runner(cmd, cwd=None, extra_environ=None):
+    """The default method of calling the wrapper subprocess."""
+    env = os.environ.copy()
+    if extra_environ:
+        env.update(extra_environ)
+
+    check_call(cmd, cwd=cwd, env=env)
+
+
+class Pep517HookCaller(object):
+    """A wrapper around a source directory to be built with a PEP 517 backend.
+
+    source_dir : The path to the source directory, containing pyproject.toml.
+    backend : The build backend spec, as per PEP 517, from pyproject.toml.
+    """
+    def __init__(self, source_dir, build_backend):
+        self.source_dir = abspath(source_dir)
+        self.build_backend = build_backend
+        self._subprocess_runner = default_subprocess_runner
+
+    # TODO: Is this over-engineered? Maybe frontends only need to
+    #       set this when creating the wrapper, not on every call.
+    @contextmanager
+    def subprocess_runner(self, runner):
+        prev = self._subprocess_runner
+        self._subprocess_runner = runner
+        yield
+        self._subprocess_runner = prev
+
+    def get_requires_for_build_wheel(self, config_settings=None):
+        """Identify packages required for building a wheel
+
+        Returns a list of dependency specifications, e.g.:
+            ["wheel >= 0.25", "setuptools"]
+
+        This does not include requirements specified in pyproject.toml.
+        It returns the result of calling the equivalently named hook in a
+        subprocess.
+        """
+        return self._call_hook('get_requires_for_build_wheel', {
+            'config_settings': config_settings
+        })
+
+    def prepare_metadata_for_build_wheel(
+            self, metadata_directory, config_settings=None):
+        """Prepare a *.dist-info folder with metadata for this project.
+
+        Returns the name of the newly created folder.
+
+        If the build backend defines a hook with this name, it will be called
+        in a subprocess. If not, the backend will be asked to build a wheel,
+        and the dist-info extracted from that.
+        """
+        return self._call_hook('prepare_metadata_for_build_wheel', {
+            'metadata_directory': abspath(metadata_directory),
+            'config_settings': config_settings,
+        })
+
+    def build_wheel(
+            self, wheel_directory, config_settings=None,
+            metadata_directory=None):
+        """Build a wheel from this project.
+
+        Returns the name of the newly created file.
+
+        In general, this will call the 'build_wheel' hook in the backend.
+        However, if that was previously called by
+        'prepare_metadata_for_build_wheel', and the same metadata_directory is
+        used, the previously built wheel will be copied to wheel_directory.
+        """
+        if metadata_directory is not None:
+            metadata_directory = abspath(metadata_directory)
+        return self._call_hook('build_wheel', {
+            'wheel_directory': abspath(wheel_directory),
+            'config_settings': config_settings,
+            'metadata_directory': metadata_directory,
+        })
+
+    def get_requires_for_build_sdist(self, config_settings=None):
+        """Identify packages required for building a wheel
+
+        Returns a list of dependency specifications, e.g.:
+            ["setuptools >= 26"]
+
+        This does not include requirements specified in pyproject.toml.
+        It returns the result of calling the equivalently named hook in a
+        subprocess.
+        """
+        return self._call_hook('get_requires_for_build_sdist', {
+            'config_settings': config_settings
+        })
+
+    def build_sdist(self, sdist_directory, config_settings=None):
+        """Build an sdist from this project.
+
+        Returns the name of the newly created file.
+
+        This calls the 'build_sdist' backend hook in a subprocess.
+        """
+        return self._call_hook('build_sdist', {
+            'sdist_directory': abspath(sdist_directory),
+            'config_settings': config_settings,
+        })
+
+    def _call_hook(self, hook_name, kwargs):
+        # On Python 2, pytoml returns Unicode values (which is correct) but the
+        # environment passed to check_call needs to contain string values. We
+        # convert here by encoding using ASCII (the backend can only contain
+        # letters, digits and _, . and : characters, and will be used as a
+        # Python identifier, so non-ASCII content is wrong on Python 2 in
+        # any case).
+        if sys.version_info[0] == 2:
+            build_backend = self.build_backend.encode('ASCII')
+        else:
+            build_backend = self.build_backend
+
+        with tempdir() as td:
+            compat.write_json({'kwargs': kwargs}, pjoin(td, 'input.json'),
+                              indent=2)
+
+            # Run the hook in a subprocess
+            self._subprocess_runner(
+                [sys.executable, _in_proc_script, hook_name, td],
+                cwd=self.source_dir,
+                extra_environ={'PEP517_BUILD_BACKEND': build_backend}
+            )
+
+            data = compat.read_json(pjoin(td, 'output.json'))
+            if data.get('unsupported'):
+                raise UnsupportedOperation
+            if data.get('no_backend'):
+                raise BackendUnavailable
+            return data['return_val']
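
A sketch of driving the wrapper, including a custom runner (the backend spec
and paths are assumptions for illustration):

    import os
    import subprocess

    from pip._vendor.pep517.wrappers import Pep517HookCaller

    hooks = Pep517HookCaller('/path/to/project', 'setuptools.build_meta')
    print(hooks.get_requires_for_build_wheel())

    def quiet_runner(cmd, cwd=None, extra_environ=None):
        # Same signature as default_subprocess_runner, but captures output.
        env = dict(os.environ, **(extra_environ or {}))
        subprocess.check_output(cmd, cwd=cwd, env=env)

    with hooks.subprocess_runner(quiet_runner):
        whl_basename = hooks.build_wheel('/tmp/wheels')
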
diff --git a/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__init__.py b/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdd40de414991a0ac8b2819d386b1f372601375e
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__init__.py
@@ -0,0 +1,3286 @@
+# coding: utf-8
+"""
+Package resource API
+--------------------
+
+A resource is a logical file contained within a package, or a logical
+subdirectory thereof.  The package resource API expects resource names
+to have their path parts separated with ``/``, *not* whatever the local
+path separator is.  Do not use os.path operations to manipulate resource
+names being passed into the API.
+
+The package resource API is designed to work with normal filesystem packages,
+.egg files, and unpacked .egg files.  It can also work in a limited way with
+.zip files and with custom PEP 302 loaders that support the ``get_data()``
+method.
+"""
+
+from __future__ import absolute_import
+
+import sys
+import os
+import io
+import time
+import re
+import types
+import zipfile
+import zipimport
+import warnings
+import stat
+import functools
+import pkgutil
+import operator
+import platform
+import collections
+import plistlib
+import email.parser
+import errno
+import tempfile
+import textwrap
+import itertools
+import inspect
+import ntpath
+import posixpath
+from pkgutil import get_importer
+
+try:
+    import _imp
+except ImportError:
+    # Python 3.2 compatibility
+    import imp as _imp
+
+try:
+    FileExistsError
+except NameError:
+    FileExistsError = OSError
+
+from pip._vendor import six
+from pip._vendor.six.moves import urllib, map, filter
+
+# capture these to bypass sandboxing
+from os import utime
+try:
+    from os import mkdir, rename, unlink
+    WRITE_SUPPORT = True
+except ImportError:
+    # no write support, probably under GAE
+    WRITE_SUPPORT = False
+
+from os import open as os_open
+from os.path import isdir, split
+
+try:
+    import importlib.machinery as importlib_machinery
+    # access attribute to force import under delayed import mechanisms.
+    importlib_machinery.__name__
+except ImportError:
+    importlib_machinery = None
+
+from . import py31compat
+from pip._vendor import appdirs
+from pip._vendor import packaging
+__import__('pip._vendor.packaging.version')
+__import__('pip._vendor.packaging.specifiers')
+__import__('pip._vendor.packaging.requirements')
+__import__('pip._vendor.packaging.markers')
+
+
+__metaclass__ = type
+
+
+if (3, 0) < sys.version_info < (3, 4):
+    raise RuntimeError("Python 3.4 or later is required")
+
+if six.PY2:
+    # Those builtin exceptions are only defined in Python 3
+    PermissionError = None
+    NotADirectoryError = None
+
+# declare some globals that will be defined later to
+# satisfy the linters.
+require = None
+working_set = None
+add_activation_listener = None
+resources_stream = None
+cleanup_resources = None
+resource_dir = None
+resource_stream = None
+set_extraction_path = None
+resource_isdir = None
+resource_string = None
+iter_entry_points = None
+resource_listdir = None
+resource_filename = None
+resource_exists = None
+_distribution_finders = None
+_namespace_handlers = None
+_namespace_packages = None
+
+
+class PEP440Warning(RuntimeWarning):
+    """
+    Used when there is an issue with a version or specifier not complying with
+    PEP 440.
+    """
+
+
+def parse_version(v):
+    try:
+        return packaging.version.Version(v)
+    except packaging.version.InvalidVersion:
+        return packaging.version.LegacyVersion(v)
+
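+# Illustrative note (not part of the original source): strings that fail to
+# parse as PEP 440 fall back to LegacyVersion, which sorts before any valid
+# version, e.g. parse_version("foobar") < parse_version("1.0").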
+
+_state_vars = {}
+
+
+def _declare_state(vartype, **kw):
+    globals().update(kw)
+    _state_vars.update(dict.fromkeys(kw, vartype))
+
+
+def __getstate__():
+    state = {}
+    g = globals()
+    for k, v in _state_vars.items():
+        state[k] = g['_sget_' + v](g[k])
+    return state
+
+
+def __setstate__(state):
+    g = globals()
+    for k, v in state.items():
+        g['_sset_' + _state_vars[k]](k, g[k], v)
+    return state
+
+
+def _sget_dict(val):
+    return val.copy()
+
+
+def _sset_dict(key, ob, state):
+    ob.clear()
+    ob.update(state)
+
+
+def _sget_object(val):
+    return val.__getstate__()
+
+
+def _sset_object(key, ob, state):
+    ob.__setstate__(state)
+
+
+_sget_none = _sset_none = lambda *args: None
+
+
+def get_supported_platform():
+    """Return this platform's maximum compatible version.
+
+    distutils.util.get_platform() normally reports the minimum version
+    of Mac OS X that would be required to *use* extensions produced by
+    distutils.  But what we want when checking compatibility is to know the
+    version of Mac OS X that we are *running*.  To allow usage of packages that
+    explicitly require a newer version of Mac OS X, we must also know the
+    current version of the OS.
+
+    If this condition occurs for any other platform with a version in its
+    platform strings, this function should be extended accordingly.
+    """
+    plat = get_build_platform()
+    m = macosVersionString.match(plat)
+    if m is not None and sys.platform == "darwin":
+        try:
+            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
+        except ValueError:
+            # not Mac OS X
+            pass
+    return plat
+
+
+__all__ = [
+    # Basic resource access and distribution/entry point discovery
+    'require', 'run_script', 'get_provider', 'get_distribution',
+    'load_entry_point', 'get_entry_map', 'get_entry_info',
+    'iter_entry_points',
+    'resource_string', 'resource_stream', 'resource_filename',
+    'resource_listdir', 'resource_exists', 'resource_isdir',
+
+    # Environmental control
+    'declare_namespace', 'working_set', 'add_activation_listener',
+    'find_distributions', 'set_extraction_path', 'cleanup_resources',
+    'get_default_cache',
+
+    # Primary implementation classes
+    'Environment', 'WorkingSet', 'ResourceManager',
+    'Distribution', 'Requirement', 'EntryPoint',
+
+    # Exceptions
+    'ResolutionError', 'VersionConflict', 'DistributionNotFound',
+    'UnknownExtra', 'ExtractionError',
+
+    # Warnings
+    'PEP440Warning',
+
+    # Parsing functions and string utilities
+    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
+    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
+    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
+
+    # filesystem utilities
+    'ensure_directory', 'normalize_path',
+
+    # Distribution "precedence" constants
+    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
+
+    # "Provider" interfaces, implementations, and registration/lookup APIs
+    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
+    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
+    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
+    'register_finder', 'register_namespace_handler', 'register_loader_type',
+    'fixup_namespace_packages', 'get_importer',
+
+    # Warnings
+    'PkgResourcesDeprecationWarning',
+
+    # Deprecated/backward compatibility only
+    'run_main', 'AvailableDistributions',
+]
+
+
+class ResolutionError(Exception):
+    """Abstract base for dependency resolution errors"""
+
+    def __repr__(self):
+        return self.__class__.__name__ + repr(self.args)
+
+
+class VersionConflict(ResolutionError):
+    """
+    An already-installed version conflicts with the requested version.
+
+    Should be initialized with the installed Distribution and the requested
+    Requirement.
+    """
+
+    _template = "{self.dist} is installed but {self.req} is required"
+
+    @property
+    def dist(self):
+        return self.args[0]
+
+    @property
+    def req(self):
+        return self.args[1]
+
+    def report(self):
+        return self._template.format(**locals())
+
+    def with_context(self, required_by):
+        """
+        If required_by is non-empty, return a version of self that is a
+        ContextualVersionConflict.
+        """
+        if not required_by:
+            return self
+        args = self.args + (required_by,)
+        return ContextualVersionConflict(*args)
+
+
+class ContextualVersionConflict(VersionConflict):
+    """
+    A VersionConflict that accepts a third parameter, the set of the
+    requirements that required the installed Distribution.
+    """
+
+    _template = VersionConflict._template + ' by {self.required_by}'
+
+    @property
+    def required_by(self):
+        return self.args[2]
+
+
+class DistributionNotFound(ResolutionError):
+    """A requested distribution was not found"""
+
+    _template = ("The '{self.req}' distribution was not found "
+                 "and is required by {self.requirers_str}")
+
+    @property
+    def req(self):
+        return self.args[0]
+
+    @property
+    def requirers(self):
+        return self.args[1]
+
+    @property
+    def requirers_str(self):
+        if not self.requirers:
+            return 'the application'
+        return ', '.join(self.requirers)
+
+    def report(self):
+        return self._template.format(**locals())
+
+    def __str__(self):
+        return self.report()
+
+
+class UnknownExtra(ResolutionError):
+    """Distribution doesn't have an "extra feature" of the given name"""
+
+
+_provider_factories = {}
+
+PY_MAJOR = sys.version[:3]
+EGG_DIST = 3
+BINARY_DIST = 2
+SOURCE_DIST = 1
+CHECKOUT_DIST = 0
+DEVELOP_DIST = -1
+
+
+def register_loader_type(loader_type, provider_factory):
+    """Register `provider_factory` to make providers for `loader_type`
+
+    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
+    and `provider_factory` is a function that, passed a *module* object,
+    returns an ``IResourceProvider`` for that module.
+    """
+    _provider_factories[loader_type] = provider_factory
+
+
+def get_provider(moduleOrReq):
+    """Return an IResourceProvider for the named module or requirement"""
+    if isinstance(moduleOrReq, Requirement):
+        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
+    try:
+        module = sys.modules[moduleOrReq]
+    except KeyError:
+        __import__(moduleOrReq)
+        module = sys.modules[moduleOrReq]
+    loader = getattr(module, '__loader__', None)
+    return _find_adapter(_provider_factories, loader)(module)
+
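+# Illustrative usage (assumption, not part of the original source):
+#     provider = get_provider("pip")        # imports pip if needed
+#     provider.has_resource("__init__.py")  # True for a filesystem package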
+
+def _macosx_vers(_cache=[]):
+    if not _cache:
+        version = platform.mac_ver()[0]
+        # fallback for MacPorts
+        if version == '':
+            plist = '/System/Library/CoreServices/SystemVersion.plist'
+            if os.path.exists(plist):
+                if hasattr(plistlib, 'readPlist'):
+                    plist_content = plistlib.readPlist(plist)
+                    if 'ProductVersion' in plist_content:
+                        version = plist_content['ProductVersion']
+
+        _cache.append(version.split('.'))
+    return _cache[0]
+
+
+def _macosx_arch(machine):
+    return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
+
+
+def get_build_platform():
+    """Return this platform's string for platform-specific distributions
+
+    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
+    needs some hacks for Linux and Mac OS X.
+    """
+    from sysconfig import get_platform
+
+    plat = get_platform()
+    if sys.platform == "darwin" and not plat.startswith('macosx-'):
+        try:
+            version = _macosx_vers()
+            machine = os.uname()[4].replace(" ", "_")
+            return "macosx-%d.%d-%s" % (
+                int(version[0]), int(version[1]),
+                _macosx_arch(machine),
+            )
+        except ValueError:
+            # if someone is running a non-Mac darwin system, this will fall
+            # through to the default implementation
+            pass
+    return plat
+
+
+macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
+darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
+# XXX backward compat
+get_platform = get_build_platform
+
+
+def compatible_platforms(provided, required):
+    """Can code for the `provided` platform run on the `required` platform?
+
+    Returns true if either platform is ``None``, or the platforms are equal.
+
+    XXX Needs compatibility checks for Linux and other unixy OSes.
+    """
+    if provided is None or required is None or provided == required:
+        # easy case
+        return True
+
+    # Mac OS X special cases
+    reqMac = macosVersionString.match(required)
+    if reqMac:
+        provMac = macosVersionString.match(provided)
+
+        # is this a Mac package?
+        if not provMac:
+            # this is backwards compatibility for packages built before
+            # setuptools 0.6. All packages built after this point will
+            # use the new macosx designation.
+            provDarwin = darwinVersionString.match(provided)
+            if provDarwin:
+                dversion = int(provDarwin.group(1))
+                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
+                if dversion == 7 and macosversion >= "10.3" or \
+                        dversion == 8 and macosversion >= "10.4":
+                    return True
+            # egg isn't macosx or legacy darwin
+            return False
+
+        # are they the same major version and machine type?
+        if provMac.group(1) != reqMac.group(1) or \
+                provMac.group(3) != reqMac.group(3):
+            return False
+
+        # is the required OS major update >= the provided one?
+        if int(provMac.group(2)) > int(reqMac.group(2)):
+            return False
+
+        return True
+
+    # XXX Linux and other platforms' special cases should go here
+    return False
+
+
+def run_script(dist_spec, script_name):
+    """Locate distribution `dist_spec` and run its `script_name` script"""
+    ns = sys._getframe(1).f_globals
+    name = ns['__name__']
+    ns.clear()
+    ns['__name__'] = name
+    require(dist_spec)[0].run_script(script_name, ns)
+
+
+# backward compatibility
+run_main = run_script
+
+
+def get_distribution(dist):
+    """Return a current distribution object for a Requirement or string"""
+    if isinstance(dist, six.string_types):
+        dist = Requirement.parse(dist)
+    if isinstance(dist, Requirement):
+        dist = get_provider(dist)
+    if not isinstance(dist, Distribution):
+        raise TypeError("Expected string, Requirement, or Distribution", dist)
+    return dist
+
+
+def load_entry_point(dist, group, name):
+    """Return `name` entry point of `group` for `dist` or raise ImportError"""
+    return get_distribution(dist).load_entry_point(group, name)
+
+
+def get_entry_map(dist, group=None):
+    """Return the entry point map for `group`, or the full entry map"""
+    return get_distribution(dist).get_entry_map(group)
+
+
+def get_entry_info(dist, group, name):
+    """Return the EntryPoint object for `group`+`name`, or ``None``"""
+    return get_distribution(dist).get_entry_info(group, name)
+
+
+class IMetadataProvider:
+    def has_metadata(name):
+        """Does the package's distribution contain the named metadata?"""
+
+    def get_metadata(name):
+        """The named metadata resource as a string"""
+
+    def get_metadata_lines(name):
+        """Yield named metadata resource as list of non-blank non-comment lines
+
+        Leading and trailing whitespace is stripped from each line, and lines
+        with ``#`` as the first non-blank character are omitted."""
+
+    def metadata_isdir(name):
+        """Is the named metadata a directory?  (like ``os.path.isdir()``)"""
+
+    def metadata_listdir(name):
+        """List of metadata names in the directory (like ``os.listdir()``)"""
+
+    def run_script(script_name, namespace):
+        """Execute the named script in the supplied namespace dictionary"""
+
+
+class IResourceProvider(IMetadataProvider):
+    """An object that provides access to package resources"""
+
+    def get_resource_filename(manager, resource_name):
+        """Return a true filesystem path for `resource_name`
+
+        `manager` must be an ``IResourceManager``"""
+
+    def get_resource_stream(manager, resource_name):
+        """Return a readable file-like object for `resource_name`
+
+        `manager` must be an ``IResourceManager``"""
+
+    def get_resource_string(manager, resource_name):
+        """Return a string containing the contents of `resource_name`
+
+        `manager` must be an ``IResourceManager``"""
+
+    def has_resource(resource_name):
+        """Does the package contain the named resource?"""
+
+    def resource_isdir(resource_name):
+        """Is the named resource a directory?  (like ``os.path.isdir()``)"""
+
+    def resource_listdir(resource_name):
+        """List of resource names in the directory (like ``os.listdir()``)"""
+
+
+class WorkingSet:
+    """A collection of active distributions on sys.path (or a similar list)"""
+
+    def __init__(self, entries=None):
+        """Create working set from list of path entries (default=sys.path)"""
+        self.entries = []
+        self.entry_keys = {}
+        self.by_key = {}
+        self.callbacks = []
+
+        if entries is None:
+            entries = sys.path
+
+        for entry in entries:
+            self.add_entry(entry)
+
+    @classmethod
+    def _build_master(cls):
+        """
+        Prepare the master working set.
+        """
+        ws = cls()
+        try:
+            from __main__ import __requires__
+        except ImportError:
+            # The main program does not list any requirements
+            return ws
+
+        # ensure the requirements are met
+        try:
+            ws.require(__requires__)
+        except VersionConflict:
+            return cls._build_from_requirements(__requires__)
+
+        return ws
+
+    @classmethod
+    def _build_from_requirements(cls, req_spec):
+        """
+        Build a working set from a requirement spec. Rewrites sys.path.
+        """
+        # try it without defaults already on sys.path
+        # by starting with an empty path
+        ws = cls([])
+        reqs = parse_requirements(req_spec)
+        dists = ws.resolve(reqs, Environment())
+        for dist in dists:
+            ws.add(dist)
+
+        # add any missing entries from sys.path
+        for entry in sys.path:
+            if entry not in ws.entries:
+                ws.add_entry(entry)
+
+        # then copy back to sys.path
+        sys.path[:] = ws.entries
+        return ws
+
+    def add_entry(self, entry):
+        """Add a path item to ``.entries``, finding any distributions on it
+
+        ``find_distributions(entry, True)`` is used to find distributions
+        corresponding to the path entry, and they are added.  `entry` is
+        always appended to ``.entries``, even if it is already present.
+        (This is because ``sys.path`` can contain the same value more than
+        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
+        equal ``sys.path``.)
+        """
+        self.entry_keys.setdefault(entry, [])
+        self.entries.append(entry)
+        for dist in find_distributions(entry, True):
+            self.add(dist, entry, False)
+
+    def __contains__(self, dist):
+        """True if `dist` is the active distribution for its project"""
+        return self.by_key.get(dist.key) == dist
+
+    def find(self, req):
+        """Find a distribution matching requirement `req`
+
+        If there is an active distribution for the requested project, this
+        returns it as long as it meets the version requirement specified by
+        `req`.  But, if there is an active distribution for the project and it
+        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
+        If there is no active distribution for the requested project, ``None``
+        is returned.
+        """
+        dist = self.by_key.get(req.key)
+        if dist is not None and dist not in req:
+            # XXX add more info
+            raise VersionConflict(dist, req)
+        return dist
+
+    def iter_entry_points(self, group, name=None):
+        """Yield entry point objects from `group` matching `name`
+
+        If `name` is None, yields all entry points in `group` from all
+        distributions in the working set, otherwise only ones matching
+        both `group` and `name` are yielded (in distribution order).
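+
+        For example, to list console-script entry points (a sketch using
+        the module-level ``working_set``)::
+
+            for ep in working_set.iter_entry_points('console_scripts'):
+                print(ep.name, ep.module_name)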
+        """
+        return (
+            entry
+            for dist in self
+            for entry in dist.get_entry_map(group).values()
+            if name is None or name == entry.name
+        )
+
+    def run_script(self, requires, script_name):
+        """Locate distribution for `requires` and run `script_name` script"""
+        ns = sys._getframe(1).f_globals
+        name = ns['__name__']
+        ns.clear()
+        ns['__name__'] = name
+        self.require(requires)[0].run_script(script_name, ns)
+
+    def __iter__(self):
+        """Yield distributions for non-duplicate projects in the working set
+
+        The yield order is the order in which the items' path entries were
+        added to the working set.
+        """
+        seen = {}
+        for item in self.entries:
+            if item not in self.entry_keys:
+                # workaround a cache issue
+                continue
+
+            for key in self.entry_keys[item]:
+                if key not in seen:
+                    seen[key] = 1
+                    yield self.by_key[key]
+
+    def add(self, dist, entry=None, insert=True, replace=False):
+        """Add `dist` to working set, associated with `entry`
+
+        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
+        On exit from this routine, `entry` is added to the end of the working
+        set's ``.entries`` (if it wasn't already present).
+
+        `dist` is only added to the working set if it's for a project that
+        doesn't already have a distribution in the set, unless `replace=True`.
+        If it's added, any callbacks registered with the ``subscribe()`` method
+        will be called.
+        """
+        if insert:
+            dist.insert_on(self.entries, entry, replace=replace)
+
+        if entry is None:
+            entry = dist.location
+        keys = self.entry_keys.setdefault(entry, [])
+        keys2 = self.entry_keys.setdefault(dist.location, [])
+        if not replace and dist.key in self.by_key:
+            # ignore hidden distros
+            return
+
+        self.by_key[dist.key] = dist
+        if dist.key not in keys:
+            keys.append(dist.key)
+        if dist.key not in keys2:
+            keys2.append(dist.key)
+        self._added_new(dist)
+
+    def resolve(self, requirements, env=None, installer=None,
+                replace_conflicting=False, extras=None):
+        """List all distributions needed to (recursively) meet `requirements`
+
+        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
+        if supplied, should be an ``Environment`` instance.  If
+        not supplied, it defaults to all distributions available within any
+        entry or distribution in the working set.  `installer`, if supplied,
+        will be invoked with each requirement that cannot be met by an
+        already-installed distribution; it should return a ``Distribution`` or
+        ``None``.
+
+        Unless `replace_conflicting=True`, raises a VersionConflict exception
+        if any requirements are found on the path that have the correct name
+        but the wrong version.  Otherwise, if an `installer` is supplied it
+        will be invoked to obtain the correct version of the requirement and
+        activate it.
+
+        `extras` is a list of the extras to be used with these requirements.
+        This is important because extra requirements may look like `my_req;
+        extra = "my_extra"`, which would otherwise be interpreted as a purely
+        optional requirement.  Instead, we want to be able to assert that these
+        requirements are truly required.
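+
+        A minimal usage sketch (the requirement string is illustrative)::
+
+            reqs = parse_requirements("example-dist>=1.0")
+            for dist in working_set.resolve(reqs):
+                working_set.add(dist)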
+        """
+
+        # set up the stack
+        requirements = list(requirements)[::-1]
+        # set of processed requirements
+        processed = {}
+        # key -> dist
+        best = {}
+        to_activate = []
+
+        req_extras = _ReqExtras()
+
+        # Mapping of requirement to set of distributions that required it;
+        # useful for reporting info about conflicts.
+        required_by = collections.defaultdict(set)
+
+        while requirements:
+            # process dependencies breadth-first
+            req = requirements.pop(0)
+            if req in processed:
+                # Ignore cyclic or redundant dependencies
+                continue
+
+            if not req_extras.markers_pass(req, extras):
+                continue
+
+            dist = best.get(req.key)
+            if dist is None:
+                # Find the best distribution and add it to the map
+                dist = self.by_key.get(req.key)
+                if dist is None or (dist not in req and replace_conflicting):
+                    ws = self
+                    if env is None:
+                        if dist is None:
+                            env = Environment(self.entries)
+                        else:
+                            # Use an empty environment and workingset to avoid
+                            # any further conflicts with the conflicting
+                            # distribution
+                            env = Environment([])
+                            ws = WorkingSet([])
+                    dist = best[req.key] = env.best_match(
+                        req, ws, installer,
+                        replace_conflicting=replace_conflicting
+                    )
+                    if dist is None:
+                        requirers = required_by.get(req, None)
+                        raise DistributionNotFound(req, requirers)
+                to_activate.append(dist)
+            if dist not in req:
+                # Oops, the "best" so far conflicts with a dependency
+                dependent_req = required_by[req]
+                raise VersionConflict(dist, req).with_context(dependent_req)
+
+            # push the new requirements onto the stack
+            new_requirements = dist.requires(req.extras)[::-1]
+            requirements.extend(new_requirements)
+
+            # Register the new requirements needed by req
+            for new_requirement in new_requirements:
+                required_by[new_requirement].add(req.project_name)
+                req_extras[new_requirement] = req.extras
+
+            processed[req] = True
+
+        # return list of distros to activate
+        return to_activate
+
+    def find_plugins(
+            self, plugin_env, full_env=None, installer=None, fallback=True):
+        """Find all activatable distributions in `plugin_env`
+
+        Example usage::
+
+            distributions, errors = working_set.find_plugins(
+                Environment(plugin_dirlist)
+            )
+            # add plugins+libs to sys.path
+            map(working_set.add, distributions)
+            # display errors
+            print('Could not load', errors)
+
+        The `plugin_env` should be an ``Environment`` instance that contains
+        only distributions that are in the project's "plugin directory" or
+        directories. The `full_env`, if supplied, should be an ``Environment``
+        that contains all currently-available distributions.  If `full_env`
+        is not supplied, one is created automatically from the ``WorkingSet``
+        this method is called on, which will typically mean that every
+        directory on ``sys.path`` will be scanned for distributions.
+
+        `installer` is a standard installer callback as used by the
+        ``resolve()`` method. The `fallback` flag indicates whether we should
+        attempt to resolve older versions of a plugin if the newest version
+        cannot be resolved.
+
+        This method returns a 2-tuple: (`distributions`, `error_info`), where
+        `distributions` is a list of the distributions found in `plugin_env`
+        that were loadable, along with any other distributions that are needed
+        to resolve their dependencies.  `error_info` is a dictionary mapping
+        unloadable plugin distributions to an exception instance describing the
+        error that occurred. Usually this will be a ``DistributionNotFound`` or
+        ``VersionConflict`` instance.
+        """
+
+        plugin_projects = list(plugin_env)
+        # scan project names in alphabetic order
+        plugin_projects.sort()
+
+        error_info = {}
+        distributions = {}
+
+        if full_env is None:
+            env = Environment(self.entries)
+            env += plugin_env
+        else:
+            env = full_env + plugin_env
+
+        shadow_set = self.__class__([])
+        # put all our entries in shadow_set
+        list(map(shadow_set.add, self))
+
+        for project_name in plugin_projects:
+
+            for dist in plugin_env[project_name]:
+
+                req = [dist.as_requirement()]
+
+                try:
+                    resolvees = shadow_set.resolve(req, env, installer)
+
+                except ResolutionError as v:
+                    # save error info
+                    error_info[dist] = v
+                    if fallback:
+                        # try the next older version of project
+                        continue
+                    else:
+                        # give up on this project, keep going
+                        break
+
+                else:
+                    list(map(shadow_set.add, resolvees))
+                    distributions.update(dict.fromkeys(resolvees))
+
+                    # success, no need to try any more versions of this project
+                    break
+
+        distributions = list(distributions)
+        distributions.sort()
+
+        return distributions, error_info
+
+    def require(self, *requirements):
+        """Ensure that distributions matching `requirements` are activated
+
+        `requirements` must be a string or a (possibly-nested) sequence
+        thereof, specifying the distributions and versions required.  The
+        return value is a sequence of the distributions that needed to be
+        activated to fulfill the requirements; all relevant distributions are
+        included, even if they were already activated in this working set.
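+
+        For example (the requirement string is illustrative)::
+
+            working_set.require('example-dist>=1.0')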
+        """
+        needed = self.resolve(parse_requirements(requirements))
+
+        for dist in needed:
+            self.add(dist)
+
+        return needed
+
+    def subscribe(self, callback, existing=True):
+        """Invoke `callback` for all distributions
+
+        If `existing=True` (default),
+        call on all existing ones, as well.
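+
+        For example (the callback shown is illustrative)::
+
+            def on_activation(dist):
+                print('activated %s' % dist)
+
+            working_set.subscribe(on_activation)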
+        """
+        if callback in self.callbacks:
+            return
+        self.callbacks.append(callback)
+        if not existing:
+            return
+        for dist in self:
+            callback(dist)
+
+    def _added_new(self, dist):
+        for callback in self.callbacks:
+            callback(dist)
+
+    def __getstate__(self):
+        return (
+            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
+            self.callbacks[:]
+        )
+
+    def __setstate__(self, e_k_b_c):
+        entries, keys, by_key, callbacks = e_k_b_c
+        self.entries = entries[:]
+        self.entry_keys = keys.copy()
+        self.by_key = by_key.copy()
+        self.callbacks = callbacks[:]
+
+
+class _ReqExtras(dict):
+    """
+    Map each requirement to the extras that demanded it.
+    """
+
+    def markers_pass(self, req, extras=None):
+        """
+        Evaluate markers for req against each extra that
+        demanded it.
+
+        Return False if the req has a marker and fails
+        evaluation. Otherwise, return True.
+        """
+        extra_evals = (
+            req.marker.evaluate({'extra': extra})
+            for extra in self.get(req, ()) + (extras or (None,))
+        )
+        return not req.marker or any(extra_evals)
+
+
+class Environment:
+    """Searchable snapshot of distributions on a search path"""
+
+    def __init__(
+            self, search_path=None, platform=get_supported_platform(),
+            python=PY_MAJOR):
+        """Snapshot distributions available on a search path
+
+        Any distributions found on `search_path` are added to the environment.
+        `search_path` should be a sequence of ``sys.path`` items.  If not
+        supplied, ``sys.path`` is used.
+
+        `platform` is an optional string specifying the name of the platform
+        that platform-specific distributions must be compatible with.  If
+        unspecified, it defaults to the current platform.  `python` is an
+        optional string naming the desired version of Python (e.g. ``'3.6'``);
+        it defaults to the current version.
+
+        You may explicitly set `platform` (and/or `python`) to ``None`` if you
+        wish to map *all* distributions, not just those compatible with the
+        running platform or Python version.
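+
+        A usage sketch (the search path is illustrative)::
+
+            env = Environment(['plugins'])
+            for project_name in env:
+                newest = env[project_name][0]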
+        """
+        self._distmap = {}
+        self.platform = platform
+        self.python = python
+        self.scan(search_path)
+
+    def can_add(self, dist):
+        """Is distribution `dist` acceptable for this environment?
+
+        The distribution must match the platform and python version
+        requirements specified when this environment was created, or False
+        is returned.
+        """
+        py_compat = (
+            self.python is None
+            or dist.py_version is None
+            or dist.py_version == self.python
+        )
+        return py_compat and compatible_platforms(dist.platform, self.platform)
+
+    def remove(self, dist):
+        """Remove `dist` from the environment"""
+        self._distmap[dist.key].remove(dist)
+
+    def scan(self, search_path=None):
+        """Scan `search_path` for distributions usable in this environment
+
+        Any distributions found are added to the environment.
+        `search_path` should be a sequence of ``sys.path`` items.  If not
+        supplied, ``sys.path`` is used.  Only distributions conforming to
+        the platform/python version defined at initialization are added.
+        """
+        if search_path is None:
+            search_path = sys.path
+
+        for item in search_path:
+            for dist in find_distributions(item):
+                self.add(dist)
+
+    def __getitem__(self, project_name):
+        """Return a newest-to-oldest list of distributions for `project_name`
+
+        Uses case-insensitive `project_name` comparison, assuming all the
+        project's distributions use their project's name converted to all
+        lowercase as their key.
+
+        """
+        distribution_key = project_name.lower()
+        return self._distmap.get(distribution_key, [])
+
+    def add(self, dist):
+        """Add `dist` if we ``can_add()`` it and it has not already been added
+        """
+        if self.can_add(dist) and dist.has_version():
+            dists = self._distmap.setdefault(dist.key, [])
+            if dist not in dists:
+                dists.append(dist)
+                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
+
+    def best_match(
+            self, req, working_set, installer=None, replace_conflicting=False):
+        """Find distribution best matching `req` and usable on `working_set`
+
+        This calls the ``find(req)`` method of the `working_set` to see if a
+        suitable distribution is already active.  (This may raise
+        ``VersionConflict`` if an unsuitable version of the project is already
+        active in the specified `working_set`.)  If a suitable distribution
+        isn't active, this method returns the newest distribution in the
+        environment that meets the ``Requirement`` in `req`.  If no suitable
+        distribution is found, and `installer` is supplied, then the result of
+        calling the environment's ``obtain(req, installer)`` method will be
+        returned.
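+
+        For example (the requirement string is illustrative)::
+
+            req = Requirement.parse('example-dist>=1.0')
+            dist = env.best_match(req, working_set)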
+        """
+        try:
+            dist = working_set.find(req)
+        except VersionConflict:
+            if not replace_conflicting:
+                raise
+            dist = None
+        if dist is not None:
+            return dist
+        for dist in self[req.key]:
+            if dist in req:
+                return dist
+        # try to download/install
+        return self.obtain(req, installer)
+
+    def obtain(self, requirement, installer=None):
+        """Obtain a distribution matching `requirement` (e.g. via download)
+
+        In the base ``Environment`` class, this routine just returns
+        ``installer(requirement)``, unless `installer` is None, in which case
+        None is returned instead.  This method is a hook that allows subclasses
+        to attempt other ways of obtaining a distribution before falling back
+        to the `installer` argument."""
+        if installer is not None:
+            return installer(requirement)
+
+    def __iter__(self):
+        """Yield the unique project names of the available distributions"""
+        for key in self._distmap.keys():
+            if self[key]:
+                yield key
+
+    def __iadd__(self, other):
+        """In-place addition of a distribution or environment"""
+        if isinstance(other, Distribution):
+            self.add(other)
+        elif isinstance(other, Environment):
+            for project in other:
+                for dist in other[project]:
+                    self.add(dist)
+        else:
+            raise TypeError("Can't add %r to environment" % (other,))
+        return self
+
+    def __add__(self, other):
+        """Add an environment or distribution to an environment"""
+        new = self.__class__([], platform=None, python=None)
+        for env in self, other:
+            new += env
+        return new
+
+
+# XXX backward compatibility
+AvailableDistributions = Environment
+
+
+class ExtractionError(RuntimeError):
+    """An error occurred extracting a resource
+
+    The following attributes are available from instances of this exception:
+
+    manager
+        The resource manager that raised this exception
+
+    cache_path
+        The base directory for resource extraction
+
+    original_error
+        The exception instance that caused extraction to fail
+    """
+
+
+class ResourceManager:
+    """Manage resource extraction and packages"""
+    extraction_path = None
+
+    def __init__(self):
+        self.cached_files = {}
+
+    def resource_exists(self, package_or_requirement, resource_name):
+        """Does the named resource exist?"""
+        return get_provider(package_or_requirement).has_resource(resource_name)
+
+    def resource_isdir(self, package_or_requirement, resource_name):
+        """Is the named resource an existing directory?"""
+        return get_provider(package_or_requirement).resource_isdir(
+            resource_name
+        )
+
+    def resource_filename(self, package_or_requirement, resource_name):
+        """Return a true filesystem path for specified resource"""
+        return get_provider(package_or_requirement).get_resource_filename(
+            self, resource_name
+        )
+
+    def resource_stream(self, package_or_requirement, resource_name):
+        """Return a readable file-like object for specified resource"""
+        return get_provider(package_or_requirement).get_resource_stream(
+            self, resource_name
+        )
+
+    def resource_string(self, package_or_requirement, resource_name):
+        """Return specified resource as a string"""
+        return get_provider(package_or_requirement).get_resource_string(
+            self, resource_name
+        )
+
+    def resource_listdir(self, package_or_requirement, resource_name):
+        """List the contents of the named resource directory"""
+        return get_provider(package_or_requirement).resource_listdir(
+            resource_name
+        )
+
+    def extraction_error(self):
+        """Give an error message for problems extracting file(s)"""
+
+        old_exc = sys.exc_info()[1]
+        cache_path = self.extraction_path or get_default_cache()
+
+        tmpl = textwrap.dedent("""
+            Can't extract file(s) to egg cache
+
+            The following error occurred while trying to extract file(s)
+            to the Python egg cache:
+
+              {old_exc}
+
+            The Python egg cache directory is currently set to:
+
+              {cache_path}
+
+            Perhaps your account does not have write access to this directory?
+            You can change the cache directory by setting the PYTHON_EGG_CACHE
+            environment variable to point to an accessible directory.
+            """).lstrip()
+        err = ExtractionError(tmpl.format(**locals()))
+        err.manager = self
+        err.cache_path = cache_path
+        err.original_error = old_exc
+        raise err
+
+    def get_cache_path(self, archive_name, names=()):
+        """Return absolute location in cache for `archive_name` and `names`
+
+        The parent directory of the resulting path will be created if it does
+        not already exist.  `archive_name` should be the base filename of the
+        enclosing egg (which may not be the name of the enclosing zipfile!),
+        including its ".egg" extension.  `names`, if provided, should be a
+        sequence of path name parts "under" the egg's extraction location.
+
+        This method should only be called by resource providers that need to
+        obtain an extraction location, and only for names they intend to
+        extract, as it tracks the generated names for possible cleanup later.
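+
+        For example (the egg name and path parts are illustrative)::
+
+            path = manager.get_cache_path(
+                'Example-1.0-py2.7.egg', ('EGG-INFO', 'scripts', 'run')
+            )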
+        """
+        extract_path = self.extraction_path or get_default_cache()
+        target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
+        try:
+            _bypass_ensure_directory(target_path)
+        except Exception:
+            self.extraction_error()
+
+        self._warn_unsafe_extraction_path(extract_path)
+
+        self.cached_files[target_path] = 1
+        return target_path
+
+    @staticmethod
+    def _warn_unsafe_extraction_path(path):
+        """
+        If the default extraction path is overridden and set to an insecure
+        location, such as /tmp, it opens up an opportunity for an attacker to
+        replace an extracted file with an unauthorized payload. Warn the user
+        if a known insecure location is used.
+
+        See Distribute #375 for more details.
+        """
+        if os.name == 'nt' and not path.startswith(os.environ['windir']):
+            # On Windows, permissions are generally restrictive by default
+            #  and temp directories are not writable by other users, so
+            #  bypass the warning.
+            return
+        mode = os.stat(path).st_mode
+        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
+            msg = (
+                "%s is writable by group/others and vulnerable to attack "
+                "when "
+                "used with get_resource_filename. Consider a more secure "
+                "location (set with .set_extraction_path or the "
+                "PYTHON_EGG_CACHE environment variable)." % path
+            )
+            warnings.warn(msg, UserWarning)
+
+    def postprocess(self, tempname, filename):
+        """Perform any platform-specific postprocessing of `tempname`
+
+        This is where Mac header rewrites should be done; other platforms don't
+        have anything special they should do.
+
+        Resource providers should call this method ONLY after successfully
+        extracting a compressed resource.  They must NOT call it on resources
+        that are already in the filesystem.
+
+        `tempname` is the current (temporary) name of the file, and `filename`
+        is the name it will be renamed to by the caller after this routine
+        returns.
+        """
+
+        if os.name == 'posix':
+            # Make the resource executable
+            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
+            os.chmod(tempname, mode)
+
+    def set_extraction_path(self, path):
+        """Set the base path where resources will be extracted to, if needed.
+
+        If you do not call this routine before any extractions take place, the
+        path defaults to the return value of ``get_default_cache()``.  (Which
+        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
+        platform-specific fallbacks.  See that routine's documentation for more
+        details.)
+
+        Resources are extracted to subdirectories of this path based upon
+        information given by the ``IResourceProvider``.  You may set this to a
+        temporary directory, but then you must call ``cleanup_resources()`` to
+        delete the extracted files when done.  There is no guarantee that
+        ``cleanup_resources()`` will be able to remove all extracted files.
+
+        (Note: you may not change the extraction path for a given resource
+        manager once resources have been extracted, unless you first call
+        ``cleanup_resources()``.)
+        """
+        if self.cached_files:
+            raise ValueError(
+                "Can't change extraction path, files already extracted"
+            )
+
+        self.extraction_path = path
+
+    def cleanup_resources(self, force=False):
+        """
+        Delete all extracted resource files and directories, returning a list
+        of the file and directory names that could not be successfully removed.
+        This function does not have any concurrency protection, so it should
+        generally only be called when the extraction path is a temporary
+        directory exclusive to a single process.  This method is not
+        automatically called; you must call it explicitly or register it as an
+        ``atexit`` function if you wish to ensure cleanup of a temporary
+        directory used for extractions.
+        """
+        # XXX
+
+
+def get_default_cache():
+    """
+    Return the ``PYTHON_EGG_CACHE`` environment variable
+    or a platform-relevant user cache dir for an app
+    named "Python-Eggs".
+    """
+    return (
+        os.environ.get('PYTHON_EGG_CACHE')
+        or appdirs.user_cache_dir(appname='Python-Eggs')
+    )
+
+
+def safe_name(name):
+    """Convert an arbitrary string to a standard distribution name
+
+    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
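+
+    For example::
+
+        safe_name('My_Project')  # -> 'My-Project'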
+    """
+    return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+    """
+    Convert an arbitrary string to a standard version string
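+
+    For example::
+
+        safe_version('2.1-rc1')        # -> '2.1rc1'
+        safe_version('weird version')  # -> 'weird.version'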
+    """
+    try:
+        # normalize the version
+        return str(packaging.version.Version(version))
+    except packaging.version.InvalidVersion:
+        version = version.replace(' ', '.')
+        return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def safe_extra(extra):
+    """Convert an arbitrary string to a standard 'extra' name
+
+    Any runs of non-alphanumeric characters are replaced with a single '_',
+    and the result is always lowercased.
+    """
+    return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
+
+
+def to_filename(name):
+    """Convert a project or version name to its filename-escaped form
+
+    Any '-' characters are currently replaced with '_'.
+    """
+    return name.replace('-', '_')
+
+
+def invalid_marker(text):
+    """
+    Validate text as a PEP 508 environment marker; return a ``SyntaxError``
+    instance if the marker is invalid, or ``False`` otherwise.
+    """
+    try:
+        evaluate_marker(text)
+    except SyntaxError as e:
+        e.filename = None
+        e.lineno = None
+        return e
+    return False
+
+
+def evaluate_marker(text, extra=None):
+    """
+    Evaluate a PEP 508 environment marker.
+    Return a boolean indicating the marker result in this environment.
+    Raise SyntaxError if marker is invalid.
+
+    This implementation uses the 'pyparsing' module.
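+
+    For example (the marker string follows PEP 508)::
+
+        evaluate_marker('python_version >= "2.7"')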
+    """
+    try:
+        marker = packaging.markers.Marker(text)
+        return marker.evaluate()
+    except packaging.markers.InvalidMarker as e:
+        raise SyntaxError(e)
+
+
+class NullProvider:
+    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
+
+    egg_name = None
+    egg_info = None
+    loader = None
+
+    def __init__(self, module):
+        self.loader = getattr(module, '__loader__', None)
+        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
+
+    def get_resource_filename(self, manager, resource_name):
+        return self._fn(self.module_path, resource_name)
+
+    def get_resource_stream(self, manager, resource_name):
+        return io.BytesIO(self.get_resource_string(manager, resource_name))
+
+    def get_resource_string(self, manager, resource_name):
+        return self._get(self._fn(self.module_path, resource_name))
+
+    def has_resource(self, resource_name):
+        return self._has(self._fn(self.module_path, resource_name))
+
+    def _get_metadata_path(self, name):
+        return self._fn(self.egg_info, name)
+
+    def has_metadata(self, name):
+        if not self.egg_info:
+            return self.egg_info
+
+        path = self._get_metadata_path(name)
+        return self._has(path)
+
+    def get_metadata(self, name):
+        if not self.egg_info:
+            return ""
+        value = self._get(self._fn(self.egg_info, name))
+        return value.decode('utf-8') if six.PY3 else value
+
+    def get_metadata_lines(self, name):
+        return yield_lines(self.get_metadata(name))
+
+    def resource_isdir(self, resource_name):
+        return self._isdir(self._fn(self.module_path, resource_name))
+
+    def metadata_isdir(self, name):
+        return self.egg_info and self._isdir(self._fn(self.egg_info, name))
+
+    def resource_listdir(self, resource_name):
+        return self._listdir(self._fn(self.module_path, resource_name))
+
+    def metadata_listdir(self, name):
+        if self.egg_info:
+            return self._listdir(self._fn(self.egg_info, name))
+        return []
+
+    def run_script(self, script_name, namespace):
+        script = 'scripts/' + script_name
+        if not self.has_metadata(script):
+            raise ResolutionError(
+                "Script {script!r} not found in metadata at {self.egg_info!r}"
+                .format(**locals()),
+            )
+        script_text = self.get_metadata(script).replace('\r\n', '\n')
+        script_text = script_text.replace('\r', '\n')
+        script_filename = self._fn(self.egg_info, script)
+        namespace['__file__'] = script_filename
+        if os.path.exists(script_filename):
+            source = open(script_filename).read()
+            code = compile(source, script_filename, 'exec')
+            exec(code, namespace, namespace)
+        else:
+            from linecache import cache
+            cache[script_filename] = (
+                len(script_text), 0, script_text.split('\n'), script_filename
+            )
+            script_code = compile(script_text, script_filename, 'exec')
+            exec(script_code, namespace, namespace)
+
+    def _has(self, path):
+        raise NotImplementedError(
+            "Can't perform this operation for unregistered loader type"
+        )
+
+    def _isdir(self, path):
+        raise NotImplementedError(
+            "Can't perform this operation for unregistered loader type"
+        )
+
+    def _listdir(self, path):
+        raise NotImplementedError(
+            "Can't perform this operation for unregistered loader type"
+        )
+
+    def _fn(self, base, resource_name):
+        self._validate_resource_path(resource_name)
+        if resource_name:
+            return os.path.join(base, *resource_name.split('/'))
+        return base
+
+    @staticmethod
+    def _validate_resource_path(path):
+        """
+        Validate the resource paths according to the docs.
+        https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access
+
+        >>> warned = getfixture('recwarn')
+        >>> warnings.simplefilter('always')
+        >>> vrp = NullProvider._validate_resource_path
+        >>> vrp('foo/bar.txt')
+        >>> bool(warned)
+        False
+        >>> vrp('../foo/bar.txt')
+        >>> bool(warned)
+        True
+        >>> warned.clear()
+        >>> vrp('/foo/bar.txt')
+        >>> bool(warned)
+        True
+        >>> vrp('foo/../../bar.txt')
+        >>> bool(warned)
+        True
+        >>> warned.clear()
+        >>> vrp('foo/f../bar.txt')
+        >>> bool(warned)
+        False
+
+        Windows path separators are straight-up disallowed.
+        >>> vrp(r'\\foo/bar.txt')
+        Traceback (most recent call last):
+        ...
+        ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+        >>> vrp(r'C:\\foo/bar.txt')
+        Traceback (most recent call last):
+        ...
+        ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+        Blank values are allowed.
+
+        >>> vrp('')
+        >>> bool(warned)
+        False
+
+        Non-string values are not.
+
+        >>> vrp(None)
+        Traceback (most recent call last):
+        ...
+        AttributeError: ...
+        """
+        invalid = (
+            os.path.pardir in path.split(posixpath.sep) or
+            posixpath.isabs(path) or
+            ntpath.isabs(path)
+        )
+        if not invalid:
+            return
+
+        msg = "Use of .. or absolute path in a resource path is not allowed."
+
+        # Aggressively disallow Windows absolute paths
+        if ntpath.isabs(path) and not posixpath.isabs(path):
+            raise ValueError(msg)
+
+        # for compatibility, warn; in future
+        # raise ValueError(msg)
+        warnings.warn(
+            msg[:-1] + " and will raise exceptions in a future release.",
+            DeprecationWarning,
+            stacklevel=4,
+        )
+
+    def _get(self, path):
+        if hasattr(self.loader, 'get_data'):
+            return self.loader.get_data(path)
+        raise NotImplementedError(
+            "Can't perform this operation for loaders without 'get_data()'"
+        )
+
+
+register_loader_type(object, NullProvider)
+
+
+class EggProvider(NullProvider):
+    """Provider based on a virtual filesystem"""
+
+    def __init__(self, module):
+        NullProvider.__init__(self, module)
+        self._setup_prefix()
+
+    def _setup_prefix(self):
+        # we assume here that our metadata may be nested inside a "basket"
+        # of multiple eggs; that's why we use module_path instead of .archive
+        path = self.module_path
+        old = None
+        while path != old:
+            if _is_egg_path(path):
+                self.egg_name = os.path.basename(path)
+                self.egg_info = os.path.join(path, 'EGG-INFO')
+                self.egg_root = path
+                break
+            old = path
+            path, base = os.path.split(path)
+
+
+class DefaultProvider(EggProvider):
+    """Provides access to package resources in the filesystem"""
+
+    def _has(self, path):
+        return os.path.exists(path)
+
+    def _isdir(self, path):
+        return os.path.isdir(path)
+
+    def _listdir(self, path):
+        return os.listdir(path)
+
+    def get_resource_stream(self, manager, resource_name):
+        return open(self._fn(self.module_path, resource_name), 'rb')
+
+    def _get(self, path):
+        with open(path, 'rb') as stream:
+            return stream.read()
+
+    @classmethod
+    def _register(cls):
+        loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
+        for name in loader_names:
+            loader_cls = getattr(importlib_machinery, name, type(None))
+            register_loader_type(loader_cls, cls)
+
+
+DefaultProvider._register()
+
+
+class EmptyProvider(NullProvider):
+    """Provider that returns nothing for all requests"""
+
+    module_path = None
+
+    _isdir = _has = lambda self, path: False
+
+    def _get(self, path):
+        return ''
+
+    def _listdir(self, path):
+        return []
+
+    def __init__(self):
+        pass
+
+
+empty_provider = EmptyProvider()
+
+
+class ZipManifests(dict):
+    """
+    zip manifest builder
+    """
+
+    @classmethod
+    def build(cls, path):
+        """
+        Build a dictionary similar to the zipimport directory
+        caches, except instead of tuples, store ZipInfo objects.
+
+        Use a platform-specific path separator (os.sep) for the path keys
+        for compatibility with pypy on Windows.
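+
+        A usage sketch (the archive path is illustrative)::
+
+            manifest = ZipManifests.build('Example-1.0.egg')
+            names = sorted(manifest)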
+        """
+        with zipfile.ZipFile(path) as zfile:
+            items = (
+                (
+                    name.replace('/', os.sep),
+                    zfile.getinfo(name),
+                )
+                for name in zfile.namelist()
+            )
+            return dict(items)
+
+    load = build
+
+
+class MemoizedZipManifests(ZipManifests):
+    """
+    Memoized zipfile manifests.
+    """
+    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
+
+    def load(self, path):
+        """
+        Load a manifest at path or return a suitable manifest already loaded.
+        """
+        path = os.path.normpath(path)
+        mtime = os.stat(path).st_mtime
+
+        if path not in self or self[path].mtime != mtime:
+            manifest = self.build(path)
+            self[path] = self.manifest_mod(manifest, mtime)
+
+        return self[path].manifest
+
+
+class ZipProvider(EggProvider):
+    """Resource support for zips and eggs"""
+
+    eagers = None
+    _zip_manifests = MemoizedZipManifests()
+
+    def __init__(self, module):
+        EggProvider.__init__(self, module)
+        self.zip_pre = self.loader.archive + os.sep
+
+    def _zipinfo_name(self, fspath):
+        # Convert a virtual filename (full path to file) into a zipfile subpath
+        # usable with the zipimport directory cache for our target archive
+        fspath = fspath.rstrip(os.sep)
+        if fspath == self.loader.archive:
+            return ''
+        if fspath.startswith(self.zip_pre):
+            return fspath[len(self.zip_pre):]
+        raise AssertionError(
+            "%s is not a subpath of %s" % (fspath, self.zip_pre)
+        )
+
+    def _parts(self, zip_path):
+        # Convert a zipfile subpath into an egg-relative path part list.
+        # pseudo-fs path
+        fspath = self.zip_pre + zip_path
+        if fspath.startswith(self.egg_root + os.sep):
+            return fspath[len(self.egg_root) + 1:].split(os.sep)
+        raise AssertionError(
+            "%s is not a subpath of %s" % (fspath, self.egg_root)
+        )
+
+    @property
+    def zipinfo(self):
+        return self._zip_manifests.load(self.loader.archive)
+
+    def get_resource_filename(self, manager, resource_name):
+        if not self.egg_name:
+            raise NotImplementedError(
+                "resource_filename() only supported for .egg, not .zip"
+            )
+        # no need to lock for extraction, since we use temp names
+        zip_path = self._resource_to_zip(resource_name)
+        eagers = self._get_eager_resources()
+        if '/'.join(self._parts(zip_path)) in eagers:
+            for name in eagers:
+                self._extract_resource(manager, self._eager_to_zip(name))
+        return self._extract_resource(manager, zip_path)
+
+    @staticmethod
+    def _get_date_and_size(zip_stat):
+        size = zip_stat.file_size
+        # ymdhms+wday, yday, dst
+        date_time = zip_stat.date_time + (0, 0, -1)
+        # 1980 offset already done
+        timestamp = time.mktime(date_time)
+        return timestamp, size
+
+    def _extract_resource(self, manager, zip_path):
+
+        if zip_path in self._index():
+            for name in self._index()[zip_path]:
+                last = self._extract_resource(
+                    manager, os.path.join(zip_path, name)
+                )
+            # return the extracted directory name
+            return os.path.dirname(last)
+
+        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+
+        if not WRITE_SUPPORT:
+            raise IOError('"os.rename" and "os.unlink" are not supported '
+                          'on this platform')
+        try:
+
+            real_path = manager.get_cache_path(
+                self.egg_name, self._parts(zip_path)
+            )
+
+            if self._is_current(real_path, zip_path):
+                return real_path
+
+            outf, tmpnam = _mkstemp(
+                ".$extract",
+                dir=os.path.dirname(real_path),
+            )
+            os.write(outf, self.loader.get_data(zip_path))
+            os.close(outf)
+            utime(tmpnam, (timestamp, timestamp))
+            manager.postprocess(tmpnam, real_path)
+
+            try:
+                rename(tmpnam, real_path)
+
+            except os.error:
+                if os.path.isfile(real_path):
+                    if self._is_current(real_path, zip_path):
+                        # the file became current since it was checked above,
+                        #  so proceed.
+                        return real_path
+                    # Windows, del old file and retry
+                    elif os.name == 'nt':
+                        unlink(real_path)
+                        rename(tmpnam, real_path)
+                        return real_path
+                raise
+
+        except os.error:
+            # report a user-friendly error
+            manager.extraction_error()
+
+        return real_path
+
+    def _is_current(self, file_path, zip_path):
+        """
+        Return True if the file_path is current for this zip_path
+        """
+        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+        if not os.path.isfile(file_path):
+            return False
+        stat = os.stat(file_path)
+        if stat.st_size != size or stat.st_mtime != timestamp:
+            return False
+        # check that the contents match
+        zip_contents = self.loader.get_data(zip_path)
+        with open(file_path, 'rb') as f:
+            file_contents = f.read()
+        return zip_contents == file_contents
+
+    def _get_eager_resources(self):
+        if self.eagers is None:
+            eagers = []
+            for name in ('native_libs.txt', 'eager_resources.txt'):
+                if self.has_metadata(name):
+                    eagers.extend(self.get_metadata_lines(name))
+            self.eagers = eagers
+        return self.eagers
+
+    def _index(self):
+        try:
+            return self._dirindex
+        except AttributeError:
+            ind = {}
+            for path in self.zipinfo:
+                parts = path.split(os.sep)
+                while parts:
+                    parent = os.sep.join(parts[:-1])
+                    if parent in ind:
+                        ind[parent].append(parts[-1])
+                        break
+                    else:
+                        ind[parent] = [parts.pop()]
+            self._dirindex = ind
+            return ind
+
+    def _has(self, fspath):
+        zip_path = self._zipinfo_name(fspath)
+        return zip_path in self.zipinfo or zip_path in self._index()
+
+    def _isdir(self, fspath):
+        return self._zipinfo_name(fspath) in self._index()
+
+    def _listdir(self, fspath):
+        return list(self._index().get(self._zipinfo_name(fspath), ()))
+
+    def _eager_to_zip(self, resource_name):
+        return self._zipinfo_name(self._fn(self.egg_root, resource_name))
+
+    def _resource_to_zip(self, resource_name):
+        return self._zipinfo_name(self._fn(self.module_path, resource_name))
+
+
+register_loader_type(zipimport.zipimporter, ZipProvider)
+
+
+class FileMetadata(EmptyProvider):
+    """Metadata handler for standalone PKG-INFO files
+
+    Usage::
+
+        metadata = FileMetadata("/path/to/PKG-INFO")
+
+    This provider rejects all data and metadata requests except for PKG-INFO,
+    which is treated as existing, and will be the contents of the file at
+    the provided location.
+    """
+
+    def __init__(self, path):
+        self.path = path
+
+    def _get_metadata_path(self, name):
+        return self.path
+
+    def has_metadata(self, name):
+        return name == 'PKG-INFO' and os.path.isfile(self.path)
+
+    def get_metadata(self, name):
+        if name != 'PKG-INFO':
+            raise KeyError("No metadata except PKG-INFO is available")
+
+        with io.open(self.path, encoding='utf-8', errors="replace") as f:
+            metadata = f.read()
+        self._warn_on_replacement(metadata)
+        return metadata
+
+    def _warn_on_replacement(self, metadata):
+        # Python 2.7 compat for: replacement_char = '�'
+        replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
+        if replacement_char in metadata:
+            tmpl = "{self.path} could not be properly decoded in UTF-8"
+            msg = tmpl.format(**locals())
+            warnings.warn(msg)
+
+    def get_metadata_lines(self, name):
+        return yield_lines(self.get_metadata(name))
+
+
+class PathMetadata(DefaultProvider):
+    """Metadata provider for egg directories
+
+    Usage::
+
+        # Development eggs:
+
+        egg_info = "/path/to/PackageName.egg-info"
+        base_dir = os.path.dirname(egg_info)
+        metadata = PathMetadata(base_dir, egg_info)
+        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
+        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
+
+        # Unpacked egg directories:
+
+        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
+        metadata = PathMetadata(egg_path, os.path.join(egg_path, 'EGG-INFO'))
+        dist = Distribution.from_filename(egg_path, metadata=metadata)
+    """
+
+    def __init__(self, path, egg_info):
+        self.module_path = path
+        self.egg_info = egg_info
+
+
+class EggMetadata(ZipProvider):
+    """Metadata provider for .egg files"""
+
+    def __init__(self, importer):
+        """Create a metadata provider from a zipimporter"""
+
+        self.zip_pre = importer.archive + os.sep
+        self.loader = importer
+        if importer.prefix:
+            self.module_path = os.path.join(importer.archive, importer.prefix)
+        else:
+            self.module_path = importer.archive
+        self._setup_prefix()
+
+
+_declare_state('dict', _distribution_finders={})
+
+
+def register_finder(importer_type, distribution_finder):
+    """Register `distribution_finder` to find distributions in sys.path items
+
+    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+    handler), and `distribution_finder` is a callable that, passed a path
+    item and the importer instance, yields ``Distribution`` instances found on
+    that path item.  See ``pkg_resources.find_on_path`` for an example."""
+    _distribution_finders[importer_type] = distribution_finder
+
+
+def find_distributions(path_item, only=False):
+    """Yield distributions accessible via `path_item`"""
+    importer = get_importer(path_item)
+    finder = _find_adapter(_distribution_finders, importer)
+    return finder(importer, path_item, only)
+
+
+def find_eggs_in_zip(importer, path_item, only=False):
+    """
+    Find eggs in zip files; possibly multiple nested eggs.
+    """
+    if importer.archive.endswith('.whl'):
+        # wheels are not supported with this finder
+        # they don't have PKG-INFO metadata, and won't ever contain eggs
+        return
+    metadata = EggMetadata(importer)
+    if metadata.has_metadata('PKG-INFO'):
+        yield Distribution.from_filename(path_item, metadata=metadata)
+    if only:
+        # don't yield nested distros
+        return
+    for subitem in metadata.resource_listdir(''):
+        if _is_egg_path(subitem):
+            subpath = os.path.join(path_item, subitem)
+            dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
+            for dist in dists:
+                yield dist
+        elif subitem.lower().endswith('.dist-info'):
+            subpath = os.path.join(path_item, subitem)
+            submeta = EggMetadata(zipimport.zipimporter(subpath))
+            submeta.egg_info = subpath
+            yield Distribution.from_location(path_item, subitem, submeta)
+
+
+register_finder(zipimport.zipimporter, find_eggs_in_zip)
+
+
+def find_nothing(importer, path_item, only=False):
+    return ()
+
+
+register_finder(object, find_nothing)
+
+
+def _by_version_descending(names):
+    """
+    Given a list of filenames, return them in descending order
+    by version number.
+
+    >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
+    >>> _by_version_descending(names)
+    ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
+    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
+    >>> _by_version_descending(names)
+    ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
+    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
+    >>> _by_version_descending(names)
+    ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
+    """
+    def _by_version(name):
+        """
+        Parse each component of the filename
+        """
+        name, ext = os.path.splitext(name)
+        parts = itertools.chain(name.split('-'), [ext])
+        return [packaging.version.parse(part) for part in parts]
+
+    return sorted(names, key=_by_version, reverse=True)
+
+
+def find_on_path(importer, path_item, only=False):
+    """Yield distributions accessible on a sys.path directory"""
+    path_item = _normalize_cached(path_item)
+
+    if _is_unpacked_egg(path_item):
+        yield Distribution.from_filename(
+            path_item, metadata=PathMetadata(
+                path_item, os.path.join(path_item, 'EGG-INFO')
+            )
+        )
+        return
+
+    entries = safe_listdir(path_item)
+
+    # for performance, before sorting by version,
+    # screen entries for only those that will yield
+    # distributions
+    filtered = (
+        entry
+        for entry in entries
+        if dist_factory(path_item, entry, only)
+    )
+
+    # scan for .egg and .egg-info in directory
+    path_item_entries = _by_version_descending(filtered)
+    for entry in path_item_entries:
+        fullpath = os.path.join(path_item, entry)
+        factory = dist_factory(path_item, entry, only)
+        for dist in factory(fullpath):
+            yield dist
+
+
+def dist_factory(path_item, entry, only):
+    """
+    Return a dist_factory for a path_item and entry
+    """
+    lower = entry.lower()
+    is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info')))
+    return (
+        distributions_from_metadata
+        if is_meta else
+        find_distributions
+        if not only and _is_egg_path(entry) else
+        resolve_egg_link
+        if not only and lower.endswith('.egg-link') else
+        NoDists()
+    )
+
+
+class NoDists:
+    """
+    >>> bool(NoDists())
+    False
+
+    >>> list(NoDists()('anything'))
+    []
+    """
+    def __bool__(self):
+        return False
+    if six.PY2:
+        __nonzero__ = __bool__
+
+    def __call__(self, fullpath):
+        return iter(())
+
+
+def safe_listdir(path):
+    """
+    Attempt to list contents of path, but suppress some exceptions.
+    """
+    try:
+        return os.listdir(path)
+    except (PermissionError, NotADirectoryError):
+        pass
+    except OSError as e:
+        # Ignore the path if it does not exist, is not a directory,
+        # or permission is denied
+        ignorable = (
+            e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
+            # Python 2 on Windows needs to be handled this way :(
+            or getattr(e, "winerror", None) == 267
+        )
+        if not ignorable:
+            raise
+    return ()
+
+
+def distributions_from_metadata(path):
+    root = os.path.dirname(path)
+    if os.path.isdir(path):
+        if len(os.listdir(path)) == 0:
+            # empty metadata dir; skip
+            return
+        metadata = PathMetadata(root, path)
+    else:
+        metadata = FileMetadata(path)
+    entry = os.path.basename(path)
+    yield Distribution.from_location(
+        root, entry, metadata, precedence=DEVELOP_DIST,
+    )
+
+
+def non_empty_lines(path):
+    """
+    Yield non-empty lines from file at path
+    """
+    with open(path) as f:
+        for line in f:
+            line = line.strip()
+            if line:
+                yield line
+
+
+def resolve_egg_link(path):
+    """
+    Given a path to an .egg-link, resolve distributions
+    present in the referenced path.
+    """
+    referenced_paths = non_empty_lines(path)
+    resolved_paths = (
+        os.path.join(os.path.dirname(path), ref)
+        for ref in referenced_paths
+    )
+    dist_groups = map(find_distributions, resolved_paths)
+    return next(dist_groups, ())
+
+
+register_finder(pkgutil.ImpImporter, find_on_path)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+    register_finder(importlib_machinery.FileFinder, find_on_path)
+
+_declare_state('dict', _namespace_handlers={})
+_declare_state('dict', _namespace_packages={})
+
+
+def register_namespace_handler(importer_type, namespace_handler):
+    """Register `namespace_handler` to declare namespace packages
+
+    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+    handler), and `namespace_handler` is a callable like this::
+
+        def namespace_handler(importer, path_entry, moduleName, module):
+            # return a path_entry to use for child packages
+
+    Namespace handlers are only called if the importer object has already
+    agreed that it can handle the relevant path item, and they should only
+    return a subpath if the module __path__ does not already contain an
+    equivalent subpath.  For an example namespace handler, see
+    ``pkg_resources.file_ns_handler``.
+    """
+    _namespace_handlers[importer_type] = namespace_handler
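+
+# A minimal registration sketch (``MyImporter`` and the handler below
+# are hypothetical; a handler may return None to decline):
+#
+#     def my_ns_handler(importer, path_entry, moduleName, module):
+#         return None
+#
+#     register_namespace_handler(MyImporter, my_ns_handler)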
+
+
+def _handle_ns(packageName, path_item):
+    """Ensure that named package includes a subpath of path_item (if needed)"""
+
+    importer = get_importer(path_item)
+    if importer is None:
+        return None
+
+    # capture warnings due to #1111
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        loader = importer.find_module(packageName)
+
+    if loader is None:
+        return None
+    module = sys.modules.get(packageName)
+    if module is None:
+        module = sys.modules[packageName] = types.ModuleType(packageName)
+        module.__path__ = []
+        _set_parent_ns(packageName)
+    elif not hasattr(module, '__path__'):
+        raise TypeError("Not a package:", packageName)
+    handler = _find_adapter(_namespace_handlers, importer)
+    subpath = handler(importer, path_item, packageName, module)
+    if subpath is not None:
+        path = module.__path__
+        path.append(subpath)
+        loader.load_module(packageName)
+        _rebuild_mod_path(path, packageName, module)
+    return subpath
+
+
+def _rebuild_mod_path(orig_path, package_name, module):
+    """
+    Rebuild module.__path__ ensuring that all entries are ordered
+    corresponding to their sys.path order
+    """
+    sys_path = [_normalize_cached(p) for p in sys.path]
+
+    def safe_sys_path_index(entry):
+        """
+        Workaround for #520 and #513.
+        """
+        try:
+            return sys_path.index(entry)
+        except ValueError:
+            return float('inf')
+
+    def position_in_sys_path(path):
+        """
+        Return the ordinal of the path based on its position in sys.path
+        """
+        path_parts = path.split(os.sep)
+        module_parts = package_name.count('.') + 1
+        parts = path_parts[:-module_parts]
+        return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
+
+    new_path = sorted(orig_path, key=position_in_sys_path)
+    new_path = [_normalize_cached(p) for p in new_path]
+
+    if isinstance(module.__path__, list):
+        module.__path__[:] = new_path
+    else:
+        module.__path__ = new_path
+
+
+def declare_namespace(packageName):
+    """Declare that package 'packageName' is a namespace package"""
+
+    _imp.acquire_lock()
+    try:
+        if packageName in _namespace_packages:
+            return
+
+        path = sys.path
+        parent, _, _ = packageName.rpartition('.')
+
+        if parent:
+            declare_namespace(parent)
+            if parent not in _namespace_packages:
+                __import__(parent)
+            try:
+                path = sys.modules[parent].__path__
+            except AttributeError:
+                raise TypeError("Not a package:", parent)
+
+        # Track what packages are namespaces, so when new path items are added,
+        # they can be updated
+        _namespace_packages.setdefault(parent or None, []).append(packageName)
+        _namespace_packages.setdefault(packageName, [])
+
+        for path_item in path:
+            # Ensure all the parent's path items are reflected in the child,
+            # if they apply
+            _handle_ns(packageName, path_item)
+
+    finally:
+        _imp.release_lock()
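+
+# Typical usage of declare_namespace, from a namespace package's
+# __init__.py:
+#
+#     __import__('pkg_resources').declare_namespace(__name__)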
+
+
+def fixup_namespace_packages(path_item, parent=None):
+    """Ensure that previously-declared namespace packages include path_item"""
+    _imp.acquire_lock()
+    try:
+        for package in _namespace_packages.get(parent, ()):
+            subpath = _handle_ns(package, path_item)
+            if subpath:
+                fixup_namespace_packages(subpath, package)
+    finally:
+        _imp.release_lock()
+
+
+def file_ns_handler(importer, path_item, packageName, module):
+    """Compute an ns-package subpath for a filesystem or zipfile importer"""
+
+    subpath = os.path.join(path_item, packageName.split('.')[-1])
+    normalized = _normalize_cached(subpath)
+    for item in module.__path__:
+        if _normalize_cached(item) == normalized:
+            break
+    else:
+        # Only return the path if it's not already there
+        return subpath
+
+
+register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
+register_namespace_handler(zipimport.zipimporter, file_ns_handler)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
+
+
+def null_ns_handler(importer, path_item, packageName, module):
+    return None
+
+
+register_namespace_handler(object, null_ns_handler)
+
+
+def normalize_path(filename):
+    """Normalize a file/dir name for comparison purposes"""
+    return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
+
+
+def _cygwin_patch(filename):  # pragma: nocover
+    """
+    Contrary to POSIX 2008, on Cygwin, getcwd (3) contains
+    symlink components. Using
+    os.path.abspath() works around this limitation. A fix in os.getcwd()
+    would probably better, in Cygwin even more so, except
+    that this seems to be by design...
+    """
+    return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
+
+
+def _normalize_cached(filename, _cache={}):
+    try:
+        return _cache[filename]
+    except KeyError:
+        _cache[filename] = result = normalize_path(filename)
+        return result
+
+
+def _is_egg_path(path):
+    """
+    Determine if given path appears to be an egg.
+    """
+    return path.lower().endswith('.egg')
+
+
+def _is_unpacked_egg(path):
+    """
+    Determine if given path appears to be an unpacked egg.
+    """
+    return (
+        _is_egg_path(path) and
+        os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
+    )
+
+
+def _set_parent_ns(packageName):
+    parts = packageName.split('.')
+    name = parts.pop()
+    if parts:
+        parent = '.'.join(parts)
+        setattr(sys.modules[parent], name, sys.modules[packageName])
+
+
+def yield_lines(strs):
+    """Yield non-empty/non-comment lines of a string or sequence"""
+    if isinstance(strs, six.string_types):
+        for s in strs.splitlines():
+            s = s.strip()
+            # skip blank lines/comments
+            if s and not s.startswith('#'):
+                yield s
+    else:
+        for ss in strs:
+            for s in yield_lines(ss):
+                yield s
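+
+# Doctest-style sketch of the filtering above (input is illustrative):
+#
+#     >>> list(yield_lines('first\n\n# a comment\nsecond'))
+#     ['first', 'second']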
+
+
+MODULE = re.compile(r"\w+(\.\w+)*$").match
+EGG_NAME = re.compile(
+    r"""
+    (?P<name>[^-]+) (
+        -(?P<ver>[^-]+) (
+            -py(?P<pyver>[^-]+) (
+                -(?P<plat>.+)
+            )?
+        )?
+    )?
+    """,
+    re.VERBOSE | re.IGNORECASE,
+).match
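+
+# Worked example of the egg-name pattern (filename is hypothetical):
+#
+#     >>> EGG_NAME('FooBar-1.2-py2.7-linux-x86_64').group(
+#     ...     'name', 'ver', 'pyver', 'plat')
+#     ('FooBar', '1.2', '2.7', 'linux-x86_64')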
+
+
+class EntryPoint:
+    """Object representing an advertised importable object"""
+
+    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
+        if not MODULE(module_name):
+            raise ValueError("Invalid module name", module_name)
+        self.name = name
+        self.module_name = module_name
+        self.attrs = tuple(attrs)
+        self.extras = tuple(extras)
+        self.dist = dist
+
+    def __str__(self):
+        s = "%s = %s" % (self.name, self.module_name)
+        if self.attrs:
+            s += ':' + '.'.join(self.attrs)
+        if self.extras:
+            s += ' [%s]' % ','.join(self.extras)
+        return s
+
+    def __repr__(self):
+        return "EntryPoint.parse(%r)" % str(self)
+
+    def load(self, require=True, *args, **kwargs):
+        """
+        Require packages for this EntryPoint, then resolve it.
+        """
+        if not require or args or kwargs:
+            warnings.warn(
+                "Parameters to load are deprecated.  Call .resolve and "
+                ".require separately.",
+                PkgResourcesDeprecationWarning,
+                stacklevel=2,
+            )
+        if require:
+            self.require(*args, **kwargs)
+        return self.resolve()
+
+    def resolve(self):
+        """
+        Resolve the entry point from its module and attrs.
+        """
+        module = __import__(self.module_name, fromlist=['__name__'], level=0)
+        try:
+            return functools.reduce(getattr, self.attrs, module)
+        except AttributeError as exc:
+            raise ImportError(str(exc))
+
+    def require(self, env=None, installer=None):
+        if self.extras and not self.dist:
+            raise UnknownExtra("Can't require() without a distribution", self)
+
+        # Get the requirements for this entry point with all its extras and
+        # then resolve them. We have to pass `extras` along when resolving so
+        # that the working set knows what extras we want. Otherwise, for
+        # dist-info distributions, the working set will assume that the
+        # requirements for that extra are purely optional and skip over them.
+        reqs = self.dist.requires(self.extras)
+        items = working_set.resolve(reqs, env, installer, extras=self.extras)
+        list(map(working_set.add, items))
+
+    pattern = re.compile(
+        r'\s*'
+        r'(?P<name>.+?)\s*'
+        r'=\s*'
+        r'(?P<module>[\w.]+)\s*'
+        r'(:\s*(?P<attr>[\w.]+))?\s*'
+        r'(?P<extras>\[.*\])?\s*$'
+    )
+
+    @classmethod
+    def parse(cls, src, dist=None):
+        """Parse a single entry point from string `src`
+
+        Entry point syntax follows the form::
+
+            name = some.module:some.attr [extra1, extra2]
+
+        The entry name and module name are required, but the ``:attrs`` and
+        ``[extras]`` parts are optional
+        """
+        m = cls.pattern.match(src)
+        if not m:
+            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
+            raise ValueError(msg, src)
+        res = m.groupdict()
+        extras = cls._parse_extras(res['extras'])
+        attrs = res['attr'].split('.') if res['attr'] else ()
+        return cls(res['name'], res['module'], attrs, extras, dist)
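+
+    # A hedged usage sketch of the syntax above (names hypothetical):
+    #
+    #     >>> ep = EntryPoint.parse('serve = myapp.cli:main [ssl]')
+    #     >>> ep.name, ep.module_name, ep.attrs, ep.extras
+    #     ('serve', 'myapp.cli', ('main',), ('ssl',))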
+
+    @classmethod
+    def _parse_extras(cls, extras_spec):
+        if not extras_spec:
+            return ()
+        req = Requirement.parse('x' + extras_spec)
+        if req.specs:
+            raise ValueError()
+        return req.extras
+
+    @classmethod
+    def parse_group(cls, group, lines, dist=None):
+        """Parse an entry point group"""
+        if not MODULE(group):
+            raise ValueError("Invalid group name", group)
+        this = {}
+        for line in yield_lines(lines):
+            ep = cls.parse(line, dist)
+            if ep.name in this:
+                raise ValueError("Duplicate entry point", group, ep.name)
+            this[ep.name] = ep
+        return this
+
+    @classmethod
+    def parse_map(cls, data, dist=None):
+        """Parse a map of entry point groups"""
+        if isinstance(data, dict):
+            data = data.items()
+        else:
+            data = split_sections(data)
+        maps = {}
+        for group, lines in data:
+            if group is None:
+                if not lines:
+                    continue
+                raise ValueError("Entry points must be listed in groups")
+            group = group.strip()
+            if group in maps:
+                raise ValueError("Duplicate group name", group)
+            maps[group] = cls.parse_group(group, lines, dist)
+        return maps
+
+
+def _remove_md5_fragment(location):
+    if not location:
+        return ''
+    parsed = urllib.parse.urlparse(location)
+    if parsed[-1].startswith('md5='):
+        return urllib.parse.urlunparse(parsed[:-1] + ('',))
+    return location
+
+
+def _version_from_file(lines):
+    """
+    Given an iterable of lines from a Metadata file, return
+    the value of the Version field, if present, or None otherwise.
+    """
+    def is_version_line(line):
+        return line.lower().startswith('version:')
+    version_lines = filter(is_version_line, lines)
+    line = next(iter(version_lines), '')
+    _, _, value = line.partition(':')
+    return safe_version(value.strip()) or None
+
+
+class Distribution:
+    """Wrap an actual or potential sys.path entry w/metadata"""
+    PKG_INFO = 'PKG-INFO'
+
+    def __init__(
+            self, location=None, metadata=None, project_name=None,
+            version=None, py_version=PY_MAJOR, platform=None,
+            precedence=EGG_DIST):
+        self.project_name = safe_name(project_name or 'Unknown')
+        if version is not None:
+            self._version = safe_version(version)
+        self.py_version = py_version
+        self.platform = platform
+        self.location = location
+        self.precedence = precedence
+        self._provider = metadata or empty_provider
+
+    @classmethod
+    def from_location(cls, location, basename, metadata=None, **kw):
+        project_name, version, py_version, platform = [None] * 4
+        basename, ext = os.path.splitext(basename)
+        if ext.lower() in _distributionImpl:
+            cls = _distributionImpl[ext.lower()]
+
+            match = EGG_NAME(basename)
+            if match:
+                project_name, version, py_version, platform = match.group(
+                    'name', 'ver', 'pyver', 'plat'
+                )
+        return cls(
+            location, metadata, project_name=project_name, version=version,
+            py_version=py_version, platform=platform, **kw
+        )._reload_version()
+
+    def _reload_version(self):
+        return self
+
+    @property
+    def hashcmp(self):
+        return (
+            self.parsed_version,
+            self.precedence,
+            self.key,
+            _remove_md5_fragment(self.location),
+            self.py_version or '',
+            self.platform or '',
+        )
+
+    def __hash__(self):
+        return hash(self.hashcmp)
+
+    def __lt__(self, other):
+        return self.hashcmp < other.hashcmp
+
+    def __le__(self, other):
+        return self.hashcmp <= other.hashcmp
+
+    def __gt__(self, other):
+        return self.hashcmp > other.hashcmp
+
+    def __ge__(self, other):
+        return self.hashcmp >= other.hashcmp
+
+    def __eq__(self, other):
+        if not isinstance(other, self.__class__):
+            # It's not a Distribution, so they are not equal
+            return False
+        return self.hashcmp == other.hashcmp
+
+    def __ne__(self, other):
+        return not self == other
+
+    # These properties have to be lazy so that we don't have to load any
+    # metadata until/unless it's actually needed.  (i.e., some distributions
+    # may not know their name or version without loading PKG-INFO)
+
+    @property
+    def key(self):
+        try:
+            return self._key
+        except AttributeError:
+            self._key = key = self.project_name.lower()
+            return key
+
+    @property
+    def parsed_version(self):
+        if not hasattr(self, "_parsed_version"):
+            self._parsed_version = parse_version(self.version)
+
+        return self._parsed_version
+
+    def _warn_legacy_version(self):
+        LV = packaging.version.LegacyVersion
+        is_legacy = isinstance(self._parsed_version, LV)
+        if not is_legacy:
+            return
+
+        # An empty version is technically a legacy (non-PEP 440)
+        # version, but it is unlikely to have been supplied
+        # deliberately; more likely it comes from setuptools
+        # attempting to parse a filename. So gate this warning on
+        # the version being non-empty.
+        if not self.version:
+            return
+
+        tmpl = textwrap.dedent("""
+            '{project_name} ({version})' is being parsed as a legacy,
+            non-PEP 440 version. You may find odd behavior and sort
+            order. In particular it will be sorted as less than 0.0.
+            It is recommended to migrate to PEP 440 compatible
+            versions.
+            """).strip().replace('\n', ' ')
+
+        warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
+
+    @property
+    def version(self):
+        try:
+            return self._version
+        except AttributeError:
+            version = self._get_version()
+            if version is None:
+                path = self._get_metadata_path_for_display(self.PKG_INFO)
+                msg = (
+                    "Missing 'Version:' header and/or {} file at path: {}"
+                ).format(self.PKG_INFO, path)
+                raise ValueError(msg, self)
+
+            return version
+
+    @property
+    def _dep_map(self):
+        """
+        A map of extra to its list of (direct) requirements
+        for this distribution, including the null extra.
+        """
+        try:
+            return self.__dep_map
+        except AttributeError:
+            self.__dep_map = self._filter_extras(self._build_dep_map())
+        return self.__dep_map
+
+    @staticmethod
+    def _filter_extras(dm):
+        """
+        Given a mapping of extras to dependencies, strip off
+        environment markers and filter out any dependencies
+        not matching the markers.
+        """
+        for extra in list(filter(None, dm)):
+            new_extra = extra
+            reqs = dm.pop(extra)
+            new_extra, _, marker = extra.partition(':')
+            fails_marker = marker and (
+                invalid_marker(marker)
+                or not evaluate_marker(marker)
+            )
+            if fails_marker:
+                reqs = []
+            new_extra = safe_extra(new_extra) or None
+
+            dm.setdefault(new_extra, []).extend(reqs)
+        return dm
+
+    def _build_dep_map(self):
+        dm = {}
+        for name in 'requires.txt', 'depends.txt':
+            for extra, reqs in split_sections(self._get_metadata(name)):
+                dm.setdefault(extra, []).extend(parse_requirements(reqs))
+        return dm
+
+    def requires(self, extras=()):
+        """List of Requirements needed for this distro if `extras` are used"""
+        dm = self._dep_map
+        deps = []
+        deps.extend(dm.get(None, ()))
+        for ext in extras:
+            try:
+                deps.extend(dm[safe_extra(ext)])
+            except KeyError:
+                raise UnknownExtra(
+                    "%s has no such extra feature %r" % (self, ext)
+                )
+        return deps
+
+    def _get_metadata_path_for_display(self, name):
+        """
+        Return the path to the given metadata file, if available.
+        """
+        try:
+            # We need to access _get_metadata_path() on the provider object
+            # directly rather than through this class's __getattr__()
+            # since _get_metadata_path() is marked private.
+            path = self._provider._get_metadata_path(name)
+
+        # Handle exceptions e.g. in case the distribution's metadata
+        # provider doesn't support _get_metadata_path().
+        except Exception:
+            return '[could not detect]'
+
+        return path
+
+    def _get_metadata(self, name):
+        if self.has_metadata(name):
+            for line in self.get_metadata_lines(name):
+                yield line
+
+    def _get_version(self):
+        lines = self._get_metadata(self.PKG_INFO)
+        version = _version_from_file(lines)
+
+        return version
+
+    def activate(self, path=None, replace=False):
+        """Ensure distribution is importable on `path` (default=sys.path)"""
+        if path is None:
+            path = sys.path
+        self.insert_on(path, replace=replace)
+        if path is sys.path:
+            fixup_namespace_packages(self.location)
+            for pkg in self._get_metadata('namespace_packages.txt'):
+                if pkg in sys.modules:
+                    declare_namespace(pkg)
+
+    def egg_name(self):
+        """Return what this distribution's standard .egg filename should be"""
+        filename = "%s-%s-py%s" % (
+            to_filename(self.project_name), to_filename(self.version),
+            self.py_version or PY_MAJOR
+        )
+
+        if self.platform:
+            filename += '-' + self.platform
+        return filename
+
+    def __repr__(self):
+        if self.location:
+            return "%s (%s)" % (self, self.location)
+        else:
+            return str(self)
+
+    def __str__(self):
+        try:
+            version = getattr(self, 'version', None)
+        except ValueError:
+            version = None
+        version = version or "[unknown version]"
+        return "%s %s" % (self.project_name, version)
+
+    def __getattr__(self, attr):
+        """Delegate all unrecognized public attributes to .metadata provider"""
+        if attr.startswith('_'):
+            raise AttributeError(attr)
+        return getattr(self._provider, attr)
+
+    def __dir__(self):
+        return list(
+            set(super(Distribution, self).__dir__())
+            | set(
+                attr for attr in self._provider.__dir__()
+                if not attr.startswith('_')
+            )
+        )
+
+    if not hasattr(object, '__dir__'):
+        # Python 2.7: object has no __dir__, so the override above
+        # cannot delegate to it; remove it and use default behavior
+        del __dir__
+
+    @classmethod
+    def from_filename(cls, filename, metadata=None, **kw):
+        return cls.from_location(
+            _normalize_cached(filename), os.path.basename(filename), metadata,
+            **kw
+        )
+
+    def as_requirement(self):
+        """Return a ``Requirement`` that matches this distribution exactly"""
+        if isinstance(self.parsed_version, packaging.version.Version):
+            spec = "%s==%s" % (self.project_name, self.parsed_version)
+        else:
+            spec = "%s===%s" % (self.project_name, self.parsed_version)
+
+        return Requirement.parse(spec)
+
+    def load_entry_point(self, group, name):
+        """Return the `name` entry point of `group` or raise ImportError"""
+        ep = self.get_entry_info(group, name)
+        if ep is None:
+            raise ImportError("Entry point %r not found" % ((group, name),))
+        return ep.load()
+
+    def get_entry_map(self, group=None):
+        """Return the entry point map for `group`, or the full entry map"""
+        try:
+            ep_map = self._ep_map
+        except AttributeError:
+            ep_map = self._ep_map = EntryPoint.parse_map(
+                self._get_metadata('entry_points.txt'), self
+            )
+        if group is not None:
+            return ep_map.get(group, {})
+        return ep_map
+
+    def get_entry_info(self, group, name):
+        """Return the EntryPoint object for `group`+`name`, or ``None``"""
+        return self.get_entry_map(group).get(name)
+
+    def insert_on(self, path, loc=None, replace=False):
+        """Ensure self.location is on path
+
+        If replace=False (default):
+            - If location is already in path anywhere, do nothing.
+            - Else:
+              - If it's an egg and its parent directory is on path,
+                insert just ahead of the parent.
+              - Else: add to the end of path.
+        If replace=True:
+            - If location is already on path anywhere (not eggs)
+              or higher priority than its parent (eggs)
+              do nothing.
+            - Else:
+              - If it's an egg and its parent directory is on path,
+                insert just ahead of the parent,
+                removing any lower-priority entries.
+              - Else: add it to the front of path.
+        """
+
+        loc = loc or self.location
+        if not loc:
+            return
+
+        nloc = _normalize_cached(loc)
+        bdir = os.path.dirname(nloc)
+        npath = [(p and _normalize_cached(p) or p) for p in path]
+
+        for p, item in enumerate(npath):
+            if item == nloc:
+                if replace:
+                    break
+                else:
+                    # don't modify path (even removing duplicates) if
+                    # found and not replace
+                    return
+            elif item == bdir and self.precedence == EGG_DIST:
+                # if it's an .egg, give it precedence over its directory
+                # UNLESS it's already been added to sys.path and replace=False
+                if (not replace) and nloc in npath[p:]:
+                    return
+                if path is sys.path:
+                    self.check_version_conflict()
+                path.insert(p, loc)
+                npath.insert(p, nloc)
+                break
+        else:
+            if path is sys.path:
+                self.check_version_conflict()
+            if replace:
+                path.insert(0, loc)
+            else:
+                path.append(loc)
+            return
+
+        # p is the spot where we found or inserted loc; now remove duplicates
+        while True:
+            try:
+                np = npath.index(nloc, p + 1)
+            except ValueError:
+                break
+            else:
+                del npath[np], path[np]
+                # entries after np shift down after the deletion;
+                # resume the duplicate scan from the removed slot
+                p = np
+
+        return
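+
+    # Behavior sketch for the replace=False case (paths hypothetical):
+    #
+    #     path = ['/site-packages']
+    #     # an EGG_DIST at /site-packages/foo.egg is inserted *ahead*
+    #     # of its parent directory:
+    #     dist.insert_on(path)
+    #     # path -> ['/site-packages/foo.egg', '/site-packages']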
+
+    def check_version_conflict(self):
+        if self.key == 'setuptools':
+            # ignore the inevitable setuptools self-conflicts  :(
+            return
+
+        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
+        loc = normalize_path(self.location)
+        for modname in self._get_metadata('top_level.txt'):
+            if (modname not in sys.modules or modname in nsp
+                    or modname in _namespace_packages):
+                continue
+            if modname in ('pkg_resources', 'setuptools', 'site'):
+                continue
+            fn = getattr(sys.modules[modname], '__file__', None)
+            if fn and (normalize_path(fn).startswith(loc) or
+                       fn.startswith(self.location)):
+                continue
+            issue_warning(
+                "Module %s was already imported from %s, but %s is being added"
+                " to sys.path" % (modname, fn, self.location),
+            )
+
+    def has_version(self):
+        try:
+            self.version
+        except ValueError:
+            issue_warning("Unbuilt egg for " + repr(self))
+            return False
+        return True
+
+    def clone(self, **kw):
+        """Copy this distribution, substituting in any changed keyword args"""
+        names = 'project_name version py_version platform location precedence'
+        for attr in names.split():
+            kw.setdefault(attr, getattr(self, attr, None))
+        kw.setdefault('metadata', self._provider)
+        return self.__class__(**kw)
+
+    @property
+    def extras(self):
+        return [dep for dep in self._dep_map if dep]
+
+
+class EggInfoDistribution(Distribution):
+    def _reload_version(self):
+        """
+        Packages installed by distutils (e.g. numpy or scipy) use an
+        older safe_version, so their version numbers can get mangled
+        when converted to filenames (e.g., 1.11.0.dev0+2329eae to
+        1.11.0.dev0_2329eae). Such distributions will not be parsed
+        properly downstream by Distribution and safe_version, so take
+        the extra step of trying to get the version number from the
+        metadata file itself instead of the filename.
+        """
+        md_version = self._get_version()
+        if md_version:
+            self._version = md_version
+        return self
+
+
+class DistInfoDistribution(Distribution):
+    """
+    Wrap an actual or potential sys.path entry
+    w/metadata, .dist-info style.
+    """
+    PKG_INFO = 'METADATA'
+    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
+
+    @property
+    def _parsed_pkg_info(self):
+        """Parse and cache metadata"""
+        try:
+            return self._pkg_info
+        except AttributeError:
+            metadata = self.get_metadata(self.PKG_INFO)
+            self._pkg_info = email.parser.Parser().parsestr(metadata)
+            return self._pkg_info
+
+    @property
+    def _dep_map(self):
+        try:
+            return self.__dep_map
+        except AttributeError:
+            self.__dep_map = self._compute_dependencies()
+            return self.__dep_map
+
+    def _compute_dependencies(self):
+        """Recompute this distribution's dependencies."""
+        dm = self.__dep_map = {None: []}
+
+        reqs = []
+        # Including any condition expressions
+        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
+            reqs.extend(parse_requirements(req))
+
+        def reqs_for_extra(extra):
+            for req in reqs:
+                if not req.marker or req.marker.evaluate({'extra': extra}):
+                    yield req
+
+        common = frozenset(reqs_for_extra(None))
+        dm[None].extend(common)
+
+        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
+            s_extra = safe_extra(extra.strip())
+            dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
+
+        return dm
+
+
+_distributionImpl = {
+    '.egg': Distribution,
+    '.egg-info': EggInfoDistribution,
+    '.dist-info': DistInfoDistribution,
+}
+
+
+def issue_warning(*args, **kw):
+    level = 1
+    g = globals()
+    try:
+        # find the first stack frame that is *not* code in
+        # the pkg_resources module, to use for the warning
+        while sys._getframe(level).f_globals is g:
+            level += 1
+    except ValueError:
+        pass
+    warnings.warn(stacklevel=level + 1, *args, **kw)
+
+
+class RequirementParseError(ValueError):
+    def __str__(self):
+        return ' '.join(self.args)
+
+
+def parse_requirements(strs):
+    """Yield ``Requirement`` objects for each specification in `strs`
+
+    `strs` must be a string, or a (possibly-nested) iterable thereof.
+    """
+    # create a steppable iterator, so we can handle \-continuations
+    lines = iter(yield_lines(strs))
+
+    for line in lines:
+        # Drop comments -- a hash without a space may be in a URL.
+        if ' #' in line:
+            line = line[:line.find(' #')]
+        # If there is a line continuation, drop it, and append the next line.
+        if line.endswith('\\'):
+            # drop just the trailing backslash; strip() also removes
+            # any whitespace that preceded it
+            line = line[:-1].strip()
+            try:
+                line += next(lines)
+            except StopIteration:
+                return
+        yield Requirement(line)
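+
+# Usage sketch (requirement strings are hypothetical):
+#
+#     >>> [str(r) for r in parse_requirements("foo>=1.0\nbar")]
+#     ['foo>=1.0', 'bar']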
+
+
+class Requirement(packaging.requirements.Requirement):
+    def __init__(self, requirement_string):
+        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
+        try:
+            super(Requirement, self).__init__(requirement_string)
+        except packaging.requirements.InvalidRequirement as e:
+            raise RequirementParseError(str(e))
+        self.unsafe_name = self.name
+        project_name = safe_name(self.name)
+        self.project_name, self.key = project_name, project_name.lower()
+        self.specs = [
+            (spec.operator, spec.version) for spec in self.specifier]
+        self.extras = tuple(map(safe_extra, self.extras))
+        self.hashCmp = (
+            self.key,
+            self.specifier,
+            frozenset(self.extras),
+            str(self.marker) if self.marker else None,
+        )
+        self.__hash = hash(self.hashCmp)
+
+    def __eq__(self, other):
+        return (
+            isinstance(other, Requirement) and
+            self.hashCmp == other.hashCmp
+        )
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __contains__(self, item):
+        if isinstance(item, Distribution):
+            if item.key != self.key:
+                return False
+
+            item = item.version
+
+        # Allow prereleases always in order to match the previous behavior of
+        # this method. In the future this should be smarter and follow PEP 440
+        # more accurately.
+        return self.specifier.contains(item, prereleases=True)
+
+    def __hash__(self):
+        return self.__hash
+
+    def __repr__(self):
+        return "Requirement.parse(%r)" % str(self)
+
+    @staticmethod
+    def parse(s):
+        req, = parse_requirements(s)
+        return req
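+
+    # Containment sketch for __contains__ above (names hypothetical):
+    #
+    #     >>> req = Requirement.parse('foo>=1.0')
+    #     >>> Distribution(project_name='foo', version='1.2') in req
+    #     True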
+
+
+def _always_object(classes):
+    """
+    Ensure object appears in the mro even
+    for old-style classes.
+    """
+    if object not in classes:
+        return classes + (object,)
+    return classes
+
+
+def _find_adapter(registry, ob):
+    """Return an adapter factory for `ob` from `registry`"""
+    types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
+    for t in types:
+        if t in registry:
+            return registry[t]
+
+
+def ensure_directory(path):
+    """Ensure that the parent directory of `path` exists"""
+    dirname = os.path.dirname(path)
+    py31compat.makedirs(dirname, exist_ok=True)
+
+
+def _bypass_ensure_directory(path):
+    """Sandbox-bypassing version of ensure_directory()"""
+    if not WRITE_SUPPORT:
+        raise IOError('"os.mkdir" not supported on this platform.')
+    dirname, filename = split(path)
+    if dirname and filename and not isdir(dirname):
+        _bypass_ensure_directory(dirname)
+        try:
+            mkdir(dirname, 0o755)
+        except FileExistsError:
+            pass
+
+
+def split_sections(s):
+    """Split a string or iterable thereof into (section, content) pairs
+
+    Each ``section`` is a stripped version of the section header ("[section]")
+    and each ``content`` is a list of stripped lines excluding blank lines and
+    comment-only lines.  If there are any such lines before the first section
+    header, they're returned in a first ``section`` of ``None``.
+    """
+    section = None
+    content = []
+    for line in yield_lines(s):
+        if line.startswith("["):
+            if line.endswith("]"):
+                if section or content:
+                    yield section, content
+                section = line[1:-1].strip()
+                content = []
+            else:
+                raise ValueError("Invalid section heading", line)
+        else:
+            content.append(line)
+
+    # wrap up last segment
+    yield section, content
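+
+# Sketch of the sectioning behavior (input is illustrative):
+#
+#     >>> list(split_sections('top\n[extras]\ndep1\ndep2'))
+#     [(None, ['top']), ('extras', ['dep1', 'dep2'])]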
+
+
+def _mkstemp(*args, **kw):
+    old_open = os.open
+    try:
+        # temporarily bypass sandboxing
+        os.open = os_open
+        return tempfile.mkstemp(*args, **kw)
+    finally:
+        # and then put it back
+        os.open = old_open
+
+
+# Silence the PEP440Warning by default, so that end users don't get hit by it
+# randomly just because they use pkg_resources. We want to append the rule
+# because we want earlier uses of filterwarnings to take precedence over this
+# one.
+warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
+
+
+# from jaraco.functools 1.3
+def _call_aside(f, *args, **kwargs):
+    f(*args, **kwargs)
+    return f
+
+
+@_call_aside
+def _initialize(g=globals()):
+    "Set up global resource manager (deliberately not state-saved)"
+    manager = ResourceManager()
+    g['_manager'] = manager
+    g.update(
+        (name, getattr(manager, name))
+        for name in dir(manager)
+        if not name.startswith('_')
+    )
+
+
+@_call_aside
+def _initialize_master_working_set():
+    """
+    Prepare the master working set and make the ``require()``
+    API available.
+
+    This function has explicit effects on the global state
+    of pkg_resources. It is intended to be invoked once at
+    the initialization of this module.
+
+    Invocation by other packages is unsupported and done
+    at their own risk.
+    """
+    working_set = WorkingSet._build_master()
+    _declare_state('object', working_set=working_set)
+
+    require = working_set.require
+    iter_entry_points = working_set.iter_entry_points
+    add_activation_listener = working_set.subscribe
+    run_script = working_set.run_script
+    # backward compatibility
+    run_main = run_script
+    # Activate all distributions already on sys.path with replace=False and
+    # ensure that all distributions added to the working set in the future
+    # (e.g. by calling ``require()``) will get activated as well,
+    # with higher priority (replace=True).
+    tuple(
+        dist.activate(replace=False)
+        for dist in working_set
+    )
+    add_activation_listener(
+        lambda dist: dist.activate(replace=True),
+        existing=False,
+    )
+    working_set.entries = []
+    # re-add entries so their order matches sys.path
+    list(map(working_set.add_entry, sys.path))
+    globals().update(locals())
+
+class PkgResourcesDeprecationWarning(Warning):
+    """
+    Base class for warning about deprecations in ``pkg_resources``
+
+    This class is not derived from ``DeprecationWarning``, and as such is
+    visible by default.
+    """
diff --git a/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..787688e5122fae26d2bdac8d57a32ec1073b5ea7
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__pycache__/py31compat.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__pycache__/py31compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b49a94f10fa614ef1bc189f2cc66783f1e0758de
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pkg_resources/__pycache__/py31compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pkg_resources/py31compat.py b/lib/python3.7/site-packages/pip/_vendor/pkg_resources/py31compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2d3007ceb16b0eeb4b1f57361c089558a25daeb
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pkg_resources/py31compat.py
@@ -0,0 +1,23 @@
+import os
+import errno
+import sys
+
+from pip._vendor import six
+
+
+def _makedirs_31(path, exist_ok=False):
+    try:
+        os.makedirs(path)
+    except OSError as exc:
+        if not exist_ok or exc.errno != errno.EEXIST:
+            raise
+
+
+# rely on compatibility behavior until mode considerations
+#  and exists_ok considerations are disentangled.
+# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663
+needs_makedirs = (
+    six.PY2 or
+    (3, 4) <= sys.version_info < (3, 4, 1)
+)
+makedirs = _makedirs_31 if needs_makedirs else os.makedirs
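+
+# Usage sketch: callers treat this like os.makedirs with exist_ok
+# (the path below is hypothetical):
+#
+#     makedirs('/tmp/pkg/cache', exist_ok=True)  # no error if present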
diff --git a/lib/python3.7/site-packages/pip/_vendor/progress/__init__.py b/lib/python3.7/site-packages/pip/_vendor/progress/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e434c257fefd4eda43f5daf5b8dcf7d4cf9da30b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/progress/__init__.py
@@ -0,0 +1,177 @@
+# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import division, print_function
+
+from collections import deque
+from datetime import timedelta
+from math import ceil
+from sys import stderr
+try:
+    from time import monotonic
+except ImportError:
+    from time import time as monotonic
+
+
+__version__ = '1.5'
+
+HIDE_CURSOR = '\x1b[?25l'
+SHOW_CURSOR = '\x1b[?25h'
+
+
+class Infinite(object):
+    file = stderr
+    sma_window = 10         # Simple Moving Average window
+    check_tty = True
+    hide_cursor = True
+
+    def __init__(self, message='', **kwargs):
+        self.index = 0
+        self.start_ts = monotonic()
+        self.avg = 0
+        self._avg_update_ts = self.start_ts
+        self._ts = self.start_ts
+        self._xput = deque(maxlen=self.sma_window)
+        for key, val in kwargs.items():
+            setattr(self, key, val)
+
+        self._width = 0
+        self.message = message
+
+        if self.file and self.is_tty():
+            if self.hide_cursor:
+                print(HIDE_CURSOR, end='', file=self.file)
+            print(self.message, end='', file=self.file)
+            self.file.flush()
+
+    def __getitem__(self, key):
+        if key.startswith('_'):
+            return None
+        return getattr(self, key, None)
+
+    @property
+    def elapsed(self):
+        return int(monotonic() - self.start_ts)
+
+    @property
+    def elapsed_td(self):
+        return timedelta(seconds=self.elapsed)
+
+    def update_avg(self, n, dt):
+        if n > 0:
+            xput_len = len(self._xput)
+            self._xput.append(dt / n)
+            now = monotonic()
+            # update when we're still filling _xput, then after every second
+            if (xput_len < self.sma_window or
+                    now - self._avg_update_ts > 1):
+                self.avg = sum(self._xput) / len(self._xput)
+                self._avg_update_ts = now
+
+    def update(self):
+        pass
+
+    def start(self):
+        pass
+
+    def clearln(self):
+        if self.file and self.is_tty():
+            print('\r\x1b[K', end='', file=self.file)
+
+    def write(self, s):
+        if self.file and self.is_tty():
+            line = self.message + s.ljust(self._width)
+            print('\r' + line, end='', file=self.file)
+            self._width = max(self._width, len(s))
+            self.file.flush()
+
+    def writeln(self, line):
+        if self.file and self.is_tty():
+            self.clearln()
+            print(line, end='', file=self.file)
+            self.file.flush()
+
+    def finish(self):
+        if self.file and self.is_tty():
+            print(file=self.file)
+            if self.hide_cursor:
+                print(SHOW_CURSOR, end='', file=self.file)
+
+    def is_tty(self):
+        return self.file.isatty() if self.check_tty else True
+
+    def next(self, n=1):
+        now = monotonic()
+        dt = now - self._ts
+        self.update_avg(n, dt)
+        self._ts = now
+        self.index = self.index + n
+        self.update()
+
+    def iter(self, it):
+        with self:
+            for x in it:
+                yield x
+                self.next()
+
+    def __enter__(self):
+        self.start()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.finish()
+
+
+class Progress(Infinite):
+    def __init__(self, *args, **kwargs):
+        super(Progress, self).__init__(*args, **kwargs)
+        self.max = kwargs.get('max', 100)
+
+    @property
+    def eta(self):
+        return int(ceil(self.avg * self.remaining))
+
+    @property
+    def eta_td(self):
+        return timedelta(seconds=self.eta)
+
+    @property
+    def percent(self):
+        return self.progress * 100
+
+    @property
+    def progress(self):
+        return min(1, self.index / self.max)
+
+    @property
+    def remaining(self):
+        return max(self.max - self.index, 0)
+
+    def start(self):
+        self.update()
+
+    def goto(self, index):
+        incr = index - self.index
+        self.next(incr)
+
+    def iter(self, it):
+        try:
+            self.max = len(it)
+        except TypeError:
+            pass
+
+        with self:
+            for x in it:
+                yield x
+                self.next()
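+
+# Usage sketch: concrete renderers such as Bar (see progress/bar.py in
+# this diff) subclass Progress and override update(); the iteration
+# API is shared:
+#
+#     for item in Bar('Processing ').iter(range(10)):
+#         pass  # each yield advances the bar by one step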
diff --git a/lib/python3.7/site-packages/pip/_vendor/progress/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/progress/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bdf7fb1f968da47791f2962309908117b637df75
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/progress/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/progress/__pycache__/bar.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/progress/__pycache__/bar.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34a5f9600bee55887222b81769c94b76f6bab15a
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/progress/__pycache__/bar.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/progress/__pycache__/counter.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/progress/__pycache__/counter.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..122dba7716457b853e364a875bb49673f420aa66
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/progress/__pycache__/counter.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/progress/__pycache__/spinner.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/progress/__pycache__/spinner.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2eeee0622d99f465ecdce286bb470e5e55e73d18
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/progress/__pycache__/spinner.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/progress/bar.py b/lib/python3.7/site-packages/pip/_vendor/progress/bar.py
new file mode 100644
index 0000000000000000000000000000000000000000..8819efda65b9f6bf1315ad15bedc5a3857bf67fc
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/progress/bar.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import unicode_literals
+
+import sys
+
+from . import Progress
+
+
+class Bar(Progress):
+    width = 32
+    suffix = '%(index)d/%(max)d'
+    bar_prefix = ' |'
+    bar_suffix = '| '
+    empty_fill = ' '
+    fill = '#'
+
+    def update(self):
+        filled_length = int(self.width * self.progress)
+        empty_length = self.width - filled_length
+
+        message = self.message % self
+        bar = self.fill * filled_length
+        empty = self.empty_fill * empty_length
+        suffix = self.suffix % self
+        line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix,
+                        suffix])
+        self.writeln(line)
+
+
+class ChargingBar(Bar):
+    suffix = '%(percent)d%%'
+    bar_prefix = ' '
+    bar_suffix = ' '
+    empty_fill = '∙'
+    fill = '█'
+
+
+class FillingSquaresBar(ChargingBar):
+    empty_fill = '▢'
+    fill = '▣'
+
+
+class FillingCirclesBar(ChargingBar):
+    empty_fill = '◯'
+    fill = '◉'
+
+
+class IncrementalBar(Bar):
+    if sys.platform.startswith('win'):
+        phases = (u' ', u'▌', u'█')
+    else:
+        phases = (' ', '▏', '▎', '▍', '▌', '▋', '▊', '▉', '█')
+
+    def update(self):
+        nphases = len(self.phases)
+        filled_len = self.width * self.progress
+        nfull = int(filled_len)                      # Number of full chars
+        phase = int((filled_len - nfull) * nphases)  # Phase of last char
+        nempty = self.width - nfull                  # Number of empty chars
+
+        message = self.message % self
+        bar = self.phases[-1] * nfull
+        current = self.phases[phase] if phase > 0 else ''
+        empty = self.empty_fill * max(0, nempty - len(current))
+        suffix = self.suffix % self
+        line = ''.join([message, self.bar_prefix, bar, current, empty,
+                        self.bar_suffix, suffix])
+        self.writeln(line)
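+
+    # Worked example of the phase arithmetic above (assuming the
+    # nine-phase non-Windows set, width=32, progress=0.37):
+    #   filled_len = 32 * 0.37 = 11.84 -> nfull = 11 full cells
+    #   phase = int(0.84 * 9) = 7 -> one '▉' partial cell
+    #   nempty = 32 - 11 = 21, minus the partial cell -> 20 blanks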
+
+
+class PixelBar(IncrementalBar):
+    phases = ('⡀', '⡄', '⡆', '⡇', '⣇', '⣧', '⣷', '⣿')
+
+
+class ShadyBar(IncrementalBar):
+    phases = (' ', '░', '▒', '▓', '█')
diff --git a/lib/python3.7/site-packages/pip/_vendor/progress/counter.py b/lib/python3.7/site-packages/pip/_vendor/progress/counter.py
new file mode 100644
index 0000000000000000000000000000000000000000..d955ca4771bb2cd78a8e9334a6917f9f6cce59b1
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/progress/counter.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import unicode_literals
+from . import Infinite, Progress
+
+
+class Counter(Infinite):
+    def update(self):
+        self.write(str(self.index))
+
+
+class Countdown(Progress):
+    def update(self):
+        self.write(str(self.remaining))
+
+
+class Stack(Progress):
+    phases = (' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█')
+
+    def update(self):
+        nphases = len(self.phases)
+        i = min(nphases - 1, int(self.progress * nphases))
+        self.write(self.phases[i])
+
+
+class Pie(Stack):
+    phases = ('○', '◔', '◑', '◕', '●')
diff --git a/lib/python3.7/site-packages/pip/_vendor/progress/spinner.py b/lib/python3.7/site-packages/pip/_vendor/progress/spinner.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e100cabb9b8d7113111d1b09a7b84da2895e2a0
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/progress/spinner.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import unicode_literals
+from . import Infinite
+
+
+class Spinner(Infinite):
+    phases = ('-', '\\', '|', '/')
+    hide_cursor = True
+
+    def update(self):
+        i = self.index % len(self.phases)
+        self.write(self.phases[i])
+
+
+class PieSpinner(Spinner):
+    phases = ['◷', '◶', '◵', '◴']
+
+
+class MoonSpinner(Spinner):
+    phases = ['◑', '◒', '◐', '◓']
+
+
+class LineSpinner(Spinner):
+    phases = ['⎺', '⎻', '⎼', '⎽', '⎼', '⎻']
+
+
+class PixelSpinner(Spinner):
+    phases = ['⣾', '⣷', '⣯', '⣟', '⡿', '⢿', '⣻', '⣽']
diff --git a/lib/python3.7/site-packages/pip/_vendor/pyparsing.py b/lib/python3.7/site-packages/pip/_vendor/pyparsing.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d6a01d5f371a9aaace413d4796c4fed9a9baccf
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pyparsing.py
@@ -0,0 +1,6493 @@
+#-*- coding: utf-8 -*-
+# module pyparsing.py
+#
+# Copyright (c) 2003-2019  Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = \
+"""
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and
+executing simple grammars, vs. the traditional lex/yacc approach, or the
+use of regular expressions.  With pyparsing, you don't need to learn
+a new syntax for defining grammars or matching expressions - the parsing
+module provides a library of classes that you use to construct the
+grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form
+``"<salutation>, <addressee>!"``), built up using :class:`Word`,
+:class:`Literal`, and :class:`And` elements
+(the :class:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
+and the strings are auto-converted to :class:`Literal` expressions)::
+
+    from pip._vendor.pyparsing import Word, alphas
+
+    # define grammar of a greeting
+    greet = Word(alphas) + "," + Word(alphas) + "!"
+
+    hello = "Hello, World!"
+    print (hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+    Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the
+self-explanatory class names, and the use of '+', '|' and '^' operators.
+
+The :class:`ParseResults` object returned from
+:class:`ParserElement.parseString` can be
+accessed as a nested list, a dictionary, or an object with named
+attributes.
+
+The pyparsing module handles some of the problems that are typically
+vexing when writing text parsers:
+
+  - extra or missing whitespace (the above program will also handle
+    "Hello,World!", "Hello  ,  World  !", etc.)
+  - quoted strings
+  - embedded comments
+
+
+Getting Started
+---------------
+Visit the classes :class:`ParserElement` and :class:`ParseResults` to
+see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+
+ - construct literal match expressions from :class:`Literal` and
+   :class:`CaselessLiteral` classes
+ - construct character word-group expressions using the :class:`Word`
+   class
+ - see how to create repetitive expressions using :class:`ZeroOrMore`
+   and :class:`OneOrMore` classes
+ - use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
+   and :class:`'&'<Each>` operators to combine simple expressions into
+   more complex ones
+ - associate names with your parsed results using
+   :class:`ParserElement.setResultsName`
+ - find some helpful expression short-cuts like :class:`delimitedList`
+   and :class:`oneOf`
+ - find more useful common expressions in the :class:`pyparsing_common`
+   namespace class
+"""
+
+__version__ = "2.4.0"
+__versionTime__ = "07 Apr 2019 18:28 UTC"
+__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
+
+import string
+from weakref import ref as wkref
+import copy
+import sys
+import warnings
+import re
+import sre_constants
+import collections
+import pprint
+import traceback
+import types
+from datetime import datetime
+
+try:
+    # Python 3
+    from itertools import filterfalse
+except ImportError:
+    from itertools import ifilterfalse as filterfalse
+
+try:
+    from _thread import RLock
+except ImportError:
+    from threading import RLock
+
+try:
+    # Python 3
+    from collections.abc import Iterable
+    from collections.abc import MutableMapping
+except ImportError:
+    # Python 2.7
+    from collections import Iterable
+    from collections import MutableMapping
+
+try:
+    from collections import OrderedDict as _OrderedDict
+except ImportError:
+    try:
+        from ordereddict import OrderedDict as _OrderedDict
+    except ImportError:
+        _OrderedDict = None
+
+try:
+    from types import SimpleNamespace
+except ImportError:
+    class SimpleNamespace: pass
+
+# version compatibility configuration
+__compat__ = SimpleNamespace()
+__compat__.__doc__ = """
+    A cross-version compatibility configuration for pyparsing features that will be 
+    released in a future version. By setting values in this configuration to True, 
+    those features can be enabled in prior versions for compatibility development 
+    and testing.
+    
+     - collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping
+       of results names when an And expression is nested within an Or or MatchFirst; set to 
+       True to enable bugfix to be released in pyparsing 2.4
+"""
+__compat__.collect_all_And_tokens = True
+
+
+#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
+
+__all__ = [ '__version__', '__versionTime__', '__author__', '__compat__',
+'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
+'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
+'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
+'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
+'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
+'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
+'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char',
+'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
+'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
+'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
+'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
+'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
+'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
+'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
+'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
+'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
+'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
+'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set',
+]
+
+system_version = tuple(sys.version_info)[:3]
+PY_3 = system_version[0] == 3
+if PY_3:
+    _MAX_INT = sys.maxsize
+    basestring = str
+    unichr = chr
+    unicode = str
+    _ustr = str
+
+    # build list of single arg builtins, that can be used as parse actions
+    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
+
+else:
+    _MAX_INT = sys.maxint
+    range = xrange
+
+    def _ustr(obj):
+        """Drop-in replacement for str(obj) that tries to be Unicode
+        friendly. It first tries str(obj). If that fails with
+        a UnicodeEncodeError, then it tries unicode(obj). It then
+        encodes the result with the default encoding, replacing any
+        unencodable characters with backslash-u escape sequences.
+        """
+        if isinstance(obj,unicode):
+            return obj
+
+        try:
+            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
+            # it won't break any existing code.
+            return str(obj)
+
+        except UnicodeEncodeError:
+            # Else encode it
+            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
+            xmlcharref = Regex(r'&#\d+;')
+            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
+            return xmlcharref.transformString(ret)
+
+    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
+    singleArgBuiltins = []
+    import __builtin__
+    for fname in "sum len sorted reversed list tuple set any all min max".split():
+        try:
+            singleArgBuiltins.append(getattr(__builtin__,fname))
+        except AttributeError:
+            continue
+
+_generatorType = type((y for y in range(1)))
+
+def _xml_escape(data):
+    """Escape &, <, >, ", ', etc. in a string of data."""
+
+    # ampersand must be replaced first
+    from_symbols = '&><"\''
+    to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
+    for from_,to_ in zip(from_symbols, to_symbols):
+        data = data.replace(from_, to_)
+    return data
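+
+# Illustrative example (the ampersand is escaped first so the entity
+# replacements themselves are not re-escaped):
+#   _xml_escape('a < b & "c"')   # -> 'a &lt; b &amp; &quot;c&quot;'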
+
+alphas     = string.ascii_uppercase + string.ascii_lowercase
+nums       = "0123456789"
+hexnums    = nums + "ABCDEFabcdef"
+alphanums  = alphas + nums
+_bslash    = chr(92)
+printables = "".join(c for c in string.printable if c not in string.whitespace)
+
+class ParseBaseException(Exception):
+    """base exception class for all parsing runtime exceptions"""
+    # Performance tuning: we construct a *lot* of these, so keep this
+    # constructor as small and fast as possible
+    def __init__( self, pstr, loc=0, msg=None, elem=None ):
+        self.loc = loc
+        if msg is None:
+            self.msg = pstr
+            self.pstr = ""
+        else:
+            self.msg = msg
+            self.pstr = pstr
+        self.parserElement = elem
+        self.args = (pstr, loc, msg)
+
+    @classmethod
+    def _from_exception(cls, pe):
+        """
+        internal factory method to simplify creating one type of ParseException
+        from another - avoids having __init__ signature conflicts among subclasses
+        """
+        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
+
+    def __getattr__( self, aname ):
+        """supported attributes by name are:
+           - lineno - returns the line number of the exception text
+           - col - returns the column number of the exception text
+           - line - returns the line containing the exception text
+        """
+        if( aname == "lineno" ):
+            return lineno( self.loc, self.pstr )
+        elif( aname in ("col", "column") ):
+            return col( self.loc, self.pstr )
+        elif( aname == "line" ):
+            return line( self.loc, self.pstr )
+        else:
+            raise AttributeError(aname)
+
+    def __str__( self ):
+        return "%s (at char %d), (line:%d, col:%d)" % \
+                ( self.msg, self.loc, self.lineno, self.column )
+    def __repr__( self ):
+        return _ustr(self)
+    def markInputline( self, markerString = ">!<" ):
+        """Extracts the exception line from the input string, and marks
+           the location of the exception with a special symbol.
+        """
+        line_str = self.line
+        line_column = self.column - 1
+        if markerString:
+            line_str = "".join((line_str[:line_column],
+                                markerString, line_str[line_column:]))
+        return line_str.strip()
+    def __dir__(self):
+        return "lineno col line".split() + dir(type(self))
+
+class ParseException(ParseBaseException):
+    """
+    Exception thrown when parse expressions don't match the input string;
+    supported attributes by name are:
+    - lineno - returns the line number of the exception text
+    - col - returns the column number of the exception text
+    - line - returns the line containing the exception text
+
+    Example::
+
+        try:
+            Word(nums).setName("integer").parseString("ABC")
+        except ParseException as pe:
+            print(pe)
+            print("column: {}".format(pe.col))
+
+    prints::
+
+        Expected integer (at char 0), (line:1, col:1)
+        column: 1
+
+    """
+
+    @staticmethod
+    def explain(exc, depth=16):
+        """
+        Method to take an exception and translate the Python internal traceback into a list
+        of the pyparsing expressions that caused the exception to be raised.
+
+        Parameters:
+
+         - exc - exception raised during parsing (need not be a ParseException, in support
+           of Python exceptions that might be raised in a parse action)
+         - depth (default=16) - number of levels back in the stack trace to list expression
+           and function names; if None, the full stack trace names will be listed; if 0, only
+           the failing input line, marker, and exception string will be shown
+
+        Returns a multi-line string listing the ParserElements and/or function names in the
+        exception's stack trace.
+
+        Note: the diagnostic output will include string representations of the expressions
+        that failed to parse. These representations will be more helpful if you use `setName` to
+        give identifiable names to your expressions. Otherwise they will use the default string
+        forms, which may be cryptic to read.
+
+        explain() is only supported under Python 3.
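+
+        Example (illustrative)::
+
+            try:
+                Word(nums).setName("integer").parseString("ABC")
+            except ParseException as pe:
+                print(ParseException.explain(pe))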
+        """
+        import inspect
+
+        if depth is None:
+            depth = sys.getrecursionlimit()
+        ret = []
+        if isinstance(exc, ParseBaseException):
+            ret.append(exc.line)
+            ret.append(' ' * (exc.col - 1) + '^')
+        ret.append("{0}: {1}".format(type(exc).__name__, exc))
+
+        if depth > 0:
+            callers = inspect.getinnerframes(exc.__traceback__, context=depth)
+            seen = set()
+            for i, ff in enumerate(callers[-depth:]):
+                frm = ff[0]
+
+                f_self = frm.f_locals.get('self', None)
+                if isinstance(f_self, ParserElement):
+                    if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'):
+                        continue
+                    if f_self in seen:
+                        continue
+                    seen.add(f_self)
+
+                    self_type = type(f_self)
+                    ret.append("{0}.{1} - {2}".format(self_type.__module__,
+                                                      self_type.__name__,
+                                                      f_self))
+                elif f_self is not None:
+                    self_type = type(f_self)
+                    ret.append("{0}.{1}".format(self_type.__module__,
+                                                self_type.__name__))
+                else:
+                    code = frm.f_code
+                    if code.co_name in ('wrapper', '<module>'):
+                        continue
+
+                    ret.append("{0}".format(code.co_name))
+
+                depth -= 1
+                if not depth:
+                    break
+
+        return '\n'.join(ret)
+
+
+class ParseFatalException(ParseBaseException):
+    """user-throwable exception thrown when inconsistent parse content
+       is found; stops all parsing immediately"""
+    pass
+
+class ParseSyntaxException(ParseFatalException):
+    """just like :class:`ParseFatalException`, but thrown internally
+    when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
+    that parsing is to stop immediately because an unbacktrackable
+    syntax error has been found.
+    """
+    pass
+
+#~ class ReparseException(ParseBaseException):
+    #~ """Experimental class - parse actions can raise this exception to cause
+       #~ pyparsing to reparse the input string:
+        #~ - with a modified input string, and/or
+        #~ - with a modified start location
+       #~ Set the values of the ReparseException in the constructor, and raise the
+       #~ exception in a parse action to cause pyparsing to use the new string/location.
+       #~ Setting the values as None causes no change to be made.
+       #~ """
+    #~ def __init_( self, newstring, restartLoc ):
+        #~ self.newParseText = newstring
+        #~ self.reparseLoc = restartLoc
+
+class RecursiveGrammarException(Exception):
+    """exception thrown by :class:`ParserElement.validate` if the
+    grammar could be improperly recursive
+    """
+    def __init__( self, parseElementList ):
+        self.parseElementTrace = parseElementList
+
+    def __str__( self ):
+        return "RecursiveGrammarException: %s" % self.parseElementTrace
+
+class _ParseResultsWithOffset(object):
+    def __init__(self,p1,p2):
+        self.tup = (p1,p2)
+    def __getitem__(self,i):
+        return self.tup[i]
+    def __repr__(self):
+        return repr(self.tup[0])
+    def setOffset(self,i):
+        self.tup = (self.tup[0],i)
+
+class ParseResults(object):
+    """Structured parse results, to provide multiple means of access to
+    the parsed data:
+
+       - as a list (``len(results)``)
+       - by list index (``results[0], results[1]``, etc.)
+       - by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`)
+
+    Example::
+
+        integer = Word(nums)
+        date_str = (integer.setResultsName("year") + '/'
+                        + integer.setResultsName("month") + '/'
+                        + integer.setResultsName("day"))
+        # equivalent form:
+        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+        # parseString returns a ParseResults object
+        result = date_str.parseString("1999/12/31")
+
+        def test(s, fn=repr):
+            print("%s -> %s" % (s, fn(eval(s))))
+        test("list(result)")
+        test("result[0]")
+        test("result['month']")
+        test("result.day")
+        test("'month' in result")
+        test("'minutes' in result")
+        test("result.dump()", str)
+
+    prints::
+
+        list(result) -> ['1999', '/', '12', '/', '31']
+        result[0] -> '1999'
+        result['month'] -> '12'
+        result.day -> '31'
+        'month' in result -> True
+        'minutes' in result -> False
+        result.dump() -> ['1999', '/', '12', '/', '31']
+        - day: 31
+        - month: 12
+        - year: 1999
+    """
+    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
+        if isinstance(toklist, cls):
+            return toklist
+        retobj = object.__new__(cls)
+        retobj.__doinit = True
+        return retobj
+
+    # Performance tuning: we construct a *lot* of these, so keep this
+    # constructor as small and fast as possible
+    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
+        if self.__doinit:
+            self.__doinit = False
+            self.__name = None
+            self.__parent = None
+            self.__accumNames = {}
+            self.__asList = asList
+            self.__modal = modal
+            if toklist is None:
+                toklist = []
+            if isinstance(toklist, list):
+                self.__toklist = toklist[:]
+            elif isinstance(toklist, _generatorType):
+                self.__toklist = list(toklist)
+            else:
+                self.__toklist = [toklist]
+            self.__tokdict = dict()
+
+        if name is not None and name:
+            if not modal:
+                self.__accumNames[name] = 0
+            if isinstance(name,int):
+                name = _ustr(name) # will always return a str, but use _ustr for consistency
+            self.__name = name
+            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
+                if isinstance(toklist,basestring):
+                    toklist = [ toklist ]
+                if asList:
+                    if isinstance(toklist,ParseResults):
+                        self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0)
+                    else:
+                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
+                    self[name].__name = name
+                else:
+                    try:
+                        self[name] = toklist[0]
+                    except (KeyError,TypeError,IndexError):
+                        self[name] = toklist
+
+    def __getitem__( self, i ):
+        if isinstance( i, (int,slice) ):
+            return self.__toklist[i]
+        else:
+            if i not in self.__accumNames:
+                return self.__tokdict[i][-1][0]
+            else:
+                return ParseResults([ v[0] for v in self.__tokdict[i] ])
+
+    def __setitem__( self, k, v, isinstance=isinstance ):
+        if isinstance(v,_ParseResultsWithOffset):
+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
+            sub = v[0]
+        elif isinstance(k,(int,slice)):
+            self.__toklist[k] = v
+            sub = v
+        else:
+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
+            sub = v
+        if isinstance(sub,ParseResults):
+            sub.__parent = wkref(self)
+
+    def __delitem__( self, i ):
+        if isinstance(i,(int,slice)):
+            mylen = len( self.__toklist )
+            del self.__toklist[i]
+
+            # convert int to slice
+            if isinstance(i, int):
+                if i < 0:
+                    i += mylen
+                i = slice(i, i+1)
+            # get removed indices
+            removed = list(range(*i.indices(mylen)))
+            removed.reverse()
+            # fixup indices in token dictionary
+            for name,occurrences in self.__tokdict.items():
+                for j in removed:
+                    for k, (value, position) in enumerate(occurrences):
+                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
+        else:
+            del self.__tokdict[i]
+
+    def __contains__( self, k ):
+        return k in self.__tokdict
+
+    def __len__( self ): return len( self.__toklist )
+    def __bool__(self): return ( not not self.__toklist )
+    __nonzero__ = __bool__
+    def __iter__( self ): return iter( self.__toklist )
+    def __reversed__( self ): return iter( self.__toklist[::-1] )
+    def _iterkeys( self ):
+        if hasattr(self.__tokdict, "iterkeys"):
+            return self.__tokdict.iterkeys()
+        else:
+            return iter(self.__tokdict)
+
+    def _itervalues( self ):
+        return (self[k] for k in self._iterkeys())
+
+    def _iteritems( self ):
+        return ((k, self[k]) for k in self._iterkeys())
+
+    if PY_3:
+        keys = _iterkeys
+        """Returns an iterator of all named result keys."""
+
+        values = _itervalues
+        """Returns an iterator of all named result values."""
+
+        items = _iteritems
+        """Returns an iterator of all named result key-value tuples."""
+
+    else:
+        iterkeys = _iterkeys
+        """Returns an iterator of all named result keys (Python 2.x only)."""
+
+        itervalues = _itervalues
+        """Returns an iterator of all named result values (Python 2.x only)."""
+
+        iteritems = _iteritems
+        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
+
+        def keys( self ):
+            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.iterkeys())
+
+        def values( self ):
+            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.itervalues())
+
+        def items( self ):
+            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.iteritems())
+
+    def haskeys( self ):
+        """Since keys() returns an iterator, this method is helpful in bypassing
+           code that looks for the existence of any defined results names."""
+        return bool(self.__tokdict)
+
+    def pop( self, *args, **kwargs):
+        """
+        Removes and returns item at specified index (default= ``last``).
+        Supports both ``list`` and ``dict`` semantics for ``pop()``. If
+        passed no argument or an integer argument, it will use ``list``
+        semantics and pop tokens from the list of parsed tokens. If passed
+        a non-integer argument (most likely a string), it will use ``dict``
+        semantics and pop the corresponding value from any defined results
+        names. A second default return value argument is supported, just as in
+        ``dict.pop()``.
+
+        Example::
+
+            def remove_first(tokens):
+                tokens.pop(0)
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
+
+            label = Word(alphas)
+            patt = label("LABEL") + OneOrMore(Word(nums))
+            print(patt.parseString("AAB 123 321").dump())
+
+            # Use pop() in a parse action to remove named result (note that corresponding value is not
+            # removed from list form of results)
+            def remove_LABEL(tokens):
+                tokens.pop("LABEL")
+                return tokens
+            patt.addParseAction(remove_LABEL)
+            print(patt.parseString("AAB 123 321").dump())
+
+        prints::
+
+            ['AAB', '123', '321']
+            - LABEL: AAB
+
+            ['AAB', '123', '321']
+        """
+        if not args:
+            args = [-1]
+        for k,v in kwargs.items():
+            if k == 'default':
+                args = (args[0], v)
+            else:
+                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
+        if (isinstance(args[0], int) or
+                        len(args) == 1 or
+                        args[0] in self):
+            index = args[0]
+            ret = self[index]
+            del self[index]
+            return ret
+        else:
+            defaultvalue = args[1]
+            return defaultvalue
+
+    def get(self, key, defaultValue=None):
+        """
+        Returns named result matching the given key, or if there is no
+        such name, then returns the given ``defaultValue`` or ``None`` if no
+        ``defaultValue`` is specified.
+
+        Similar to ``dict.get()``.
+
+        Example::
+
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+            result = date_str.parseString("1999/12/31")
+            print(result.get("year")) # -> '1999'
+            print(result.get("hour", "not specified")) # -> 'not specified'
+            print(result.get("hour")) # -> None
+        """
+        if key in self:
+            return self[key]
+        else:
+            return defaultValue
+
+    def insert( self, index, insStr ):
+        """
+        Inserts new element at location index in the list of parsed tokens.
+
+        Similar to ``list.insert()``.
+
+        Example::
+
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+            # use a parse action to insert the parse location in the front of the parsed results
+            def insert_locn(locn, tokens):
+                tokens.insert(0, locn)
+            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
+        """
+        self.__toklist.insert(index, insStr)
+        # fixup indices in token dictionary
+        for name,occurrences in self.__tokdict.items():
+            for k, (value, position) in enumerate(occurrences):
+                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
+
+    def append( self, item ):
+        """
+        Add single element to end of ParseResults list of elements.
+
+        Example::
+
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+            # use a parse action to compute the sum of the parsed integers, and add it to the end
+            def append_sum(tokens):
+                tokens.append(sum(map(int, tokens)))
+            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
+        """
+        self.__toklist.append(item)
+
+    def extend( self, itemseq ):
+        """
+        Add sequence of elements to end of ParseResults list of elements.
+
+        Example::
+
+            patt = OneOrMore(Word(alphas))
+
+            # use a parse action to append the reverse of the matched strings, to make a palindrome
+            def make_palindrome(tokens):
+                tokens.extend(reversed([t[::-1] for t in tokens]))
+                return ''.join(tokens)
+            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
+        """
+        if isinstance(itemseq, ParseResults):
+            self.__iadd__(itemseq)
+        else:
+            self.__toklist.extend(itemseq)
+
+    def clear( self ):
+        """
+        Clear all elements and results names.
+        """
+        del self.__toklist[:]
+        self.__tokdict.clear()
+
+    def __getattr__( self, name ):
+        try:
+            return self[name]
+        except KeyError:
+            return ""
+
+    def __add__( self, other ):
+        ret = self.copy()
+        ret += other
+        return ret
+
+    def __iadd__( self, other ):
+        if other.__tokdict:
+            offset = len(self.__toklist)
+            addoffset = lambda a: offset if a<0 else a+offset
+            otheritems = other.__tokdict.items()
+            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
+                                for (k,vlist) in otheritems for v in vlist]
+            for k,v in otherdictitems:
+                self[k] = v
+                if isinstance(v[0],ParseResults):
+                    v[0].__parent = wkref(self)
+
+        self.__toklist += other.__toklist
+        self.__accumNames.update( other.__accumNames )
+        return self
+
+    def __radd__(self, other):
+        if isinstance(other,int) and other == 0:
+            # useful for merging many ParseResults using sum() builtin
+            return self.copy()
+        else:
+            # this may raise a TypeError - so be it
+            return other + self
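+
+    # Illustrative: because 0 + results returns a copy, a list of
+    # ParseResults objects can be merged with the sum() builtin,
+    # e.g. sum([results_a, results_b]) (hypothetical names).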
+
+    def __repr__( self ):
+        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
+
+    def __str__( self ):
+        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
+
+    def _asStringList( self, sep='' ):
+        out = []
+        for item in self.__toklist:
+            if out and sep:
+                out.append(sep)
+            if isinstance( item, ParseResults ):
+                out += item._asStringList()
+            else:
+                out.append( _ustr(item) )
+        return out
+
+    def asList( self ):
+        """
+        Returns the parse results as a nested list of matching tokens, all converted to strings.
+
+        Example::
+
+            patt = OneOrMore(Word(alphas))
+            result = patt.parseString("sldkj lsdkj sldkj")
+            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
+            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
+
+            # Use asList() to create an actual list
+            result_list = result.asList()
+            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
+        """
+        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
+
+    def asDict( self ):
+        """
+        Returns the named parse results as a nested dictionary.
+
+        Example::
+
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+            result = date_str.parseString('12/31/1999')
+            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
+
+            result_dict = result.asDict()
+            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
+
+            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
+            import json
+            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
+            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
+        """
+        if PY_3:
+            item_fn = self.items
+        else:
+            item_fn = self.iteritems
+
+        def toItem(obj):
+            if isinstance(obj, ParseResults):
+                if obj.haskeys():
+                    return obj.asDict()
+                else:
+                    return [toItem(v) for v in obj]
+            else:
+                return obj
+
+        return dict((k,toItem(v)) for k,v in item_fn())
+
+    def copy( self ):
+        """
+        Returns a new copy of a :class:`ParseResults` object.
+        """
+        ret = ParseResults( self.__toklist )
+        ret.__tokdict = dict(self.__tokdict.items())
+        ret.__parent = self.__parent
+        ret.__accumNames.update( self.__accumNames )
+        ret.__name = self.__name
+        return ret
+
+    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
+        """
+        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
+        """
+        nl = "\n"
+        out = []
+        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
+                                                            for v in vlist)
+        nextLevelIndent = indent + "  "
+
+        # collapse out indents if formatting is not desired
+        if not formatted:
+            indent = ""
+            nextLevelIndent = ""
+            nl = ""
+
+        selfTag = None
+        if doctag is not None:
+            selfTag = doctag
+        else:
+            if self.__name:
+                selfTag = self.__name
+
+        if not selfTag:
+            if namedItemsOnly:
+                return ""
+            else:
+                selfTag = "ITEM"
+
+        out += [ nl, indent, "<", selfTag, ">" ]
+
+        for i,res in enumerate(self.__toklist):
+            if isinstance(res,ParseResults):
+                if i in namedItems:
+                    out += [ res.asXML(namedItems[i],
+                                        namedItemsOnly and doctag is None,
+                                        nextLevelIndent,
+                                        formatted)]
+                else:
+                    out += [ res.asXML(None,
+                                        namedItemsOnly and doctag is None,
+                                        nextLevelIndent,
+                                        formatted)]
+            else:
+                # individual token, see if there is a name for it
+                resTag = None
+                if i in namedItems:
+                    resTag = namedItems[i]
+                if not resTag:
+                    if namedItemsOnly:
+                        continue
+                    else:
+                        resTag = "ITEM"
+                xmlBodyText = _xml_escape(_ustr(res))
+                out += [ nl, nextLevelIndent, "<", resTag, ">",
+                                                xmlBodyText,
+                                                "</", resTag, ">" ]
+
+        out += [ nl, indent, "</", selfTag, ">" ]
+        return "".join(out)
+
+    def __lookup(self,sub):
+        for k,vlist in self.__tokdict.items():
+            for v,loc in vlist:
+                if sub is v:
+                    return k
+        return None
+
+    def getName(self):
+        r"""
+        Returns the results name for this token expression. Useful when several
+        different expressions might match at a particular location.
+
+        Example::
+
+            integer = Word(nums)
+            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
+            house_number_expr = Suppress('#') + Word(nums, alphanums)
+            user_data = (Group(house_number_expr)("house_number")
+                        | Group(ssn_expr)("ssn")
+                        | Group(integer)("age"))
+            user_info = OneOrMore(user_data)
+
+            result = user_info.parseString("22 111-22-3333 #221B")
+            for item in result:
+                print(item.getName(), ':', item[0])
+
+        prints::
+
+            age : 22
+            ssn : 111-22-3333
+            house_number : 221B
+        """
+        if self.__name:
+            return self.__name
+        elif self.__parent:
+            par = self.__parent()
+            if par:
+                return par.__lookup(self)
+            else:
+                return None
+        elif (len(self) == 1 and
+               len(self.__tokdict) == 1 and
+               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
+            return next(iter(self.__tokdict.keys()))
+        else:
+            return None
+
+    def dump(self, indent='', depth=0, full=True):
+        """
+        Diagnostic method for listing out the contents of
+        a :class:`ParseResults`. Accepts an optional ``indent`` argument so
+        that this string can be embedded in a nested display of other data.
+
+        Example::
+
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+            result = date_str.parseString('12/31/1999')
+            print(result.dump())
+
+        prints::
+
+            ['12', '/', '31', '/', '1999']
+            - day: 1999
+            - month: 31
+            - year: 12
+        """
+        out = []
+        NL = '\n'
+        out.append( indent+_ustr(self.asList()) )
+        if full:
+            if self.haskeys():
+                items = sorted((str(k), v) for k,v in self.items())
+                for k,v in items:
+                    if out:
+                        out.append(NL)
+                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
+                    if isinstance(v,ParseResults):
+                        if v:
+                            out.append( v.dump(indent,depth+1) )
+                        else:
+                            out.append(_ustr(v))
+                    else:
+                        out.append(repr(v))
+            elif any(isinstance(vv,ParseResults) for vv in self):
+                v = self
+                for i,vv in enumerate(v):
+                    if isinstance(vv,ParseResults):
+                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
+                    else:
+                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))
+
+        return "".join(out)
+
+    def pprint(self, *args, **kwargs):
+        """
+        Pretty-printer for parsed results as a list, using the
+        `pprint <https://docs.python.org/3/library/pprint.html>`_ module.
+        Accepts additional positional or keyword args as defined for
+        `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
+
+        Example::
+
+            ident = Word(alphas, alphanums)
+            num = Word(nums)
+            func = Forward()
+            term = ident | num | Group('(' + func + ')')
+            func <<= ident + Group(Optional(delimitedList(term)))
+            result = func.parseString("fna a,b,(fnb c,d,200),100")
+            result.pprint(width=40)
+
+        prints::
+
+            ['fna',
+             ['a',
+              'b',
+              ['(', 'fnb', ['c', 'd', '200'], ')'],
+              '100']]
+        """
+        pprint.pprint(self.asList(), *args, **kwargs)
+
+    # add support for pickle protocol
+    def __getstate__(self):
+        return ( self.__toklist,
+                 ( self.__tokdict.copy(),
+                   self.__parent is not None and self.__parent() or None,
+                   self.__accumNames,
+                   self.__name ) )
+
+    def __setstate__(self,state):
+        self.__toklist = state[0]
+        (self.__tokdict,
+         par,
+         inAccumNames,
+         self.__name) = state[1]
+        self.__accumNames = {}
+        self.__accumNames.update(inAccumNames)
+        if par is not None:
+            self.__parent = wkref(par)
+        else:
+            self.__parent = None
+
+    def __getnewargs__(self):
+        return self.__toklist, self.__name, self.__asList, self.__modal
+
+    def __dir__(self):
+        return (dir(type(self)) + list(self.keys()))
+
+MutableMapping.register(ParseResults)
+
+def col (loc,strg):
+    """Returns current column within a string, counting newlines as line separators.
+   The first column is number 1.
+
+   Note: the default parsing behavior is to expand tabs in the input string
+   before starting the parsing process.  See
+   :class:`ParserElement.parseString` for more
+   information on parsing strings containing ``<TAB>`` s, and suggested
+   methods to maintain a consistent view of the parsed string, the parse
+   location, and line and column positions within the parsed string.
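+
+   Example (illustrative)::
+
+       text = "line one" + chr(10) + "line two"   # chr(10) == newline
+       col(10, text)   # -> 2  (the 'i' in "line two")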
+   """
+    s = strg
+    return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
+
+def lineno(loc,strg):
+    """Returns current line number within a string, counting newlines as line separators.
+    The first line is number 1.
+
+    Note - the default parsing behavior is to expand tabs in the input string
+    before starting the parsing process.  See :class:`ParserElement.parseString`
+    for more information on parsing strings containing ``<TAB>`` s, and
+    suggested methods to maintain a consistent view of the parsed string, the
+    parse location, and line and column positions within the parsed string.
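+
+    Example (illustrative)::
+
+        text = "line one" + chr(10) + "line two"   # chr(10) == newline
+        lineno(0, text)    # -> 1
+        lineno(10, text)   # -> 2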
+    """
+    return strg.count("\n",0,loc) + 1
+
+def line( loc, strg ):
+    """Returns the line of text containing loc within a string, counting newlines as line separators.
+       """
+    lastCR = strg.rfind("\n", 0, loc)
+    nextCR = strg.find("\n", loc)
+    if nextCR >= 0:
+        return strg[lastCR+1:nextCR]
+    else:
+        return strg[lastCR+1:]
+
+def _defaultStartDebugAction( instring, loc, expr ):
+    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
+
+def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
+    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
+
+def _defaultExceptionDebugAction( instring, loc, expr, exc ):
+    print ("Exception raised:" + _ustr(exc))
+
+def nullDebugAction(*args):
+    """'Do-nothing' debug action, to suppress debugging output during parsing."""
+    pass
+
+# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
+#~ 'decorator to trim function calls to match the arity of the target'
+#~ def _trim_arity(func, maxargs=3):
+    #~ if func in singleArgBuiltins:
+        #~ return lambda s,l,t: func(t)
+    #~ limit = 0
+    #~ foundArity = False
+    #~ def wrapper(*args):
+        #~ nonlocal limit,foundArity
+        #~ while 1:
+            #~ try:
+                #~ ret = func(*args[limit:])
+                #~ foundArity = True
+                #~ return ret
+            #~ except TypeError:
+                #~ if limit == maxargs or foundArity:
+                    #~ raise
+                #~ limit += 1
+                #~ continue
+    #~ return wrapper
+
+# this version is Python 2.x-3.x cross-compatible
+# decorator to trim function calls to match the arity of the target
+def _trim_arity(func, maxargs=2):
+    if func in singleArgBuiltins:
+        return lambda s,l,t: func(t)
+    limit = [0]
+    foundArity = [False]
+
+    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
+    if system_version[:2] >= (3,5):
+        def extract_stack(limit=0):
+            # special handling for Python 3.5.0 - extra deep call stack by 1
+            offset = -3 if system_version == (3,5,0) else -2
+            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
+            return [frame_summary[:2]]
+        def extract_tb(tb, limit=0):
+            frames = traceback.extract_tb(tb, limit=limit)
+            frame_summary = frames[-1]
+            return [frame_summary[:2]]
+    else:
+        extract_stack = traceback.extract_stack
+        extract_tb = traceback.extract_tb
+
+    # synthesize what would be returned by traceback.extract_stack at the call to
+    # user's parse action 'func', so that we don't incur call penalty at parse time
+
+    LINE_DIFF = 6
+    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
+    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
+    this_line = extract_stack(limit=2)[-1]
+    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
+
+    def wrapper(*args):
+        while 1:
+            try:
+                ret = func(*args[limit[0]:])
+                foundArity[0] = True
+                return ret
+            except TypeError:
+                # re-raise TypeErrors if they did not come from our arity testing
+                if foundArity[0]:
+                    raise
+                else:
+                    try:
+                        tb = sys.exc_info()[-1]
+                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
+                            raise
+                    finally:
+                        del tb
+
+                if limit[0] <= maxargs:
+                    limit[0] += 1
+                    continue
+                raise
+
+    # copy func name to wrapper for sensible debug output
+    func_name = "<parse action>"
+    try:
+        func_name = getattr(func, '__name__',
+                            getattr(func, '__class__').__name__)
+    except Exception:
+        func_name = str(func)
+    wrapper.__name__ = func_name
+
+    return wrapper
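+
+# Illustrative (with a hypothetical `integer` expression): because parse
+# actions are wrapped with _trim_arity, any of these signatures works:
+#   integer.setParseAction(lambda t: int(t[0]))     # fn(toks)
+#   integer.setParseAction(lambda l, t: t)          # fn(loc, toks)
+#   integer.setParseAction(lambda s, l, t: t)       # fn(s, loc, toks)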
+
+class ParserElement(object):
+    """Abstract base level parser element class."""
+    DEFAULT_WHITE_CHARS = " \n\t\r"
+    verbose_stacktrace = False
+
+    @staticmethod
+    def setDefaultWhitespaceChars( chars ):
+        r"""
+        Overrides the default whitespace chars
+
+        Example::
+
+            # default whitespace chars are space, <TAB> and newline
+            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
+
+            # change to just treat newline as significant
+            ParserElement.setDefaultWhitespaceChars(" \t")
+            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
+        """
+        ParserElement.DEFAULT_WHITE_CHARS = chars
+
+    @staticmethod
+    def inlineLiteralsUsing(cls):
+        """
+        Set class to be used for inclusion of string literals into a parser.
+
+        Example::
+
+            # default literal class used is Literal
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
+
+
+            # change to Suppress
+            ParserElement.inlineLiteralsUsing(Suppress)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
+        """
+        ParserElement._literalStringClass = cls
+
+    def __init__( self, savelist=False ):
+        self.parseAction = list()
+        self.failAction = None
+        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
+        self.strRepr = None
+        self.resultsName = None
+        self.saveAsList = savelist
+        self.skipWhitespace = True
+        self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
+        self.copyDefaultWhiteChars = True
+        self.mayReturnEmpty = False # used when checking for left-recursion
+        self.keepTabs = False
+        self.ignoreExprs = list()
+        self.debug = False
+        self.streamlined = False
+        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
+        self.errmsg = ""
+        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
+        self.debugActions = ( None, None, None ) #custom debug actions
+        self.re = None
+        self.callPreparse = True # used to avoid redundant calls to preParse
+        self.callDuringTry = False
+
+    def copy( self ):
+        """
+        Make a copy of this :class:`ParserElement`.  Useful for defining
+        different parse actions for the same parsing pattern, using copies of
+        the original parse element.
+
+        Example::
+
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
+            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+
+            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
+
+        prints::
+
+            [5120, 100, 655360, 268435456]
+
+        Equivalent form of ``expr.copy()`` is just ``expr()``::
+
+            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+        """
+        cpy = copy.copy( self )
+        cpy.parseAction = self.parseAction[:]
+        cpy.ignoreExprs = self.ignoreExprs[:]
+        if self.copyDefaultWhiteChars:
+            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+        return cpy
+
+    def setName( self, name ):
+        """
+        Define name for this expression, makes debugging and exception messages clearer.
+
+        Example::
+
+            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
+            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
+        """
+        self.name = name
+        self.errmsg = "Expected " + self.name
+        if hasattr(self,"exception"):
+            self.exception.msg = self.errmsg
+        return self
+
+    def setResultsName( self, name, listAllMatches=False ):
+        """
+        Define name for referencing matching tokens as a nested attribute
+        of the returned parse results.
+        NOTE: this returns a *copy* of the original :class:`ParserElement` object;
+        this is so that the client can define a basic element, such as an
+        integer, and reference it in multiple places with different names.
+
+        You can also set results names using the abbreviated syntax,
+        ``expr("name")`` in place of ``expr.setResultsName("name")``
+        - see :class:`__call__`.
+
+        Example::
+
+            date_str = (integer.setResultsName("year") + '/'
+                        + integer.setResultsName("month") + '/'
+                        + integer.setResultsName("day"))
+
+            # equivalent form:
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+        """
+        newself = self.copy()
+        if name.endswith("*"):
+            name = name[:-1]
+            listAllMatches=True
+        newself.resultsName = name
+        newself.modalResults = not listAllMatches
+        return newself
+
+    def setBreak(self,breakFlag = True):
+        """Method to invoke the Python pdb debugger when this element is
+           about to be parsed. Set ``breakFlag`` to True to enable, False to
+           disable.
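+
+           Example (illustrative)::
+
+               integer = Word(nums).setBreak()
+               # pdb.set_trace() fires each time a match of integer is attempted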
+        """
+        if breakFlag:
+            _parseMethod = self._parse
+            def breaker(instring, loc, doActions=True, callPreParse=True):
+                import pdb
+                pdb.set_trace()
+                return _parseMethod( instring, loc, doActions, callPreParse )
+            breaker._originalParseMethod = _parseMethod
+            self._parse = breaker
+        else:
+            if hasattr(self._parse,"_originalParseMethod"):
+                self._parse = self._parse._originalParseMethod
+        return self
+
+    def setParseAction( self, *fns, **kwargs ):
+        """
+        Define one or more actions to perform when successfully matching parse element definition.
+        Parse action fn is a callable method with 0-3 arguments, called as ``fn(s,loc,toks)`` ,
+        ``fn(loc,toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
+
+        - s   = the original string being parsed (see note below)
+        - loc = the location of the matching substring
+        - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
+
+        If the functions in fns modify the tokens, they can return them as the return
+        value from fn, and the modified list of tokens will replace the original.
+        Otherwise, fn does not need to return any value.
+
+        Optional keyword arguments:
+        - callDuringTry = (default= ``False`` ) indicate if parse action should be run during lookaheads and alternate testing
+
+        Note: the default parsing behavior is to expand tabs in the input string
+        before starting the parsing process.  See :class:`parseString` for more
+        information on parsing strings containing ``<TAB>`` s, and suggested
+        methods to maintain a consistent view of the parsed string, the parse
+        location, and line and column positions within the parsed string.
+
+        Example::
+
+            integer = Word(nums)
+            date_str = integer + '/' + integer + '/' + integer
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
+
+            # use parse action to convert to ints at parse time
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            date_str = integer + '/' + integer + '/' + integer
+
+            # note that integer fields are now ints, not strings
+            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
+        """
+        self.parseAction = list(map(_trim_arity, list(fns)))
+        self.callDuringTry = kwargs.get("callDuringTry", False)
+        return self
+
+    def addParseAction( self, *fns, **kwargs ):
+        """
+        Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`.
+
+        See examples in :class:`copy`.
+        """
+        self.parseAction += list(map(_trim_arity, list(fns)))
+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+        return self
+
+    def addCondition(self, *fns, **kwargs):
+        """Add a boolean predicate function to expression's list of parse actions. See
+        :class:`setParseAction` for function call signatures. Unlike ``setParseAction``,
+        functions passed to ``addCondition`` need to return boolean success/fail of the condition.
+
+        Optional keyword arguments:
+        - message = define a custom message to be used in the raised exception
+        - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
+
+        Example::
+
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            year_int = integer.copy()
+            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
+            date_str = year_int + '/' + integer + '/' + integer
+
+            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
+        """
+        msg = kwargs.get("message", "failed user-defined condition")
+        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
+        for fn in fns:
+            fn = _trim_arity(fn)
+            # bind fn via a default argument so that each condition closes
+            # over its own function rather than the loop's final value
+            def pa(s,l,t, fn=fn):
+                if not bool(fn(s,l,t)):
+                    raise exc_type(s,l,msg)
+            self.parseAction.append(pa)
+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+        return self
+
+    def setFailAction( self, fn ):
+        """Define action to perform if parsing fails at this expression.
+           Fail action fn is a callable function that takes the arguments
+           ``fn(s,loc,expr,err)`` where:
+           - s = string being parsed
+           - loc = location where expression match was attempted and failed
+           - expr = the parse expression that failed
+           - err = the exception thrown
+           The function returns no value.  It may throw :class:`ParseFatalException`
+           if it is desired to stop parsing immediately."""
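+        # usage sketch (report_failure is an illustrative user callback, not a
+        # library-defined name):
+        #
+        #     def report_failure(s, loc, expr, err):
+        #         print("match of %s failed at loc %d" % (expr, loc))
+        #
+        #     integer = Word(nums).setFailAction(report_failure)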
+        self.failAction = fn
+        return self
+
+    def _skipIgnorables( self, instring, loc ):
+        exprsFound = True
+        while exprsFound:
+            exprsFound = False
+            for e in self.ignoreExprs:
+                try:
+                    while 1:
+                        loc,dummy = e._parse( instring, loc )
+                        exprsFound = True
+                except ParseException:
+                    pass
+        return loc
+
+    def preParse( self, instring, loc ):
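+        # skip past any ignorable expressions (e.g. comments), then past
+        # leading whitespace, returning the location where matching should begin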
+        if self.ignoreExprs:
+            loc = self._skipIgnorables( instring, loc )
+
+        if self.skipWhitespace:
+            wt = self.whiteChars
+            instrlen = len(instring)
+            while loc < instrlen and instring[loc] in wt:
+                loc += 1
+
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        return loc, []
+
+    def postParse( self, instring, loc, tokenlist ):
+        return tokenlist
+
+    #~ @profile
+    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
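+        # core matching sequence: optionally preParse (skip whitespace and
+        # ignorables), run parseImpl for the actual match, then postParse and
+        # any attached parse actions; debug/fail actions hook in around these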
+        debugging = ( self.debug ) #and doActions )
+
+        if debugging or self.failAction:
+            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
+            if (self.debugActions[0] ):
+                self.debugActions[0]( instring, loc, self )
+            if callPreParse and self.callPreparse:
+                preloc = self.preParse( instring, loc )
+            else:
+                preloc = loc
+            tokensStart = preloc
+            try:
+                try:
+                    loc,tokens = self.parseImpl( instring, preloc, doActions )
+                except IndexError:
+                    raise ParseException( instring, len(instring), self.errmsg, self )
+            except ParseBaseException as err:
+                #~ print ("Exception raised:", err)
+                if self.debugActions[2]:
+                    self.debugActions[2]( instring, tokensStart, self, err )
+                if self.failAction:
+                    self.failAction( instring, tokensStart, self, err )
+                raise
+        else:
+            if callPreParse and self.callPreparse:
+                preloc = self.preParse( instring, loc )
+            else:
+                preloc = loc
+            tokensStart = preloc
+            if self.mayIndexError or preloc >= len(instring):
+                try:
+                    loc,tokens = self.parseImpl( instring, preloc, doActions )
+                except IndexError:
+                    raise ParseException( instring, len(instring), self.errmsg, self )
+            else:
+                loc,tokens = self.parseImpl( instring, preloc, doActions )
+
+        tokens = self.postParse( instring, loc, tokens )
+
+        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
+        if self.parseAction and (doActions or self.callDuringTry):
+            if debugging:
+                try:
+                    for fn in self.parseAction:
+                        try:
+                            tokens = fn( instring, tokensStart, retTokens )
+                        except IndexError as parse_action_exc:
+                            exc = ParseException("exception raised in parse action")
+                            exc.__cause__ = parse_action_exc
+                            raise exc
+
+                        if tokens is not None and tokens is not retTokens:
+                            retTokens = ParseResults( tokens,
+                                                      self.resultsName,
+                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+                                                      modal=self.modalResults )
+                except ParseBaseException as err:
+                    #~ print "Exception raised in user parse action:", err
+                    if (self.debugActions[2] ):
+                        self.debugActions[2]( instring, tokensStart, self, err )
+                    raise
+            else:
+                for fn in self.parseAction:
+                    try:
+                        tokens = fn( instring, tokensStart, retTokens )
+                    except IndexError as parse_action_exc:
+                        exc = ParseException("exception raised in parse action")
+                        exc.__cause__ = parse_action_exc
+                        raise exc
+
+                    if tokens is not None and tokens is not retTokens:
+                        retTokens = ParseResults( tokens,
+                                                  self.resultsName,
+                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+                                                  modal=self.modalResults )
+        if debugging:
+            #~ print ("Matched",self,"->",retTokens.asList())
+            if (self.debugActions[1] ):
+                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
+
+        return loc, retTokens
+
+    def tryParse( self, instring, loc ):
+        try:
+            return self._parse( instring, loc, doActions=False )[0]
+        except ParseFatalException:
+            raise ParseException( instring, loc, self.errmsg, self)
+
+    def canParseNext(self, instring, loc):
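+        # non-raising probe: report whether this expression would match at loc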
+        try:
+            self.tryParse(instring, loc)
+        except (ParseException, IndexError):
+            return False
+        else:
+            return True
+
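+    # memoizing caches used by packrat parsing (see enablePackrat below);
+    # _UnboundedCache grows without limit, _FifoCache evicts its oldest
+    # entries once the configured size limit is reached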
+    class _UnboundedCache(object):
+        def __init__(self):
+            cache = {}
+            self.not_in_cache = not_in_cache = object()
+
+            def get(self, key):
+                return cache.get(key, not_in_cache)
+
+            def set(self, key, value):
+                cache[key] = value
+
+            def clear(self):
+                cache.clear()
+
+            def cache_len(self):
+                return len(cache)
+
+            self.get = types.MethodType(get, self)
+            self.set = types.MethodType(set, self)
+            self.clear = types.MethodType(clear, self)
+            self.__len__ = types.MethodType(cache_len, self)
+
+    if _OrderedDict is not None:
+        class _FifoCache(object):
+            def __init__(self, size):
+                self.not_in_cache = not_in_cache = object()
+
+                cache = _OrderedDict()
+
+                def get(self, key):
+                    return cache.get(key, not_in_cache)
+
+                def set(self, key, value):
+                    cache[key] = value
+                    while len(cache) > size:
+                        try:
+                            cache.popitem(False)
+                        except KeyError:
+                            pass
+
+                def clear(self):
+                    cache.clear()
+
+                def cache_len(self):
+                    return len(cache)
+
+                self.get = types.MethodType(get, self)
+                self.set = types.MethodType(set, self)
+                self.clear = types.MethodType(clear, self)
+                self.__len__ = types.MethodType(cache_len, self)
+
+    else:
+        class _FifoCache(object):
+            def __init__(self, size):
+                self.not_in_cache = not_in_cache = object()
+
+                cache = {}
+                key_fifo = collections.deque()
+
+                def get(self, key):
+                    return cache.get(key, not_in_cache)
+
+                def set(self, key, value):
+                    cache[key] = value
+                    key_fifo.append(key)
+                    # evict oldest entries once the FIFO grows past its size limit
+                    while len(key_fifo) > size:
+                        cache.pop(key_fifo.popleft(), None)
+
+                def clear(self):
+                    cache.clear()
+                    key_fifo.clear()
+
+                def cache_len(self):
+                    return len(cache)
+
+                self.get = types.MethodType(get, self)
+                self.set = types.MethodType(set, self)
+                self.clear = types.MethodType(clear, self)
+                self.__len__ = types.MethodType(cache_len, self)
+
+    # argument cache for optimizing repeated calls when backtracking through recursive expressions
+    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
+    packrat_cache_lock = RLock()
+    packrat_cache_stats = [0, 0]
+
+    # this method gets repeatedly called during backtracking with the same arguments -
+    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
+        HIT, MISS = 0, 1
+        lookup = (self, instring, loc, callPreParse, doActions)
+        with ParserElement.packrat_cache_lock:
+            cache = ParserElement.packrat_cache
+            value = cache.get(lookup)
+            if value is cache.not_in_cache:
+                ParserElement.packrat_cache_stats[MISS] += 1
+                try:
+                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
+                except ParseBaseException as pe:
+                    # cache a copy of the exception, without the traceback
+                    cache.set(lookup, pe.__class__(*pe.args))
+                    raise
+                else:
+                    cache.set(lookup, (value[0], value[1].copy()))
+                    return value
+            else:
+                ParserElement.packrat_cache_stats[HIT] += 1
+                if isinstance(value, Exception):
+                    raise value
+                return (value[0], value[1].copy())
+
+    _parse = _parseNoCache
+
+    @staticmethod
+    def resetCache():
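+        # discard all memoized parse results and zero the hit/miss counters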
+        ParserElement.packrat_cache.clear()
+        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
+
+    _packratEnabled = False
+    @staticmethod
+    def enablePackrat(cache_size_limit=128):
+        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
+           Repeated parse attempts at the same string location (which happens
+           often in many complex grammars) can immediately return a cached value,
+           instead of re-executing parsing/validating code.  Memoizing is done for
+           both valid results and parsing exceptions.
+
+           Parameters:
+
+           - cache_size_limit - (default= ``128``) - if an integer value is provided
+             will limit the size of the packrat cache; if None is passed, then
+             the cache size will be unbounded; if 0 is passed, the cache will
+             be effectively disabled.
+
+           This speedup may break existing programs that use parse actions that
+           have side-effects.  For this reason, packrat parsing is disabled when
+           you first import pyparsing.  To activate the packrat feature, your
+           program must call the class method :class:`ParserElement.enablePackrat`.
+           For best results, call ``enablePackrat()`` immediately after
+           importing pyparsing.
+
+           Example::
+
+               from pip._vendor import pyparsing
+               pyparsing.ParserElement.enablePackrat()
+        """
+        if not ParserElement._packratEnabled:
+            ParserElement._packratEnabled = True
+            if cache_size_limit is None:
+                ParserElement.packrat_cache = ParserElement._UnboundedCache()
+            else:
+                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
+            ParserElement._parse = ParserElement._parseCache
+
+    def parseString( self, instring, parseAll=False ):
+        """
+        Execute the parse expression with the given string.
+        This is the main interface to the client code, once the complete
+        expression has been built.
+
+        If you want the grammar to require that the entire input string be
+        successfully parsed, then set ``parseAll`` to True (equivalent to ending
+        the grammar with ``StringEnd()``).
+
+        Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,
+        in order to report proper column numbers in parse actions.
+        If the input string contains tabs and
+        the grammar uses parse actions that use the ``loc`` argument to index into the
+        string being parsed, you can ensure you have a consistent view of the input
+        string by:
+
+        - calling ``parseWithTabs`` on your grammar before calling ``parseString``
+          (see :class:`parseWithTabs`)
+        - define your parse action using the full ``(s,loc,toks)`` signature, and
+          reference the input string using the parse action's ``s`` argument
+        - explicitly expand the tabs in your input string before calling
+          ``parseString``
+
+        Example::
+
+            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
+            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
+        """
+        ParserElement.resetCache()
+        if not self.streamlined:
+            self.streamline()
+            #~ self.saveAsList = True
+        for e in self.ignoreExprs:
+            e.streamline()
+        if not self.keepTabs:
+            instring = instring.expandtabs()
+        try:
+            loc, tokens = self._parse( instring, 0 )
+            if parseAll:
+                loc = self.preParse( instring, loc )
+                se = Empty() + StringEnd()
+                se._parse( instring, loc )
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+        else:
+            return tokens
+
+    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
+        """
+        Scan the input string for expression matches.  Each match will return the
+        matching tokens, start location, and end location.  May be called with optional
+        ``maxMatches`` argument, to clip scanning after 'n' matches are found.  If
+        ``overlap`` is specified, then overlapping matches will be reported.
+
+        Note that the start and end locations are reported relative to the string
+        being parsed.  See :class:`parseString` for more information on parsing
+        strings with embedded tabs.
+
+        Example::
+
+            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
+            print(source)
+            for tokens,start,end in Word(alphas).scanString(source):
+                print(' '*start + '^'*(end-start))
+                print(' '*start + tokens[0])
+
+        prints::
+
+            sldjf123lsdjjkf345sldkjf879lkjsfd987
+            ^^^^^
+            sldjf
+                    ^^^^^^^
+                    lsdjjkf
+                              ^^^^^^
+                              sldkjf
+                                       ^^^^^^
+                                       lkjsfd
+        """
+        if not self.streamlined:
+            self.streamline()
+        for e in self.ignoreExprs:
+            e.streamline()
+
+        if not self.keepTabs:
+            instring = _ustr(instring).expandtabs()
+        instrlen = len(instring)
+        loc = 0
+        preparseFn = self.preParse
+        parseFn = self._parse
+        ParserElement.resetCache()
+        matches = 0
+        try:
+            while loc <= instrlen and matches < maxMatches:
+                try:
+                    preloc = preparseFn( instring, loc )
+                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
+                except ParseException:
+                    loc = preloc+1
+                else:
+                    if nextLoc > loc:
+                        matches += 1
+                        yield tokens, preloc, nextLoc
+                        if overlap:
+                            nextloc = preparseFn( instring, loc )
+                            if nextloc > loc:
+                                loc = nextLoc
+                            else:
+                                loc += 1
+                        else:
+                            loc = nextLoc
+                    else:
+                        loc = preloc+1
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def transformString( self, instring ):
+        """
+        Extension to :class:`scanString`, to modify matching text with modified tokens that may
+        be returned from a parse action.  To use ``transformString``, define a grammar and
+        attach a parse action to it that modifies the returned token list.
+        Invoking ``transformString()`` on a target string will then scan for matches,
+        and replace the matched text patterns according to the logic in the parse
+        action.  ``transformString()`` returns the resulting transformed string.
+
+        Example::
+
+            wd = Word(alphas)
+            wd.setParseAction(lambda toks: toks[0].title())
+
+            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
+
+        prints::
+
+            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
+        """
+        out = []
+        lastE = 0
+        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
+        # keep string locs straight between transformString and scanString
+        self.keepTabs = True
+        try:
+            for t,s,e in self.scanString( instring ):
+                out.append( instring[lastE:s] )
+                if t:
+                    if isinstance(t,ParseResults):
+                        out += t.asList()
+                    elif isinstance(t,list):
+                        out += t
+                    else:
+                        out.append(t)
+                lastE = e
+            out.append(instring[lastE:])
+            out = [o for o in out if o]
+            return "".join(map(_ustr,_flatten(out)))
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def searchString( self, instring, maxMatches=_MAX_INT ):
+        """
+        Another extension to :class:`scanString`, simplifying the access to the tokens found
+        to match the given parse expression.  May be called with optional
+        ``maxMatches`` argument, to clip searching after 'n' matches are found.
+
+        Example::
+
+            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
+            cap_word = Word(alphas.upper(), alphas.lower())
+
+            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
+
+            # the sum() builtin can be used to merge results into a single ParseResults object
+            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
+
+        prints::
+
+            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
+            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
+        """
+        try:
+            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
+        """
+        Generator method to split a string using the given expression as a separator.
+        May be called with optional ``maxsplit`` argument, to limit the number of splits;
+        and the optional ``includeSeparators`` argument (default= ``False``), indicating
+        whether the separating matched text should be included in the split results.
+
+        Example::
+
+            punc = oneOf(list(".,;:/-!?"))
+            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
+
+        prints::
+
+            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
+        """
+        splits = 0
+        last = 0
+        for t,s,e in self.scanString(instring, maxMatches=maxsplit):
+            yield instring[last:s]
+            if includeSeparators:
+                yield t[0]
+            last = e
+        yield instring[last:]
+
+    def __add__(self, other ):
+        """
+        Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement
+        converts them to :class:`Literal`s by default.
+
+        Example::
+
+            greet = Word(alphas) + "," + Word(alphas) + "!"
+            hello = "Hello, World!"
+            print (hello, "->", greet.parseString(hello))
+
+        prints::
+
+            Hello, World! -> ['Hello', ',', 'World', '!']
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return And( [ self, other ] )
+
+    def __radd__(self, other ):
+        """
+        Implementation of + operator when left operand is not a :class:`ParserElement`
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other + self
+
+    def __sub__(self, other):
+        """
+        Implementation of - operator, returns :class:`And` with error stop
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return self + And._ErrorStop() + other
+
+    def __rsub__(self, other ):
+        """
+        Implementation of - operator when left operand is not a :class:`ParserElement`
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other - self
+
+    def __mul__(self,other):
+        """
+        Implementation of * operator, allows use of ``expr * 3`` in place of
+        ``expr + expr + expr``.  Expressions may also be multiplied by a 2-integer
+        tuple, similar to ``{min,max}`` multipliers in regular expressions.  Tuples
+        may also include ``None`` as in:
+         - ``expr*(n,None)`` or ``expr*(n,)`` is equivalent
+              to ``expr*n + ZeroOrMore(expr)``
+              (read as "at least n instances of ``expr``")
+         - ``expr*(None,n)`` is equivalent to ``expr*(0,n)``
+              (read as "0 to n instances of ``expr``")
+         - ``expr*(None,None)`` is equivalent to ``ZeroOrMore(expr)``
+         - ``expr*(1,None)`` is equivalent to ``OneOrMore(expr)``
+
+        Note that ``expr*(None,n)`` does not raise an exception if
+        more than n exprs exist in the input stream; that is,
+        ``expr*(None,n)`` does not enforce a maximum number of expr
+        occurrences.  If this behavior is desired, then write
+        ``expr*(None,n) + ~expr``
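+
+        Example (a brief sketch; ``expr`` stands for any expression)::
+
+            expr = Word(nums)
+            (expr * 3).parseString("1 2 3")          # -> ['1', '2', '3']
+            (expr * (2, None)).parseString("1 2 3")  # -> ['1', '2', '3']
+            (expr * (None, 2)).parseString("1 2 3")  # -> ['1', '2']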
+        """
+        if isinstance(other,int):
+            minElements, optElements = other,0
+        elif isinstance(other,tuple):
+            other = (other + (None, None))[:2]
+            if other[0] is None:
+                other = (0, other[1])
+            if isinstance(other[0],int) and other[1] is None:
+                if other[0] == 0:
+                    return ZeroOrMore(self)
+                if other[0] == 1:
+                    return OneOrMore(self)
+                else:
+                    return self*other[0] + ZeroOrMore(self)
+            elif isinstance(other[0],int) and isinstance(other[1],int):
+                minElements, optElements = other
+                optElements -= minElements
+            else:
+                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects" % (type(other[0]), type(other[1])))
+        else:
+            raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
+
+        if minElements < 0:
+            raise ValueError("cannot multiply ParserElement by negative value")
+        if optElements < 0:
+            raise ValueError("second tuple value must be greater or equal to first tuple value")
+        if minElements == optElements == 0:
+            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
+
+        if (optElements):
+            def makeOptionalList(n):
+                if n>1:
+                    return Optional(self + makeOptionalList(n-1))
+                else:
+                    return Optional(self)
+            if minElements:
+                if minElements == 1:
+                    ret = self + makeOptionalList(optElements)
+                else:
+                    ret = And([self]*minElements) + makeOptionalList(optElements)
+            else:
+                ret = makeOptionalList(optElements)
+        else:
+            if minElements == 1:
+                ret = self
+            else:
+                ret = And([self]*minElements)
+        return ret
+
+    def __rmul__(self, other):
+        return self.__mul__(other)
+
+    def __or__(self, other ):
+        """
+        Implementation of | operator - returns :class:`MatchFirst`
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return MatchFirst( [ self, other ] )
+
+    def __ror__(self, other ):
+        """
+        Implementation of | operator when left operand is not a :class:`ParserElement`
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other | self
+
+    def __xor__(self, other ):
+        """
+        Implementation of ^ operator - returns :class:`Or`
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Or( [ self, other ] )
+
+    def __rxor__(self, other ):
+        """
+        Implementation of ^ operator when left operand is not a :class:`ParserElement`
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other ^ self
+
+    def __and__(self, other ):
+        """
+        Implementation of & operator - returns :class:`Each`
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Each( [ self, other ] )
+
+    def __rand__(self, other ):
+        """
+        Implementation of & operator when left operand is not a :class:`ParserElement`
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other & self
+
+    def __invert__( self ):
+        """
+        Implementation of ~ operator - returns :class:`NotAny`
+        """
+        return NotAny( self )
+
+    def __call__(self, name=None):
+        """
+        Shortcut for :class:`setResultsName`, with ``listAllMatches=False``.
+
+        If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be
+        passed as ``True``.
+
+        If ``name`` is omitted, same as calling :class:`copy`.
+
+        Example::
+
+            # these are equivalent
+            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
+            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
+        """
+        if name is not None:
+            return self.setResultsName(name)
+        else:
+            return self.copy()
+
+    def suppress( self ):
+        """
+        Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
+        cluttering up returned output.
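+
+        Example (an illustrative sketch)::
+
+            wd_list = Word(alphas) + ZeroOrMore(Literal(',').suppress() + Word(alphas))
+            wd_list.parseString("a, b, c")  # -> ['a', 'b', 'c']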
+        """
+        return Suppress( self )
+
+    def leaveWhitespace( self ):
+        """
+        Disables the skipping of whitespace before matching the characters in the
+        :class:`ParserElement`'s defined pattern.  This is normally only used internally by
+        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
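+
+        Example (an illustrative sketch)::
+
+            Word(alphas).parseString("  abc")                    # -> ['abc']
+            Word(alphas).leaveWhitespace().parseString("  abc")  # -> Exception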
+        """
+        self.skipWhitespace = False
+        return self
+
+    def setWhitespaceChars( self, chars ):
+        """
+        Overrides the default whitespace chars
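+
+        Example (an illustrative sketch)::
+
+            # treat only spaces as skippable whitespace, so tabs and newlines
+            # become significant
+            item = Word(alphas).setWhitespaceChars(" ")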
+        """
+        self.skipWhitespace = True
+        self.whiteChars = chars
+        self.copyDefaultWhiteChars = False
+        return self
+
+    def parseWithTabs( self ):
+        """
+        Overrides the default behavior of expanding ``<TAB>``s to spaces before parsing
+        the input string, leaving tab characters intact.  Must be called before
+        ``parseString`` when the input grammar contains elements that match ``<TAB>`` characters.
+        """
+        self.keepTabs = True
+        return self
+
+    def ignore( self, other ):
+        """
+        Define expression to be ignored (e.g., comments) while doing pattern
+        matching; may be called repeatedly, to define multiple comment or other
+        ignorable patterns.
+
+        Example::
+
+            patt = OneOrMore(Word(alphas))
+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
+
+            patt.ignore(cStyleComment)
+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
+        """
+        if isinstance(other, basestring):
+            other = Suppress(other)
+
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                self.ignoreExprs.append(other)
+        else:
+            self.ignoreExprs.append( Suppress( other.copy() ) )
+        return self
+
+    def setDebugActions( self, startAction, successAction, exceptionAction ):
+        """
+        Enable display of debugging messages while doing pattern matching.
+        """
+        self.debugActions = (startAction or _defaultStartDebugAction,
+                             successAction or _defaultSuccessDebugAction,
+                             exceptionAction or _defaultExceptionDebugAction)
+        self.debug = True
+        return self
+
+    def setDebug( self, flag=True ):
+        """
+        Enable display of debugging messages while doing pattern matching.
+        Set ``flag`` to True to enable, False to disable.
+
+        Example::
+
+            wd = Word(alphas).setName("alphaword")
+            integer = Word(nums).setName("numword")
+            term = wd | integer
+
+            # turn on debugging for wd
+            wd.setDebug()
+
+            OneOrMore(term).parseString("abc 123 xyz 890")
+
+        prints::
+
+            Match alphaword at loc 0(1,1)
+            Matched alphaword -> ['abc']
+            Match alphaword at loc 3(1,4)
+            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+            Match alphaword at loc 7(1,8)
+            Matched alphaword -> ['xyz']
+            Match alphaword at loc 11(1,12)
+            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+            Match alphaword at loc 15(1,16)
+            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+        The output shown is that produced by the default debug actions - custom debug actions can be
+        specified using :class:`setDebugActions`. Prior to attempting
+        to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
+        is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
+        message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression,
+        which makes debugging and exception messages easier to understand - for instance, the default
+        name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``.
+        """
+        if flag:
+            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
+        else:
+            self.debug = False
+        return self
+
+    def __str__( self ):
+        return self.name
+
+    def __repr__( self ):
+        return _ustr(self)
+
+    def streamline( self ):
+        self.streamlined = True
+        self.strRepr = None
+        return self
+
+    def checkRecursion( self, parseElementList ):
+        pass
+
+    def validate( self, validateTrace=[] ):
+        """
+        Check defined expressions for valid structure, check for infinite recursive definitions.
+        """
+        self.checkRecursion( [] )
+
+    def parseFile( self, file_or_filename, parseAll=False ):
+        """
+        Execute the parse expression on the given file or filename.
+        If a filename is specified (instead of a file object),
+        the entire file is opened, read, and closed before parsing.
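+
+        Example (a sketch; ``data.txt`` is a placeholder filename)::
+
+            words = OneOrMore(Word(alphas))
+            results = words.parseFile("data.txt")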
+        """
+        try:
+            file_contents = file_or_filename.read()
+        except AttributeError:
+            with open(file_or_filename, "r") as f:
+                file_contents = f.read()
+        try:
+            return self.parseString(file_contents, parseAll)
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def __eq__(self,other):
+        if isinstance(other, ParserElement):
+            return self is other or vars(self) == vars(other)
+        elif isinstance(other, basestring):
+            return self.matches(other)
+        else:
+            return NotImplemented  # defer to the other operand's comparison
+
+    def __ne__(self,other):
+        return not (self == other)
+
+    def __hash__(self):
+        return hash(id(self))
+
+    def __req__(self,other):
+        return self == other
+
+    def __rne__(self,other):
+        return not (self == other)
+
+    def matches(self, testString, parseAll=True):
+        """
+        Method for quick testing of a parser against a test string. Good for simple
+        inline microtests of sub-expressions while building up a larger parser.
+
+        Parameters:
+         - testString - to test against this expression for a match
+         - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
+
+        Example::
+
+            expr = Word(nums)
+            assert expr.matches("100")
+        """
+        try:
+            self.parseString(_ustr(testString), parseAll=parseAll)
+            return True
+        except ParseBaseException:
+            return False
+
+    def runTests(self, tests, parseAll=True, comment='#',
+                 fullDump=True, printResults=True, failureTests=False, postParse=None):
+        """
+        Execute the parse expression on a series of test strings, showing each
+        test, the parsed results or where the parse failed. Quick and easy way to
+        run a parse expression against a list of sample strings.
+
+        Parameters:
+         - tests - a list of separate test strings, or a multiline string of test strings
+         - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
+         - comment - (default= ``'#'``) - expression for indicating embedded comments in the test
+              string; pass None to disable comment filtering
+         - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline;
+              if False, only dump nested list
+         - printResults - (default= ``True``) prints test output to stdout
+         - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing
+         - postParse - (default= ``None``) optional callback for successful parse results; called as
+              `fn(test_string, parse_results)` and returns a string to be added to the test output
+
+        Returns: a (success, results) tuple, where success indicates that all tests succeeded
+        (or failed if ``failureTests`` is True), and the results contain a list of lines of each
+        test's output
+
+        Example::
+
+            number_expr = pyparsing_common.number.copy()
+
+            result = number_expr.runTests('''
+                # unsigned integer
+                100
+                # negative integer
+                -100
+                # float with scientific notation
+                6.02e23
+                # integer with scientific notation
+                1e-12
+                ''')
+            print("Success" if result[0] else "Failed!")
+
+            result = number_expr.runTests('''
+                # stray character
+                100Z
+                # missing leading digit before '.'
+                -.100
+                # too many '.'
+                3.14.159
+                ''', failureTests=True)
+            print("Success" if result[0] else "Failed!")
+
+        prints::
+
+            # unsigned integer
+            100
+            [100]
+
+            # negative integer
+            -100
+            [-100]
+
+            # float with scientific notation
+            6.02e23
+            [6.02e+23]
+
+            # integer with scientific notation
+            1e-12
+            [1e-12]
+
+            Success
+
+            # stray character
+            100Z
+               ^
+            FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+            # missing leading digit before '.'
+            -.100
+            ^
+            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+            # too many '.'
+            3.14.159
+                ^
+            FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+            Success
+
+        Each test string must be on a single line. If you want to test a string that spans multiple
+        lines, create a test like this::
+
+            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
+
+        (Note that this is a raw string literal, you must include the leading 'r'.)
+        """
+        if isinstance(tests, basestring):
+            tests = list(map(str.strip, tests.rstrip().splitlines()))
+        if isinstance(comment, basestring):
+            comment = Literal(comment)
+        allResults = []
+        comments = []
+        success = True
+        for t in tests:
+            if comment is not None and comment.matches(t, False) or comments and not t:
+                comments.append(t)
+                continue
+            if not t:
+                continue
+            out = ['\n'.join(comments), t]
+            comments = []
+            try:
+                # convert newline marks to actual newlines, and strip leading BOM if present
+                NL = Literal(r'\n').addParseAction(replaceWith('\n')).ignore(quotedString)
+                BOM = '\ufeff'
+                t = NL.transformString(t.lstrip(BOM))
+                result = self.parseString(t, parseAll=parseAll)
+                out.append(result.dump(full=fullDump))
+                success = success and not failureTests
+                if postParse is not None:
+                    try:
+                        pp_value = postParse(t, result)
+                        if pp_value is not None:
+                            out.append(str(pp_value))
+                    except Exception as e:
+                        out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e))
+            except ParseBaseException as pe:
+                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
+                if '\n' in t:
+                    out.append(line(pe.loc, t))
+                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
+                else:
+                    out.append(' '*pe.loc + '^' + fatal)
+                out.append("FAIL: " + str(pe))
+                success = success and failureTests
+                result = pe
+            except Exception as exc:
+                out.append("FAIL-EXCEPTION: " + str(exc))
+                success = success and failureTests
+                result = exc
+
+            if printResults:
+                if fullDump:
+                    out.append('')
+                print('\n'.join(out))
+
+            allResults.append((t, result))
+
+        return success, allResults
+
+
+class Token(ParserElement):
+    """Abstract :class:`ParserElement` subclass, for defining atomic
+    matching patterns.
+    """
+    def __init__( self ):
+        super(Token,self).__init__( savelist=False )
+
+
+class Empty(Token):
+    """An empty token, will always match.
+    """
+    def __init__( self ):
+        super(Empty,self).__init__()
+        self.name = "Empty"
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+
+class NoMatch(Token):
+    """A token that will never match.
+    """
+    def __init__( self ):
+        super(NoMatch,self).__init__()
+        self.name = "NoMatch"
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.errmsg = "Unmatchable token"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Literal(Token):
+    """Token to exactly match a specified string.
+
+    Example::
+
+        Literal('blah').parseString('blah')  # -> ['blah']
+        Literal('blah').parseString('blahfooblah')  # -> ['blah']
+        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
+
+    For case-insensitive matching, use :class:`CaselessLiteral`.
+
+    For keyword matching (force word break before and after the matched string),
+    use :class:`Keyword` or :class:`CaselessKeyword`.
+    """
+    def __init__( self, matchString ):
+        super(Literal,self).__init__()
+        self.match = matchString
+        self.matchLen = len(matchString)
+        try:
+            self.firstMatchChar = matchString[0]
+        except IndexError:
+            warnings.warn("null string passed to Literal; use Empty() instead",
+                            SyntaxWarning, stacklevel=2)
+            self.__class__ = Empty
+        self.name = '"%s"' % _ustr(self.match)
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = False
+        self.mayIndexError = False
+
+    # Performance tuning: this routine gets called a *lot*
+    # if this is a single character match string  and the first character matches,
+    # short-circuit as quickly as possible, and avoid calling startswith
+    #~ @profile
+    def parseImpl( self, instring, loc, doActions=True ):
+        if (instring[loc] == self.firstMatchChar and
+            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
+            return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+_L = Literal
+ParserElement._literalStringClass = Literal
+
+class Keyword(Token):
+    """Token to exactly match a specified string as a keyword, that is,
+    it must be immediately followed by a non-keyword character.  Compare
+    with :class:`Literal`:
+
+     - ``Literal("if")`` will match the leading ``'if'`` in
+       ``'ifAndOnlyIf'``.
+     - ``Keyword("if")`` will not; it will only match the leading
+       ``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
+
+    Accepts two optional constructor arguments in addition to the
+    keyword string:
+
+     - ``identChars`` is a string of characters that would be valid
+       identifier characters, defaulting to all alphanumerics + "_" and
+       "$"
+     - ``caseless`` allows case-insensitive matching, default is ``False``.
+
+    Example::
+
+        Keyword("start").parseString("start")  # -> ['start']
+        Keyword("start").parseString("starting")  # -> Exception
+
+    For case-insensitive matching, use :class:`CaselessKeyword`.
+    """
+    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
+
+    def __init__( self, matchString, identChars=None, caseless=False ):
+        super(Keyword,self).__init__()
+        if identChars is None:
+            identChars = Keyword.DEFAULT_KEYWORD_CHARS
+        self.match = matchString
+        self.matchLen = len(matchString)
+        try:
+            self.firstMatchChar = matchString[0]
+        except IndexError:
+            warnings.warn("null string passed to Keyword; use Empty() instead",
+                            SyntaxWarning, stacklevel=2)
+        self.name = '"%s"' % self.match
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = False
+        self.mayIndexError = False
+        self.caseless = caseless
+        if caseless:
+            self.caselessmatch = matchString.upper()
+            identChars = identChars.upper()
+        self.identChars = set(identChars)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.caseless:
+            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
+                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
+                return loc+self.matchLen, self.match
+        else:
+            if (instring[loc] == self.firstMatchChar and
+                (self.matchLen==1 or instring.startswith(self.match,loc)) and
+                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
+                (loc == 0 or instring[loc-1] not in self.identChars) ):
+                return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+
+    def copy(self):
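+        # a copied Keyword reverts to the class-level default keyword chars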
+        c = super(Keyword,self).copy()
+        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
+        return c
+
+    @staticmethod
+    def setDefaultKeywordChars( chars ):
+        """Overrides the default Keyword chars
+        """
+        Keyword.DEFAULT_KEYWORD_CHARS = chars
+
+class CaselessLiteral(Literal):
+    """Token to match a specified string, ignoring case of letters.
+    Note: the matched results will always be in the case of the given
+    match string, NOT the case of the input text.
+
+    Example::
+
+        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
+
+    (Contrast with example for :class:`CaselessKeyword`.)
+    """
+    def __init__( self, matchString ):
+        super(CaselessLiteral,self).__init__( matchString.upper() )
+        # Preserve the defining literal.
+        self.returnString = matchString
+        self.name = "'%s'" % self.returnString
+        self.errmsg = "Expected " + self.name
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[ loc:loc+self.matchLen ].upper() == self.match:
+            return loc+self.matchLen, self.returnString
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class CaselessKeyword(Keyword):
+    """
+    Caseless version of :class:`Keyword`.
+
+    Example::
+
+        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
+
+    (Contrast with example for :class:`CaselessLiteral`.)
+    """
+    def __init__( self, matchString, identChars=None ):
+        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
+
+class CloseMatch(Token):
+    """A variation on :class:`Literal` which matches "close" matches,
+    that is, strings with at most 'n' mismatching characters.
+    :class:`CloseMatch` takes parameters:
+
+     - ``match_string`` - string to be matched
+     - ``maxMismatches`` - (``default=1``) maximum number of
+       mismatches allowed to count as a match
+
+    The results from a successful parse will contain the matched text
+    from the input string and the following named results:
+
+     - ``mismatches`` - a list of the positions within the
+       match_string where mismatches were found
+     - ``original`` - the original match_string used to compare
+       against the input string
+
+    If ``mismatches`` is an empty list, then the match was an exact
+    match.
+
+    Example::
+
+        patt = CloseMatch("ATCATCGAATGGA")
+        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
+        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
+
+        # exact match
+        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
+
+        # close match allowing up to 2 mismatches
+        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
+        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
+    """
+    def __init__(self, match_string, maxMismatches=1):
+        super(CloseMatch,self).__init__()
+        self.name = match_string
+        self.match_string = match_string
+        self.maxMismatches = maxMismatches
+        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
+        self.mayIndexError = False
+        self.mayReturnEmpty = False
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        start = loc
+        instrlen = len(instring)
+        maxloc = start + len(self.match_string)
+
+        if maxloc <= instrlen:
+            match_string = self.match_string
+            match_stringloc = 0
+            mismatches = []
+            maxMismatches = self.maxMismatches
+
+            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
+                src,mat = s_m
+                if src != mat:
+                    mismatches.append(match_stringloc)
+                    if len(mismatches) > maxMismatches:
+                        break
+            else:
+                loc = start + match_stringloc + 1
+                results = ParseResults([instring[start:loc]])
+                results['original'] = self.match_string
+                results['mismatches'] = mismatches
+                return loc, results
+
+        raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Word(Token):
+    """Token for matching words composed of allowed character sets.
+    Defined with string containing all allowed initial characters, an
+    optional string containing allowed body characters (if omitted,
+    defaults to the initial character set), and an optional minimum,
+    maximum, and/or exact length.  The default value for ``min`` is
+    1 (a minimum value < 1 is not valid); the default values for
+    ``max`` and ``exact`` are 0, meaning no maximum or exact
+    length restriction. An optional ``excludeChars`` parameter can
+    list characters that might be found in the input ``bodyChars``
+    string; useful to define a word of all printables except for one or
+    two characters, for instance.
+
+    :class:`srange` is useful for defining custom character set strings
+    for defining ``Word`` expressions, using range notation from
+    regular expression character sets.
+
+    A common mistake is to use :class:`Word` to match a specific literal
+    string, as in ``Word("Address")``. Remember that :class:`Word`
+    uses the string argument to define *sets* of matchable characters.
+    This expression would match "Add", "AAA", "dAred", or any other word
+    made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
+    exact literal string, use :class:`Literal` or :class:`Keyword`.
+
+    pyparsing includes helper strings for building Words:
+
+     - :class:`alphas`
+     - :class:`nums`
+     - :class:`alphanums`
+     - :class:`hexnums`
+     - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255
+       - accented, tilded, umlauted, etc.)
+     - :class:`punc8bit` (non-alphabetic characters in ASCII range
+       128-255 - currency, symbols, superscripts, diacriticals, etc.)
+     - :class:`printables` (any non-whitespace character)
+
+    Example::
+
+        # a word composed of digits
+        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
+
+        # a word with a leading capital, and zero or more lowercase
+        capital_word = Word(alphas.upper(), alphas.lower())
+
+        # hostnames are alphanumeric, with leading alpha, and '-'
+        hostname = Word(alphas, alphanums+'-')
+
+        # roman numeral (not a strict parser, accepts invalid mix of characters)
+        roman = Word("IVXLCDM")
+
+        # any string of non-whitespace characters, except for ','
+        csv_value = Word(printables, excludeChars=",")
+    """
+    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
+        super(Word,self).__init__()
+        if excludeChars:
+            excludeChars = set(excludeChars)
+            initChars = ''.join(c for c in initChars if c not in excludeChars)
+            if bodyChars:
+                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
+        self.initCharsOrig = initChars
+        self.initChars = set(initChars)
+        if bodyChars :
+            self.bodyCharsOrig = bodyChars
+            self.bodyChars = set(bodyChars)
+        else:
+            self.bodyCharsOrig = initChars
+            self.bodyChars = set(initChars)
+
+        self.maxSpecified = max > 0
+
+        if min < 1:
+            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.asKeyword = asKeyword
+
+        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
+            if self.bodyCharsOrig == self.initCharsOrig:
+                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
+            elif len(self.initCharsOrig) == 1:
+                self.reString = "%s[%s]*" % \
+                                      (re.escape(self.initCharsOrig),
+                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
+            else:
+                self.reString = "[%s][%s]*" % \
+                                      (_escapeRegexRangeChars(self.initCharsOrig),
+                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
+            if self.asKeyword:
+                self.reString = r"\b"+self.reString+r"\b"
+            try:
+                self.re = re.compile( self.reString )
+            except Exception:
+                self.re = None
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.re:
+            result = self.re.match(instring,loc)
+            if not result:
+                raise ParseException(instring, loc, self.errmsg, self)
+
+            loc = result.end()
+            return loc, result.group()
+
+        if instring[loc] not in self.initChars:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        start = loc
+        loc += 1
+        instrlen = len(instring)
+        bodychars = self.bodyChars
+        maxloc = start + self.maxLen
+        maxloc = min( maxloc, instrlen )
+        while loc < maxloc and instring[loc] in bodychars:
+            loc += 1
+
+        throwException = False
+        if loc - start < self.minLen:
+            throwException = True
+        elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
+            throwException = True
+        elif self.asKeyword:
+            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
+                throwException = True
+
+        if throwException:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+    def __str__( self ):
+        try:
+            return super(Word,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+
+            def charsAsStr(s):
+                if len(s)>4:
+                    return s[:4]+"..."
+                else:
+                    return s
+
+            if ( self.initCharsOrig != self.bodyCharsOrig ):
+                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
+            else:
+                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
+
+        return self.strRepr
+
+
+class Char(Word):
+    """A short-cut class for defining ``Word(characters, exact=1)``,
+    when defining a match of any single character in a string of
+    characters.
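+
+    Example (a minimal sketch)::
+
+        # match any single digit character
+        single_digit = Char(nums)
+        print(single_digit.parseString("123"))  # expected: ['1']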
+    """
+    def __init__(self, charset, asKeyword=False, excludeChars=None):
+        super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars)
+        self.reString = "[%s]" % _escapeRegexRangeChars(self.initCharsOrig)
+        self.re = re.compile( self.reString )
+
+
+class Regex(Token):
+    r"""Token for matching strings that match a given regular
+    expression. Defined with a string specifying the regular expression in
+    a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
+    If the given regex contains named groups (defined using ``(?P<name>...)``),
+    these will be preserved as named parse results.
+
+    Example::
+
+        realnum = Regex(r"[+-]?\d+\.\d*")
+        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+        # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
+        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
+    """
+    compiledREtype = type(re.compile("[A-Z]"))
+    def __init__( self, pattern, flags=0, asGroupList=False, asMatch=False):
+        """The parameters ``pattern`` and ``flags`` are passed
+        to the ``re.compile()`` function as-is. See the Python
+        `re module <https://docs.python.org/3/library/re.html>`_ for an
+        explanation of the acceptable patterns and flags.
+        """
+        super(Regex,self).__init__()
+
+        if isinstance(pattern, basestring):
+            if not pattern:
+                warnings.warn("null string passed to Regex; use Empty() instead",
+                        SyntaxWarning, stacklevel=2)
+
+            self.pattern = pattern
+            self.flags = flags
+
+            try:
+                self.re = re.compile(self.pattern, self.flags)
+                self.reString = self.pattern
+            except sre_constants.error:
+                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
+                    SyntaxWarning, stacklevel=2)
+                raise
+
+        elif isinstance(pattern, Regex.compiledREtype):
+            self.re = pattern
+            self.pattern = \
+            self.reString = str(pattern)
+            self.flags = flags
+
+        else:
+            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+        self.asGroupList = asGroupList
+        self.asMatch = asMatch
+        if self.asGroupList:
+            self.parseImpl = self.parseImplAsGroupList
+        if self.asMatch:
+            self.parseImpl = self.parseImplAsMatch
+
+    def parseImpl(self, instring, loc, doActions=True):
+        result = self.re.match(instring,loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = ParseResults(result.group())
+        d = result.groupdict()
+        if d:
+            for k, v in d.items():
+                ret[k] = v
+        return loc, ret
+
+    def parseImplAsGroupList(self, instring, loc, doActions=True):
+        result = self.re.match(instring,loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = result.groups()
+        return loc, ret
+
+    def parseImplAsMatch(self, instring, loc, doActions=True):
+        result = self.re.match(instring,loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = result
+        return loc, ret
+
+    def __str__( self ):
+        try:
+            return super(Regex,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "Re:(%s)" % repr(self.pattern)
+
+        return self.strRepr
+
+    def sub(self, repl):
+        r"""
+        Return Regex with an attached parse action to transform the parsed
+        result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
+
+        Example::
+
+            make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
+            print(make_html.transformString("h1:main title:"))
+            # prints "<h1>main title</h1>"
+        """
+        if self.asGroupList:
+            warnings.warn("cannot use sub() with Regex(asGroupList=True)",
+                           SyntaxWarning, stacklevel=2)
+            raise SyntaxError()
+
+        if self.asMatch and callable(repl):
+            warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)",
+                           SyntaxWarning, stacklevel=2)
+            raise SyntaxError()
+
+        if self.asMatch:
+            def pa(tokens):
+                return tokens[0].expand(repl)
+        else:
+            def pa(tokens):
+                return self.re.sub(repl, tokens[0])
+        return self.addParseAction(pa)
+
+class QuotedString(Token):
+    r"""
+    Token for matching strings that are delimited by quoting characters.
+
+    Defined with the following parameters:
+
+        - quoteChar - string of one or more characters defining the
+          quote delimiting string
+        - escChar - character to escape quotes, typically backslash
+          (default= ``None`` )
+        - escQuote - special quote sequence to escape an embedded quote
+          string (such as SQL's ``""`` to escape an embedded ``"``)
+          (default= ``None`` )
+        - multiline - boolean indicating whether quotes can span
+          multiple lines (default= ``False`` )
+        - unquoteResults - boolean indicating whether the matched text
+          should be unquoted (default= ``True`` )
+        - endQuoteChar - string of one or more characters defining the
+          end of the quote delimited string (default= ``None``  => same as
+          quoteChar)
+        - convertWhitespaceEscapes - convert escaped whitespace
+          (``'\t'``, ``'\n'``, etc.) to actual whitespace
+          (default= ``True`` )
+
+    Example::
+
+        qs = QuotedString('"')
+        print(qs.searchString('lsjdf "This is the quote" sldjf'))
+        complex_qs = QuotedString('{{', endQuoteChar='}}')
+        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
+        sql_qs = QuotedString('"', escQuote='""')
+        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+
+    prints::
+
+        [['This is the quote']]
+        [['This is the "quote"']]
+        [['This is the quote with "embedded" quotes']]
+    """
+    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
+        super(QuotedString,self).__init__()
+
+        # remove white space from quote chars - won't work anyway
+        quoteChar = quoteChar.strip()
+        if not quoteChar:
+            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+            raise SyntaxError()
+
+        if endQuoteChar is None:
+            endQuoteChar = quoteChar
+        else:
+            endQuoteChar = endQuoteChar.strip()
+            if not endQuoteChar:
+                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+                raise SyntaxError()
+
+        self.quoteChar = quoteChar
+        self.quoteCharLen = len(quoteChar)
+        self.firstQuoteChar = quoteChar[0]
+        self.endQuoteChar = endQuoteChar
+        self.endQuoteCharLen = len(endQuoteChar)
+        self.escChar = escChar
+        self.escQuote = escQuote
+        self.unquoteResults = unquoteResults
+        self.convertWhitespaceEscapes = convertWhitespaceEscapes
+
+        if multiline:
+            self.flags = re.MULTILINE | re.DOTALL
+            self.pattern = r'%s(?:[^%s%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        else:
+            self.flags = 0
+            self.pattern = r'%s(?:[^%s\n\r%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        if len(self.endQuoteChar) > 1:
+            self.pattern += (
+                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
+                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
+                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
+                )
+        if escQuote:
+            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
+        if escChar:
+            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
+            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
+        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
+
+        try:
+            self.re = re.compile(self.pattern, self.flags)
+            self.reString = self.pattern
+        except sre_constants.error:
+            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
+                SyntaxWarning, stacklevel=2)
+            raise
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = result.group()
+
+        if self.unquoteResults:
+
+            # strip off quotes
+            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
+
+            if isinstance(ret,basestring):
+                # replace escaped whitespace
+                if '\\' in ret and self.convertWhitespaceEscapes:
+                    ws_map = {
+                        r'\t' : '\t',
+                        r'\n' : '\n',
+                        r'\f' : '\f',
+                        r'\r' : '\r',
+                    }
+                    for wslit,wschar in ws_map.items():
+                        ret = ret.replace(wslit, wschar)
+
+                # replace escaped characters
+                if self.escChar:
+                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
+
+                # replace escaped quotes
+                if self.escQuote:
+                    ret = ret.replace(self.escQuote, self.endQuoteChar)
+
+        return loc, ret
+
+    def __str__( self ):
+        try:
+            return super(QuotedString,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
+
+        return self.strRepr
+
+
+class CharsNotIn(Token):
+    """Token for matching words composed of characters *not* in a given
+    set (will include whitespace in matched characters if not listed in
+    the provided exclusion set - see example). Defined with a string
+    containing all disallowed characters, and an optional minimum,
+    maximum, and/or exact length.  The default value for ``min`` is
+    1 (a minimum value < 1 is not valid); the default values for
+    ``max`` and ``exact`` are 0, meaning no maximum or exact
+    length restriction.
+
+    Example::
+
+        # define a comma-separated-value as anything that is not a ','
+        csv_value = CharsNotIn(',')
+        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
+
+    prints::
+
+        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
+    """
+    def __init__( self, notChars, min=1, max=0, exact=0 ):
+        super(CharsNotIn,self).__init__()
+        self.skipWhitespace = False
+        self.notChars = notChars
+
+        if min < 1:
+            raise ValueError(
+                "cannot specify a minimum length < 1; use " +
+                "Optional(CharsNotIn()) if zero-length char group is permitted")
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = ( self.minLen == 0 )
+        self.mayIndexError = False
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[loc] in self.notChars:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        start = loc
+        loc += 1
+        notchars = self.notChars
+        maxlen = min( start+self.maxLen, len(instring) )
+        while loc < maxlen and \
+              (instring[loc] not in notchars):
+            loc += 1
+
+        if loc - start < self.minLen:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+    def __str__( self ):
+        try:
+            return super(CharsNotIn, self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            if len(self.notChars) > 4:
+                self.strRepr = "!W:(%s...)" % self.notChars[:4]
+            else:
+                self.strRepr = "!W:(%s)" % self.notChars
+
+        return self.strRepr
+
+class White(Token):
+    """Special matching class for matching whitespace.  Normally,
+    whitespace is ignored by pyparsing grammars.  This class is included
+    when some whitespace structures are significant.  Define with
+    a string containing the whitespace characters to be matched; default
+    is ``" \\t\\r\\n"``.  Also takes optional ``min``,
+    ``max``, and ``exact`` arguments, as defined for the
+    :class:`Word` class.
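+
+    Example (a minimal sketch)::
+
+        # make a run of leading spaces significant, converting it to an
+        # indentation depth
+        indent = White(" ").setParseAction(lambda t: len(t[0]))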
+    """
+    whiteStrs = {
+        ' ' : '<SP>',
+        '\t': '<TAB>',
+        '\n': '<LF>',
+        '\r': '<CR>',
+        '\f': '<FF>',
+        u'\u00A0': '<NBSP>',
+        u'\u1680': '<OGHAM_SPACE_MARK>',
+        u'\u180E': '<MONGOLIAN_VOWEL_SEPARATOR>',
+        u'\u2000': '<EN_QUAD>',
+        u'\u2001': '<EM_QUAD>',
+        u'\u2002': '<EN_SPACE>',
+        u'\u2003': '<EM_SPACE>',
+        u'\u2004': '<THREE-PER-EM_SPACE>',
+        u'\u2005': '<FOUR-PER-EM_SPACE>',
+        u'\u2006': '<SIX-PER-EM_SPACE>',
+        u'\u2007': '<FIGURE_SPACE>',
+        u'\u2008': '<PUNCTUATION_SPACE>',
+        u'\u2009': '<THIN_SPACE>',
+        u'\u200A': '<HAIR_SPACE>',
+        u'\u200B': '<ZERO_WIDTH_SPACE>',
+        u'\u202F': '<NNBSP>',
+        u'\u205F': '<MMSP>',
+        u'\u3000': '<IDEOGRAPHIC_SPACE>',
+        }
+    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
+        super(White,self).__init__()
+        self.matchWhite = ws
+        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
+        #~ self.leaveWhitespace()
+        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
+        self.mayReturnEmpty = True
+        self.errmsg = "Expected " + self.name
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[loc] not in self.matchWhite:
+            raise ParseException(instring, loc, self.errmsg, self)
+        start = loc
+        loc += 1
+        maxloc = start + self.maxLen
+        maxloc = min( maxloc, len(instring) )
+        while loc < maxloc and instring[loc] in self.matchWhite:
+            loc += 1
+
+        if loc - start < self.minLen:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+
+class _PositionToken(Token):
+    def __init__( self ):
+        super(_PositionToken,self).__init__()
+        self.name=self.__class__.__name__
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+class GoToColumn(_PositionToken):
+    """Token to advance to a specific column of input text; useful for
+    tabular report scraping.
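+
+    Example (a minimal sketch, assuming fixed-column report data)::
+
+        # a name starts each line; the quantity field begins in column 20
+        row = Word(alphas)("name") + GoToColumn(20).suppress() + Word(nums)("qty")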
+    """
+    def __init__( self, colno ):
+        super(GoToColumn,self).__init__()
+        self.col = colno
+
+    def preParse( self, instring, loc ):
+        if col(loc,instring) != self.col:
+            instrlen = len(instring)
+            if self.ignoreExprs:
+                loc = self._skipIgnorables( instring, loc )
+            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
+                loc += 1
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        thiscol = col( loc, instring )
+        if thiscol > self.col:
+            raise ParseException( instring, loc, "Text not in expected column", self )
+        newloc = loc + self.col - thiscol
+        ret = instring[ loc: newloc ]
+        return newloc, ret
+
+
+class LineStart(_PositionToken):
+    r"""Matches if current position is at the beginning of a line within
+    the parse string
+
+    Example::
+
+        test = '''\
+        AAA this line
+        AAA and this line
+          AAA but not this one
+        B AAA and definitely not this one
+        '''
+
+        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
+            print(t)
+
+    prints::
+
+        ['AAA', ' this line']
+        ['AAA', ' and this line']
+
+    """
+    def __init__( self ):
+        super(LineStart,self).__init__()
+        self.errmsg = "Expected start of line"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if col(loc, instring) == 1:
+            return loc, []
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class LineEnd(_PositionToken):
+    """Matches if current position is at the end of a line within the
+    parse string
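+
+    Example (a minimal sketch)::
+
+        # one key=value assignment per line; the explicit LineEnd keeps a
+        # value from running into the next line's key
+        assignment = Word(alphas) + '=' + Word(nums) + LineEnd().suppress()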
+    """
+    def __init__( self ):
+        super(LineEnd,self).__init__()
+        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
+        self.errmsg = "Expected end of line"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc<len(instring):
+            if instring[loc] == "\n":
+                return loc+1, "\n"
+            else:
+                raise ParseException(instring, loc, self.errmsg, self)
+        elif loc == len(instring):
+            return loc+1, []
+        else:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+class StringStart(_PositionToken):
+    """Matches if current position is at the beginning of the parse
+    string
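+
+    Example (a minimal sketch; ``restOfLine`` is defined elsewhere in
+    this module)::
+
+        # match a shebang line, but only at the very start of the input
+        shebang = StringStart() + Literal("#!") + restOfLine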
+    """
+    def __init__( self ):
+        super(StringStart,self).__init__()
+        self.errmsg = "Expected start of text"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc != 0:
+            # see if entire string up to here is just whitespace and ignoreables
+            if loc != self.preParse( instring, 0 ):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+class StringEnd(_PositionToken):
+    """Matches if current position is at the end of the parse string
+    """
+    def __init__( self ):
+        super(StringEnd,self).__init__()
+        self.errmsg = "Expected end of text"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc < len(instring):
+            raise ParseException(instring, loc, self.errmsg, self)
+        elif loc == len(instring):
+            return loc+1, []
+        elif loc > len(instring):
+            return loc, []
+        else:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+class WordStart(_PositionToken):
+    """Matches if the current position is at the beginning of a Word,
+    and is not preceded by any character in a given set of
+    ``wordChars`` (default= ``printables``). To emulate the
+    ``\b`` behavior of regular expressions, use
+    ``WordStart(alphanums)``. ``WordStart`` will also match at
+    the beginning of the string being parsed, or at the beginning of
+    a line.
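+
+    Example (a minimal sketch)::
+
+        # match 'cat' only at the start of a word - roughly r'\bcat'
+        cat_at_word_start = WordStart(alphanums) + Literal("cat")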
+    """
+    def __init__(self, wordChars = printables):
+        super(WordStart,self).__init__()
+        self.wordChars = set(wordChars)
+        self.errmsg = "Not at the start of a word"
+
+    def parseImpl(self, instring, loc, doActions=True ):
+        if loc != 0:
+            if (instring[loc-1] in self.wordChars or
+                instring[loc] not in self.wordChars):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+class WordEnd(_PositionToken):
+    """Matches if the current position is at the end of a Word, and is
+    not followed by any character in a given set of ``wordChars``
+    (default= ``printables``). To emulate the ``\b`` behavior of
+    regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
+    will also match at the end of the string being parsed, or at the end
+    of a line.
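+
+    Example (a minimal sketch)::
+
+        # match 'cat' only at the end of a word - roughly r'cat\b'
+        cat_at_word_end = Literal("cat") + WordEnd(alphanums)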
+    """
+    def __init__(self, wordChars = printables):
+        super(WordEnd,self).__init__()
+        self.wordChars = set(wordChars)
+        self.skipWhitespace = False
+        self.errmsg = "Not at the end of a word"
+
+    def parseImpl(self, instring, loc, doActions=True ):
+        instrlen = len(instring)
+        if instrlen>0 and loc<instrlen:
+            if (instring[loc] in self.wordChars or
+                instring[loc-1] not in self.wordChars):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+
+class ParseExpression(ParserElement):
+    """Abstract subclass of ParserElement, for combining and
+    post-processing parsed tokens.
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(ParseExpression,self).__init__(savelist)
+        if isinstance( exprs, _generatorType ):
+            exprs = list(exprs)
+
+        if isinstance( exprs, basestring ):
+            self.exprs = [ ParserElement._literalStringClass( exprs ) ]
+        elif isinstance( exprs, Iterable ):
+            exprs = list(exprs)
+            # if sequence of strings provided, wrap with Literal
+            if all(isinstance(expr, basestring) for expr in exprs):
+                exprs = map(ParserElement._literalStringClass, exprs)
+            self.exprs = list(exprs)
+        else:
+            try:
+                self.exprs = list( exprs )
+            except TypeError:
+                self.exprs = [ exprs ]
+        self.callPreparse = False
+
+    def __getitem__( self, i ):
+        return self.exprs[i]
+
+    def append( self, other ):
+        self.exprs.append( other )
+        self.strRepr = None
+        return self
+
+    def leaveWhitespace( self ):
+        """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on
+           all contained expressions."""
+        self.skipWhitespace = False
+        self.exprs = [ e.copy() for e in self.exprs ]
+        for e in self.exprs:
+            e.leaveWhitespace()
+        return self
+
+    def ignore( self, other ):
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                super( ParseExpression, self).ignore( other )
+                for e in self.exprs:
+                    e.ignore( self.ignoreExprs[-1] )
+        else:
+            super( ParseExpression, self).ignore( other )
+            for e in self.exprs:
+                e.ignore( self.ignoreExprs[-1] )
+        return self
+
+    def __str__( self ):
+        try:
+            return super(ParseExpression,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
+        return self.strRepr
+
+    def streamline( self ):
+        super(ParseExpression,self).streamline()
+
+        for e in self.exprs:
+            e.streamline()
+
+        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
+        # but only if there are no parse actions or resultsNames on the nested And's
+        # (likewise for Or's and MatchFirst's)
+        if ( len(self.exprs) == 2 ):
+            other = self.exprs[0]
+            if ( isinstance( other, self.__class__ ) and
+                  not(other.parseAction) and
+                  other.resultsName is None and
+                  not other.debug ):
+                self.exprs = other.exprs[:] + [ self.exprs[1] ]
+                self.strRepr = None
+                self.mayReturnEmpty |= other.mayReturnEmpty
+                self.mayIndexError  |= other.mayIndexError
+
+            other = self.exprs[-1]
+            if ( isinstance( other, self.__class__ ) and
+                  not(other.parseAction) and
+                  other.resultsName is None and
+                  not other.debug ):
+                self.exprs = self.exprs[:-1] + other.exprs[:]
+                self.strRepr = None
+                self.mayReturnEmpty |= other.mayReturnEmpty
+                self.mayIndexError  |= other.mayIndexError
+
+        self.errmsg = "Expected " + _ustr(self)
+
+        return self
+
+    def validate( self, validateTrace=[] ):
+        tmp = validateTrace[:]+[self]
+        for e in self.exprs:
+            e.validate(tmp)
+        self.checkRecursion( [] )
+
+    def copy(self):
+        ret = super(ParseExpression,self).copy()
+        ret.exprs = [e.copy() for e in self.exprs]
+        return ret
+
+class And(ParseExpression):
+    """
+    Requires all given :class:`ParseExpression` s to be found in the given order.
+    Expressions may be separated by whitespace.
+    May be constructed using the ``'+'`` operator.
+    May also be constructed using the ``'-'`` operator, which will
+    suppress backtracking.
+
+    Example::
+
+        integer = Word(nums)
+        name_expr = OneOrMore(Word(alphas))
+
+        expr = And([integer("id"),name_expr("name"),integer("age")])
+        # more easily written as:
+        expr = integer("id") + name_expr("name") + integer("age")
+    """
+
+    class _ErrorStop(Empty):
+        def __init__(self, *args, **kwargs):
+            super(And._ErrorStop,self).__init__(*args, **kwargs)
+            self.name = '-'
+            self.leaveWhitespace()
+
+    def __init__( self, exprs, savelist = True ):
+        super(And,self).__init__(exprs, savelist)
+        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+        self.setWhitespaceChars( self.exprs[0].whiteChars )
+        self.skipWhitespace = self.exprs[0].skipWhitespace
+        self.callPreparse = True
+
+    def streamline(self):
+        super(And, self).streamline()
+        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+        return self
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        # pass False as last arg to _parse for first element, since we already
+        # pre-parsed the string as part of our And pre-parsing
+        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
+        errorStop = False
+        for e in self.exprs[1:]:
+            if isinstance(e, And._ErrorStop):
+                errorStop = True
+                continue
+            if errorStop:
+                try:
+                    loc, exprtokens = e._parse( instring, loc, doActions )
+                except ParseSyntaxException:
+                    raise
+                except ParseBaseException as pe:
+                    pe.__traceback__ = None
+                    raise ParseSyntaxException._from_exception(pe)
+                except IndexError:
+                    raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
+            else:
+                loc, exprtokens = e._parse( instring, loc, doActions )
+            if exprtokens or exprtokens.haskeys():
+                resultlist += exprtokens
+        return loc, resultlist
+
+    def __iadd__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #And( [ self, other ] )
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+            if not e.mayReturnEmpty:
+                break
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+
+class Or(ParseExpression):
+    """Requires that at least one :class:`ParseExpression` is found. If
+    two expressions match, the expression that matches the longest
+    string will be used. May be constructed using the ``'^'``
+    operator.
+
+    Example::
+
+        # construct Or using '^' operator
+
+        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
+        print(number.searchString("123 3.1416 789"))
+
+    prints::
+
+        [['123'], ['3.1416'], ['789']]
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(Or,self).__init__(exprs, savelist)
+        if self.exprs:
+            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+        else:
+            self.mayReturnEmpty = True
+
+    def streamline(self):
+        super(Or, self).streamline()
+        if __compat__.collect_all_And_tokens:
+            self.saveAsList = any(e.saveAsList for e in self.exprs)
+        return self
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        maxExcLoc = -1
+        maxException = None
+        matches = []
+        for e in self.exprs:
+            try:
+                loc2 = e.tryParse( instring, loc )
+            except ParseException as err:
+                err.__traceback__ = None
+                if err.loc > maxExcLoc:
+                    maxException = err
+                    maxExcLoc = err.loc
+            except IndexError:
+                if len(instring) > maxExcLoc:
+                    maxException = ParseException(instring,len(instring),e.errmsg,self)
+                    maxExcLoc = len(instring)
+            else:
+                # save match among all matches, to retry longest to shortest
+                matches.append((loc2, e))
+
+        if matches:
+            matches.sort(key=lambda x: -x[0])
+            for _,e in matches:
+                try:
+                    return e._parse( instring, loc, doActions )
+                except ParseException as err:
+                    err.__traceback__ = None
+                    if err.loc > maxExcLoc:
+                        maxException = err
+                        maxExcLoc = err.loc
+
+        if maxException is not None:
+            maxException.msg = self.errmsg
+            raise maxException
+        else:
+            raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+
+    def __ixor__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #Or( [ self, other ] )
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class MatchFirst(ParseExpression):
+    """Requires that at least one :class:`ParseExpression` is found. If
+    two expressions match, the first one listed is the one that will
+    match. May be constructed using the ``'|'`` operator.
+
+    Example::
+
+        # construct MatchFirst using '|' operator
+
+        # watch the order of expressions to match
+        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
+        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]
+
+        # put more selective expression first
+        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
+        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(MatchFirst,self).__init__(exprs, savelist)
+        if self.exprs:
+            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+        else:
+            self.mayReturnEmpty = True
+
+    def streamline(self):
+        super(MatchFirst, self).streamline()
+        if __compat__.collect_all_And_tokens:
+            self.saveAsList = any(e.saveAsList for e in self.exprs)
+        return self
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        maxExcLoc = -1
+        maxException = None
+        for e in self.exprs:
+            try:
+                ret = e._parse( instring, loc, doActions )
+                return ret
+            except ParseException as err:
+                if err.loc > maxExcLoc:
+                    maxException = err
+                    maxExcLoc = err.loc
+            except IndexError:
+                if len(instring) > maxExcLoc:
+                    maxException = ParseException(instring,len(instring),e.errmsg,self)
+                    maxExcLoc = len(instring)
+
+        else:
+            # no expression matched - raise the exception from the
+            # alternative that parsed the furthest
+            if maxException is not None:
+                maxException.msg = self.errmsg
+                raise maxException
+            else:
+                raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+    def __ior__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #MatchFirst( [ self, other ] )
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class Each(ParseExpression):
+    """Requires all given :class:`ParseExpression` s to be found, but in
+    any order. Expressions may be separated by whitespace.
+
+    May be constructed using the ``'&'`` operator.
+
+    Example::
+
+        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
+        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
+        integer = Word(nums)
+        shape_attr = "shape:" + shape_type("shape")
+        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
+        color_attr = "color:" + color("color")
+        size_attr = "size:" + integer("size")
+
+        # use Each (using operator '&') to accept attributes in any order
+        # (shape and posn are required, color and size are optional)
+        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
+
+        shape_spec.runTests('''
+            shape: SQUARE color: BLACK posn: 100, 120
+            shape: CIRCLE size: 50 color: BLUE posn: 50,80
+            color:GREEN size:20 shape:TRIANGLE posn:20,40
+            '''
+            )
+
+    prints::
+
+        shape: SQUARE color: BLACK posn: 100, 120
+        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
+        - color: BLACK
+        - posn: ['100', ',', '120']
+          - x: 100
+          - y: 120
+        - shape: SQUARE
+
+
+        shape: CIRCLE size: 50 color: BLUE posn: 50,80
+        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
+        - color: BLUE
+        - posn: ['50', ',', '80']
+          - x: 50
+          - y: 80
+        - shape: CIRCLE
+        - size: 50
+
+
+        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
+        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
+        - color: GREEN
+        - posn: ['20', ',', '40']
+          - x: 20
+          - y: 40
+        - shape: TRIANGLE
+        - size: 20
+    """
+    def __init__( self, exprs, savelist = True ):
+        super(Each,self).__init__(exprs, savelist)
+        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+        self.skipWhitespace = True
+        self.initExprGroups = True
+        self.saveAsList = True
+
+    def streamline(self):
+        super(Each, self).streamline()
+        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+        return self
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.initExprGroups:
+            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
+            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
+            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
+            self.optionals = opt1 + opt2
+            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
+            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
+            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
+            self.required += self.multirequired
+            self.initExprGroups = False
+        tmpLoc = loc
+        tmpReqd = self.required[:]
+        tmpOpt  = self.optionals[:]
+        matchOrder = []
+
+        keepMatching = True
+        while keepMatching:
+            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
+            failed = []
+            for e in tmpExprs:
+                try:
+                    tmpLoc = e.tryParse( instring, tmpLoc )
+                except ParseException:
+                    failed.append(e)
+                else:
+                    matchOrder.append(self.opt1map.get(id(e),e))
+                    if e in tmpReqd:
+                        tmpReqd.remove(e)
+                    elif e in tmpOpt:
+                        tmpOpt.remove(e)
+            if len(failed) == len(tmpExprs):
+                keepMatching = False
+
+        if tmpReqd:
+            missing = ", ".join(_ustr(e) for e in tmpReqd)
+            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
+
+        # add any unmatched Optionals, in case they have default values defined
+        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
+
+        resultlist = []
+        for e in matchOrder:
+            loc,results = e._parse(instring,loc,doActions)
+            resultlist.append(results)
+
+        finalResults = sum(resultlist, ParseResults([]))
+        return loc, finalResults
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class ParseElementEnhance(ParserElement):
+    """Abstract subclass of :class:`ParserElement`, for combining and
+    post-processing parsed tokens.
+    """
+    def __init__( self, expr, savelist=False ):
+        super(ParseElementEnhance,self).__init__(savelist)
+        if isinstance( expr, basestring ):
+            if issubclass(ParserElement._literalStringClass, Token):
+                expr = ParserElement._literalStringClass(expr)
+            else:
+                expr = ParserElement._literalStringClass(Literal(expr))
+        self.expr = expr
+        self.strRepr = None
+        if expr is not None:
+            self.mayIndexError = expr.mayIndexError
+            self.mayReturnEmpty = expr.mayReturnEmpty
+            self.setWhitespaceChars( expr.whiteChars )
+            self.skipWhitespace = expr.skipWhitespace
+            self.saveAsList = expr.saveAsList
+            self.callPreparse = expr.callPreparse
+            self.ignoreExprs.extend(expr.ignoreExprs)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.expr is not None:
+            return self.expr._parse( instring, loc, doActions, callPreParse=False )
+        else:
+            raise ParseException("",loc,self.errmsg,self)
+
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        self.expr = self.expr.copy()
+        if self.expr is not None:
+            self.expr.leaveWhitespace()
+        return self
+
+    def ignore( self, other ):
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                super( ParseElementEnhance, self).ignore( other )
+                if self.expr is not None:
+                    self.expr.ignore( self.ignoreExprs[-1] )
+        else:
+            super( ParseElementEnhance, self).ignore( other )
+            if self.expr is not None:
+                self.expr.ignore( self.ignoreExprs[-1] )
+        return self
+
+    def streamline( self ):
+        super(ParseElementEnhance,self).streamline()
+        if self.expr is not None:
+            self.expr.streamline()
+        return self
+
+    def checkRecursion( self, parseElementList ):
+        if self in parseElementList:
+            raise RecursiveGrammarException( parseElementList+[self] )
+        subRecCheckList = parseElementList[:] + [ self ]
+        if self.expr is not None:
+            self.expr.checkRecursion( subRecCheckList )
+
+    def validate( self, validateTrace=[] ):
+        tmp = validateTrace[:]+[self]
+        if self.expr is not None:
+            self.expr.validate(tmp)
+        self.checkRecursion( [] )
+
+    def __str__( self ):
+        try:
+            return super(ParseElementEnhance,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None and self.expr is not None:
+            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
+        return self.strRepr
+
+
+class FollowedBy(ParseElementEnhance):
+    """Lookahead matching of the given parse expression.
+    ``FollowedBy`` does *not* advance the parsing position within
+    the input string, it only verifies that the specified parse
+    expression matches at the current position.  ``FollowedBy``
+    always returns a null token list. If any results names are defined
+    in the lookahead expression, those *will* be returned for access by
+    name.
+
+    Example::
+
+        # use FollowedBy to match a label only if it is followed by a ':'
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
+
+    prints::
+
+        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
+    """
+    def __init__( self, expr ):
+        super(FollowedBy,self).__init__(expr)
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        _, ret = self.expr._parse(instring, loc, doActions=doActions)
+        del ret[:]
+        return loc, ret
+
+
+class PrecededBy(ParseElementEnhance):
+    """Lookbehind matching of the given parse expression.
+    ``PrecededBy`` does not advance the parsing position within the
+    input string, it only verifies that the specified parse expression
+    matches prior to the current position.  ``PrecededBy`` always
+    returns a null token list, but if a results name is defined on the
+    given expression, it is returned.
+
+    Parameters:
+
+     - expr - expression that must match prior to the current parse
+       location
+     - retreat - (default= ``None``) - (int) maximum number of characters
+       to lookbehind prior to the current parse location
+
+    If the lookbehind expression is a string, Literal, Keyword, or
+    a Word or CharsNotIn with a specified exact or maximum length, then
+    the retreat parameter is not required. Otherwise, retreat must be
+    specified to give a maximum number of characters to look back from
+    the current parse position for a lookbehind match.
+
+    Example::
+
+        # VB-style variable names with type prefixes
+        int_var = PrecededBy("#") + pyparsing_common.identifier
+        str_var = PrecededBy("$") + pyparsing_common.identifier
+
+    """
+    def __init__(self, expr, retreat=None):
+        super(PrecededBy, self).__init__(expr)
+        self.expr = self.expr().leaveWhitespace()
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.exact = False
+        if isinstance(expr, str):
+            retreat = len(expr)
+            self.exact = True
+        elif isinstance(expr, (Literal, Keyword)):
+            retreat = expr.matchLen
+            self.exact = True
+        elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
+            retreat = expr.maxLen
+            self.exact = True
+        elif isinstance(expr, _PositionToken):
+            retreat = 0
+            self.exact = True
+        self.retreat = retreat
+        self.errmsg = "not preceded by " + str(expr)
+        self.skipWhitespace = False
+
+    def parseImpl(self, instring, loc=0, doActions=True):
+        if self.exact:
+            if loc < self.retreat:
+                raise ParseException(instring, loc, self.errmsg)
+            start = loc - self.retreat
+            _, ret = self.expr._parse(instring, start)
+        else:
+            # retreat specified a maximum lookbehind window, iterate
+            test_expr = self.expr + StringEnd()
+            instring_slice = instring[:loc]
+            last_expr = ParseException(instring, loc, self.errmsg)
+            for offset in range(1, min(loc, self.retreat+1)):
+                try:
+                    _, ret = test_expr._parse(instring_slice, loc-offset)
+                except ParseBaseException as pbe:
+                    last_expr = pbe
+                else:
+                    break
+            else:
+                raise last_expr
+        # return empty list of tokens, but preserve any defined results names
+        del ret[:]
+        return loc, ret
+
+
+class NotAny(ParseElementEnhance):
+    """Lookahead to disallow matching with the given parse expression.
+    ``NotAny`` does *not* advance the parsing position within the
+    input string, it only verifies that the specified parse expression
+    does *not* match at the current position.  Also, ``NotAny`` does
+    *not* skip over leading whitespace. ``NotAny`` always returns
+    a null token list.  May be constructed using the '~' operator.
+
+    Example::
+
+        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
+
+        # take care not to mistake keywords for identifiers
+        ident = ~(AND | OR | NOT) + Word(alphas)
+        boolean_term = Optional(NOT) + ident
+
+        # very crude boolean expression - to support parenthesis groups and
+        # operation hierarchy, use infixNotation
+        boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term)
+
+        # integers that are followed by "." are actually floats
+        integer = Word(nums) + ~Char(".")
+    """
+    def __init__( self, expr ):
+        super(NotAny,self).__init__(expr)
+        #~ self.leaveWhitespace()
+        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
+        self.mayReturnEmpty = True
+        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.expr.canParseNext(instring, loc):
+            raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "~{" + _ustr(self.expr) + "}"
+
+        return self.strRepr
+
+class _MultipleMatch(ParseElementEnhance):
+    def __init__( self, expr, stopOn=None):
+        super(_MultipleMatch, self).__init__(expr)
+        self.saveAsList = True
+        ender = stopOn
+        if isinstance(ender, basestring):
+            ender = ParserElement._literalStringClass(ender)
+        self.not_ender = ~ender if ender is not None else None
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        self_expr_parse = self.expr._parse
+        self_skip_ignorables = self._skipIgnorables
+        check_ender = self.not_ender is not None
+        if check_ender:
+            try_not_ender = self.not_ender.tryParse
+
+        # must be at least one (but first see if we are the stopOn sentinel;
+        # if so, fail)
+        if check_ender:
+            try_not_ender(instring, loc)
+        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
+        try:
+            hasIgnoreExprs = (not not self.ignoreExprs)
+            while 1:
+                if check_ender:
+                    try_not_ender(instring, loc)
+                if hasIgnoreExprs:
+                    preloc = self_skip_ignorables( instring, loc )
+                else:
+                    preloc = loc
+                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
+                if tmptokens or tmptokens.haskeys():
+                    tokens += tmptokens
+        except (ParseException,IndexError):
+            pass
+
+        return loc, tokens
+
+class OneOrMore(_MultipleMatch):
+    """Repetition of one or more of the given expression.
+
+    Parameters:
+     - expr - expression that must match one or more times
+     - stopOn - (default= ``None``) - expression for a terminating sentinel
+          (only required if the sentinel would ordinarily match the repetition
+          expression)
+
+    Example::
+
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: BLACK"
+        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
+
+        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
+
+        # could also be written as
+        (attr_expr * (1,)).parseString(text).pprint()
+    """
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + _ustr(self.expr) + "}..."
+
+        return self.strRepr
+
+class ZeroOrMore(_MultipleMatch):
+    """Optional repetition of zero or more of the given expression.
+
+    Parameters:
+     - expr - expression that may match zero or more times
+     - stopOn - (default= ``None``) - expression for a terminating sentinel
+          (only required if the sentinel would ordinarily match the repetition
+          expression)
+
+    Example: similar to :class:`OneOrMore`
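+
+    A minimal sketch::
+
+        # a value, optionally followed by more comma-separated values
+        value = Word(nums)
+        value_list = value + ZeroOrMore(Suppress(',') + value)
+        print(value_list.parseString("1,2,3"))  # expected: ['1', '2', '3']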
+    """
+    def __init__( self, expr, stopOn=None):
+        super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        try:
+            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
+        except (ParseException,IndexError):
+            return loc, []
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "[" + _ustr(self.expr) + "]..."
+
+        return self.strRepr
+
+class _NullToken(object):
+    def __bool__(self):
+        return False
+    __nonzero__ = __bool__
+    def __str__(self):
+        return ""
+
+_optionalNotMatched = _NullToken()
+class Optional(ParseElementEnhance):
+    """Optional matching of the given expression.
+
+    Parameters:
+     - expr - expression that may be matched zero or one time
+     - default (optional) - value to be returned if the optional expression is not found.
+
+    Example::
+
+        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
+        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
+        zip.runTests('''
+            # traditional ZIP code
+            12345
+
+            # ZIP+4 form
+            12101-0001
+
+            # invalid ZIP
+            98765-
+            ''')
+
+    prints::
+
+        # traditional ZIP code
+        12345
+        ['12345']
+
+        # ZIP+4 form
+        12101-0001
+        ['12101-0001']
+
+        # invalid ZIP
+        98765-
+             ^
+        FAIL: Expected end of text (at char 5), (line:1, col:6)
+    """
+    def __init__( self, expr, default=_optionalNotMatched ):
+        super(Optional,self).__init__( expr, savelist=False )
+        self.saveAsList = self.expr.saveAsList
+        self.defaultValue = default
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        try:
+            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
+        except (ParseException,IndexError):
+            if self.defaultValue is not _optionalNotMatched:
+                if self.expr.resultsName:
+                    tokens = ParseResults([ self.defaultValue ])
+                    tokens[self.expr.resultsName] = self.defaultValue
+                else:
+                    tokens = [ self.defaultValue ]
+            else:
+                tokens = []
+        return loc, tokens
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "[" + _ustr(self.expr) + "]"
+
+        return self.strRepr
+
+class SkipTo(ParseElementEnhance):
+    """Token for skipping over all undefined text until the matched
+    expression is found.
+
+    Parameters:
+     - expr - target expression marking the end of the data to be skipped
+     - include - (default= ``False``) if True, the target expression is also parsed
+          (the skipped text and target expression are returned as a 2-element list).
+     - ignore - (default= ``None``) used to define grammars (typically quoted strings and
+          comments) that might contain false matches to the target expression
+     - failOn - (default= ``None``) define expressions that are not allowed to be
+          included in the skipped text; if found before the target expression is found,
+          the SkipTo is not a match
+
+    Example::
+
+        report = '''
+            Outstanding Issues Report - 1 Jan 2000
+
+               # | Severity | Description                               |  Days Open
+            -----+----------+-------------------------------------------+-----------
+             101 | Critical | Intermittent system crash                 |          6
+              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
+              79 | Minor    | System slow when running too many reports |         47
+            '''
+        integer = Word(nums)
+        SEP = Suppress('|')
+        # use SkipTo to simply match everything up until the next SEP
+        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
+        # - parse action will call token.strip() for each matched token, i.e., the description body
+        string_data = SkipTo(SEP, ignore=quotedString)
+        string_data.setParseAction(tokenMap(str.strip))
+        ticket_expr = (integer("issue_num") + SEP
+                      + string_data("sev") + SEP
+                      + string_data("desc") + SEP
+                      + integer("days_open"))
+
+        for tkt in ticket_expr.searchString(report):
+            print(tkt.dump())
+
+    prints::
+
+        ['101', 'Critical', 'Intermittent system crash', '6']
+        - days_open: 6
+        - desc: Intermittent system crash
+        - issue_num: 101
+        - sev: Critical
+        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
+        - days_open: 14
+        - desc: Spelling error on Login ('log|n')
+        - issue_num: 94
+        - sev: Cosmetic
+        ['79', 'Minor', 'System slow when running too many reports', '47']
+        - days_open: 47
+        - desc: System slow when running too many reports
+        - issue_num: 79
+        - sev: Minor
+    """
+    def __init__( self, other, include=False, ignore=None, failOn=None ):
+        super( SkipTo, self ).__init__( other )
+        self.ignoreExpr = ignore
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.includeMatch = include
+        self.saveAsList = False
+        if isinstance(failOn, basestring):
+            self.failOn = ParserElement._literalStringClass(failOn)
+        else:
+            self.failOn = failOn
+        self.errmsg = "No match found for "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        startloc = loc
+        instrlen = len(instring)
+        expr = self.expr
+        expr_parse = self.expr._parse
+        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
+        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
+
+        tmploc = loc
+        while tmploc <= instrlen:
+            if self_failOn_canParseNext is not None:
+                # break if failOn expression matches
+                if self_failOn_canParseNext(instring, tmploc):
+                    break
+
+            if self_ignoreExpr_tryParse is not None:
+                # advance past ignore expressions
+                while 1:
+                    try:
+                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
+                    except ParseBaseException:
+                        break
+
+            try:
+                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
+            except (ParseException, IndexError):
+                # no match, advance loc in string
+                tmploc += 1
+            else:
+                # matched skipto expr, done
+                break
+
+        else:
+            # ran off the end of the input string without matching skipto expr, fail
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        # build up return values
+        loc = tmploc
+        skiptext = instring[startloc:loc]
+        skipresult = ParseResults(skiptext)
+
+        if self.includeMatch:
+            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
+            skipresult += mat
+
+        return loc, skipresult
+
+class Forward(ParseElementEnhance):
+    """Forward declaration of an expression to be defined later -
+    used for recursive grammars, such as algebraic infix notation.
+    When the expression is known, it is assigned to the ``Forward``
+    variable using the '<<' operator.
+
+    Note: take care when assigning to ``Forward`` not to overlook
+    precedence of operators.
+
+    Specifically, '|' has a lower precedence than '<<', so that::
+
+        fwdExpr << a | b | c
+
+    will actually be evaluated as::
+
+        (fwdExpr << a) | b | c
+
+    thereby leaving b and c out as parseable alternatives.  It is recommended that you
+    explicitly group the values inserted into the ``Forward``::
+
+        fwdExpr << (a | b | c)
+
+    Converting to use the '<<=' operator instead will avoid this problem.
+
+    See :class:`ParseResults.pprint` for an example of a recursive
+    parser created using ``Forward``.
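+
+    A minimal recursive-grammar sketch::
+
+        # integers wrapped in arbitrarily nested parentheses
+        nested_num = Forward()
+        nested_num <<= Word(nums) | Suppress('(') + nested_num + Suppress(')')
+        print(nested_num.parseString("((123))"))  # -> ['123']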
+    """
+    def __init__( self, other=None ):
+        super(Forward,self).__init__( other, savelist=False )
+
+    def __lshift__( self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass(other)
+        self.expr = other
+        self.strRepr = None
+        self.mayIndexError = self.expr.mayIndexError
+        self.mayReturnEmpty = self.expr.mayReturnEmpty
+        self.setWhitespaceChars( self.expr.whiteChars )
+        self.skipWhitespace = self.expr.skipWhitespace
+        self.saveAsList = self.expr.saveAsList
+        self.ignoreExprs.extend(self.expr.ignoreExprs)
+        return self
+
+    def __ilshift__(self, other):
+        return self << other
+
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        return self
+
+    def streamline( self ):
+        if not self.streamlined:
+            self.streamlined = True
+            if self.expr is not None:
+                self.expr.streamline()
+        return self
+
+    def validate( self, validateTrace=[] ):
+        if self not in validateTrace:
+            tmp = validateTrace[:]+[self]
+            if self.expr is not None:
+                self.expr.validate(tmp)
+        self.checkRecursion([])
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        # Avoid infinite recursion by setting a temporary name
+        self.name = self.__class__.__name__ + ": ..."
+
+        # Use the string representation of main expression.
+        try:
+            if self.expr is not None:
+                retString = _ustr(self.expr)
+            else:
+                retString = "None"
+        finally:
+            del self.name
+        return self.__class__.__name__ + ": " + retString
+
+    def copy(self):
+        if self.expr is not None:
+            return super(Forward,self).copy()
+        else:
+            ret = Forward()
+            ret <<= self
+            return ret
+
+class TokenConverter(ParseElementEnhance):
+    """
+    Abstract subclass of :class:`ParseExpression`, for converting parsed results.
+    """
+    def __init__( self, expr, savelist=False ):
+        super(TokenConverter,self).__init__( expr )#, savelist )
+        self.saveAsList = False
+
+class Combine(TokenConverter):
+    """Converter to concatenate all matching tokens to a single string.
+    By default, the matching patterns must also be contiguous in the
+    input string; this can be disabled by specifying
+    ``'adjacent=False'`` in the constructor.
+
+    Example::
+
+        real = Word(nums) + '.' + Word(nums)
+        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
+        # will also erroneously match the following
+        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
+
+        real = Combine(Word(nums) + '.' + Word(nums))
+        print(real.parseString('3.1416')) # -> ['3.1416']
+        # no match when there are internal spaces
+        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
+    """
+    def __init__( self, expr, joinString="", adjacent=True ):
+        super(Combine,self).__init__( expr )
+        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+        if adjacent:
+            self.leaveWhitespace()
+        self.adjacent = adjacent
+        self.skipWhitespace = True
+        self.joinString = joinString
+        self.callPreparse = True
+
+    def ignore( self, other ):
+        if self.adjacent:
+            ParserElement.ignore(self, other)
+        else:
+            super( Combine, self).ignore( other )
+        return self
+
+    def postParse( self, instring, loc, tokenlist ):
+        retToks = tokenlist.copy()
+        del retToks[:]
+        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
+
+        if self.resultsName and retToks.haskeys():
+            return [ retToks ]
+        else:
+            return retToks
+
+class Group(TokenConverter):
+    """Converter to return the matched tokens as a list - useful for
+    returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
+
+    Example::
+
+        ident = Word(alphas)
+        num = Word(nums)
+        term = ident | num
+        func = ident + Optional(delimitedList(term))
+        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']
+
+        func = ident + Group(Optional(delimitedList(term)))
+        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
+    """
+    def __init__( self, expr ):
+        super(Group,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        return [ tokenlist ]
+
+class Dict(TokenConverter):
+    """Converter to return a repetitive expression as a list, but also
+    as a dictionary. Each element can also be referenced using the first
+    token in the expression as its key. Useful for tabular report
+    scraping when the first column can be used as an item key.
+
+    Example::
+
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+        # print attributes as plain groups
+        print(OneOrMore(attr_expr).parseString(text).dump())
+
+        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
+        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+        print(result.dump())
+
+        # access named fields as dict entries, or output as dict
+        print(result['shape'])
+        print(result.asDict())
+
+    prints::
+
+        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+
+    See more examples at :class:`ParseResults` of accessing fields by results name.
+    """
+    def __init__( self, expr ):
+        super(Dict,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        for i,tok in enumerate(tokenlist):
+            if len(tok) == 0:
+                continue
+            ikey = tok[0]
+            if isinstance(ikey,int):
+                ikey = _ustr(tok[0]).strip()
+            if len(tok)==1:
+                tokenlist[ikey] = _ParseResultsWithOffset("",i)
+            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
+                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
+            else:
+                dictvalue = tok.copy() #ParseResults(i)
+                del dictvalue[0]
+                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
+                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
+                else:
+                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
+
+        if self.resultsName:
+            return [ tokenlist ]
+        else:
+            return tokenlist
+
+
+class Suppress(TokenConverter):
+    """Converter for ignoring the results of a parsed expression.
+
+    Example::
+
+        source = "a, b, c,d"
+        wd = Word(alphas)
+        wd_list1 = wd + ZeroOrMore(',' + wd)
+        print(wd_list1.parseString(source))
+
+        # often, delimiters that are useful during parsing are just in the
+        # way afterward - use Suppress to keep them out of the parsed output
+        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+        print(wd_list2.parseString(source))
+
+    prints::
+
+        ['a', ',', 'b', ',', 'c', ',', 'd']
+        ['a', 'b', 'c', 'd']
+
+    (See also :class:`delimitedList`.)
+    """
+    def postParse( self, instring, loc, tokenlist ):
+        return []
+
+    def suppress( self ):
+        return self
+
+
+class OnlyOnce(object):
+    """Wrapper for parse actions, to ensure they are only called once.
+    """
+    def __init__(self, methodCall):
+        self.callable = _trim_arity(methodCall)
+        self.called = False
+    def __call__(self,s,l,t):
+        if not self.called:
+            results = self.callable(s,l,t)
+            self.called = True
+            return results
+        raise ParseException(s,l,"")
+    def reset(self):
+        self.called = False
+
+def traceParseAction(f):
+    """Decorator for debugging parse actions.
+
+    When the parse action is called, this decorator will print
+    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
+    When the parse action completes, the decorator will print
+    ``"<<"`` followed by the returned value, or any exception that the parse action raised.
+
+    Example::
+
+        wd = Word(alphas)
+
+        @traceParseAction
+        def remove_duplicate_chars(tokens):
+            return ''.join(sorted(set(''.join(tokens))))
+
+        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
+        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
+
+    prints::
+
+        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+        <<leaving remove_duplicate_chars (ret: 'dfjkls')
+        ['dfjkls']
+    """
+    f = _trim_arity(f)
+    def z(*paArgs):
+        thisFunc = f.__name__
+        s,l,t = paArgs[-3:]
+        if len(paArgs)>3:
+            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
+        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
+        try:
+            ret = f(*paArgs)
+        except Exception as exc:
+            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
+            raise
+        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
+        return ret
+    try:
+        z.__name__ = f.__name__
+    except AttributeError:
+        pass
+    return z
+
+#
+# global helpers
+#
+def delimitedList( expr, delim=",", combine=False ):
+    """Helper to define a delimited list of expressions - the delimiter
+    defaults to ','. By default, the list elements and delimiters can
+    have intervening whitespace and comments, but this can be
+    overridden by passing ``combine=True``. If
+    ``combine`` is set to ``True``, the matching tokens are
+    returned as a single token string, with the delimiters included;
+    otherwise, the matching tokens are returned as a list of tokens,
+    with the delimiters suppressed.
+
+    Example::
+
+        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
+        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
+    """
+    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
+    if combine:
+        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
+    else:
+        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
+
+def countedArray( expr, intExpr=None ):
+    """Helper to define a counted list of expressions.
+
+    This helper defines a pattern of the form::
+
+        integer expr expr expr...
+
+    where the leading integer tells how many expr expressions follow.
+    The matched tokens are returned as a list of expr tokens - the
+    leading count token is suppressed.
+
+    If ``intExpr`` is specified, it should be a pyparsing expression
+    that produces an integer value.
+
+    Example::
+
+        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']
+
+        # in this parser, the leading integer value is given in binary,
+        # '10' indicating that 2 values are in the array
+        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
+        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
+    """
+    arrayExpr = Forward()
+    def countFieldParseAction(s,l,t):
+        n = t[0]
+        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
+        return []
+    if intExpr is None:
+        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
+    else:
+        intExpr = intExpr.copy()
+    intExpr.setName("arrayLen")
+    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
+    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
+
+def _flatten(L):
+    ret = []
+    for i in L:
+        if isinstance(i,list):
+            ret.extend(_flatten(i))
+        else:
+            ret.append(i)
+    return ret
+
+def matchPreviousLiteral(expr):
+    """Helper to define an expression that is indirectly defined from
+    the tokens matched in a previous expression, that is, it looks for
+    a 'repeat' of a previous expression.  For example::
+
+        first = Word(nums)
+        second = matchPreviousLiteral(first)
+        matchExpr = first + ":" + second
+
+    will match ``"1:1"``, but not ``"1:2"``.  Because this
+    matches a previous literal, it will also match the leading
+    ``"1:1"`` in ``"1:10"``. If this is not desired, use
+    :class:`matchPreviousExpr`. Do *not* use with packrat parsing
+    enabled.
+    """
+    rep = Forward()
+    def copyTokenToRepeater(s,l,t):
+        if t:
+            if len(t) == 1:
+                rep << t[0]
+            else:
+                # flatten t tokens
+                tflat = _flatten(t.asList())
+                rep << And(Literal(tt) for tt in tflat)
+        else:
+            rep << Empty()
+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+    rep.setName('(prev) ' + _ustr(expr))
+    return rep
+
+def matchPreviousExpr(expr):
+    """Helper to define an expression that is indirectly defined from
+    the tokens matched in a previous expression, that is, it looks for
+    a 'repeat' of a previous expression.  For example::
+
+        first = Word(nums)
+        second = matchPreviousExpr(first)
+        matchExpr = first + ":" + second
+
+    will match ``"1:1"``, but not ``"1:2"``.  Because this
+    matches by expressions, it will *not* match the leading ``"1:1"``
+    in ``"1:10"``; the expressions are evaluated first, and then
+    compared, so ``"1"`` is compared with ``"10"``. Do *not* use
+    with packrat parsing enabled.
+    """
+    rep = Forward()
+    e2 = expr.copy()
+    rep <<= e2
+    def copyTokenToRepeater(s,l,t):
+        matchTokens = _flatten(t.asList())
+        def mustMatchTheseTokens(s,l,t):
+            theseTokens = _flatten(t.asList())
+            if  theseTokens != matchTokens:
+                raise ParseException("",0,"")
+        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+    rep.setName('(prev) ' + _ustr(expr))
+    return rep
+
+def _escapeRegexRangeChars(s):
+    #~  escape these chars: ^-]
+    for c in r"\^-]":
+        s = s.replace(c,_bslash+c)
+    s = s.replace("\n",r"\n")
+    s = s.replace("\t",r"\t")
+    return _ustr(s)
+
+def oneOf( strs, caseless=False, useRegex=True ):
+    """Helper to quickly define a set of alternative Literals, and makes
+    sure to do longest-first testing when there is a conflict,
+    regardless of the input order, but returns
+    a :class:`MatchFirst` for best performance.
+
+    Parameters:
+
+     - strs - a string of space-delimited literals, or a collection of
+       string literals
+     - caseless - (default= ``False``) - treat all literals as
+       caseless
+     - useRegex - (default= ``True``) - as an optimization, will
+       generate a Regex object; otherwise, will generate
+       a :class:`MatchFirst` object (if ``caseless=True``, or if
+       creating a :class:`Regex` raises an exception)
+
+    Example::
+
+        comp_oper = oneOf("< = > <= >= !=")
+        var = Word(alphas)
+        number = Word(nums)
+        term = var | number
+        comparison_expr = term + comp_oper + term
+        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
+
+    prints::
+
+        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
+    """
+    if caseless:
+        isequal = ( lambda a,b: a.upper() == b.upper() )
+        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
+        parseElementClass = CaselessLiteral
+    else:
+        isequal = ( lambda a,b: a == b )
+        masks = ( lambda a,b: b.startswith(a) )
+        parseElementClass = Literal
+
+    symbols = []
+    if isinstance(strs,basestring):
+        symbols = strs.split()
+    elif isinstance(strs, Iterable):
+        symbols = list(strs)
+    else:
+        warnings.warn("Invalid argument to oneOf, expected string or iterable",
+                SyntaxWarning, stacklevel=2)
+    if not symbols:
+        return NoMatch()
+
+    i = 0
+    while i < len(symbols)-1:
+        cur = symbols[i]
+        for j,other in enumerate(symbols[i+1:]):
+            if ( isequal(other, cur) ):
+                del symbols[i+j+1]
+                break
+            elif ( masks(cur, other) ):
+                del symbols[i+j+1]
+                symbols.insert(i,other)
+                cur = other
+                break
+        else:
+            i += 1
+
+    if not caseless and useRegex:
+        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
+        try:
+            if len(symbols)==len("".join(symbols)):
+                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
+            else:
+                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
+        except Exception:
+            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
+                    SyntaxWarning, stacklevel=2)
+
+
+    # last resort, just use MatchFirst
+    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
+
+def dictOf( key, value ):
+    """Helper to easily and clearly define a dictionary by specifying
+    the respective patterns for the key and value.  Takes care of
+    defining the :class:`Dict`, :class:`ZeroOrMore`, and
+    :class:`Group` tokens in the proper order.  The key pattern
+    can include delimiting markers or punctuation, as long as they are
+    suppressed, thereby leaving the significant key text.  The value
+    pattern can include named results, so that the :class:`Dict` results
+    can include named token fields.
+
+    Example::
+
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        print(OneOrMore(attr_expr).parseString(text).dump())
+
+        attr_label = label
+        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
+
+        # similar to Dict, but simpler call format
+        result = dictOf(attr_label, attr_value).parseString(text)
+        print(result.dump())
+        print(result['shape'])
+        print(result.shape)  # object attribute access works too
+        print(result.asDict())
+
+    prints::
+
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        SQUARE
+        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+    """
+    return Dict(OneOrMore(Group(key + value)))
+
+def originalTextFor(expr, asString=True):
+    """Helper to return the original, untokenized text for a given
+    expression.  Useful to restore the parsed fields of an HTML start
+    tag into the raw tag text itself, or to revert separate tokens with
+    intervening whitespace back to the original matching input text. By
+    default, returns a string containing the original parsed text.
+
+    If the optional ``asString`` argument is passed as
+    ``False``, then the return value is
+    a :class:`ParseResults` containing any results names that
+    were originally matched, and a single token containing the original
+    matched text from the input string.  So if the expression passed to
+    :class:`originalTextFor` contains expressions with defined
+    results names, you must set ``asString`` to ``False`` if you
+    want to preserve those results name values.
+
+    Example::
+
+        src = "this is test <b> bold <i>text</i> </b> normal text "
+        for tag in ("b","i"):
+            opener,closer = makeHTMLTags(tag)
+            patt = originalTextFor(opener + SkipTo(closer) + closer)
+            print(patt.searchString(src)[0])
+
+    prints::
+
+        ['<b> bold <i>text</i> </b>']
+        ['<i>text</i>']
+    """
+    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
+    endlocMarker = locMarker.copy()
+    endlocMarker.callPreparse = False
+    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+    if asString:
+        extractText = lambda s,l,t: s[t._original_start:t._original_end]
+    else:
+        def extractText(s,l,t):
+            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
+    matchExpr.setParseAction(extractText)
+    matchExpr.ignoreExprs = expr.ignoreExprs
+    return matchExpr
+
+def ungroup(expr):
+    """Helper to undo pyparsing's default grouping of And expressions,
+    even if all but one are non-empty.
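+
+    For instance::
+
+        wd = Word(alphas)
+        grouped = Group(wd + wd)
+        print(grouped.parseString("hello world"))           # -> [['hello', 'world']]
+        print(ungroup(grouped).parseString("hello world"))  # -> ['hello', 'world']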
+    """
+    return TokenConverter(expr).addParseAction(lambda t:t[0])
+
+def locatedExpr(expr):
+    """Helper to decorate a returned token with its starting and ending
+    locations in the input string.
+
+    This helper adds the following results names:
+
+     - locn_start = location where matched expression begins
+     - locn_end = location where matched expression ends
+     - value = the actual parsed results
+
+    Be careful if the input text contains ``<TAB>`` characters; you
+    may want to call :class:`ParserElement.parseWithTabs`.
+
+    Example::
+
+        wd = Word(alphas)
+        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
+            print(match)
+
+    prints::
+
+        [[0, 'ljsdf', 5]]
+        [[8, 'lksdjjf', 15]]
+        [[18, 'lkkjj', 23]]
+    """
+    locator = Empty().setParseAction(lambda s,l,t: l)
+    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
+
+
+# convenience constants for positional expressions
+empty       = Empty().setName("empty")
+lineStart   = LineStart().setName("lineStart")
+lineEnd     = LineEnd().setName("lineEnd")
+stringStart = StringStart().setName("stringStart")
+stringEnd   = StringEnd().setName("stringEnd")
+
+_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
+_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
+_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
+
+def srange(s):
+    r"""Helper to easily define string ranges for use in Word
+    construction. Borrows syntax from regexp '[]' string range
+    definitions::
+
+        srange("[0-9]")   -> "0123456789"
+        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
+        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+
+    The input string must be enclosed in []'s, and the returned string
+    is the expanded character set joined into a single string. The
+    values enclosed in the []'s may be:
+
+     - a single character
+     - an escaped character with a leading backslash (such as ``\-``
+       or ``\]``)
+     - an escaped hex character with a leading ``'\x'``
+       (``\x21``, which is a ``'!'`` character) (``\0x##``
+       is also supported for backwards compatibility)
+     - an escaped octal character with a leading ``'\0'``
+       (``\041``, which is a ``'!'`` character)
+     - a range of any of the above, separated by a dash (``'a-z'``,
+       etc.)
+     - any combination of the above (``'aeiouy'``,
+       ``'a-zA-Z0-9_$'``, etc.)
+    """
+    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
+    try:
+        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
+    except Exception:
+        return ""
+
+def matchOnlyAtCol(n):
+    """Helper method for defining parse actions that require matching at
+    a specific column in the input text.
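+
+    For instance::
+
+        # only accept a number that begins in column 5
+        num = Word(nums).addParseAction(matchOnlyAtCol(5))
+        print(num.searchString("    1234"))  # -> [['1234']]
+        print(num.searchString("1234"))      # -> []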
+    """
+    def verifyCol(strg,locn,toks):
+        if col(locn,strg) != n:
+            raise ParseException(strg,locn,"matched token not at column %d" % n)
+    return verifyCol
+
+def replaceWith(replStr):
+    """Helper method for common parse actions that simply return
+    a literal value.  Especially useful when used with
+    :class:`transformString<ParserElement.transformString>` ().
+
+    Example::
+
+        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
+        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
+        term = na | num
+
+        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
+    """
+    return lambda s,l,t: [replStr]
+
+def removeQuotes(s,l,t):
+    """Helper parse action for removing quotation marks from parsed
+    quoted strings.
+
+    Example::
+
+        # by default, quotation marks are included in parsed results
+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+        # use removeQuotes to strip quotation marks from parsed results
+        quotedString.setParseAction(removeQuotes)
+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+    """
+    return t[0][1:-1]
+
+def tokenMap(func, *args):
+    """Helper to define a parse action by mapping a function to all
+    elements of a ParseResults list. If any additional args are passed,
+    they are forwarded to the given function as additional arguments
+    after the token, as in
+    ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``,
+    which will convert the parsed data to an integer using base 16.
+
+    Example (compare the last example to the one in :class:`ParserElement.transformString`)::
+
+        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
+        hex_ints.runTests('''
+            00 11 22 aa FF 0a 0d 1a
+            ''')
+
+        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
+        OneOrMore(upperword).runTests('''
+            my kingdom for a horse
+            ''')
+
+        wd = Word(alphas).setParseAction(tokenMap(str.title))
+        OneOrMore(wd).setParseAction(' '.join).runTests('''
+            now is the winter of our discontent made glorious summer by this sun of york
+            ''')
+
+    prints::
+
+        00 11 22 aa FF 0a 0d 1a
+        [0, 17, 34, 170, 255, 10, 13, 26]
+
+        my kingdom for a horse
+        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
+
+        now is the winter of our discontent made glorious summer by this sun of york
+        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
+    """
+    def pa(s,l,t):
+        return [func(tokn, *args) for tokn in t]
+
+    try:
+        func_name = getattr(func, '__name__',
+                            getattr(func, '__class__').__name__)
+    except Exception:
+        func_name = str(func)
+    pa.__name__ = func_name
+
+    return pa
+
+upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
+"""(Deprecated) Helper parse action to convert tokens to upper case.
+Deprecated in favor of :class:`pyparsing_common.upcaseTokens`"""
+
+downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
+"""(Deprecated) Helper parse action to convert tokens to lower case.
+Deprecated in favor of :class:`pyparsing_common.downcaseTokens`"""
+
+def _makeTags(tagStr, xml,
+              suppress_LT=Suppress("<"),
+              suppress_GT=Suppress(">")):
+    """Internal helper to construct opening and closing tag expressions, given a tag name"""
+    if isinstance(tagStr,basestring):
+        resname = tagStr
+        tagStr = Keyword(tagStr, caseless=not xml)
+    else:
+        resname = tagStr.name
+
+    tagAttrName = Word(alphas,alphanums+"_-:")
+    if (xml):
+        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
+        openTag = (suppress_LT
+                   + tagStr("tag")
+                   + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue )))
+                   + Optional("/", default=[False])("empty").setParseAction(lambda s,l,t:t[0]=='/')
+                   + suppress_GT)
+    else:
+        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printables, excludeChars=">")
+        openTag = (suppress_LT
+                   + tagStr("tag")
+                   + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens)
+                                           + Optional(Suppress("=") + tagAttrValue))))
+                   + Optional("/",default=[False])("empty").setParseAction(lambda s,l,t:t[0]=='/')
+                   + suppress_GT)
+    closeTag = Combine(_L("</") + tagStr + ">", adjacent=False)
+
+    openTag.setName("<%s>" % resname)
+    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
+    openTag.addParseAction(lambda t: t.__setitem__("start"+"".join(resname.replace(":"," ").title().split()), t.copy()))
+    closeTag = closeTag("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
+    openTag.tag = resname
+    closeTag.tag = resname
+    openTag.tag_body = SkipTo(closeTag())
+    return openTag, closeTag
+
+def makeHTMLTags(tagStr):
+    """Helper to construct opening and closing tag expressions for HTML,
+    given a tag name. Matches tags in either upper or lower case,
+    attributes with namespaces and with quoted or unquoted values.
+
+    Example::
+
+        text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
+        # makeHTMLTags returns pyparsing expressions for the opening and
+        # closing tags as a 2-tuple
+        a,a_end = makeHTMLTags("A")
+        link_expr = a + SkipTo(a_end)("link_text") + a_end
+
+        for link in link_expr.searchString(text):
+            # attributes in the <A> tag (like "href" shown here) are
+            # also accessible as named results
+            print(link.link_text, '->', link.href)
+
+    prints::
+
+        pyparsing -> https://github.com/pyparsing/pyparsing/wiki
+    """
+    return _makeTags( tagStr, False )
+
+def makeXMLTags(tagStr):
+    """Helper to construct opening and closing tag expressions for XML,
+    given a tag name. Matches tags only in the given upper/lower case.
+
+    Example: similar to :class:`makeHTMLTags`
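+
+    A short sketch of matching a simple XML element::
+
+        item,item_end = makeXMLTags("item")
+        expr = item + SkipTo(item_end)("value") + item_end
+        print(expr.parseString('<item>apple</item>').value)  # -> apple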
+    """
+    return _makeTags( tagStr, True )
+
+def withAttribute(*args,**attrDict):
+    """Helper to create a validating parse action to be used with start
+    tags created with :class:`makeXMLTags` or
+    :class:`makeHTMLTags`. Use ``withAttribute`` to qualify
+    a starting tag with a required attribute value, to avoid false
+    matches on common tags such as ``<TD>`` or ``<DIV>``.
+
+    Call ``withAttribute`` with a series of attribute names and
+    values. Specify the list of filter attribute names and values as:
+
+     - keyword arguments, as in ``(align="right")``, or
+     - as an explicit dict with ``**`` operator, when an attribute
+       name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
+     - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align","right"))``
+
+    For attribute names with a namespace prefix, you must use the second
+    form.  Attribute names are matched case-insensitively.
+
+    If just testing for ``class`` (with or without a namespace), use
+    :class:`withClass`.
+
+    To verify that the attribute exists, but without specifying a value,
+    pass ``withAttribute.ANY_VALUE`` as the value.
+
+    Example::
+
+        html = '''
+            <div>
+            Some text
+            <div type="grid">1 4 0 1 0</div>
+            <div type="graph">1,3 2,3 1,1</div>
+            <div>this has no type</div>
+            </div>
+
+        '''
+        div,div_end = makeHTMLTags("div")
+
+        # only match div tag having a type attribute with value "grid"
+        div_grid = div().setParseAction(withAttribute(type="grid"))
+        grid_expr = div_grid + SkipTo(div | div_end)("body")
+        for grid_header in grid_expr.searchString(html):
+            print(grid_header.body)
+
+        # construct a match with any div tag having a type attribute, regardless of the value
+        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
+        div_expr = div_any_type + SkipTo(div | div_end)("body")
+        for div_header in div_expr.searchString(html):
+            print(div_header.body)
+
+    prints::
+
+        1 4 0 1 0
+
+        1 4 0 1 0
+        1,3 2,3 1,1
+    """
+    if args:
+        attrs = args[:]
+    else:
+        attrs = attrDict.items()
+    attrs = [(k,v) for k,v in attrs]
+    def pa(s,l,tokens):
+        for attrName,attrValue in attrs:
+            if attrName not in tokens:
+                raise ParseException(s,l,"no matching attribute " + attrName)
+            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
+                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
+                                            (attrName, tokens[attrName], attrValue))
+    return pa
+withAttribute.ANY_VALUE = object()
+
+def withClass(classname, namespace=''):
+    """Simplified version of :class:`withAttribute` when
+    matching on a div class - made difficult because ``class`` is
+    a reserved word in Python.
+
+    Example::
+
+        html = '''
+            <div>
+            Some text
+            <div class="grid">1 4 0 1 0</div>
+            <div class="graph">1,3 2,3 1,1</div>
+            <div>this &lt;div&gt; has no class</div>
+            </div>
+
+        '''
+        div,div_end = makeHTMLTags("div")
+        div_grid = div().setParseAction(withClass("grid"))
+
+        grid_expr = div_grid + SkipTo(div | div_end)("body")
+        for grid_header in grid_expr.searchString(html):
+            print(grid_header.body)
+
+        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
+        div_expr = div_any_type + SkipTo(div | div_end)("body")
+        for div_header in div_expr.searchString(html):
+            print(div_header.body)
+
+    prints::
+
+        1 4 0 1 0
+
+        1 4 0 1 0
+        1,3 2,3 1,1
+    """
+    classattr = "%s:class" % namespace if namespace else "class"
+    return withAttribute(**{classattr : classname})
+
+opAssoc = SimpleNamespace()
+opAssoc.LEFT = object()
+opAssoc.RIGHT = object()
+
+def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
+    """Helper method for constructing grammars of expressions made up of
+    operators working in a precedence hierarchy.  Operators may be unary
+    or binary, left- or right-associative.  Parse actions can also be
+    attached to operator expressions. The generated parser will also
+    recognize the use of parentheses to override operator precedences
+    (see example below).
+
+    Note: if you define a deep operator list, you may see performance
+    issues when using infixNotation. See
+    :class:`ParserElement.enablePackrat` for a mechanism to potentially
+    improve your parser performance.
+
+    Parameters:
+     - baseExpr - expression representing the most basic element of the
+       nested expression
+     - opList - list of tuples, one for each operator precedence level
+       in the expression grammar; each tuple is of the form ``(opExpr,
+       numTerms, rightLeftAssoc, parseAction)``, where:
+
+       - opExpr is the pyparsing expression for the operator; may also
+         be a string, which will be converted to a Literal; if numTerms
+         is 3, opExpr is a tuple of two expressions, for the two
+         operators separating the 3 terms
+       - numTerms is the number of terms for this operator (must be 1,
+         2, or 3)
+       - rightLeftAssoc is the indicator whether the operator is right
+         or left associative, using the pyparsing-defined constants
+         ``opAssoc.RIGHT`` and ``opAssoc.LEFT``.
+       - parseAction is the parse action to be associated with
+         expressions matching this operator expression (the parse action
+         tuple member may be omitted); if the parse action is passed
+         a tuple or list of functions, this is equivalent to calling
+         ``setParseAction(*fn)``
+         (:class:`ParserElement.setParseAction`)
+     - lpar - expression for matching left-parentheses
+       (default= ``Suppress('(')``)
+     - rpar - expression for matching right-parentheses
+       (default= ``Suppress(')')``)
+
+    Example::
+
+        # simple example of four-function arithmetic with ints and
+        # variable names
+        integer = pyparsing_common.signed_integer
+        varname = pyparsing_common.identifier
+
+        arith_expr = infixNotation(integer | varname,
+            [
+            ('-', 1, opAssoc.RIGHT),
+            (oneOf('* /'), 2, opAssoc.LEFT),
+            (oneOf('+ -'), 2, opAssoc.LEFT),
+            ])
+
+        arith_expr.runTests('''
+            5+3*6
+            (5+3)*6
+            -2--11
+            ''', fullDump=False)
+
+    prints::
+
+        5+3*6
+        [[5, '+', [3, '*', 6]]]
+
+        (5+3)*6
+        [[[5, '+', 3], '*', 6]]
+
+        -2--11
+        [[['-', 2], '-', ['-', 11]]]
+    """
+    # captive version of FollowedBy that does not do parse actions or capture results names
+    class _FB(FollowedBy):
+        def parseImpl(self, instring, loc, doActions=True):
+            self.expr.tryParse(instring, loc)
+            return loc, []
+
+    ret = Forward()
+    lastExpr = baseExpr | ( lpar + ret + rpar )
+    for i,operDef in enumerate(opList):
+        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
+        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
+        if arity == 3:
+            if opExpr is None or len(opExpr) != 2:
+                raise ValueError(
+                    "if numterms=3, opExpr must be a tuple or list of two expressions")
+            opExpr1, opExpr2 = opExpr
+        thisExpr = Forward().setName(termName)
+        if rightLeftAssoc == opAssoc.LEFT:
+            if arity == 1:
+                matchExpr = _FB(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
+            elif arity == 2:
+                if opExpr is not None:
+                    matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
+                else:
+                    matchExpr = _FB(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
+            elif arity == 3:
+                matchExpr = _FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
+                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
+            else:
+                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+        elif rightLeftAssoc == opAssoc.RIGHT:
+            if arity == 1:
+                # try to avoid LR with this extra test
+                if not isinstance(opExpr, Optional):
+                    opExpr = Optional(opExpr)
+                matchExpr = _FB(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
+            elif arity == 2:
+                if opExpr is not None:
+                    matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
+                else:
+                    matchExpr = _FB(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
+            elif arity == 3:
+                matchExpr = _FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
+                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
+            else:
+                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+        else:
+            raise ValueError("operator must indicate right or left associativity")
+        if pa:
+            if isinstance(pa, (tuple, list)):
+                matchExpr.setParseAction(*pa)
+            else:
+                matchExpr.setParseAction(pa)
+        thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
+        lastExpr = thisExpr
+    ret <<= lastExpr
+    return ret
+
+operatorPrecedence = infixNotation
+"""(Deprecated) Former name of :class:`infixNotation`, will be
+dropped in a future release."""
+
+dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
+sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
+quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
+                       Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
+unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
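+
+# e.g., searchString finds both quoting styles with the combined expression:
+#   print(quotedString.searchString('text with "double" and \'single\' quotes'))
+#   # -> [['"double"'], ["'single'"]]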
+
+def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
+    """Helper method for defining nested lists enclosed in opening and
+    closing delimiters ("(" and ")" are the default).
+
+    Parameters:
+     - opener - opening character for a nested list
+       (default= ``"("``); can also be a pyparsing expression
+     - closer - closing character for a nested list
+       (default= ``")"``); can also be a pyparsing expression
+     - content - expression for items within the nested lists
+       (default= ``None``)
+     - ignoreExpr - expression for ignoring opening and closing
+       delimiters (default= :class:`quotedString`)
+
+    If an expression is not provided for the content argument, the
+    nested expression will capture all whitespace-delimited content
+    between delimiters as a list of separate values.
+
+    Use the ``ignoreExpr`` argument to define expressions that may
+    contain opening or closing characters that should not be treated as
+    opening or closing characters for nesting, such as quotedString or
+    a comment expression.  Specify multiple expressions using an
+    :class:`Or` or :class:`MatchFirst`. The default is
+    :class:`quotedString`, but if no expressions are to be ignored, then
+    pass ``None`` for this argument.
+
+    Example::
+
+        data_type = oneOf("void int short long char float double")
+        decl_data_type = Combine(data_type + Optional(Word('*')))
+        ident = Word(alphas+'_', alphanums+'_')
+        number = pyparsing_common.number
+        arg = Group(decl_data_type + ident)
+        LPAR,RPAR = map(Suppress, "()")
+
+        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
+
+        c_function = (decl_data_type("type")
+                      + ident("name")
+                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+                      + code_body("body"))
+        c_function.ignore(cStyleComment)
+
+        source_code = '''
+            int is_odd(int x) {
+                return (x%2);
+            }
+
+            int dec_to_hex(char hchar) {
+                if (hchar >= '0' && hchar <= '9') {
+                    return (ord(hchar)-ord('0'));
+                } else {
+                    return (10+ord(hchar)-ord('A'));
+                }
+            }
+        '''
+        for func in c_function.searchString(source_code):
+            print("%(name)s (%(type)s) args: %(args)s" % func)
+
+
+    prints::
+
+        is_odd (int) args: [['int', 'x']]
+        dec_to_hex (int) args: [['char', 'hchar']]
+    """
+    if opener == closer:
+        raise ValueError("opening and closing strings cannot be the same")
+    if content is None:
+        if isinstance(opener,basestring) and isinstance(closer,basestring):
+            if len(opener) == 1 and len(closer)==1:
+                if ignoreExpr is not None:
+                    content = (Combine(OneOrMore(~ignoreExpr +
+                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+                else:
+                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
+                                ).setParseAction(lambda t:t[0].strip()))
+            else:
+                if ignoreExpr is not None:
+                    content = (Combine(OneOrMore(~ignoreExpr +
+                                    ~Literal(opener) + ~Literal(closer) +
+                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+                else:
+                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
+                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+        else:
+            raise ValueError("opening and closing arguments must be strings if no content expression is given")
+    ret = Forward()
+    if ignoreExpr is not None:
+        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
+    else:
+        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content )  + Suppress(closer) )
+    ret.setName('nested %s%s expression' % (opener,closer))
+    return ret
+
+def indentedBlock(blockStatementExpr, indentStack, indent=True):
+    """Helper method for defining space-delimited indentation blocks,
+    such as those used to define block statements in Python source code.
+
+    Parameters:
+
+     - blockStatementExpr - expression defining syntax of statement that
+       is repeated within the indented block
+     - indentStack - list created by caller to manage indentation stack
+       (multiple statementWithIndentedBlock expressions within a single
+       grammar should share a common indentStack)
+     - indent - boolean indicating whether block must be indented beyond
+       the current level; set to False for a block of left-most
+       statements (default= ``True``)
+
+    A valid block must contain at least one ``blockStatement``.
+
+    Example::
+
+        data = '''
+        def A(z):
+          A1
+          B = 100
+          G = A2
+          A2
+          A3
+        B
+        def BB(a,b,c):
+          BB1
+          def BBA():
+            bba1
+            bba2
+            bba3
+        C
+        D
+        def spam(x,y):
+             def eggs(z):
+                 pass
+        '''
+
+
+        indentStack = [1]
+        stmt = Forward()
+
+        identifier = Word(alphas, alphanums)
+        funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
+        func_body = indentedBlock(stmt, indentStack)
+        funcDef = Group( funcDecl + func_body )
+
+        rvalue = Forward()
+        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
+        rvalue << (funcCall | identifier | Word(nums))
+        assignment = Group(identifier + "=" + rvalue)
+        stmt << ( funcDef | assignment | identifier )
+
+        module_body = OneOrMore(stmt)
+
+        parseTree = module_body.parseString(data)
+        parseTree.pprint()
+
+    prints::
+
+        [['def',
+          'A',
+          ['(', 'z', ')'],
+          ':',
+          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
+         'B',
+         ['def',
+          'BB',
+          ['(', 'a', 'b', 'c', ')'],
+          ':',
+          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
+         'C',
+         'D',
+         ['def',
+          'spam',
+          ['(', 'x', 'y', ')'],
+          ':',
+          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
+    """
+    backup_stack = indentStack[:]
+
+    def reset_stack():
+        indentStack[:] = backup_stack
+
+    def checkPeerIndent(s,l,t):
+        if l >= len(s): return
+        curCol = col(l,s)
+        if curCol != indentStack[-1]:
+            if curCol > indentStack[-1]:
+                raise ParseException(s,l,"illegal nesting")
+            raise ParseException(s,l,"not a peer entry")
+
+    def checkSubIndent(s,l,t):
+        curCol = col(l,s)
+        if curCol > indentStack[-1]:
+            indentStack.append( curCol )
+        else:
+            raise ParseException(s,l,"not a subentry")
+
+    def checkUnindent(s,l,t):
+        if l >= len(s): return
+        curCol = col(l,s)
+        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
+            raise ParseException(s,l,"not an unindent")
+        indentStack.pop()
+
+    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
+    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
+    PEER   = Empty().setParseAction(checkPeerIndent).setName('')
+    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
+    if indent:
+        smExpr = Group( Optional(NL) +
+            #~ FollowedBy(blockStatementExpr) +
+            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
+    else:
+        smExpr = Group( Optional(NL) +
+            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
+    smExpr.setFailAction(lambda a, b, c, d: reset_stack())
+    blockStatementExpr.ignore(_bslash + LineEnd())
+    return smExpr.setName('indented block')
+
+alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
+punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
+
+anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
+_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
+commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
+def replaceHTMLEntity(t):
+    """Helper parser action to replace common HTML entities with their special characters"""
+    return _htmlEntityMap.get(t.entity)
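+
+# Usage sketch: wire replaceHTMLEntity into commonHTMLEntity to decode
+# entities during a scan, e.g.
+#     (commonHTMLEntity.setParseAction(replaceHTMLEntity)
+#         .transformString("To serve &lt;human&gt; &amp; friends"))
+#     # -> 'To serve <human> & friends'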
+
+# it's easy to get these comment structures wrong - they're very common, so may as well make them available
+cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
+"Comment of the form ``/* ... */``"
+
+htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
+"Comment of the form ``<!-- ... -->``"
+
+restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
+dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
+"Comment of the form ``// ... (to end of line)``"
+
+cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment")
+"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"
+
+javaStyleComment = cppStyleComment
+"Same as :class:`cppStyleComment`"
+
+pythonStyleComment = Regex(r"#.*").setName("Python style comment")
+"Comment of the form ``# ... (to end of line)``"
+
+_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
+                                  Optional( Word(" \t") +
+                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
+commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
+"""(Deprecated) Predefined expression of 1 or more printable words or
+quoted strings, separated by commas.
+
+This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.
+"""
+
+# some other useful expressions - using lower-case class name since we are really using this as a namespace
+class pyparsing_common:
+    """Here are some common low-level expressions that may be useful in
+    jump-starting parser development:
+
+     - numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
+       :class:`scientific notation<sci_real>`)
+     - common :class:`programming identifiers<identifier>`
+     - network addresses (:class:`MAC<mac_address>`,
+       :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
+     - ISO8601 :class:`dates<iso8601_date>` and
+       :class:`datetime<iso8601_datetime>`
+     - :class:`UUID<uuid>`
+     - :class:`comma-separated list<comma_separated_list>`
+
+    Parse actions:
+
+     - :class:`convertToInteger`
+     - :class:`convertToFloat`
+     - :class:`convertToDate`
+     - :class:`convertToDatetime`
+     - :class:`stripHTMLTags`
+     - :class:`upcaseTokens`
+     - :class:`downcaseTokens`
+
+    Example::
+
+        pyparsing_common.number.runTests('''
+            # any int or real number, returned as the appropriate type
+            100
+            -100
+            +100
+            3.14159
+            6.02e23
+            1e-12
+            ''')
+
+        pyparsing_common.fnumber.runTests('''
+            # any int or real number, returned as float
+            100
+            -100
+            +100
+            3.14159
+            6.02e23
+            1e-12
+            ''')
+
+        pyparsing_common.hex_integer.runTests('''
+            # hex numbers
+            100
+            FF
+            ''')
+
+        pyparsing_common.fraction.runTests('''
+            # fractions
+            1/2
+            -3/4
+            ''')
+
+        pyparsing_common.mixed_integer.runTests('''
+            # mixed fractions
+            1
+            1/2
+            -3/4
+            1-3/4
+            ''')
+
+        import uuid
+        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+        pyparsing_common.uuid.runTests('''
+            # uuid
+            12345678-1234-5678-1234-567812345678
+            ''')
+
+    prints::
+
+        # any int or real number, returned as the appropriate type
+        100
+        [100]
+
+        -100
+        [-100]
+
+        +100
+        [100]
+
+        3.14159
+        [3.14159]
+
+        6.02e23
+        [6.02e+23]
+
+        1e-12
+        [1e-12]
+
+        # any int or real number, returned as float
+        100
+        [100.0]
+
+        -100
+        [-100.0]
+
+        +100
+        [100.0]
+
+        3.14159
+        [3.14159]
+
+        6.02e23
+        [6.02e+23]
+
+        1e-12
+        [1e-12]
+
+        # hex numbers
+        100
+        [256]
+
+        FF
+        [255]
+
+        # fractions
+        1/2
+        [0.5]
+
+        -3/4
+        [-0.75]
+
+        # mixed fractions
+        1
+        [1]
+
+        1/2
+        [0.5]
+
+        -3/4
+        [-0.75]
+
+        1-3/4
+        [1.75]
+
+        # uuid
+        12345678-1234-5678-1234-567812345678
+        [UUID('12345678-1234-5678-1234-567812345678')]
+    """
+
+    convertToInteger = tokenMap(int)
+    """
+    Parse action for converting parsed integers to Python int
+    """
+
+    convertToFloat = tokenMap(float)
+    """
+    Parse action for converting parsed numbers to Python float
+    """
+
+    integer = Word(nums).setName("integer").setParseAction(convertToInteger)
+    """expression that parses an unsigned integer, returns an int"""
+
+    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
+    """expression that parses a hexadecimal integer, returns an int"""
+
+    signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
+    """expression that parses an integer with optional leading sign, returns an int"""
+
+    fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
+    """fractional expression of an integer divided by an integer, returns a float"""
+    fraction.addParseAction(lambda t: t[0]/t[-1])
+
+    mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
+    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
+    mixed_integer.addParseAction(sum)
+
+    real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
+    """expression that parses a floating point number and returns a float"""
+
+    sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
+    """expression that parses a floating point number with optional
+    scientific notation and returns a float"""
+
+    # streamlining this expression makes the docs nicer-looking
+    number = (sci_real | real | signed_integer).streamline()
+    """any numeric expression, returns the corresponding Python type"""
+
+    fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
+    """any int or real number, returned as float"""
+
+    identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
+    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
+
+    ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
+    "IPv4 address (``0.0.0.0 - 255.255.255.255``)"
+
+    _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
+    _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
+    _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
+    _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
+    _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
+    ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
+    "IPv6 address (long, short, or mixed form)"
+
+    mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
+    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
+
+    @staticmethod
+    def convertToDate(fmt="%Y-%m-%d"):
+        """
+        Helper to create a parse action for converting a parsed date string to a Python datetime.date
+
+        Params -
+         - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
+
+        Example::
+
+            date_expr = pyparsing_common.iso8601_date.copy()
+            date_expr.setParseAction(pyparsing_common.convertToDate())
+            print(date_expr.parseString("1999-12-31"))
+
+        prints::
+
+            [datetime.date(1999, 12, 31)]
+        """
+        def cvt_fn(s,l,t):
+            try:
+                return datetime.strptime(t[0], fmt).date()
+            except ValueError as ve:
+                raise ParseException(s, l, str(ve))
+        return cvt_fn
+
+    @staticmethod
+    def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
+        """Helper to create a parse action for converting parsed
+        datetime string to Python datetime.datetime
+
+        Params -
+         - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
+
+        Example::
+
+            dt_expr = pyparsing_common.iso8601_datetime.copy()
+            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
+            print(dt_expr.parseString("1999-12-31T23:59:59.999"))
+
+        prints::
+
+            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
+        """
+        def cvt_fn(s,l,t):
+            try:
+                return datetime.strptime(t[0], fmt)
+            except ValueError as ve:
+                raise ParseException(s, l, str(ve))
+        return cvt_fn
+
+    iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
+    "ISO8601 date (``yyyy-mm-dd``)"
+
+    iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
+    "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
+
+    uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
+    "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
+
+    _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
+    @staticmethod
+    def stripHTMLTags(s, l, tokens):
+        """Parse action to remove HTML tags from web page HTML source
+
+        Example::
+
+            # strip HTML links from normal text
+            text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
+            td,td_end = makeHTMLTags("TD")
+            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
+            print(table_text.parseString(text).body)
+
+        Prints::
+
+            More info at the pyparsing wiki page
+        """
+        return pyparsing_common._html_stripper.transformString(tokens[0])
+
+    _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
+                                        + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
+    comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
+    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
+
+    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
+    """Parse action to convert tokens to upper case."""
+
+    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
+    """Parse action to convert tokens to lower case."""
+
+
+class _lazyclassproperty(object):
+    def __init__(self, fn):
+        self.fn = fn
+        self.__doc__ = fn.__doc__
+        self.__name__ = fn.__name__
+
+    def __get__(self, obj, cls):
+        if cls is None:
+            cls = type(obj)
+        if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', []) for superclass in cls.__mro__[1:]):
+            cls._intern = {}
+        attrname = self.fn.__name__
+        if attrname not in cls._intern:
+            cls._intern[attrname] = self.fn(cls)
+        return cls._intern[attrname]
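+
+# Behavior sketch: the first access of a decorated attribute on a class calls
+# fn(cls) and caches the result in that class's own _intern dict; subsequent
+# accesses return the cached value without recomputing.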
+
+
+class unicode_set(object):
+    """
+    A set of Unicode characters, for language-specific strings for
+    ``alphas``, ``nums``, ``alphanums``, and ``printables``.
+    A unicode_set is defined by a list of ranges in the Unicode character
+    set, in a class attribute ``_ranges``, such as::
+
+        _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]
+
+    A unicode set can also be defined using multiple inheritance of other unicode sets::
+
+        class CJK(Chinese, Japanese, Korean):
+            pass
+    """
+    _ranges = []
+
+    @classmethod
+    def _get_chars_for_ranges(cls):
+        ret = []
+        for cc in cls.__mro__:
+            if cc is unicode_set:
+                break
+            for rr in cc._ranges:
+                ret.extend(range(rr[0], rr[-1]+1))
+        return [unichr(c) for c in sorted(set(ret))]
+
+    @_lazyclassproperty
+    def printables(cls):
+        "all non-whitespace characters in this range"
+        return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges()))
+
+    @_lazyclassproperty
+    def alphas(cls):
+        "all alphabetic characters in this range"
+        return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges()))
+
+    @_lazyclassproperty
+    def nums(cls):
+        "all numeric digit characters in this range"
+        return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges()))
+
+    @_lazyclassproperty
+    def alphanums(cls):
+        "all alphanumeric characters in this range"
+        return cls.alphas + cls.nums
+
+
+class pyparsing_unicode(unicode_set):
+    """
+    A namespace class for defining common language unicode_sets.
+    """
+    _ranges = [(32, sys.maxunicode)]
+
+    class Latin1(unicode_set):
+        "Unicode set for Latin-1 Unicode Character Range"
+        _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]
+
+    class LatinA(unicode_set):
+        "Unicode set for Latin-A Unicode Character Range"
+        _ranges = [(0x0100, 0x017f),]
+
+    class LatinB(unicode_set):
+        "Unicode set for Latin-B Unicode Character Range"
+        _ranges = [(0x0180, 0x024f),]
+
+    class Greek(unicode_set):
+        "Unicode set for Greek Unicode Character Ranges"
+        _ranges = [
+            (0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d),
+            (0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4),
+            (0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe),
+        ]
+
+    class Cyrillic(unicode_set):
+        "Unicode set for Cyrillic Unicode Character Range"
+        _ranges = [(0x0400, 0x04ff)]
+
+    class Chinese(unicode_set):
+        "Unicode set for Chinese Unicode Character Range"
+        _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f), ]
+
+    class Japanese(unicode_set):
+        "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
+        _ranges = [ ]
+
+        class Kanji(unicode_set):
+            "Unicode set for Kanji Unicode Character Range"
+            _ranges = [(0x4e00, 0x9fbf), (0x3000, 0x303f), ]
+
+        class Hiragana(unicode_set):
+            "Unicode set for Hiragana Unicode Character Range"
+            _ranges = [(0x3040, 0x309f), ]
+
+        class Katakana(unicode_set):
+            "Unicode set for Katakana  Unicode Character Range"
+            _ranges = [(0x30a0, 0x30ff), ]
+
+    class Korean(unicode_set):
+        "Unicode set for Korean Unicode Character Range"
+        _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f), ]
+
+    class CJK(Chinese, Japanese, Korean):
+        "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
+        pass
+
+    class Thai(unicode_set):
+        "Unicode set for Thai Unicode Character Range"
+        _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b), ]
+
+    class Arabic(unicode_set):
+        "Unicode set for Arabic Unicode Character Range"
+        _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f), ]
+
+    class Hebrew(unicode_set):
+        "Unicode set for Hebrew Unicode Character Range"
+        _ranges = [(0x0590, 0x05ff), ]
+
+    class Devanagari(unicode_set):
+        "Unicode set for Devanagari Unicode Character Range"
+        _ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)]
+
+pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges
+                                      + pyparsing_unicode.Japanese.Hiragana._ranges
+                                      + pyparsing_unicode.Japanese.Katakana._ranges)
+
+# define ranges in language character sets
+if PY_3:
+    setattr(pyparsing_unicode, "العربية", pyparsing_unicode.Arabic)
+    setattr(pyparsing_unicode, "中文", pyparsing_unicode.Chinese)
+    setattr(pyparsing_unicode, "кириллица", pyparsing_unicode.Cyrillic)
+    setattr(pyparsing_unicode, "Ελληνικά", pyparsing_unicode.Greek)
+    setattr(pyparsing_unicode, "עִברִית", pyparsing_unicode.Hebrew)
+    setattr(pyparsing_unicode, "日本語", pyparsing_unicode.Japanese)
+    setattr(pyparsing_unicode.Japanese, "漢字", pyparsing_unicode.Japanese.Kanji)
+    setattr(pyparsing_unicode.Japanese, "カタカナ", pyparsing_unicode.Japanese.Katakana)
+    setattr(pyparsing_unicode.Japanese, "ひらがな", pyparsing_unicode.Japanese.Hiragana)
+    setattr(pyparsing_unicode, "한국어", pyparsing_unicode.Korean)
+    setattr(pyparsing_unicode, "ไทย", pyparsing_unicode.Thai)
+    setattr(pyparsing_unicode, "देवनागरी", pyparsing_unicode.Devanagari)
+
+
+if __name__ == "__main__":
+
+    selectToken    = CaselessLiteral("select")
+    fromToken      = CaselessLiteral("from")
+
+    ident          = Word(alphas, alphanums + "_$")
+
+    columnName     = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+    columnNameList = Group(delimitedList(columnName)).setName("columns")
+    columnSpec     = ('*' | columnNameList)
+
+    tableName      = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+    tableNameList  = Group(delimitedList(tableName)).setName("tables")
+
+    simpleSQL      = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
+
+    # demo runTests method, including embedded comments in test string
+    simpleSQL.runTests("""
+        # '*' as column list and dotted table name
+        select * from SYS.XYZZY
+
+        # caseless match on "SELECT", and casts back to "select"
+        SELECT * from XYZZY, ABC
+
+        # list of column names, and mixed case SELECT keyword
+        Select AA,BB,CC from Sys.dual
+
+        # multiple tables
+        Select A, B, C from Sys.dual, Table2
+
+        # invalid SELECT keyword - should fail
+        Xelect A, B, C from Sys.dual
+
+        # incomplete command - should fail
+        Select
+
+        # invalid column name - should fail
+        Select ^^^ frox Sys.dual
+
+        """)
+
+    pyparsing_common.number.runTests("""
+        100
+        -100
+        +100
+        3.14159
+        6.02e23
+        1e-12
+        """)
+
+    # any int or real number, returned as float
+    pyparsing_common.fnumber.runTests("""
+        100
+        -100
+        +100
+        3.14159
+        6.02e23
+        1e-12
+        """)
+
+    pyparsing_common.hex_integer.runTests("""
+        100
+        FF
+        """)
+
+    import uuid
+    pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+    pyparsing_common.uuid.runTests("""
+        12345678-1234-5678-1234-567812345678
+        """)
diff --git a/lib/python3.7/site-packages/pip/_vendor/pytoml/__init__.py b/lib/python3.7/site-packages/pip/_vendor/pytoml/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ed060ff52973e5d9f9faa4afb972ba88beefced
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pytoml/__init__.py
@@ -0,0 +1,4 @@
+from .core import TomlError
+from .parser import load, loads
+from .test import translate_to_test
+from .writer import dump, dumps
\ No newline at end of file
diff --git a/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..18fd245c8e57c8121df6f1d4d8b88dd151f94969
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/core.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/core.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f320cebd08699e683741aab20a2c2d0888b67d9c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/core.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/parser.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/parser.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ddd3c44c86b642fd0cd5507b6722800c89ffb10c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/parser.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/test.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/test.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..07304f74a64d3ed0929d7e03da1d6cbbdfffa0fe
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/test.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/utils.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..195327f21d75d5ff6b434afc82e3b054273f488b
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/writer.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/writer.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36f9977f69d661526819a9912dafee622012b82a
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/pytoml/__pycache__/writer.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/pytoml/core.py b/lib/python3.7/site-packages/pip/_vendor/pytoml/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..c182734e1caa00515a06adfb9eab7a22911e658b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pytoml/core.py
@@ -0,0 +1,13 @@
+class TomlError(RuntimeError):
+    def __init__(self, message, line, col, filename):
+        RuntimeError.__init__(self, message, line, col, filename)
+        self.message = message
+        self.line = line
+        self.col = col
+        self.filename = filename
+
+    def __str__(self):
+        return '{}({}, {}): {}'.format(self.filename, self.line, self.col, self.message)
+
+    def __repr__(self):
+        return 'TomlError({!r}, {!r}, {!r}, {!r})'.format(self.message, self.line, self.col, self.filename)
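+
+# Formatting sketch:
+#     str(TomlError('unexpected token', 3, 7, 'cfg.toml'))
+#     # -> 'cfg.toml(3, 7): unexpected token'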
diff --git a/lib/python3.7/site-packages/pip/_vendor/pytoml/parser.py b/lib/python3.7/site-packages/pip/_vendor/pytoml/parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..3493aa644cac13ab2542f967815593f1690052ec
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pytoml/parser.py
@@ -0,0 +1,341 @@
+import string, re, sys, datetime
+from .core import TomlError
+from .utils import rfc3339_re, parse_rfc3339_re
+
+if sys.version_info[0] == 2:
+    _chr = unichr
+else:
+    _chr = chr
+
+def load(fin, translate=lambda t, x, v: v, object_pairs_hook=dict):
+    return loads(fin.read(), translate=translate, object_pairs_hook=object_pairs_hook, filename=getattr(fin, 'name', repr(fin)))
+
+def loads(s, filename='<string>', translate=lambda t, x, v: v, object_pairs_hook=dict):
+    if isinstance(s, bytes):
+        s = s.decode('utf-8')
+
+    s = s.replace('\r\n', '\n')
+
+    root = object_pairs_hook()
+    tables = object_pairs_hook()
+    scope = root
+
+    src = _Source(s, filename=filename)
+    ast = _p_toml(src, object_pairs_hook=object_pairs_hook)
+
+    def error(msg):
+        raise TomlError(msg, pos[0], pos[1], filename)
+
+    def process_value(v, object_pairs_hook):
+        kind, text, value, pos = v
+        if kind == 'str' and value.startswith('\n'):
+            value = value[1:]
+        if kind == 'array':
+            if value and any(k != value[0][0] for k, t, v, p in value[1:]):
+                error('array-type-mismatch')
+            value = [process_value(item, object_pairs_hook=object_pairs_hook) for item in value]
+        elif kind == 'table':
+            value = object_pairs_hook([(k, process_value(value[k], object_pairs_hook=object_pairs_hook)) for k in value])
+        return translate(kind, text, value)
+
+    for kind, value, pos in ast:
+        if kind == 'kv':
+            k, v = value
+            if k in scope:
+                error('duplicate_keys. Key "{0}" was used more than once.'.format(k))
+            scope[k] = process_value(v, object_pairs_hook=object_pairs_hook)
+        else:
+            is_table_array = (kind == 'table_array')
+            cur = tables
+            for name in value[:-1]:
+                if isinstance(cur.get(name), list):
+                    d, cur = cur[name][-1]
+                else:
+                    d, cur = cur.setdefault(name, (None, object_pairs_hook()))
+
+            scope = object_pairs_hook()
+            name = value[-1]
+            if name not in cur:
+                if is_table_array:
+                    cur[name] = [(scope, object_pairs_hook())]
+                else:
+                    cur[name] = (scope, object_pairs_hook())
+            elif isinstance(cur[name], list):
+                if not is_table_array:
+                    error('table_type_mismatch')
+                cur[name].append((scope, object_pairs_hook()))
+            else:
+                if is_table_array:
+                    error('table_type_mismatch')
+                old_scope, next_table = cur[name]
+                if old_scope is not None:
+                    error('duplicate_tables')
+                cur[name] = (scope, next_table)
+
+    def merge_tables(scope, tables):
+        if scope is None:
+            scope = object_pairs_hook()
+        for k in tables:
+            if k in scope:
+                error('key_table_conflict')
+            v = tables[k]
+            if isinstance(v, list):
+                scope[k] = [merge_tables(sc, tbl) for sc, tbl in v]
+            else:
+                scope[k] = merge_tables(v[0], v[1])
+        return scope
+
+    return merge_tables(root, tables)
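+
+# Usage sketch:
+#     loads('a = 1\n[tbl]\nb = "x"\n')
+#     # -> {'a': 1, 'tbl': {'b': 'x'}}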
+
+class _Source:
+    def __init__(self, s, filename=None):
+        self.s = s
+        self._pos = (1, 1)
+        self._last = None
+        self._filename = filename
+        self.backtrack_stack = []
+
+    def last(self):
+        return self._last
+
+    def pos(self):
+        return self._pos
+
+    def fail(self):
+        return self._expect(None)
+
+    def consume_dot(self):
+        if self.s:
+            self._last = self.s[0]
+            self.s = self.s[1:]
+            self._advance(self._last)
+            return self._last
+        return None
+
+    def expect_dot(self):
+        return self._expect(self.consume_dot())
+
+    def consume_eof(self):
+        if not self.s:
+            self._last = ''
+            return True
+        return False
+
+    def expect_eof(self):
+        return self._expect(self.consume_eof())
+
+    def consume(self, s):
+        if self.s.startswith(s):
+            self.s = self.s[len(s):]
+            self._last = s
+            self._advance(s)
+            return True
+        return False
+
+    def expect(self, s):
+        return self._expect(self.consume(s))
+
+    def consume_re(self, re):
+        m = re.match(self.s)
+        if m:
+            self.s = self.s[len(m.group(0)):]
+            self._last = m
+            self._advance(m.group(0))
+            return m
+        return None
+
+    def expect_re(self, re):
+        return self._expect(self.consume_re(re))
+
+    def __enter__(self):
+        self.backtrack_stack.append((self.s, self._pos))
+
+    def __exit__(self, type, value, traceback):
+        if type is None:
+            self.backtrack_stack.pop()
+        else:
+            self.s, self._pos = self.backtrack_stack.pop()
+        return type == TomlError
+
+    def commit(self):
+        self.backtrack_stack[-1] = (self.s, self._pos)
+
+    def _expect(self, r):
+        if not r:
+            raise TomlError('msg', self._pos[0], self._pos[1], self._filename)
+        return r
+
+    def _advance(self, s):
+        suffix_pos = s.rfind('\n')
+        if suffix_pos == -1:
+            self._pos = (self._pos[0], self._pos[1] + len(s))
+        else:
+            self._pos = (self._pos[0] + s.count('\n'), len(s) - suffix_pos)
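+
+# Backtracking sketch: `with src:` snapshots (text, position); a TomlError
+# raised inside the block rolls back to the snapshot and is suppressed, while
+# src.commit() re-bases the snapshot so progress made so far is kept.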
+
+_ews_re = re.compile(r'(?:[ \t]|#[^\n]*\n|#[^\n]*\Z|\n)*')
+def _p_ews(s):
+    s.expect_re(_ews_re)
+
+_ws_re = re.compile(r'[ \t]*')
+def _p_ws(s):
+    s.expect_re(_ws_re)
+
+_escapes = { 'b': '\b', 'n': '\n', 'r': '\r', 't': '\t', '"': '"',
+    '\\': '\\', 'f': '\f' }
+
+_basicstr_re = re.compile(r'[^"\\\000-\037]*')
+_short_uni_re = re.compile(r'u([0-9a-fA-F]{4})')
+_long_uni_re = re.compile(r'U([0-9a-fA-F]{8})')
+_escapes_re = re.compile(r'[btnfr\"\\]')
+_newline_esc_re = re.compile('\n[ \t\n]*')
+def _p_basicstr_content(s, content=_basicstr_re):
+    res = []
+    while True:
+        res.append(s.expect_re(content).group(0))
+        if not s.consume('\\'):
+            break
+        if s.consume_re(_newline_esc_re):
+            pass
+        elif s.consume_re(_short_uni_re) or s.consume_re(_long_uni_re):
+            v = int(s.last().group(1), 16)
+            if 0xd800 <= v < 0xe000:
+                s.fail()
+            res.append(_chr(v))
+        else:
+            s.expect_re(_escapes_re)
+            res.append(_escapes[s.last().group(0)])
+    return ''.join(res)
+
+_key_re = re.compile(r'[0-9a-zA-Z-_]+')
+def _p_key(s):
+    with s:
+        s.expect('"')
+        r = _p_basicstr_content(s, _basicstr_re)
+        s.expect('"')
+        return r
+    if s.consume('\''):
+        if s.consume('\'\''):
+            r = s.expect_re(_litstr_ml_re).group(0)
+            s.expect('\'\'\'')
+        else:
+            r = s.expect_re(_litstr_re).group(0)
+            s.expect('\'')
+        return r
+    return s.expect_re(_key_re).group(0)
+
+_float_re = re.compile(r'[+-]?(?:0|[1-9](?:_?\d)*)(?:\.\d(?:_?\d)*)?(?:[eE][+-]?(?:\d(?:_?\d)*))?')
+
+_basicstr_ml_re = re.compile(r'(?:""?(?!")|[^"\\\000-\011\013-\037])*')
+_litstr_re = re.compile(r"[^'\000\010\012-\037]*")
+_litstr_ml_re = re.compile(r"(?:(?:|'|'')(?:[^'\000-\010\013-\037]))*")
+def _p_value(s, object_pairs_hook):
+    pos = s.pos()
+
+    if s.consume('true'):
+        return 'bool', s.last(), True, pos
+    if s.consume('false'):
+        return 'bool', s.last(), False, pos
+
+    if s.consume('"'):
+        if s.consume('""'):
+            r = _p_basicstr_content(s, _basicstr_ml_re)
+            s.expect('"""')
+        else:
+            r = _p_basicstr_content(s, _basicstr_re)
+            s.expect('"')
+        return 'str', r, r, pos
+
+    if s.consume('\''):
+        if s.consume('\'\''):
+            r = s.expect_re(_litstr_ml_re).group(0)
+            s.expect('\'\'\'')
+        else:
+            r = s.expect_re(_litstr_re).group(0)
+            s.expect('\'')
+        return 'str', r, r, pos
+
+    if s.consume_re(rfc3339_re):
+        m = s.last()
+        return 'datetime', m.group(0), parse_rfc3339_re(m), pos
+
+    if s.consume_re(_float_re):
+        m = s.last().group(0)
+        r = m.replace('_','')
+        if '.' in m or 'e' in m or 'E' in m:
+            return 'float', m, float(r), pos
+        else:
+            return 'int', m, int(r, 10), pos
+
+    if s.consume('['):
+        items = []
+        with s:
+            while True:
+                _p_ews(s)
+                items.append(_p_value(s, object_pairs_hook=object_pairs_hook))
+                s.commit()
+                _p_ews(s)
+                s.expect(',')
+                s.commit()
+        _p_ews(s)
+        s.expect(']')
+        return 'array', None, items, pos
+
+    if s.consume('{'):
+        _p_ws(s)
+        items = object_pairs_hook()
+        if not s.consume('}'):
+            k = _p_key(s)
+            _p_ws(s)
+            s.expect('=')
+            _p_ws(s)
+            items[k] = _p_value(s, object_pairs_hook=object_pairs_hook)
+            _p_ws(s)
+            while s.consume(','):
+                _p_ws(s)
+                k = _p_key(s)
+                _p_ws(s)
+                s.expect('=')
+                _p_ws(s)
+                items[k] = _p_value(s, object_pairs_hook=object_pairs_hook)
+                _p_ws(s)
+            s.expect('}')
+        return 'table', None, items, pos
+
+    s.fail()
+
+def _p_stmt(s, object_pairs_hook):
+    pos = s.pos()
+    if s.consume('['):
+        is_array = s.consume('[')
+        _p_ws(s)
+        keys = [_p_key(s)]
+        _p_ws(s)
+        while s.consume('.'):
+            _p_ws(s)
+            keys.append(_p_key(s))
+            _p_ws(s)
+        s.expect(']')
+        if is_array:
+            s.expect(']')
+        return 'table_array' if is_array else 'table', keys, pos
+
+    key = _p_key(s)
+    _p_ws(s)
+    s.expect('=')
+    _p_ws(s)
+    value = _p_value(s, object_pairs_hook=object_pairs_hook)
+    return 'kv', (key, value), pos
+
+_stmtsep_re = re.compile(r'(?:[ \t]*(?:#[^\n]*)?\n)+[ \t]*')
+def _p_toml(s, object_pairs_hook):
+    stmts = []
+    _p_ews(s)
+    with s:
+        stmts.append(_p_stmt(s, object_pairs_hook=object_pairs_hook))
+        while True:
+            s.commit()
+            s.expect_re(_stmtsep_re)
+            stmts.append(_p_stmt(s, object_pairs_hook=object_pairs_hook))
+    _p_ews(s)
+    s.expect_eof()
+    return stmts
diff --git a/lib/python3.7/site-packages/pip/_vendor/pytoml/test.py b/lib/python3.7/site-packages/pip/_vendor/pytoml/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec8abfc650264a5295447da59a242e950d25802d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pytoml/test.py
@@ -0,0 +1,30 @@
+import datetime
+from .utils import format_rfc3339
+
+try:
+    _string_types = (str, unicode)
+    _int_types = (int, long)
+except NameError:
+    _string_types = str
+    _int_types = int
+
+def translate_to_test(v):
+    if isinstance(v, dict):
+        return { k: translate_to_test(v) for k, v in v.items() }
+    if isinstance(v, list):
+        a = [translate_to_test(x) for x in v]
+        if v and isinstance(v[0], dict):
+            return a
+        else:
+            return {'type': 'array', 'value': a}
+    if isinstance(v, datetime.datetime):
+        return {'type': 'datetime', 'value': format_rfc3339(v)}
+    if isinstance(v, bool):
+        return {'type': 'bool', 'value': 'true' if v else 'false'}
+    if isinstance(v, _int_types):
+        return {'type': 'integer', 'value': str(v)}
+    if isinstance(v, float):
+        return {'type': 'float', 'value': '{:.17}'.format(v)}
+    if isinstance(v, _string_types):
+        return {'type': 'string', 'value': v}
+    raise RuntimeError('unexpected value: {!r}'.format(v))
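+
+# e.g. translate_to_test({'n': 1}) -> {'n': {'type': 'integer', 'value': '1'}}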
diff --git a/lib/python3.7/site-packages/pip/_vendor/pytoml/utils.py b/lib/python3.7/site-packages/pip/_vendor/pytoml/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..636a680b06c48061aa566639372998c6d19551ca
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pytoml/utils.py
@@ -0,0 +1,67 @@
+import datetime
+import re
+
+rfc3339_re = re.compile(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(\.\d+)?(?:Z|([+-]\d{2}):(\d{2}))')
+
+def parse_rfc3339(v):
+    m = rfc3339_re.match(v)
+    if not m or m.group(0) != v:
+        return None
+    return parse_rfc3339_re(m)
+
+def parse_rfc3339_re(m):
+    r = map(int, m.groups()[:6])
+    if m.group(7):
+        micro = float(m.group(7))
+    else:
+        micro = 0
+
+    if m.group(8):
+        # the sign of the hour offset applies to the minutes field as well
+        sign = -1 if m.group(8).startswith('-') else 1
+        g = int(m.group(8), 10) * 60 + sign * int(m.group(9), 10)
+        tz = _TimeZone(datetime.timedelta(0, g * 60))
+    else:
+        tz = _TimeZone(datetime.timedelta(0, 0))
+
+    y, m, d, H, M, S = r
+    return datetime.datetime(y, m, d, H, M, S, int(micro * 1000000), tz)
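+
+# Parsing sketch:
+#     parse_rfc3339('1979-05-27T07:32:00Z')
+#     # -> a timezone-aware datetime.datetime for 1979-05-27 07:32:00+00:00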
+
+
+def format_rfc3339(v):
+    offs = v.utcoffset()
+    offs = int(offs.total_seconds()) // 60 if offs is not None else 0
+
+    if offs == 0:
+        suffix = 'Z'
+    else:
+        if offs > 0:
+            suffix = '+'
+        else:
+            suffix = '-'
+            offs = -offs
+        suffix = '{0}{1:02}:{2:02}'.format(suffix, offs // 60, offs % 60)
+
+    if v.microsecond:
+        return v.strftime('%Y-%m-%dT%H:%M:%S.%f') + suffix
+    else:
+        return v.strftime('%Y-%m-%dT%H:%M:%S') + suffix
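+
+# Round-trip sketch:
+#     format_rfc3339(parse_rfc3339('1979-05-27T07:32:00Z'))
+#     # -> '1979-05-27T07:32:00Z'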
+
+class _TimeZone(datetime.tzinfo):
+    def __init__(self, offset):
+        self._offset = offset
+
+    def utcoffset(self, dt):
+        return self._offset
+
+    def dst(self, dt):
+        return None
+
+    def tzname(self, dt):
+        m = self._offset.total_seconds() // 60
+        if m < 0:
+            res = '-'
+            m = -m
+        else:
+            res = '+'
+        h = m // 60
+        m = m - h * 60
+        return '{}{:.02}{:.02}'.format(res, h, m)
diff --git a/lib/python3.7/site-packages/pip/_vendor/pytoml/writer.py b/lib/python3.7/site-packages/pip/_vendor/pytoml/writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..73b5089c241ec6a5f94899cda7807c3fc27ed316
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/pytoml/writer.py
@@ -0,0 +1,106 @@
+from __future__ import unicode_literals
+import io, datetime, math, string, sys
+
+from .utils import format_rfc3339
+
+if sys.version_info[0] == 3:
+    long = int
+    unicode = str
+
+
+def dumps(obj, sort_keys=False):
+    fout = io.StringIO()
+    dump(obj, fout, sort_keys=sort_keys)
+    return fout.getvalue()
+
+
+_escapes = {'\n': 'n', '\r': 'r', '\\': '\\', '\t': 't', '\b': 'b', '\f': 'f', '"': '"'}
+
+
+def _escape_string(s):
+    res = []
+    start = 0
+
+    def flush():
+        if start != i:
+            res.append(s[start:i])
+        return i + 1
+
+    i = 0
+    while i < len(s):
+        c = s[i]
+        if c in '"\\\n\r\t\b\f':
+            start = flush()
+            res.append('\\' + _escapes[c])
+        elif ord(c) < 0x20:
+            start = flush()
+            res.append('\\u%04x' % ord(c))
+        i += 1
+
+    flush()
+    return '"' + ''.join(res) + '"'
+
+
+_key_chars = string.digits + string.ascii_letters + '-_'
+def _escape_id(s):
+    if any(c not in _key_chars for c in s):
+        return _escape_string(s)
+    return s
+
+
+def _format_value(v):
+    if isinstance(v, bool):
+        return 'true' if v else 'false'
+    if isinstance(v, int) or isinstance(v, long):
+        return unicode(v)
+    if isinstance(v, float):
+        if math.isnan(v) or math.isinf(v):
+            raise ValueError("{0} is not a valid TOML value".format(v))
+        else:
+            return repr(v)
+    elif isinstance(v, unicode) or isinstance(v, bytes):
+        return _escape_string(v)
+    elif isinstance(v, datetime.datetime):
+        return format_rfc3339(v)
+    elif isinstance(v, list):
+        return '[{0}]'.format(', '.join(_format_value(obj) for obj in v))
+    elif isinstance(v, dict):
+        return '{{{0}}}'.format(', '.join('{} = {}'.format(_escape_id(k), _format_value(obj)) for k, obj in v.items()))
+    else:
+        raise RuntimeError(v)
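+
+# e.g. _format_value(True) -> 'true', _format_value([1, 'a']) -> '[1, "a"]'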
+
+
+def dump(obj, fout, sort_keys=False):
+    tables = [((), obj, False)]
+
+    while tables:
+        name, table, is_array = tables.pop()
+        if name:
+            section_name = '.'.join(_escape_id(c) for c in name)
+            if is_array:
+                fout.write('[[{0}]]\n'.format(section_name))
+            else:
+                fout.write('[{0}]\n'.format(section_name))
+
+        table_keys = sorted(table.keys()) if sort_keys else table.keys()
+        new_tables = []
+        has_kv = False
+        for k in table_keys:
+            v = table[k]
+            if isinstance(v, dict):
+                new_tables.append((name + (k,), v, False))
+            elif isinstance(v, list) and v and all(isinstance(o, dict) for o in v):
+                new_tables.extend((name + (k,), d, True) for d in v)
+            elif v is None:
+                # based on mojombo's comment: https://github.com/toml-lang/toml/issues/146#issuecomment-25019344
+                fout.write(
+                    '#{} = null  # To use: uncomment and replace null with value\n'.format(_escape_id(k)))
+                has_kv = True
+            else:
+                fout.write('{0} = {1}\n'.format(_escape_id(k), _format_value(v)))
+                has_kv = True
+
+        tables.extend(reversed(new_tables))
+
+        if (name or has_kv) and tables:
+            fout.write('\n')
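+
+# Usage sketch (dict insertion order is preserved here, Python >= 3.7):
+#     dumps({'a': 1, 'tbl': {'b': 'x'}})
+#     # -> 'a = 1\n\n[tbl]\nb = "x"\n'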
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__init__.py b/lib/python3.7/site-packages/pip/_vendor/requests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..80c4ce1d213bc539d3418aec6ccb5f4ba94cd45b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/__init__.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+
+#   __
+#  /__)  _  _     _   _ _/   _
+# / (   (- (/ (/ (- _)  /  _)
+#          /
+
+"""
+Requests HTTP Library
+~~~~~~~~~~~~~~~~~~~~~
+
+Requests is an HTTP library, written in Python, for human beings. Basic GET
+usage:
+
+   >>> import requests
+   >>> r = requests.get('https://www.python.org')
+   >>> r.status_code
+   200
+   >>> 'Python is a programming language' in r.content
+   True
+
+... or POST:
+
+   >>> payload = dict(key1='value1', key2='value2')
+   >>> r = requests.post('https://httpbin.org/post', data=payload)
+   >>> print(r.text)
+   {
+     ...
+     "form": {
+       "key2": "value2",
+       "key1": "value1"
+     },
+     ...
+   }
+
+The other HTTP methods are supported - see `requests.api`. Full documentation
+is at <http://python-requests.org>.
+
+:copyright: (c) 2017 by Kenneth Reitz.
+:license: Apache 2.0, see LICENSE for more details.
+"""
+
+from pip._vendor import urllib3
+from pip._vendor import chardet
+import warnings
+from .exceptions import RequestsDependencyWarning
+
+
+def check_compatibility(urllib3_version, chardet_version):
+    urllib3_version = urllib3_version.split('.')
+    assert urllib3_version != ['dev']  # Verify urllib3 isn't installed from git.
+
+    # Sometimes, urllib3 only reports its version as 16.1.
+    if len(urllib3_version) == 2:
+        urllib3_version.append('0')
+
+    # Check urllib3 for compatibility.
+    major, minor, patch = urllib3_version  # noqa: F811
+    major, minor, patch = int(major), int(minor), int(patch)
+    # urllib3 >= 1.21.1, <= 1.24
+    assert major == 1
+    assert minor >= 21
+    assert minor <= 24
+
+    # Check chardet for compatibility.
+    major, minor, patch = chardet_version.split('.')[:3]
+    major, minor, patch = int(major), int(minor), int(patch)
+    # chardet >= 3.0.2, < 3.1.0
+    assert major == 3
+    assert minor < 1
+    assert patch >= 2
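+
+# Behavior sketch: check_compatibility('1.24.1', '3.0.4') passes silently,
+# while check_compatibility('2.0.0', '3.0.4') fails the `major == 1` assert.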
+
+
+def _check_cryptography(cryptography_version):
+    # cryptography < 1.3.4
+    try:
+        cryptography_version = list(map(int, cryptography_version.split('.')))
+    except ValueError:
+        return
+
+    if cryptography_version < [1, 3, 4]:
+        warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version)
+        warnings.warn(warning, RequestsDependencyWarning)
+
+# Check imported dependencies for compatibility.
+try:
+    check_compatibility(urllib3.__version__, chardet.__version__)
+except (AssertionError, ValueError):
+    warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported "
+                  "version!".format(urllib3.__version__, chardet.__version__),
+                  RequestsDependencyWarning)
+
+# Attempt to enable urllib3's SNI support, if possible
+from pip._internal.utils.compat import WINDOWS
+if not WINDOWS:
+    try:
+        from pip._vendor.urllib3.contrib import pyopenssl
+        pyopenssl.inject_into_urllib3()
+
+        # Check cryptography version
+        from cryptography import __version__ as cryptography_version
+        _check_cryptography(cryptography_version)
+    except ImportError:
+        pass
+
+# urllib3's DependencyWarnings should be silenced.
+from pip._vendor.urllib3.exceptions import DependencyWarning
+warnings.simplefilter('ignore', DependencyWarning)
+
+from .__version__ import __title__, __description__, __url__, __version__
+from .__version__ import __build__, __author__, __author_email__, __license__
+from .__version__ import __copyright__, __cake__
+
+from . import utils
+from . import packages
+from .models import Request, Response, PreparedRequest
+from .api import request, get, head, post, patch, put, delete, options
+from .sessions import session, Session
+from .status_codes import codes
+from .exceptions import (
+    RequestException, Timeout, URLRequired,
+    TooManyRedirects, HTTPError, ConnectionError,
+    FileModeWarning, ConnectTimeout, ReadTimeout
+)
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+from logging import NullHandler
+
+logging.getLogger(__name__).addHandler(NullHandler())
+
+# FileModeWarnings go off per the default.
+warnings.simplefilter('default', FileModeWarning, append=True)
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2fa3bfa6c13f99c550164f73c04db9208e09d75e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b7d0bc34190100c86a6da322df305f8db0ea038b
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/__version__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d2ba83a9a9a926200fb7ce6443eef6d6f58ce64
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/_internal_utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..52b9308e57f7b293c91c93766228dc1bf818169d
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/adapters.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/api.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/api.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..516d99df3db829fac86070477cdd5a0700999348
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/api.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..91dee96458da437365ef578c1f281e2ae7f0ada3
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/auth.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d17754615dff824c1075f6e014cf4f0ae8c31252
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/certs.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e6ca8088e435e054c91b99840a9329cf6e99d27
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48cdd6d915aa8aba2b9d0ef1374b1c932035aaa2
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/cookies.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7424f6e341e7e6b7d5e9d0099efc8d6cdfd417ea
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/exceptions.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/help.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/help.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..730516fefc41b33428ef15e302c4809022d21e5c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/help.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e974689b6ef84939b137fc7ce1d59f43c63f8e2
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/hooks.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/models.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/models.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6123c47bc628a80ef5bbda1480422bd61c49c388
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/models.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..112cb75304240f47bc2faefb6deba77f91eb1f48
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/packages.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d3a23854db866cc97989aeb93d253ec153708592
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/sessions.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dcc0d5c65a0d4e077a474587867cbf670e2216a6
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/status_codes.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..81209886ec88bb7cc913d8972670e0c367ed8354
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/structures.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d7bf27cc38b8d1360c63f47383a2db3c919ebfa
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/requests/__pycache__/utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/__version__.py b/lib/python3.7/site-packages/pip/_vendor/requests/__version__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5b5d03671d141fa50d8b89f09844482870ac2a9
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/__version__.py
@@ -0,0 +1,14 @@
+# .-. .-. .-. . . .-. .-. .-. .-.
+# |(  |-  |.| | | |-  `-.  |  `-.
+# ' ' `-' `-`.`-' `-' `-'  '  `-'
+
+__title__ = 'requests'
+__description__ = 'Python HTTP for Humans.'
+__url__ = 'http://python-requests.org'
+__version__ = '2.21.0'
+__build__ = 0x022100
+__author__ = 'Kenneth Reitz'
+__author_email__ = 'me@kennethreitz.org'
+__license__ = 'Apache 2.0'
+__copyright__ = 'Copyright 2018 Kenneth Reitz'
+__cake__ = u'\u2728 \U0001f370 \u2728'
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/_internal_utils.py b/lib/python3.7/site-packages/pip/_vendor/requests/_internal_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..759d9a56ba0102bb3b7b3abfcfae6731a2ecc243
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/_internal_utils.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests._internal_utils
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Provides utility functions that are consumed internally by Requests
+which depend on extremely few external helpers (such as compat).
+"""
+
+from .compat import is_py2, builtin_str, str
+
+
+def to_native_string(string, encoding='ascii'):
+    """Given a string object, regardless of type, returns a representation of
+    that string in the native string type, encoding and decoding where
+    necessary. This assumes ASCII unless told otherwise.
+    """
+    if isinstance(string, builtin_str):
+        out = string
+    else:
+        if is_py2:
+            out = string.encode(encoding)
+        else:
+            out = string.decode(encoding)
+
+    return out
+
+
+def unicode_is_ascii(u_string):
+    """Determine if unicode string only contains ASCII characters.
+
+    :param str u_string: unicode string to check. Must be unicode
+        and not Python 2 `str`.
+    :rtype: bool
+    """
+    assert isinstance(u_string, str)
+    try:
+        u_string.encode('ascii')
+        return True
+    except UnicodeEncodeError:
+        return False
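Note: a brief usage sketch for the two helpers above (illustrative only, not
part of the vendored file; the import path assumes pip's vendoring and
Python 3, where the native string type is ``str``)::

    from pip._vendor.requests._internal_utils import (
        to_native_string, unicode_is_ascii)

    # bytes are decoded to the native str type; str passes through as-is.
    assert to_native_string(b'hello') == 'hello'
    assert to_native_string('hello') == 'hello'

    # unicode_is_ascii reports whether a text string is pure ASCII.
    assert unicode_is_ascii('hello') is True
    assert unicode_is_ascii('h\u00e9llo') is False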
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/adapters.py b/lib/python3.7/site-packages/pip/_vendor/requests/adapters.py
new file mode 100644
index 0000000000000000000000000000000000000000..c30e7c92dc7f1688c86c345b47656b3561d71373
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/adapters.py
@@ -0,0 +1,533 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.adapters
+~~~~~~~~~~~~~~~~~
+
+This module contains the transport adapters that Requests uses to define
+and maintain connections.
+"""
+
+import os.path
+import socket
+
+from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url
+from pip._vendor.urllib3.response import HTTPResponse
+from pip._vendor.urllib3.util import parse_url
+from pip._vendor.urllib3.util import Timeout as TimeoutSauce
+from pip._vendor.urllib3.util.retry import Retry
+from pip._vendor.urllib3.exceptions import ClosedPoolError
+from pip._vendor.urllib3.exceptions import ConnectTimeoutError
+from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError
+from pip._vendor.urllib3.exceptions import MaxRetryError
+from pip._vendor.urllib3.exceptions import NewConnectionError
+from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError
+from pip._vendor.urllib3.exceptions import ProtocolError
+from pip._vendor.urllib3.exceptions import ReadTimeoutError
+from pip._vendor.urllib3.exceptions import SSLError as _SSLError
+from pip._vendor.urllib3.exceptions import ResponseError
+from pip._vendor.urllib3.exceptions import LocationValueError
+
+from .models import Response
+from .compat import urlparse, basestring
+from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths,
+                    get_encoding_from_headers, prepend_scheme_if_needed,
+                    get_auth_from_url, urldefragauth, select_proxy)
+from .structures import CaseInsensitiveDict
+from .cookies import extract_cookies_to_jar
+from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
+                         ProxyError, RetryError, InvalidSchema, InvalidProxyURL,
+                         InvalidURL)
+from .auth import _basic_auth_str
+
+try:
+    from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager
+except ImportError:
+    def SOCKSProxyManager(*args, **kwargs):
+        raise InvalidSchema("Missing dependencies for SOCKS support.")
+
+DEFAULT_POOLBLOCK = False
+DEFAULT_POOLSIZE = 10
+DEFAULT_RETRIES = 0
+DEFAULT_POOL_TIMEOUT = None
+
+
+class BaseAdapter(object):
+    """The Base Transport Adapter"""
+
+    def __init__(self):
+        super(BaseAdapter, self).__init__()
+
+    def send(self, request, stream=False, timeout=None, verify=True,
+             cert=None, proxies=None):
+        """Sends PreparedRequest object. Returns Response object.
+
+        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
+        :param stream: (optional) Whether to stream the request content.
+        :param timeout: (optional) How long to wait for the server to send
+            data before giving up, as a float, or a :ref:`(connect timeout,
+            read timeout) <timeouts>` tuple.
+        :type timeout: float or tuple
+        :param verify: (optional) Either a boolean, in which case it controls whether we verify
+            the server's TLS certificate, or a string, in which case it must be a path
+            to a CA bundle to use
+        :param cert: (optional) Any user-provided SSL certificate to be trusted.
+        :param proxies: (optional) The proxies dictionary to apply to the request.
+        """
+        raise NotImplementedError
+
+    def close(self):
+        """Cleans up adapter specific items."""
+        raise NotImplementedError
+
+
+class HTTPAdapter(BaseAdapter):
+    """The built-in HTTP Adapter for urllib3.
+
+    Provides a general-case interface for Requests sessions to contact HTTP and
+    HTTPS URLs by implementing the Transport Adapter interface. This class will
+    usually be created by the :class:`Session <Session>` class under the
+    covers.
+
+    :param pool_connections: The number of urllib3 connection pools to cache.
+    :param pool_maxsize: The maximum number of connections to save in the pool.
+    :param max_retries: The maximum number of retries each connection
+        should attempt. Note, this applies only to failed DNS lookups, socket
+        connections and connection timeouts, never to requests where data has
+        made it to the server. By default, Requests does not retry failed
+        connections. If you need granular control over the conditions under
+        which we retry a request, import urllib3's ``Retry`` class and pass
+        that instead.
+    :param pool_block: Whether the connection pool should block for connections.
+
+    Usage::
+
+      >>> import requests
+      >>> s = requests.Session()
+      >>> a = requests.adapters.HTTPAdapter(max_retries=3)
+      >>> s.mount('http://', a)
+    """
+    __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
+                 '_pool_block']
+
+    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
+                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
+                 pool_block=DEFAULT_POOLBLOCK):
+        if max_retries == DEFAULT_RETRIES:
+            self.max_retries = Retry(0, read=False)
+        else:
+            self.max_retries = Retry.from_int(max_retries)
+        self.config = {}
+        self.proxy_manager = {}
+
+        super(HTTPAdapter, self).__init__()
+
+        self._pool_connections = pool_connections
+        self._pool_maxsize = pool_maxsize
+        self._pool_block = pool_block
+
+        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
+
+    def __getstate__(self):
+        return {attr: getattr(self, attr, None) for attr in self.__attrs__}
+
+    def __setstate__(self, state):
+        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
+        # self.poolmanager uses a lambda function, which isn't pickleable.
+        self.proxy_manager = {}
+        self.config = {}
+
+        for attr, value in state.items():
+            setattr(self, attr, value)
+
+        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
+                              block=self._pool_block)
+
+    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
+        """Initializes a urllib3 PoolManager.
+
+        This method should not be called from user code, and is only
+        exposed for use when subclassing the
+        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+        :param connections: The number of urllib3 connection pools to cache.
+        :param maxsize: The maximum number of connections to save in the pool.
+        :param block: Block when no free connections are available.
+        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
+        """
+        # save these values for pickling
+        self._pool_connections = connections
+        self._pool_maxsize = maxsize
+        self._pool_block = block
+
+        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
+                                       block=block, strict=True, **pool_kwargs)
+
+    def proxy_manager_for(self, proxy, **proxy_kwargs):
+        """Return urllib3 ProxyManager for the given proxy.
+
+        This method should not be called from user code, and is only
+        exposed for use when subclassing the
+        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+        :param proxy: The proxy to return a urllib3 ProxyManager for.
+        :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
+        :returns: ProxyManager
+        :rtype: urllib3.ProxyManager
+        """
+        if proxy in self.proxy_manager:
+            manager = self.proxy_manager[proxy]
+        elif proxy.lower().startswith('socks'):
+            username, password = get_auth_from_url(proxy)
+            manager = self.proxy_manager[proxy] = SOCKSProxyManager(
+                proxy,
+                username=username,
+                password=password,
+                num_pools=self._pool_connections,
+                maxsize=self._pool_maxsize,
+                block=self._pool_block,
+                **proxy_kwargs
+            )
+        else:
+            proxy_headers = self.proxy_headers(proxy)
+            manager = self.proxy_manager[proxy] = proxy_from_url(
+                proxy,
+                proxy_headers=proxy_headers,
+                num_pools=self._pool_connections,
+                maxsize=self._pool_maxsize,
+                block=self._pool_block,
+                **proxy_kwargs)
+
+        return manager
+
+    def cert_verify(self, conn, url, verify, cert):
+        """Verify a SSL certificate. This method should not be called from user
+        code, and is only exposed for use when subclassing the
+        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+        :param conn: The urllib3 connection object associated with the cert.
+        :param url: The requested URL.
+        :param verify: Either a boolean, in which case it controls whether we verify
+            the server's TLS certificate, or a string, in which case it must be a path
+            to a CA bundle to use
+        :param cert: The SSL certificate to verify.
+        """
+        if url.lower().startswith('https') and verify:
+
+            cert_loc = None
+
+            # Allow self-specified cert location.
+            if verify is not True:
+                cert_loc = verify
+
+            if not cert_loc:
+                cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
+
+            if not cert_loc or not os.path.exists(cert_loc):
+                raise IOError("Could not find a suitable TLS CA certificate bundle, "
+                              "invalid path: {}".format(cert_loc))
+
+            conn.cert_reqs = 'CERT_REQUIRED'
+
+            if not os.path.isdir(cert_loc):
+                conn.ca_certs = cert_loc
+            else:
+                conn.ca_cert_dir = cert_loc
+        else:
+            conn.cert_reqs = 'CERT_NONE'
+            conn.ca_certs = None
+            conn.ca_cert_dir = None
+
+        if cert:
+            if not isinstance(cert, basestring):
+                conn.cert_file = cert[0]
+                conn.key_file = cert[1]
+            else:
+                conn.cert_file = cert
+                conn.key_file = None
+            if conn.cert_file and not os.path.exists(conn.cert_file):
+                raise IOError("Could not find the TLS certificate file, "
+                              "invalid path: {}".format(conn.cert_file))
+            if conn.key_file and not os.path.exists(conn.key_file):
+                raise IOError("Could not find the TLS key file, "
+                              "invalid path: {}".format(conn.key_file))
+
+    def build_response(self, req, resp):
+        """Builds a :class:`Response <requests.Response>` object from a urllib3
+        response. This should not be called from user code, and is only exposed
+        for use when subclassing the
+        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
+
+        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
+        :param resp: The urllib3 response object.
+        :rtype: requests.Response
+        """
+        response = Response()
+
+        # Fallback to None if there's no status_code, for whatever reason.
+        response.status_code = getattr(resp, 'status', None)
+
+        # Make headers case-insensitive.
+        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
+
+        # Set encoding.
+        response.encoding = get_encoding_from_headers(response.headers)
+        response.raw = resp
+        response.reason = response.raw.reason
+
+        if isinstance(req.url, bytes):
+            response.url = req.url.decode('utf-8')
+        else:
+            response.url = req.url
+
+        # Add new cookies from the server.
+        extract_cookies_to_jar(response.cookies, req, resp)
+
+        # Give the Response some context.
+        response.request = req
+        response.connection = self
+
+        return response
+
+    def get_connection(self, url, proxies=None):
+        """Returns a urllib3 connection for the given URL. This should not be
+        called from user code, and is only exposed for use when subclassing the
+        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+        :param url: The URL to connect to.
+        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
+        :rtype: urllib3.ConnectionPool
+        """
+        proxy = select_proxy(url, proxies)
+
+        if proxy:
+            proxy = prepend_scheme_if_needed(proxy, 'http')
+            proxy_url = parse_url(proxy)
+            if not proxy_url.host:
+                raise InvalidProxyURL("Please check proxy URL. It is malformed"
+                                      " and could be missing the host.")
+            proxy_manager = self.proxy_manager_for(proxy)
+            conn = proxy_manager.connection_from_url(url)
+        else:
+            # Only scheme should be lower case
+            parsed = urlparse(url)
+            url = parsed.geturl()
+            conn = self.poolmanager.connection_from_url(url)
+
+        return conn
+
+    def close(self):
+        """Disposes of any internal state.
+
+        Currently, this closes the PoolManager and any active ProxyManager,
+        which closes any pooled connections.
+        """
+        self.poolmanager.clear()
+        for proxy in self.proxy_manager.values():
+            proxy.clear()
+
+    def request_url(self, request, proxies):
+        """Obtain the url to use when making the final request.
+
+        If the message is being sent through an HTTP proxy, the full URL has to
+        be used. Otherwise, we should only use the path portion of the URL.
+
+        This should not be called from user code, and is only exposed for use
+        when subclassing the
+        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
+        :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
+        :rtype: str
+        """
+        proxy = select_proxy(request.url, proxies)
+        scheme = urlparse(request.url).scheme
+
+        is_proxied_http_request = (proxy and scheme != 'https')
+        using_socks_proxy = False
+        if proxy:
+            proxy_scheme = urlparse(proxy).scheme.lower()
+            using_socks_proxy = proxy_scheme.startswith('socks')
+
+        url = request.path_url
+        if is_proxied_http_request and not using_socks_proxy:
+            url = urldefragauth(request.url)
+
+        return url
+
+    def add_headers(self, request, **kwargs):
+        """Add any headers needed by the connection. As of v2.0 this does
+        nothing by default, but is left for overriding by users that subclass
+        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+        This should not be called from user code, and is only exposed for use
+        when subclassing the
+        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
+        :param kwargs: The keyword arguments from the call to send().
+        """
+        pass
+
+    def proxy_headers(self, proxy):
+        """Returns a dictionary of the headers to add to any request sent
+        through a proxy. This works with urllib3 magic to ensure that they are
+        correctly sent to the proxy, rather than in a tunnelled request if
+        CONNECT is being used.
+
+        This should not be called from user code, and is only exposed for use
+        when subclassing the
+        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+        :param proxy: The url of the proxy being used for this request.
+        :rtype: dict
+        """
+        headers = {}
+        username, password = get_auth_from_url(proxy)
+
+        if username:
+            headers['Proxy-Authorization'] = _basic_auth_str(username,
+                                                             password)
+
+        return headers
+
+    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
+        """Sends PreparedRequest object. Returns Response object.
+
+        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
+        :param stream: (optional) Whether to stream the request content.
+        :param timeout: (optional) How long to wait for the server to send
+            data before giving up, as a float, or a :ref:`(connect timeout,
+            read timeout) <timeouts>` tuple.
+        :type timeout: float or tuple or urllib3 Timeout object
+        :param verify: (optional) Either a boolean, in which case it controls whether
+            we verify the server's TLS certificate, or a string, in which case it
+            must be a path to a CA bundle to use
+        :param cert: (optional) Any user-provided SSL certificate to be trusted.
+        :param proxies: (optional) The proxies dictionary to apply to the request.
+        :rtype: requests.Response
+        """
+
+        try:
+            conn = self.get_connection(request.url, proxies)
+        except LocationValueError as e:
+            raise InvalidURL(e, request=request)
+
+        self.cert_verify(conn, request.url, verify, cert)
+        url = self.request_url(request, proxies)
+        self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies)
+
+        chunked = not (request.body is None or 'Content-Length' in request.headers)
+
+        if isinstance(timeout, tuple):
+            try:
+                connect, read = timeout
+                timeout = TimeoutSauce(connect=connect, read=read)
+            except ValueError as e:
+                # this may raise a string formatting error.
+                err = ("Invalid timeout {}. Pass a (connect, read) "
+                       "timeout tuple, or a single float to set "
+                       "both timeouts to the same value".format(timeout))
+                raise ValueError(err)
+        elif isinstance(timeout, TimeoutSauce):
+            pass
+        else:
+            timeout = TimeoutSauce(connect=timeout, read=timeout)
+
+        try:
+            if not chunked:
+                resp = conn.urlopen(
+                    method=request.method,
+                    url=url,
+                    body=request.body,
+                    headers=request.headers,
+                    redirect=False,
+                    assert_same_host=False,
+                    preload_content=False,
+                    decode_content=False,
+                    retries=self.max_retries,
+                    timeout=timeout
+                )
+
+            # Send the request.
+            else:
+                if hasattr(conn, 'proxy_pool'):
+                    conn = conn.proxy_pool
+
+                low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
+
+                try:
+                    low_conn.putrequest(request.method,
+                                        url,
+                                        skip_accept_encoding=True)
+
+                    for header, value in request.headers.items():
+                        low_conn.putheader(header, value)
+
+                    low_conn.endheaders()
+
+                    for i in request.body:
+                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
+                        low_conn.send(b'\r\n')
+                        low_conn.send(i)
+                        low_conn.send(b'\r\n')
+                    low_conn.send(b'0\r\n\r\n')
+
+                    # Receive the response from the server
+                    try:
+                        # For Python 2.7, use buffering of HTTP responses
+                        r = low_conn.getresponse(buffering=True)
+                    except TypeError:
+                        # For compatibility with Python 3.3+
+                        r = low_conn.getresponse()
+
+                    resp = HTTPResponse.from_httplib(
+                        r,
+                        pool=conn,
+                        connection=low_conn,
+                        preload_content=False,
+                        decode_content=False
+                    )
+                except:
+                    # If we hit any problems here, clean up the connection.
+                    # Then, reraise so that we can handle the actual exception.
+                    low_conn.close()
+                    raise
+
+        except (ProtocolError, socket.error) as err:
+            raise ConnectionError(err, request=request)
+
+        except MaxRetryError as e:
+            if isinstance(e.reason, ConnectTimeoutError):
+                # TODO: Remove this in 3.0.0: see #2811
+                if not isinstance(e.reason, NewConnectionError):
+                    raise ConnectTimeout(e, request=request)
+
+            if isinstance(e.reason, ResponseError):
+                raise RetryError(e, request=request)
+
+            if isinstance(e.reason, _ProxyError):
+                raise ProxyError(e, request=request)
+
+            if isinstance(e.reason, _SSLError):
+                # This branch is for urllib3 v1.22 and later.
+                raise SSLError(e, request=request)
+
+            raise ConnectionError(e, request=request)
+
+        except ClosedPoolError as e:
+            raise ConnectionError(e, request=request)
+
+        except _ProxyError as e:
+            raise ProxyError(e)
+
+        except (_SSLError, _HTTPError) as e:
+            if isinstance(e, _SSLError):
+                # This branch is for urllib3 versions earlier than v1.22
+                raise SSLError(e, request=request)
+            elif isinstance(e, ReadTimeoutError):
+                raise ReadTimeout(e, request=request)
+            else:
+                raise
+
+        return self.build_response(request, resp)
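Note: a minimal sketch of how this adapter is typically mounted on a session
(illustrative only, not part of the vendored file; the session and adapter
names are assumptions, and the network call itself is left commented out)::

    from pip._vendor import requests
    from pip._vendor.requests.adapters import HTTPAdapter

    session = requests.Session()
    # Cache up to 20 connection pools and retry failed connection
    # attempts (DNS, connect, connect-timeout) up to 3 times.
    adapter = HTTPAdapter(pool_connections=20, pool_maxsize=20,
                          max_retries=3)
    session.mount('https://', adapter)
    session.mount('http://', adapter)
    # response = session.get('https://example.org/', timeout=(3.05, 27))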
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/api.py b/lib/python3.7/site-packages/pip/_vendor/requests/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..abada96d4627a52bf0f6040ee2e08b0eff32c77c
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/api.py
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.api
+~~~~~~~~~~~~
+
+This module implements the Requests API.
+
+:copyright: (c) 2012 by Kenneth Reitz.
+:license: Apache2, see LICENSE for more details.
+"""
+
+from . import sessions
+
+
+def request(method, url, **kwargs):
+    """Constructs and sends a :class:`Request <Request>`.
+
+    :param method: method for the new :class:`Request` object.
+    :param url: URL for the new :class:`Request` object.
+    :param params: (optional) Dictionary, list of tuples or bytes to send
+        in the query string for the :class:`Request`.
+    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+        object to send in the body of the :class:`Request`.
+    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
+    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
+    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
+    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
+        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
+        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
+        defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
+        to add for the file.
+    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
+    :param timeout: (optional) How many seconds to wait for the server to send data
+        before giving up, as a float, or a :ref:`(connect timeout, read
+        timeout) <timeouts>` tuple.
+    :type timeout: float or tuple
+    :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
+    :type allow_redirects: bool
+    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
+    :param verify: (optional) Either a boolean, in which case it controls whether we verify
+            the server's TLS certificate, or a string, in which case it must be a path
+            to a CA bundle to use. Defaults to ``True``.
+    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
+    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+
+    Usage::
+
+      >>> import requests
+      >>> requests.request('GET', 'https://httpbin.org/get')
+      <Response [200]>
+    """
+
+    # By using the 'with' statement we are sure the session is closed, thus we
+    # avoid leaving sockets open which can trigger a ResourceWarning in some
+    # cases, and look like a memory leak in others.
+    with sessions.Session() as session:
+        return session.request(method=method, url=url, **kwargs)
+
+
+def get(url, params=None, **kwargs):
+    r"""Sends a GET request.
+
+    :param url: URL for the new :class:`Request` object.
+    :param params: (optional) Dictionary, list of tuples or bytes to send
+        in the query string for the :class:`Request`.
+    :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+    """
+
+    kwargs.setdefault('allow_redirects', True)
+    return request('get', url, params=params, **kwargs)
+
+
+def options(url, **kwargs):
+    r"""Sends an OPTIONS request.
+
+    :param url: URL for the new :class:`Request` object.
+    :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+    """
+
+    kwargs.setdefault('allow_redirects', True)
+    return request('options', url, **kwargs)
+
+
+def head(url, **kwargs):
+    r"""Sends a HEAD request.
+
+    :param url: URL for the new :class:`Request` object.
+    :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+    """
+
+    kwargs.setdefault('allow_redirects', False)
+    return request('head', url, **kwargs)
+
+
+def post(url, data=None, json=None, **kwargs):
+    r"""Sends a POST request.
+
+    :param url: URL for the new :class:`Request` object.
+    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+        object to send in the body of the :class:`Request`.
+    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
+    :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+    """
+
+    return request('post', url, data=data, json=json, **kwargs)
+
+
+def put(url, data=None, **kwargs):
+    r"""Sends a PUT request.
+
+    :param url: URL for the new :class:`Request` object.
+    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+        object to send in the body of the :class:`Request`.
+    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
+    :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+    """
+
+    return request('put', url, data=data, **kwargs)
+
+
+def patch(url, data=None, **kwargs):
+    r"""Sends a PATCH request.
+
+    :param url: URL for the new :class:`Request` object.
+    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+        object to send in the body of the :class:`Request`.
+    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
+    :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+    """
+
+    return request('patch', url, data=data, **kwargs)
+
+
+def delete(url, **kwargs):
+    r"""Sends a DELETE request.
+
+    :param url: URL for the new :class:`Request` object.
+    :param \*\*kwargs: Optional arguments that ``request`` takes.
+    :return: :class:`Response <Response>` object
+    :rtype: requests.Response
+    """
+
+    return request('delete', url, **kwargs)
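Note: the module-level helpers above each open a throwaway Session per call,
so no connection pooling is reused across calls; a sketch (illustrative only,
with the httpbin.org URLs taken from the docstrings above)::

    from pip._vendor import requests

    resp = requests.get('https://httpbin.org/get', params={'q': 'pip'})
    # params end up in the query string, not the request body.
    print(resp.status_code, resp.url)

    resp = requests.post('https://httpbin.org/post', json={'key': 'value'})
    print(resp.headers.get('Content-Type'))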
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/auth.py b/lib/python3.7/site-packages/pip/_vendor/requests/auth.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdde51c7fd1b0f1e3c7adeb8f2bf2587cdfab122
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/auth.py
@@ -0,0 +1,305 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.auth
+~~~~~~~~~~~~~
+
+This module contains the authentication handlers for Requests.
+"""
+
+import os
+import re
+import time
+import hashlib
+import threading
+import warnings
+
+from base64 import b64encode
+
+from .compat import urlparse, str, basestring
+from .cookies import extract_cookies_to_jar
+from ._internal_utils import to_native_string
+from .utils import parse_dict_header
+
+CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
+CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
+
+
+def _basic_auth_str(username, password):
+    """Returns a Basic Auth string."""
+
+    # "I want us to put a big-ol' comment on top of it that
+    # says that this behaviour is dumb but we need to preserve
+    # it because people are relying on it."
+    #    - Lukasa
+    #
+    # These are here solely to maintain backwards compatibility
+    # for things like ints. This will be removed in 3.0.0.
+    if not isinstance(username, basestring):
+        warnings.warn(
+            "Non-string usernames will no longer be supported in Requests "
+            "3.0.0. Please convert the object you've passed in ({!r}) to "
+            "a string or bytes object in the near future to avoid "
+            "problems.".format(username),
+            category=DeprecationWarning,
+        )
+        username = str(username)
+
+    if not isinstance(password, basestring):
+        warnings.warn(
+            "Non-string passwords will no longer be supported in Requests "
+            "3.0.0. Please convert the object you've passed in ({!r}) to "
+            "a string or bytes object in the near future to avoid "
+            "problems.".format(password),
+            category=DeprecationWarning,
+        )
+        password = str(password)
+    # -- End Removal --
+
+    if isinstance(username, str):
+        username = username.encode('latin1')
+
+    if isinstance(password, str):
+        password = password.encode('latin1')
+
+    authstr = 'Basic ' + to_native_string(
+        b64encode(b':'.join((username, password))).strip()
+    )
+
+    return authstr
+
+
+class AuthBase(object):
+    """Base class that all auth implementations derive from"""
+
+    def __call__(self, r):
+        raise NotImplementedError('Auth hooks must be callable.')
+
+
+class HTTPBasicAuth(AuthBase):
+    """Attaches HTTP Basic Authentication to the given Request object."""
+
+    def __init__(self, username, password):
+        self.username = username
+        self.password = password
+
+    def __eq__(self, other):
+        return all([
+            self.username == getattr(other, 'username', None),
+            self.password == getattr(other, 'password', None)
+        ])
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __call__(self, r):
+        r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
+        return r
+
+
+class HTTPProxyAuth(HTTPBasicAuth):
+    """Attaches HTTP Proxy Authentication to a given Request object."""
+
+    def __call__(self, r):
+        r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
+        return r
+
+
+class HTTPDigestAuth(AuthBase):
+    """Attaches HTTP Digest Authentication to the given Request object."""
+
+    def __init__(self, username, password):
+        self.username = username
+        self.password = password
+        # Keep state in per-thread local storage
+        self._thread_local = threading.local()
+
+    def init_per_thread_state(self):
+        # Ensure state is initialized just once per-thread
+        if not hasattr(self._thread_local, 'init'):
+            self._thread_local.init = True
+            self._thread_local.last_nonce = ''
+            self._thread_local.nonce_count = 0
+            self._thread_local.chal = {}
+            self._thread_local.pos = None
+            self._thread_local.num_401_calls = None
+
+    def build_digest_header(self, method, url):
+        """
+        :rtype: str
+        """
+
+        realm = self._thread_local.chal['realm']
+        nonce = self._thread_local.chal['nonce']
+        qop = self._thread_local.chal.get('qop')
+        algorithm = self._thread_local.chal.get('algorithm')
+        opaque = self._thread_local.chal.get('opaque')
+        hash_utf8 = None
+
+        if algorithm is None:
+            _algorithm = 'MD5'
+        else:
+            _algorithm = algorithm.upper()
+        # lambdas assume digest modules are imported at the top level
+        if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
+            def md5_utf8(x):
+                if isinstance(x, str):
+                    x = x.encode('utf-8')
+                return hashlib.md5(x).hexdigest()
+            hash_utf8 = md5_utf8
+        elif _algorithm == 'SHA':
+            def sha_utf8(x):
+                if isinstance(x, str):
+                    x = x.encode('utf-8')
+                return hashlib.sha1(x).hexdigest()
+            hash_utf8 = sha_utf8
+        elif _algorithm == 'SHA-256':
+            def sha256_utf8(x):
+                if isinstance(x, str):
+                    x = x.encode('utf-8')
+                return hashlib.sha256(x).hexdigest()
+            hash_utf8 = sha256_utf8
+        elif _algorithm == 'SHA-512':
+            def sha512_utf8(x):
+                if isinstance(x, str):
+                    x = x.encode('utf-8')
+                return hashlib.sha512(x).hexdigest()
+            hash_utf8 = sha512_utf8
+
+        KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
+
+        if hash_utf8 is None:
+            return None
+
+        # XXX not implemented yet
+        entdig = None
+        p_parsed = urlparse(url)
+        #: path is request-uri defined in RFC 2616 which should not be empty
+        path = p_parsed.path or "/"
+        if p_parsed.query:
+            path += '?' + p_parsed.query
+
+        A1 = '%s:%s:%s' % (self.username, realm, self.password)
+        A2 = '%s:%s' % (method, path)
+
+        HA1 = hash_utf8(A1)
+        HA2 = hash_utf8(A2)
+
+        if nonce == self._thread_local.last_nonce:
+            self._thread_local.nonce_count += 1
+        else:
+            self._thread_local.nonce_count = 1
+        ncvalue = '%08x' % self._thread_local.nonce_count
+        s = str(self._thread_local.nonce_count).encode('utf-8')
+        s += nonce.encode('utf-8')
+        s += time.ctime().encode('utf-8')
+        s += os.urandom(8)
+
+        cnonce = (hashlib.sha1(s).hexdigest()[:16])
+        if _algorithm == 'MD5-SESS':
+            HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
+
+        if not qop:
+            respdig = KD(HA1, "%s:%s" % (nonce, HA2))
+        elif qop == 'auth' or 'auth' in qop.split(','):
+            noncebit = "%s:%s:%s:%s:%s" % (
+                nonce, ncvalue, cnonce, 'auth', HA2
+            )
+            respdig = KD(HA1, noncebit)
+        else:
+            # XXX handle auth-int.
+            return None
+
+        self._thread_local.last_nonce = nonce
+
+        # XXX should the partial digests be encoded too?
+        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
+               'response="%s"' % (self.username, realm, nonce, path, respdig)
+        if opaque:
+            base += ', opaque="%s"' % opaque
+        if algorithm:
+            base += ', algorithm="%s"' % algorithm
+        if entdig:
+            base += ', digest="%s"' % entdig
+        if qop:
+            base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
+
+        return 'Digest %s' % (base)
+
+    def handle_redirect(self, r, **kwargs):
+        """Reset num_401_calls counter on redirects."""
+        if r.is_redirect:
+            self._thread_local.num_401_calls = 1
+
+    def handle_401(self, r, **kwargs):
+        """
+        Takes the given response and tries digest-auth, if needed.
+
+        :rtype: requests.Response
+        """
+
+        # If response is not 4xx, do not auth
+        # See https://github.com/requests/requests/issues/3772
+        if not 400 <= r.status_code < 500:
+            self._thread_local.num_401_calls = 1
+            return r
+
+        if self._thread_local.pos is not None:
+            # Rewind the file position indicator of the body to where
+            # it was to resend the request.
+            r.request.body.seek(self._thread_local.pos)
+        s_auth = r.headers.get('www-authenticate', '')
+
+        if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
+
+            self._thread_local.num_401_calls += 1
+            pat = re.compile(r'digest ', flags=re.IGNORECASE)
+            self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
+
+            # Consume content and release the original connection
+            # to allow our new request to reuse the same one.
+            r.content
+            r.close()
+            prep = r.request.copy()
+            extract_cookies_to_jar(prep._cookies, r.request, r.raw)
+            prep.prepare_cookies(prep._cookies)
+
+            prep.headers['Authorization'] = self.build_digest_header(
+                prep.method, prep.url)
+            _r = r.connection.send(prep, **kwargs)
+            _r.history.append(r)
+            _r.request = prep
+
+            return _r
+
+        self._thread_local.num_401_calls = 1
+        return r
+
+    def __call__(self, r):
+        # Initialize per-thread state, if needed
+        self.init_per_thread_state()
+        # If we have a saved nonce, skip the 401
+        if self._thread_local.last_nonce:
+            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
+        try:
+            self._thread_local.pos = r.body.tell()
+        except AttributeError:
+            # In the case of HTTPDigestAuth being reused and the body of
+            # the previous request was a file-like object, pos has the
+            # file position of the previous body. Ensure it's set to
+            # None.
+            self._thread_local.pos = None
+        r.register_hook('response', self.handle_401)
+        r.register_hook('response', self.handle_redirect)
+        self._thread_local.num_401_calls = 1
+
+        return r
+
+    def __eq__(self, other):
+        return all([
+            self.username == getattr(other, 'username', None),
+            self.password == getattr(other, 'password', None)
+        ])
+
+    def __ne__(self, other):
+        return not self == other
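Note: a short sketch of the Basic auth helper and the AuthBase protocol
defined above (illustrative only; the expected header value is the RFC 7617
base64 encoding of "user:pass")::

    from pip._vendor.requests.auth import HTTPBasicAuth, _basic_auth_str

    assert _basic_auth_str('user', 'pass') == 'Basic dXNlcjpwYXNz'

    # Any auth object is just a callable applied to the PreparedRequest:
    # session.get('https://example.org/', auth=HTTPBasicAuth('user', 'pass'))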
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/certs.py b/lib/python3.7/site-packages/pip/_vendor/requests/certs.py
new file mode 100644
index 0000000000000000000000000000000000000000..06a594e58f6746041edf371bc3dc8ca42b612322
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/certs.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+requests.certs
+~~~~~~~~~~~~~~
+
+This module returns the preferred default CA certificate bundle. There is
+only one — the one from the certifi package.
+
+If you are packaging Requests, e.g., for a Linux distribution or a managed
+environment, you can change the definition of where() to return a separately
+packaged CA bundle.
+"""
+from pip._vendor.certifi import where
+
+if __name__ == '__main__':
+    print(where())
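Note: a one-line sketch (illustrative only); where() simply forwards to the
vendored certifi package and returns the path of the bundled CA file::

    from pip._vendor.requests import certs

    print(certs.where())  # e.g. .../pip/_vendor/certifi/cacert.pem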
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/compat.py b/lib/python3.7/site-packages/pip/_vendor/requests/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a86893dc30af3fe7c6fb6a8e47897fd883b7b78
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/compat.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.compat
+~~~~~~~~~~~~~~~
+
+This module handles import compatibility issues between Python 2 and
+Python 3.
+"""
+
+from pip._vendor import chardet
+
+import sys
+
+# -------
+# Pythons
+# -------
+
+# Syntax sugar.
+_ver = sys.version_info
+
+#: Python 2.x?
+is_py2 = (_ver[0] == 2)
+
+#: Python 3.x?
+is_py3 = (_ver[0] == 3)
+
+# Note: We've patched out simplejson support in pip because it prevents
+#       upgrading simplejson on Windows.
+# try:
+#     import simplejson as json
+# except (ImportError, SyntaxError):
+#     # simplejson does not support Python 3.2, it throws a SyntaxError
+#     # because of u'...' Unicode literals.
+import json
+
+# ---------
+# Specifics
+# ---------
+
+if is_py2:
+    from urllib import (
+        quote, unquote, quote_plus, unquote_plus, urlencode, getproxies,
+        proxy_bypass, proxy_bypass_environment, getproxies_environment)
+    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
+    from urllib2 import parse_http_list
+    import cookielib
+    from Cookie import Morsel
+    from StringIO import StringIO
+    from collections import Callable, Mapping, MutableMapping, OrderedDict
+
+
+    builtin_str = str
+    bytes = str
+    str = unicode
+    basestring = basestring
+    numeric_types = (int, long, float)
+    integer_types = (int, long)
+
+elif is_py3:
+    from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
+    from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment
+    from http import cookiejar as cookielib
+    from http.cookies import Morsel
+    from io import StringIO
+    from collections import OrderedDict
+    from collections.abc import Callable, Mapping, MutableMapping
+
+    builtin_str = str
+    str = str
+    bytes = bytes
+    basestring = (str, bytes)
+    numeric_types = (int, float)
+    integer_types = (int,)
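Note: downstream modules import version-neutral names from compat rather than
branching on sys.version_info themselves; a sketch (illustrative only, run
under the Python 3 interpreter this virtualenv ships)::

    from pip._vendor.requests.compat import urlparse, is_py2

    parts = urlparse('https://example.org/path?q=1')
    assert parts.scheme == 'https' and parts.query == 'q=1'
    assert not is_py2  # this environment is Python 3.7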
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/cookies.py b/lib/python3.7/site-packages/pip/_vendor/requests/cookies.py
new file mode 100644
index 0000000000000000000000000000000000000000..56fccd9c2570d2a31365ed11278fd1b6ecc2aa54
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/cookies.py
@@ -0,0 +1,549 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.cookies
+~~~~~~~~~~~~~~~~
+
+Compatibility code to be able to use `cookielib.CookieJar` with requests.
+
+requests.utils imports from here, so be careful with imports.
+"""
+
+import copy
+import time
+import calendar
+
+from ._internal_utils import to_native_string
+from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping
+
+try:
+    import threading
+except ImportError:
+    import dummy_threading as threading
+
+
+class MockRequest(object):
+    """Wraps a `requests.Request` to mimic a `urllib2.Request`.
+
+    The code in `cookielib.CookieJar` expects this interface in order to correctly
+    manage cookie policies, i.e., determine whether a cookie can be set, given the
+    domains of the request and the cookie.
+
+    The original request object is read-only. The client is responsible for collecting
+    the new headers via `get_new_headers()` and interpreting them appropriately. You
+    probably want `get_cookie_header`, defined below.
+    """
+
+    def __init__(self, request):
+        self._r = request
+        self._new_headers = {}
+        self.type = urlparse(self._r.url).scheme
+
+    def get_type(self):
+        return self.type
+
+    def get_host(self):
+        return urlparse(self._r.url).netloc
+
+    def get_origin_req_host(self):
+        return self.get_host()
+
+    def get_full_url(self):
+        # Only return the response's URL if the user didn't set the Host
+        # header
+        if not self._r.headers.get('Host'):
+            return self._r.url
+        # If they did set it, retrieve it and reconstruct the expected domain
+        host = to_native_string(self._r.headers['Host'], encoding='utf-8')
+        parsed = urlparse(self._r.url)
+        # Reconstruct the URL as we expect it
+        return urlunparse([
+            parsed.scheme, host, parsed.path, parsed.params, parsed.query,
+            parsed.fragment
+        ])
+
+    def is_unverifiable(self):
+        return True
+
+    def has_header(self, name):
+        return name in self._r.headers or name in self._new_headers
+
+    def get_header(self, name, default=None):
+        return self._r.headers.get(name, self._new_headers.get(name, default))
+
+    def add_header(self, key, val):
+        """cookielib has no legitimate use for this method; add it back if you find one."""
+        raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
+
+    def add_unredirected_header(self, name, value):
+        self._new_headers[name] = value
+
+    def get_new_headers(self):
+        return self._new_headers
+
+    @property
+    def unverifiable(self):
+        return self.is_unverifiable()
+
+    @property
+    def origin_req_host(self):
+        return self.get_origin_req_host()
+
+    @property
+    def host(self):
+        return self.get_host()
+
+
+class MockResponse(object):
+    """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
+
+    ...what? Basically, expose the parsed HTTP headers from the server response
+    the way `cookielib` expects to see them.
+    """
+
+    def __init__(self, headers):
+        """Make a MockResponse for `cookielib` to read.
+
+        :param headers: a httplib.HTTPMessage or analogous carrying the headers
+        """
+        self._headers = headers
+
+    def info(self):
+        return self._headers
+
+    def getheaders(self, name):
+        return self._headers.getheaders(name)
+
+
+def extract_cookies_to_jar(jar, request, response):
+    """Extract the cookies from the response into a CookieJar.
+
+    :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
+    :param request: our own requests.Request object
+    :param response: urllib3.HTTPResponse object
+    """
+    if not (hasattr(response, '_original_response') and
+            response._original_response):
+        return
+    # The _original_response field is the wrapped httplib.HTTPResponse object.
+    req = MockRequest(request)
+    # pull out the HTTPMessage with the headers and put it in the mock:
+    res = MockResponse(response._original_response.msg)
+    jar.extract_cookies(res, req)
+
+
+def get_cookie_header(jar, request):
+    """
+    Produce an appropriate Cookie header string to be sent with `request`, or None.
+
+    :rtype: str
+    """
+    r = MockRequest(request)
+    jar.add_cookie_header(r)
+    return r.get_new_headers().get('Cookie')
+
+
+def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
+    """Unsets a cookie by name, by default over all domains and paths.
+
+    Wraps CookieJar.clear(), is O(n).
+    """
+    clearables = []
+    for cookie in cookiejar:
+        if cookie.name != name:
+            continue
+        if domain is not None and domain != cookie.domain:
+            continue
+        if path is not None and path != cookie.path:
+            continue
+        clearables.append((cookie.domain, cookie.path, cookie.name))
+
+    for domain, path, name in clearables:
+        cookiejar.clear(domain, path, name)
+
+
+class CookieConflictError(RuntimeError):
+    """There are two cookies that meet the criteria specified in the cookie jar.
+    Use .get and .set and include domain and path args in order to be more specific.
+    """
+
+
+class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
+    """Compatibility class; is a cookielib.CookieJar, but exposes a dict
+    interface.
+
+    This is the CookieJar we create by default for requests and sessions that
+    don't specify one, since some clients may expect response.cookies and
+    session.cookies to support dict operations.
+
+    Requests does not use the dict interface internally; it's just for
+    compatibility with external client code. All requests code should work
+    out of the box with externally provided instances of ``CookieJar``, e.g.
+    ``LWPCookieJar`` and ``FileCookieJar``.
+
+    Unlike a regular CookieJar, this class is pickleable.
+
+    .. warning:: dictionary operations that are normally O(1) may be O(n).
+    """
+
+    def get(self, name, default=None, domain=None, path=None):
+        """Dict-like get() that also supports optional domain and path args in
+        order to resolve naming collisions from using one cookie jar over
+        multiple domains.
+
+        .. warning:: operation is O(n), not O(1).
+        """
+        try:
+            return self._find_no_duplicates(name, domain, path)
+        except KeyError:
+            return default
+
+    def set(self, name, value, **kwargs):
+        """Dict-like set() that also supports optional domain and path args in
+        order to resolve naming collisions from using one cookie jar over
+        multiple domains.
+        """
+        # support client code that unsets cookies by assignment of a None value:
+        if value is None:
+            remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
+            return
+
+        if isinstance(value, Morsel):
+            c = morsel_to_cookie(value)
+        else:
+            c = create_cookie(name, value, **kwargs)
+        self.set_cookie(c)
+        return c
+
+    def iterkeys(self):
+        """Dict-like iterkeys() that returns an iterator of names of cookies
+        from the jar.
+
+        .. seealso:: itervalues() and iteritems().
+        """
+        for cookie in iter(self):
+            yield cookie.name
+
+    def keys(self):
+        """Dict-like keys() that returns a list of names of cookies from the
+        jar.
+
+        .. seealso:: values() and items().
+        """
+        return list(self.iterkeys())
+
+    def itervalues(self):
+        """Dict-like itervalues() that returns an iterator of values of cookies
+        from the jar.
+
+        .. seealso:: iterkeys() and iteritems().
+        """
+        for cookie in iter(self):
+            yield cookie.value
+
+    def values(self):
+        """Dict-like values() that returns a list of values of cookies from the
+        jar.
+
+        .. seealso:: keys() and items().
+        """
+        return list(self.itervalues())
+
+    def iteritems(self):
+        """Dict-like iteritems() that returns an iterator of name-value tuples
+        from the jar.
+
+        .. seealso:: iterkeys() and itervalues().
+        """
+        for cookie in iter(self):
+            yield cookie.name, cookie.value
+
+    def items(self):
+        """Dict-like items() that returns a list of name-value tuples from the
+        jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
+        vanilla python dict of key value pairs.
+
+        .. seealso:: keys() and values().
+        """
+        return list(self.iteritems())
+
+    def list_domains(self):
+        """Utility method to list all the domains in the jar."""
+        domains = []
+        for cookie in iter(self):
+            if cookie.domain not in domains:
+                domains.append(cookie.domain)
+        return domains
+
+    def list_paths(self):
+        """Utility method to list all the paths in the jar."""
+        paths = []
+        for cookie in iter(self):
+            if cookie.path not in paths:
+                paths.append(cookie.path)
+        return paths
+
+    def multiple_domains(self):
+        """Returns True if there are multiple domains in the jar.
+        Returns False otherwise.
+
+        :rtype: bool
+        """
+        domains = []
+        for cookie in iter(self):
+            if cookie.domain is not None and cookie.domain in domains:
+                return True
+            domains.append(cookie.domain)
+        return False  # there is only one domain in jar
+
+    def get_dict(self, domain=None, path=None):
+        """Takes as an argument an optional domain and path and returns a plain
+        old Python dict of name-value pairs of cookies that meet the
+        requirements.
+
+        :rtype: dict
+        """
+        dictionary = {}
+        for cookie in iter(self):
+            if (
+                (domain is None or cookie.domain == domain) and
+                (path is None or cookie.path == path)
+            ):
+                dictionary[cookie.name] = cookie.value
+        return dictionary
+
+    def __contains__(self, name):
+        try:
+            return super(RequestsCookieJar, self).__contains__(name)
+        except CookieConflictError:
+            return True
+
+    def __getitem__(self, name):
+        """Dict-like __getitem__() for compatibility with client code. Throws
+        exception if there are more than one cookie with name. In that case,
+        use the more explicit get() method instead.
+
+        .. warning:: operation is O(n), not O(1).
+        """
+        return self._find_no_duplicates(name)
+
+    def __setitem__(self, name, value):
+        """Dict-like __setitem__ for compatibility with client code. Throws
+        exception if there is already a cookie of that name in the jar. In that
+        case, use the more explicit set() method instead.
+        """
+        self.set(name, value)
+
+    def __delitem__(self, name):
+        """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
+        ``remove_cookie_by_name()``.
+        """
+        remove_cookie_by_name(self, name)
+
+    def set_cookie(self, cookie, *args, **kwargs):
+        if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
+            cookie.value = cookie.value.replace('\\"', '')
+        return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
+
+    def update(self, other):
+        """Updates this jar with cookies from another CookieJar or dict-like"""
+        if isinstance(other, cookielib.CookieJar):
+            for cookie in other:
+                self.set_cookie(copy.copy(cookie))
+        else:
+            super(RequestsCookieJar, self).update(other)
+
+    def _find(self, name, domain=None, path=None):
+        """Requests uses this method internally to get cookie values.
+
+        If there are conflicting cookies, _find arbitrarily chooses one.
+        See _find_no_duplicates if you want an exception thrown if there are
+        conflicting cookies.
+
+        :param name: a string containing name of cookie
+        :param domain: (optional) string containing domain of cookie
+        :param path: (optional) string containing path of cookie
+        :return: cookie.value
+        """
+        for cookie in iter(self):
+            if cookie.name == name:
+                if domain is None or cookie.domain == domain:
+                    if path is None or cookie.path == path:
+                        return cookie.value
+
+        raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
+
+    def _find_no_duplicates(self, name, domain=None, path=None):
+        """Both ``__get_item__`` and ``get`` call this function: it's never
+        used elsewhere in Requests.
+
+        :param name: a string containing name of cookie
+        :param domain: (optional) string containing domain of cookie
+        :param path: (optional) string containing path of cookie
+        :raises KeyError: if cookie is not found
+        :raises CookieConflictError: if there are multiple cookies
+            that match name and optionally domain and path
+        :return: cookie.value
+        """
+        toReturn = None
+        for cookie in iter(self):
+            if cookie.name == name:
+                if domain is None or cookie.domain == domain:
+                    if path is None or cookie.path == path:
+                        if toReturn is not None:  # if there are multiple cookies that meet the passed-in criteria
+                            raise CookieConflictError('There are multiple cookies with name %r' % (name))
+                        toReturn = cookie.value  # we will eventually return this as long as there is no cookie conflict
+
+        if toReturn is not None:  # an empty string is still a valid cookie value
+            return toReturn
+        raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
+
+    def __getstate__(self):
+        """Unlike a normal CookieJar, this class is pickleable."""
+        state = self.__dict__.copy()
+        # remove the unpickleable RLock object
+        state.pop('_cookies_lock')
+        return state
+
+    def __setstate__(self, state):
+        """Unlike a normal CookieJar, this class is pickleable."""
+        self.__dict__.update(state)
+        if '_cookies_lock' not in self.__dict__:
+            self._cookies_lock = threading.RLock()
+
+    def copy(self):
+        """Return a copy of this RequestsCookieJar."""
+        new_cj = RequestsCookieJar()
+        new_cj.set_policy(self.get_policy())
+        new_cj.update(self)
+        return new_cj
+
+    def get_policy(self):
+        """Return the CookiePolicy instance used."""
+        return self._policy
+
+
+def _copy_cookie_jar(jar):
+    if jar is None:
+        return None
+
+    if hasattr(jar, 'copy'):
+        # We're dealing with an instance of RequestsCookieJar
+        return jar.copy()
+    # We're dealing with a generic CookieJar instance
+    new_jar = copy.copy(jar)
+    new_jar.clear()
+    for cookie in jar:
+        new_jar.set_cookie(copy.copy(cookie))
+    return new_jar
+
+
+def create_cookie(name, value, **kwargs):
+    """Make a cookie from underspecified parameters.
+
+    By default, the pair of `name` and `value` will be set for the domain ''
+    and sent on every request (this is sometimes called a "supercookie").
+    """
+    result = {
+        'version': 0,
+        'name': name,
+        'value': value,
+        'port': None,
+        'domain': '',
+        'path': '/',
+        'secure': False,
+        'expires': None,
+        'discard': True,
+        'comment': None,
+        'comment_url': None,
+        'rest': {'HttpOnly': None},
+        'rfc2109': False,
+    }
+
+    badargs = set(kwargs) - set(result)
+    if badargs:
+        err = 'create_cookie() got unexpected keyword arguments: %s'
+        raise TypeError(err % list(badargs))
+
+    result.update(kwargs)
+    result['port_specified'] = bool(result['port'])
+    result['domain_specified'] = bool(result['domain'])
+    result['domain_initial_dot'] = result['domain'].startswith('.')
+    result['path_specified'] = bool(result['path'])
+
+    return cookielib.Cookie(**result)
+
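+# Usage sketch (editor's addition, not upstream code): overriding the
+# "supercookie" defaults documented above; the domain is illustrative.
+#
+#   >>> c = create_cookie('session', 'abc123',
+#   ...                   domain='example.com', secure=True)
+#   >>> c.domain_specified, c.domain_initial_dot
+#   (True, False)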
+
+def morsel_to_cookie(morsel):
+    """Convert a Morsel object into a Cookie containing the one k/v pair."""
+
+    expires = None
+    if morsel['max-age']:
+        try:
+            expires = int(time.time() + int(morsel['max-age']))
+        except ValueError:
+            raise TypeError('max-age: %s must be an integer' % morsel['max-age'])
+    elif morsel['expires']:
+        time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
+        expires = calendar.timegm(
+            time.strptime(morsel['expires'], time_template)
+        )
+    return create_cookie(
+        comment=morsel['comment'],
+        comment_url=bool(morsel['comment']),
+        discard=False,
+        domain=morsel['domain'],
+        expires=expires,
+        name=morsel.key,
+        path=morsel['path'],
+        port=None,
+        rest={'HttpOnly': morsel['httponly']},
+        rfc2109=False,
+        secure=bool(morsel['secure']),
+        value=morsel.value,
+        version=morsel['version'] or 0,
+    )
+
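+# Sketch (editor's addition, not upstream code): converting a stdlib
+# Morsel, e.g. one parsed from a Set-Cookie value by SimpleCookie.
+#
+#   >>> from http.cookies import SimpleCookie   # `Cookie` module on Py2
+#   >>> sc = SimpleCookie('name=value; Max-Age=3600; Path=/')
+#   >>> morsel_to_cookie(sc['name']).path
+#   '/'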
+
+def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
+    """Returns a CookieJar from a key/value dictionary.
+
+    :param cookie_dict: Dict of key/values to insert into CookieJar.
+    :param cookiejar: (optional) A cookiejar to add the cookies to.
+    :param overwrite: (optional) If False, will not replace cookies
+        already in the jar with new ones.
+    :rtype: CookieJar
+    """
+    if cookiejar is None:
+        cookiejar = RequestsCookieJar()
+
+    if cookie_dict is not None:
+        names_from_jar = [cookie.name for cookie in cookiejar]
+        for name in cookie_dict:
+            if overwrite or (name not in names_from_jar):
+                cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
+
+    return cookiejar
+
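+# Sketch (editor's addition, not upstream code): building a jar from a
+# plain dict, then topping it up without clobbering existing cookies.
+#
+#   >>> jar = cookiejar_from_dict({'a': '1'})
+#   >>> jar = cookiejar_from_dict({'a': '2', 'b': '3'}, cookiejar=jar,
+#   ...                           overwrite=False)
+#   >>> sorted((c.name, c.value) for c in jar)
+#   [('a', '1'), ('b', '3')]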
+
+def merge_cookies(cookiejar, cookies):
+    """Add cookies to cookiejar and returns a merged CookieJar.
+
+    :param cookiejar: CookieJar object to add the cookies to.
+    :param cookies: Dictionary or CookieJar object to be added.
+    :rtype: CookieJar
+    """
+    if not isinstance(cookiejar, cookielib.CookieJar):
+        raise ValueError('You can only merge into CookieJar')
+
+    if isinstance(cookies, dict):
+        cookiejar = cookiejar_from_dict(
+            cookies, cookiejar=cookiejar, overwrite=False)
+    elif isinstance(cookies, cookielib.CookieJar):
+        try:
+            cookiejar.update(cookies)
+        except AttributeError:
+            for cookie_in_jar in cookies:
+                cookiejar.set_cookie(cookie_in_jar)
+
+    return cookiejar
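+
+
+# Sketch (editor's addition, not upstream code): merge_cookies accepts a
+# dict or a CookieJar; merging into anything else raises ValueError.
+#
+#   >>> jar = merge_cookies(RequestsCookieJar(), {'k': 'v'})
+#   >>> merge_cookies({}, {'k': 'v'})
+#   Traceback (most recent call last):
+#       ...
+#   ValueError: You can only merge into CookieJar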
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/exceptions.py b/lib/python3.7/site-packages/pip/_vendor/requests/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..a91e1fd114ef372efa2d1f02d99f114db913b7e6
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/exceptions.py
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.exceptions
+~~~~~~~~~~~~~~~~~~~
+
+This module contains the set of Requests' exceptions.
+"""
+from pip._vendor.urllib3.exceptions import HTTPError as BaseHTTPError
+
+
+class RequestException(IOError):
+    """There was an ambiguous exception that occurred while handling your
+    request.
+    """
+
+    def __init__(self, *args, **kwargs):
+        """Initialize RequestException with `request` and `response` objects."""
+        response = kwargs.pop('response', None)
+        self.response = response
+        self.request = kwargs.pop('request', None)
+        if (response is not None and not self.request and
+                hasattr(response, 'request')):
+            self.request = self.response.request
+        super(RequestException, self).__init__(*args, **kwargs)
+
+
+class HTTPError(RequestException):
+    """An HTTP error occurred."""
+
+
+class ConnectionError(RequestException):
+    """A Connection error occurred."""
+
+
+class ProxyError(ConnectionError):
+    """A proxy error occurred."""
+
+
+class SSLError(ConnectionError):
+    """An SSL error occurred."""
+
+
+class Timeout(RequestException):
+    """The request timed out.
+
+    Catching this error will catch both
+    :exc:`~requests.exceptions.ConnectTimeout` and
+    :exc:`~requests.exceptions.ReadTimeout` errors.
+    """
+
+
+class ConnectTimeout(ConnectionError, Timeout):
+    """The request timed out while trying to connect to the remote server.
+
+    Requests that produced this error are safe to retry.
+    """
+
+
+class ReadTimeout(Timeout):
+    """The server did not send any data in the allotted amount of time."""
+
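+# Sketch (editor's addition, not upstream code): catching Timeout covers
+# both flavours, per its docstring above; the call and URL are illustrative.
+#
+#   try:
+#       requests.get('https://example.com', timeout=1)
+#   except Timeout:
+#       ...  # ConnectTimeout or ReadTimeout both land here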
+
+class URLRequired(RequestException):
+    """A valid URL is required to make a request."""
+
+
+class TooManyRedirects(RequestException):
+    """Too many redirects."""
+
+
+class MissingSchema(RequestException, ValueError):
+    """The URL schema (e.g. http or https) is missing."""
+
+
+class InvalidSchema(RequestException, ValueError):
+    """See defaults.py for valid schemas."""
+
+
+class InvalidURL(RequestException, ValueError):
+    """The URL provided was somehow invalid."""
+
+
+class InvalidHeader(RequestException, ValueError):
+    """The header value provided was somehow invalid."""
+
+
+class InvalidProxyURL(InvalidURL):
+    """The proxy URL provided is invalid."""
+
+
+class ChunkedEncodingError(RequestException):
+    """The server declared chunked encoding but sent an invalid chunk."""
+
+
+class ContentDecodingError(RequestException, BaseHTTPError):
+    """Failed to decode the response content."""
+
+
+class StreamConsumedError(RequestException, TypeError):
+    """The content for this response was already consumed."""
+
+
+class RetryError(RequestException):
+    """Custom retry logic failed."""
+
+
+class UnrewindableBodyError(RequestException):
+    """Requests encountered an error when trying to rewind a body."""
+
+# Warnings
+
+
+class RequestsWarning(Warning):
+    """Base warning for Requests."""
+    pass
+
+
+class FileModeWarning(RequestsWarning, DeprecationWarning):
+    """A file was opened in text mode, but Requests determined its binary length."""
+    pass
+
+
+class RequestsDependencyWarning(RequestsWarning):
+    """An imported dependency doesn't match the expected version range."""
+    pass
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/help.py b/lib/python3.7/site-packages/pip/_vendor/requests/help.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c3072ba1419b4e0f183c5de1d0c39d166f2472c
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/help.py
@@ -0,0 +1,119 @@
+"""Module containing bug report helper(s)."""
+from __future__ import print_function
+
+import json
+import platform
+import sys
+import ssl
+
+from pip._vendor import idna
+from pip._vendor import urllib3
+from pip._vendor import chardet
+
+from . import __version__ as requests_version
+
+try:
+    from pip._vendor.urllib3.contrib import pyopenssl
+except ImportError:
+    pyopenssl = None
+    OpenSSL = None
+    cryptography = None
+else:
+    import OpenSSL
+    import cryptography
+
+
+def _implementation():
+    """Return a dict with the Python implementation and version.
+
+    Provide both the name and the version of the Python implementation
+    currently running. For example, on CPython 2.7.5 it will return
+    {'name': 'CPython', 'version': '2.7.5'}.
+
+    This function works best on CPython and PyPy: in particular, it probably
+    doesn't work for Jython or IronPython. Future investigation should be done
+    to work out the correct shape of the code for those platforms.
+    """
+    implementation = platform.python_implementation()
+
+    if implementation == 'CPython':
+        implementation_version = platform.python_version()
+    elif implementation == 'PyPy':
+        implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
+                                               sys.pypy_version_info.minor,
+                                               sys.pypy_version_info.micro)
+        if sys.pypy_version_info.releaselevel != 'final':
+            implementation_version = ''.join([
+                implementation_version, sys.pypy_version_info.releaselevel
+            ])
+    elif implementation == 'Jython':
+        implementation_version = platform.python_version()  # Complete Guess
+    elif implementation == 'IronPython':
+        implementation_version = platform.python_version()  # Complete Guess
+    else:
+        implementation_version = 'Unknown'
+
+    return {'name': implementation, 'version': implementation_version}
+
+
+def info():
+    """Generate information for a bug report."""
+    try:
+        platform_info = {
+            'system': platform.system(),
+            'release': platform.release(),
+        }
+    except IOError:
+        platform_info = {
+            'system': 'Unknown',
+            'release': 'Unknown',
+        }
+
+    implementation_info = _implementation()
+    urllib3_info = {'version': urllib3.__version__}
+    chardet_info = {'version': chardet.__version__}
+
+    pyopenssl_info = {
+        'version': None,
+        'openssl_version': '',
+    }
+    if OpenSSL:
+        pyopenssl_info = {
+            'version': OpenSSL.__version__,
+            'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
+        }
+    cryptography_info = {
+        'version': getattr(cryptography, '__version__', ''),
+    }
+    idna_info = {
+        'version': getattr(idna, '__version__', ''),
+    }
+
+    system_ssl = ssl.OPENSSL_VERSION_NUMBER
+    system_ssl_info = {
+        'version': '%x' % system_ssl if system_ssl is not None else ''
+    }
+
+    return {
+        'platform': platform_info,
+        'implementation': implementation_info,
+        'system_ssl': system_ssl_info,
+        'using_pyopenssl': pyopenssl is not None,
+        'pyOpenSSL': pyopenssl_info,
+        'urllib3': urllib3_info,
+        'chardet': chardet_info,
+        'cryptography': cryptography_info,
+        'idna': idna_info,
+        'requests': {
+            'version': requests_version,
+        },
+    }
+
+
+def main():
+    """Pretty-print the bug information as JSON."""
+    print(json.dumps(info(), sort_keys=True, indent=2))
+
+
+if __name__ == '__main__':
+    main()
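+
+# Sketch (editor's addition, not upstream code): the module can be run
+# directly to dump the report, assuming pip's vendored tree is importable:
+#
+#   python -m pip._vendor.requests.help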
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/hooks.py b/lib/python3.7/site-packages/pip/_vendor/requests/hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a51f212c8a08fc6a04e509e89d8dcc85185df77
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/hooks.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.hooks
+~~~~~~~~~~~~~~
+
+This module provides the capabilities for the Requests hooks system.
+
+Available hooks:
+
+``response``:
+    The response generated from a Request.
+"""
+HOOKS = ['response']
+
+
+def default_hooks():
+    return {event: [] for event in HOOKS}
+
+# TODO: response is the only one
+
+
+def dispatch_hook(key, hooks, hook_data, **kwargs):
+    """Dispatches a hook dictionary on a given piece of data."""
+    hooks = hooks or {}
+    hooks = hooks.get(key)
+    if hooks:
+        if hasattr(hooks, '__call__'):
+            hooks = [hooks]
+        for hook in hooks:
+            _hook_data = hook(hook_data, **kwargs)
+            if _hook_data is not None:
+                hook_data = _hook_data
+    return hook_data
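+
+
+# Sketch (editor's addition, not upstream code): a single callable or a
+# list of callables may be registered; a hook that returns a value
+# replaces the data passed to the next hook.
+#
+#   >>> def tag(resp): resp['seen'] = True; return resp
+#   >>> dispatch_hook('response', {'response': tag}, {'url': '/'})
+#   {'url': '/', 'seen': True}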
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/models.py b/lib/python3.7/site-packages/pip/_vendor/requests/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..0839957475d44b590bf3894edda89d1ce610028d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/models.py
@@ -0,0 +1,953 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.models
+~~~~~~~~~~~~~~~
+
+This module contains the primary objects that power Requests.
+"""
+
+import datetime
+import sys
+
+# Import encoding now, to avoid implicit import later.
+# Implicit import within threads may cause LookupError when standard library is in a ZIP,
+# such as in Embedded Python. See https://github.com/requests/requests/issues/3578.
+import encodings.idna
+
+from pip._vendor.urllib3.fields import RequestField
+from pip._vendor.urllib3.filepost import encode_multipart_formdata
+from pip._vendor.urllib3.util import parse_url
+from pip._vendor.urllib3.exceptions import (
+    DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
+
+from io import UnsupportedOperation
+from .hooks import default_hooks
+from .structures import CaseInsensitiveDict
+
+from .auth import HTTPBasicAuth
+from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
+from .exceptions import (
+    HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
+    ContentDecodingError, ConnectionError, StreamConsumedError)
+from ._internal_utils import to_native_string, unicode_is_ascii
+from .utils import (
+    guess_filename, get_auth_from_url, requote_uri,
+    stream_decode_response_unicode, to_key_val_list, parse_header_links,
+    iter_slices, guess_json_utf, super_len, check_header_validity)
+from .compat import (
+    Callable, Mapping,
+    cookielib, urlunparse, urlsplit, urlencode, str, bytes,
+    is_py2, chardet, builtin_str, basestring)
+from .compat import json as complexjson
+from .status_codes import codes
+
+#: The set of HTTP status codes that indicate an automatically
+#: processable redirect.
+REDIRECT_STATI = (
+    codes.moved,               # 301
+    codes.found,               # 302
+    codes.other,               # 303
+    codes.temporary_redirect,  # 307
+    codes.permanent_redirect,  # 308
+)
+
+DEFAULT_REDIRECT_LIMIT = 30
+CONTENT_CHUNK_SIZE = 10 * 1024
+ITER_CHUNK_SIZE = 512
+
+
+class RequestEncodingMixin(object):
+    @property
+    def path_url(self):
+        """Build the path URL to use."""
+
+        url = []
+
+        p = urlsplit(self.url)
+
+        path = p.path
+        if not path:
+            path = '/'
+
+        url.append(path)
+
+        query = p.query
+        if query:
+            url.append('?')
+            url.append(query)
+
+        return ''.join(url)
+
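+    # Sketch (editor's addition, not upstream code): path_url keeps only
+    # the path and query, i.e. what appears on the request line.
+    #
+    #   >>> req = PreparedRequest()
+    #   >>> req.url = 'https://example.com/x?a=1'
+    #   >>> req.path_url
+    #   '/x?a=1'
+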
+    @staticmethod
+    def _encode_params(data):
+        """Encode parameters in a piece of data.
+
+        Will successfully encode parameters when passed as a dict or a list of
+        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
+        if parameters are supplied as a dict.
+        """
+
+        if isinstance(data, (str, bytes)):
+            return data
+        elif hasattr(data, 'read'):
+            return data
+        elif hasattr(data, '__iter__'):
+            result = []
+            for k, vs in to_key_val_list(data):
+                if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
+                    vs = [vs]
+                for v in vs:
+                    if v is not None:
+                        result.append(
+                            (k.encode('utf-8') if isinstance(k, str) else k,
+                             v.encode('utf-8') if isinstance(v, str) else v))
+            return urlencode(result, doseq=True)
+        else:
+            return data
+
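+    # Sketch (editor's addition, not upstream code): 2-tuple lists keep
+    # their order, and values of None are dropped.
+    #
+    #   >>> RequestEncodingMixin._encode_params([('a', '1'), ('b', None)])
+    #   'a=1'
+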
+    @staticmethod
+    def _encode_files(files, data):
+        """Build the body for a multipart/form-data request.
+
+        Will successfully encode files when passed as a dict or a list of
+        tuples. Order is retained if data is a list of tuples but arbitrary
+        if parameters are supplied as a dict.
+        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename,
+        fileobj, content_type) or 4-tuples (filename, fileobj, content_type,
+        custom_headers).
+        """
+        if not files:
+            raise ValueError("Files must be provided.")
+        elif isinstance(data, basestring):
+            raise ValueError("Data must not be a string.")
+
+        new_fields = []
+        fields = to_key_val_list(data or {})
+        files = to_key_val_list(files or {})
+
+        for field, val in fields:
+            if isinstance(val, basestring) or not hasattr(val, '__iter__'):
+                val = [val]
+            for v in val:
+                if v is not None:
+                    # Don't call str() on bytestrings: in Py3 it all goes wrong.
+                    if not isinstance(v, bytes):
+                        v = str(v)
+
+                    new_fields.append(
+                        (field.decode('utf-8') if isinstance(field, bytes) else field,
+                         v.encode('utf-8') if isinstance(v, str) else v))
+
+        for (k, v) in files:
+            # support for explicit filename
+            ft = None
+            fh = None
+            if isinstance(v, (tuple, list)):
+                if len(v) == 2:
+                    fn, fp = v
+                elif len(v) == 3:
+                    fn, fp, ft = v
+                else:
+                    fn, fp, ft, fh = v
+            else:
+                fn = guess_filename(v) or k
+                fp = v
+
+            if isinstance(fp, (str, bytes, bytearray)):
+                fdata = fp
+            elif hasattr(fp, 'read'):
+                fdata = fp.read()
+            elif fp is None:
+                continue
+            else:
+                fdata = fp
+
+            rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
+            rf.make_multipart(content_type=ft)
+            new_fields.append(rf)
+
+        body, content_type = encode_multipart_formdata(new_fields)
+
+        return body, content_type
+
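+    # Sketch (editor's addition, not upstream code): the accepted shapes
+    # for `files`, per the docstring above; names and paths are
+    # illustrative.
+    #
+    #   files = {
+    #       'plain': open('report.csv', 'rb'),                  # fileobj
+    #       'named': ('report.csv', open('report.csv', 'rb')),  # 2-tuple
+    #       'typed': ('report.csv', b'a,b\n', 'text/csv'),      # 3-tuple
+    #   }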
+
+class RequestHooksMixin(object):
+    def register_hook(self, event, hook):
+        """Properly register a hook."""
+
+        if event not in self.hooks:
+            raise ValueError('Unsupported event specified, with event name "%s"' % (event))
+
+        if isinstance(hook, Callable):
+            self.hooks[event].append(hook)
+        elif hasattr(hook, '__iter__'):
+            self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
+
+    def deregister_hook(self, event, hook):
+        """Deregister a previously registered hook.
+        Returns True if the hook existed, False if not.
+        """
+
+        try:
+            self.hooks[event].remove(hook)
+            return True
+        except ValueError:
+            return False
+
+
+class Request(RequestHooksMixin):
+    """A user-created :class:`Request <Request>` object.
+
+    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
+
+    :param method: HTTP method to use.
+    :param url: URL to send.
+    :param headers: dictionary of headers to send.
+    :param files: dictionary of {filename: fileobject} files to multipart upload.
+    :param data: the body to attach to the request. If a dictionary or
+        list of tuples ``[(key, value)]`` is provided, form-encoding will
+        take place.
+    :param json: json for the body to attach to the request (if files or data is not specified).
+    :param params: URL parameters to append to the URL. If a dictionary or
+        list of tuples ``[(key, value)]`` is provided, form-encoding will
+        take place.
+    :param auth: Auth handler or (user, pass) tuple.
+    :param cookies: dictionary or CookieJar of cookies to attach to this request.
+    :param hooks: dictionary of callback hooks, for internal usage.
+
+    Usage::
+
+      >>> import requests
+      >>> req = requests.Request('GET', 'https://httpbin.org/get')
+      >>> req.prepare()
+      <PreparedRequest [GET]>
+    """
+
+    def __init__(self,
+            method=None, url=None, headers=None, files=None, data=None,
+            params=None, auth=None, cookies=None, hooks=None, json=None):
+
+        # Default empty dicts for dict params.
+        data = [] if data is None else data
+        files = [] if files is None else files
+        headers = {} if headers is None else headers
+        params = {} if params is None else params
+        hooks = {} if hooks is None else hooks
+
+        self.hooks = default_hooks()
+        for (k, v) in list(hooks.items()):
+            self.register_hook(event=k, hook=v)
+
+        self.method = method
+        self.url = url
+        self.headers = headers
+        self.files = files
+        self.data = data
+        self.json = json
+        self.params = params
+        self.auth = auth
+        self.cookies = cookies
+
+    def __repr__(self):
+        return '<Request [%s]>' % (self.method)
+
+    def prepare(self):
+        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
+        p = PreparedRequest()
+        p.prepare(
+            method=self.method,
+            url=self.url,
+            headers=self.headers,
+            files=self.files,
+            data=self.data,
+            json=self.json,
+            params=self.params,
+            auth=self.auth,
+            cookies=self.cookies,
+            hooks=self.hooks,
+        )
+        return p
+
+
+class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
+    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
+    containing the exact bytes that will be sent to the server.
+
+    Generated from either a :class:`Request <Request>` object or manually.
+
+    Usage::
+
+      >>> import requests
+      >>> req = requests.Request('GET', 'https://httpbin.org/get')
+      >>> r = req.prepare()
+      <PreparedRequest [GET]>
+
+      >>> s = requests.Session()
+      >>> s.send(r)
+      <Response [200]>
+    """
+
+    def __init__(self):
+        #: HTTP verb to send to the server.
+        self.method = None
+        #: HTTP URL to send the request to.
+        self.url = None
+        #: dictionary of HTTP headers.
+        self.headers = None
+        # The `CookieJar` used to create the Cookie header will be stored here
+        # after prepare_cookies is called
+        self._cookies = None
+        #: request body to send to the server.
+        self.body = None
+        #: dictionary of callback hooks, for internal usage.
+        self.hooks = default_hooks()
+        #: integer denoting starting position of a readable file-like body.
+        self._body_position = None
+
+    def prepare(self,
+            method=None, url=None, headers=None, files=None, data=None,
+            params=None, auth=None, cookies=None, hooks=None, json=None):
+        """Prepares the entire request with the given parameters."""
+
+        self.prepare_method(method)
+        self.prepare_url(url, params)
+        self.prepare_headers(headers)
+        self.prepare_cookies(cookies)
+        self.prepare_body(data, files, json)
+        self.prepare_auth(auth, url)
+
+        # Note that prepare_auth must be last to enable authentication schemes
+        # such as OAuth to work on a fully prepared request.
+
+        # This MUST go after prepare_auth. Authenticators could add a hook
+        self.prepare_hooks(hooks)
+
+    def __repr__(self):
+        return '<PreparedRequest [%s]>' % (self.method)
+
+    def copy(self):
+        p = PreparedRequest()
+        p.method = self.method
+        p.url = self.url
+        p.headers = self.headers.copy() if self.headers is not None else None
+        p._cookies = _copy_cookie_jar(self._cookies)
+        p.body = self.body
+        p.hooks = self.hooks
+        p._body_position = self._body_position
+        return p
+
+    def prepare_method(self, method):
+        """Prepares the given HTTP method."""
+        self.method = method
+        if self.method is not None:
+            self.method = to_native_string(self.method.upper())
+
+    @staticmethod
+    def _get_idna_encoded_host(host):
+        from pip._vendor import idna
+
+        try:
+            host = idna.encode(host, uts46=True).decode('utf-8')
+        except idna.IDNAError:
+            raise UnicodeError
+        return host
+
+    def prepare_url(self, url, params):
+        """Prepares the given HTTP URL."""
+        #: Accept objects that have string representations.
+        #: We're unable to blindly call unicode/str functions
+        #: as this will include the bytestring indicator (b'')
+        #: on python 3.x.
+        #: https://github.com/requests/requests/pull/2238
+        if isinstance(url, bytes):
+            url = url.decode('utf8')
+        else:
+            url = unicode(url) if is_py2 else str(url)
+
+        # Remove leading whitespaces from url
+        url = url.lstrip()
+
+        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
+        # `data` etc to work around exceptions from `url_parse`, which
+        # handles RFC 3986 only.
+        if ':' in url and not url.lower().startswith('http'):
+            self.url = url
+            return
+
+        # Support for unicode domain names and paths.
+        try:
+            scheme, auth, host, port, path, query, fragment = parse_url(url)
+        except LocationParseError as e:
+            raise InvalidURL(*e.args)
+
+        if not scheme:
+            error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
+            error = error.format(to_native_string(url, 'utf8'))
+
+            raise MissingSchema(error)
+
+        if not host:
+            raise InvalidURL("Invalid URL %r: No host supplied" % url)
+
+        # In general, we want to try IDNA encoding the hostname if the string contains
+        # non-ASCII characters. This allows users to automatically get the correct IDNA
+        # behaviour. For strings containing only ASCII characters, we need to also verify
+        # it doesn't start with a wildcard (*), before allowing the unencoded hostname.
+        if not unicode_is_ascii(host):
+            try:
+                host = self._get_idna_encoded_host(host)
+            except UnicodeError:
+                raise InvalidURL('URL has an invalid label.')
+        elif host.startswith(u'*'):
+            raise InvalidURL('URL has an invalid label.')
+
+        # Carefully reconstruct the network location
+        netloc = auth or ''
+        if netloc:
+            netloc += '@'
+        netloc += host
+        if port:
+            netloc += ':' + str(port)
+
+        # Bare domains aren't valid URLs.
+        if not path:
+            path = '/'
+
+        if is_py2:
+            if isinstance(scheme, str):
+                scheme = scheme.encode('utf-8')
+            if isinstance(netloc, str):
+                netloc = netloc.encode('utf-8')
+            if isinstance(path, str):
+                path = path.encode('utf-8')
+            if isinstance(query, str):
+                query = query.encode('utf-8')
+            if isinstance(fragment, str):
+                fragment = fragment.encode('utf-8')
+
+        if isinstance(params, (str, bytes)):
+            params = to_native_string(params)
+
+        enc_params = self._encode_params(params)
+        if enc_params:
+            if query:
+                query = '%s&%s' % (query, enc_params)
+            else:
+                query = enc_params
+
+        url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
+        self.url = url
+
+    def prepare_headers(self, headers):
+        """Prepares the given HTTP headers."""
+
+        self.headers = CaseInsensitiveDict()
+        if headers:
+            for header in headers.items():
+                # Raise exception on invalid header value.
+                check_header_validity(header)
+                name, value = header
+                self.headers[to_native_string(name)] = value
+
+    def prepare_body(self, data, files, json=None):
+        """Prepares the given HTTP body data."""
+
+        # Check if file, fo, generator, iterator.
+        # If not, run through normal process.
+
+        # Nottin' on you.
+        body = None
+        content_type = None
+
+        if not data and json is not None:
+            # urllib3 requires a bytes-like body. Python 2's json.dumps
+            # provides this natively, but Python 3 gives a Unicode string.
+            content_type = 'application/json'
+            body = complexjson.dumps(json)
+            if not isinstance(body, bytes):
+                body = body.encode('utf-8')
+
+        is_stream = all([
+            hasattr(data, '__iter__'),
+            not isinstance(data, (basestring, list, tuple, Mapping))
+        ])
+
+        try:
+            length = super_len(data)
+        except (TypeError, AttributeError, UnsupportedOperation):
+            length = None
+
+        if is_stream:
+            body = data
+
+            if getattr(body, 'tell', None) is not None:
+                # Record the current file position before reading.
+                # This will allow us to rewind a file in the event
+                # of a redirect.
+                try:
+                    self._body_position = body.tell()
+                except (IOError, OSError):
+                    # This differentiates from None, allowing us to catch
+                    # a failed `tell()` later when trying to rewind the body
+                    self._body_position = object()
+
+            if files:
+                raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
+
+            if length:
+                self.headers['Content-Length'] = builtin_str(length)
+            else:
+                self.headers['Transfer-Encoding'] = 'chunked'
+        else:
+            # Multi-part file uploads.
+            if files:
+                (body, content_type) = self._encode_files(files, data)
+            else:
+                if data:
+                    body = self._encode_params(data)
+                    if isinstance(data, basestring) or hasattr(data, 'read'):
+                        content_type = None
+                    else:
+                        content_type = 'application/x-www-form-urlencoded'
+
+            self.prepare_content_length(body)
+
+            # Add content-type if it wasn't explicitly provided.
+            if content_type and ('content-type' not in self.headers):
+                self.headers['Content-Type'] = content_type
+
+        self.body = body
+
+    def prepare_content_length(self, body):
+        """Prepare Content-Length header based on request method and body"""
+        if body is not None:
+            length = super_len(body)
+            if length:
+                # If length exists, set it. Otherwise, we fallback
+                # to Transfer-Encoding: chunked.
+                self.headers['Content-Length'] = builtin_str(length)
+        elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
+            # Set Content-Length to 0 for methods that can have a body
+            # but don't provide one. (i.e. not GET or HEAD)
+            self.headers['Content-Length'] = '0'
+
+    def prepare_auth(self, auth, url=''):
+        """Prepares the given HTTP auth data."""
+
+        # If no Auth is explicitly provided, extract it from the URL first.
+        if auth is None:
+            url_auth = get_auth_from_url(self.url)
+            auth = url_auth if any(url_auth) else None
+
+        if auth:
+            if isinstance(auth, tuple) and len(auth) == 2:
+                # special-case basic HTTP auth
+                auth = HTTPBasicAuth(*auth)
+
+            # Allow auth to make its changes.
+            r = auth(self)
+
+            # Update self to reflect the auth changes.
+            self.__dict__.update(r.__dict__)
+
+            # Recompute Content-Length
+            self.prepare_content_length(self.body)
+
+    def prepare_cookies(self, cookies):
+        """Prepares the given HTTP cookie data.
+
+        This function eventually generates a ``Cookie`` header from the
+        given cookies using cookielib. Due to cookielib's design, the header
+        will not be regenerated if it already exists, meaning this function
+        can only be called once for the life of the
+        :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
+        to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
+        header is removed beforehand.
+        """
+        if isinstance(cookies, cookielib.CookieJar):
+            self._cookies = cookies
+        else:
+            self._cookies = cookiejar_from_dict(cookies)
+
+        cookie_header = get_cookie_header(self._cookies, self)
+        if cookie_header is not None:
+            self.headers['Cookie'] = cookie_header
+
+    def prepare_hooks(self, hooks):
+        """Prepares the given hooks."""
+        # hooks can be passed as None to the prepare method and to this
+        # method. To prevent iterating over None, simply use an empty list
+        # if hooks is False-y
+        hooks = hooks or []
+        for event in hooks:
+            self.register_hook(event, hooks[event])
+
+
+class Response(object):
+    """The :class:`Response <Response>` object, which contains a
+    server's response to an HTTP request.
+    """
+
+    __attrs__ = [
+        '_content', 'status_code', 'headers', 'url', 'history',
+        'encoding', 'reason', 'cookies', 'elapsed', 'request'
+    ]
+
+    def __init__(self):
+        self._content = False
+        self._content_consumed = False
+        self._next = None
+
+        #: Integer Code of responded HTTP Status, e.g. 404 or 200.
+        self.status_code = None
+
+        #: Case-insensitive Dictionary of Response Headers.
+        #: For example, ``headers['content-encoding']`` will return the
+        #: value of a ``'Content-Encoding'`` response header.
+        self.headers = CaseInsensitiveDict()
+
+        #: File-like object representation of response (for advanced usage).
+        #: Use of ``raw`` requires that ``stream=True`` be set on the request.
+        # This requirement does not apply for use internally to Requests.
+        self.raw = None
+
+        #: Final URL location of Response.
+        self.url = None
+
+        #: Encoding to decode with when accessing r.text.
+        self.encoding = None
+
+        #: A list of :class:`Response <Response>` objects from
+        #: the history of the Request. Any redirect responses will end
+        #: up here. The list is sorted from the oldest to the most recent request.
+        self.history = []
+
+        #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
+        self.reason = None
+
+        #: A CookieJar of Cookies the server sent back.
+        self.cookies = cookiejar_from_dict({})
+
+        #: The amount of time elapsed between sending the request
+        #: and the arrival of the response (as a timedelta).
+        #: This property specifically measures the time taken between sending
+        #: the first byte of the request and finishing parsing the headers. It
+        #: is therefore unaffected by consuming the response content or the
+        #: value of the ``stream`` keyword argument.
+        self.elapsed = datetime.timedelta(0)
+
+        #: The :class:`PreparedRequest <PreparedRequest>` object to which this
+        #: is a response.
+        self.request = None
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+    def __getstate__(self):
+        # Consume everything; accessing the content attribute makes
+        # sure the content has been fully read.
+        if not self._content_consumed:
+            self.content
+
+        return {attr: getattr(self, attr, None) for attr in self.__attrs__}
+
+    def __setstate__(self, state):
+        for name, value in state.items():
+            setattr(self, name, value)
+
+        # pickled objects do not have .raw
+        setattr(self, '_content_consumed', True)
+        setattr(self, 'raw', None)
+
+    def __repr__(self):
+        return '<Response [%s]>' % (self.status_code)
+
+    def __bool__(self):
+        """Returns True if :attr:`status_code` is less than 400.
+
+        This attribute checks if the status code of the response is between
+        400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+        is **not** a check to see if the response code is ``200 OK``.
+        """
+        return self.ok
+
+    def __nonzero__(self):
+        """Returns True if :attr:`status_code` is less than 400.
+
+        This attribute checks if the status code of the response is between
+        400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+        is **not** a check to see if the response code is ``200 OK``.
+        """
+        return self.ok
+
+    def __iter__(self):
+        """Allows you to use a response as an iterator."""
+        return self.iter_content(128)
+
+    @property
+    def ok(self):
+        """Returns True if :attr:`status_code` is less than 400, False if not.
+
+        This attribute checks if the status code of the response is between
+        400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+        is **not** a check to see if the response code is ``200 OK``.
+        """
+        try:
+            self.raise_for_status()
+        except HTTPError:
+            return False
+        return True
+
+    @property
+    def is_redirect(self):
+        """True if this Response is a well-formed HTTP redirect that could have
+        been processed automatically (by :meth:`Session.resolve_redirects`).
+        """
+        return ('location' in self.headers and self.status_code in REDIRECT_STATI)
+
+    @property
+    def is_permanent_redirect(self):
+        """True if this Response one of the permanent versions of redirect."""
+        return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
+
+    @property
+    def next(self):
+        """Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
+        return self._next
+
+    @property
+    def apparent_encoding(self):
+        """The apparent encoding, provided by the chardet library."""
+        return chardet.detect(self.content)['encoding']
+
+    def iter_content(self, chunk_size=1, decode_unicode=False):
+        """Iterates over the response data.  When stream=True is set on the
+        request, this avoids reading the content at once into memory for
+        large responses.  The chunk size is the number of bytes it should
+        read into memory.  This is not necessarily the length of each item
+        returned as decoding can take place.
+
+        chunk_size must be of type int or None. A value of None will
+        function differently depending on the value of `stream`.
+        stream=True will read data as it arrives in whatever size the
+        chunks are received. If stream=False, data is returned as
+        a single chunk.
+
+        If decode_unicode is True, content will be decoded using the best
+        available encoding based on the response.
+        """
+
+        def generate():
+            # Special case for urllib3.
+            if hasattr(self.raw, 'stream'):
+                try:
+                    for chunk in self.raw.stream(chunk_size, decode_content=True):
+                        yield chunk
+                except ProtocolError as e:
+                    raise ChunkedEncodingError(e)
+                except DecodeError as e:
+                    raise ContentDecodingError(e)
+                except ReadTimeoutError as e:
+                    raise ConnectionError(e)
+            else:
+                # Standard file-like object.
+                while True:
+                    chunk = self.raw.read(chunk_size)
+                    if not chunk:
+                        break
+                    yield chunk
+
+            self._content_consumed = True
+
+        if self._content_consumed and isinstance(self._content, bool):
+            raise StreamConsumedError()
+        elif chunk_size is not None and not isinstance(chunk_size, int):
+            raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
+        # simulate reading small chunks of the content
+        reused_chunks = iter_slices(self._content, chunk_size)
+
+        stream_chunks = generate()
+
+        chunks = reused_chunks if self._content_consumed else stream_chunks
+
+        if decode_unicode:
+            chunks = stream_decode_response_unicode(chunks, self)
+
+        return chunks
+
+    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
+        """Iterates over the response data, one line at a time.  When
+        stream=True is set on the request, this avoids reading the
+        content at once into memory for large responses.
+
+        .. note:: This method is not reentrant safe.
+        """
+
+        pending = None
+
+        for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
+
+            if pending is not None:
+                chunk = pending + chunk
+
+            if delimiter:
+                lines = chunk.split(delimiter)
+            else:
+                lines = chunk.splitlines()
+
+            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
+                pending = lines.pop()
+            else:
+                pending = None
+
+            for line in lines:
+                yield line
+
+        if pending is not None:
+            yield pending
+
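+    # Sketch (editor's addition, not upstream code): typical streaming
+    # use, assuming the response was requested with stream=True;
+    # `handle` is a stand-in.
+    #
+    #   for line in r.iter_lines(decode_unicode=True):
+    #       if line:          # filter out keep-alive blank lines
+    #           handle(line)
+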
+    @property
+    def content(self):
+        """Content of the response, in bytes."""
+
+        if self._content is False:
+            # Read the contents.
+            if self._content_consumed:
+                raise RuntimeError(
+                    'The content for this response was already consumed')
+
+            if self.status_code == 0 or self.raw is None:
+                self._content = None
+            else:
+                self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
+
+        self._content_consumed = True
+        # don't need to release the connection; that's been handled by urllib3
+        # since we exhausted the data.
+        return self._content
+
+    @property
+    def text(self):
+        """Content of the response, in unicode.
+
+        If Response.encoding is None, encoding will be guessed using
+        ``chardet``.
+
+        The encoding of the response content is determined based solely on HTTP
+        headers, following RFC 2616 to the letter. If you can take advantage of
+        non-HTTP knowledge to make a better guess at the encoding, you should
+        set ``r.encoding`` appropriately before accessing this property.
+        """
+
+        # Try charset from content-type
+        content = None
+        encoding = self.encoding
+
+        if not self.content:
+            return str('')
+
+        # Fallback to auto-detected encoding.
+        if self.encoding is None:
+            encoding = self.apparent_encoding
+
+        # Decode unicode from given encoding.
+        try:
+            content = str(self.content, encoding, errors='replace')
+        except (LookupError, TypeError):
+            # A LookupError is raised if the encoding was not found which could
+            # indicate a misspelling or similar mistake.
+            #
+            # A TypeError can be raised if encoding is None
+            #
+            # So we try blindly encoding.
+            content = str(self.content, errors='replace')
+
+        return content
+
+    def json(self, **kwargs):
+        r"""Returns the json-encoded content of a response, if any.
+
+        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
+        :raises ValueError: If the response body does not contain valid json.
+        """
+
+        if not self.encoding and self.content and len(self.content) > 3:
+            # No encoding set. JSON RFC 4627 section 3 states we should expect
+            # UTF-8, -16 or -32. Detect which one to use; If the detection or
+            # decoding fails, fall back to `self.text` (using chardet to make
+            # a best guess).
+            encoding = guess_json_utf(self.content)
+            if encoding is not None:
+                try:
+                    return complexjson.loads(
+                        self.content.decode(encoding), **kwargs
+                    )
+                except UnicodeDecodeError:
+                    # Wrong UTF codec detected; usually because it's not UTF-8
+                    # but some other 8-bit codec.  This is an RFC violation,
+                    # and the server didn't bother to tell us what codec *was*
+                    # used.
+                    pass
+        return complexjson.loads(self.text, **kwargs)
+
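+    # Sketch (editor's addition, not upstream code): json() raises
+    # ValueError on a non-JSON body, so a guard is common.
+    #
+    #   try:
+    #       payload = r.json()
+    #   except ValueError:
+    #       payload = None
+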
+    @property
+    def links(self):
+        """Returns the parsed header links of the response, if any."""
+
+        header = self.headers.get('link')
+
+        # l = MultiDict()
+        l = {}
+
+        if header:
+            links = parse_header_links(header)
+
+            for link in links:
+                key = link.get('rel') or link.get('url')
+                l[key] = link
+
+        return l
+
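+    # Sketch (editor's addition, not upstream code): with an RFC 5988
+    # style Link header the mapping is keyed by rel, so pagination
+    # reads naturally.
+    #
+    #   next_url = r.links.get('next', {}).get('url')
+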
+    def raise_for_status(self):
+        """Raises stored :class:`HTTPError`, if one occurred."""
+
+        http_error_msg = ''
+        if isinstance(self.reason, bytes):
+            # We attempt to decode utf-8 first because some servers
+            # choose to localize their reason strings. If the string
+            # isn't utf-8, we fall back to iso-8859-1 for all other
+            # encodings. (See PR #3538)
+            try:
+                reason = self.reason.decode('utf-8')
+            except UnicodeDecodeError:
+                reason = self.reason.decode('iso-8859-1')
+        else:
+            reason = self.reason
+
+        if 400 <= self.status_code < 500:
+            http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
+
+        elif 500 <= self.status_code < 600:
+            http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
+
+        if http_error_msg:
+            raise HTTPError(http_error_msg, response=self)
+
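+    # Sketch (editor's addition, not upstream code): the idiomatic guard
+    # right after a request; any 4xx/5xx becomes an HTTPError that
+    # carries the response.
+    #
+    #   r = session.get(url)
+    #   r.raise_for_status()   # no-op on success
+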
+    def close(self):
+        """Releases the connection back to the pool. Once this method has been
+        called the underlying ``raw`` object must not be accessed again.
+
+        *Note: Should not normally need to be called explicitly.*
+        """
+        if not self._content_consumed:
+            self.raw.close()
+
+        release_conn = getattr(self.raw, 'release_conn', None)
+        if release_conn is not None:
+            release_conn()
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/packages.py b/lib/python3.7/site-packages/pip/_vendor/requests/packages.py
new file mode 100644
index 0000000000000000000000000000000000000000..9582fa730f121634348a79c1a8b0cc2df99c616f
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/packages.py
@@ -0,0 +1,16 @@
+import sys
+
+# This code exists for backwards compatibility reasons.
+# I don't like it either. Just look the other way. :)
+
+for package in ('urllib3', 'idna', 'chardet'):
+    vendored_package = "pip._vendor." + package
+    locals()[package] = __import__(vendored_package)
+    # This traversal is apparently necessary such that the identities are
+    # preserved (requests.packages.urllib3.* is urllib3.*)
+    for mod in list(sys.modules):
+        if mod == vendored_package or mod.startswith(vendored_package + '.'):
+            unprefixed_mod = mod[len("pip._vendor."):]
+            sys.modules['pip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod]
+
+# Kinda cool, though, right?
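+#
+# Sketch (editor's addition, not upstream code): the aliasing above means
+# both dotted paths name the same module object.
+#
+#   >>> import sys, pip._vendor.requests.packages
+#   >>> (sys.modules['pip._vendor.requests.packages.urllib3']
+#   ...  is sys.modules['pip._vendor.urllib3'])
+#   True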
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/sessions.py b/lib/python3.7/site-packages/pip/_vendor/requests/sessions.py
new file mode 100644
index 0000000000000000000000000000000000000000..d73d700fa6baa975ab9b10fbdb186bccdb9d3610
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/sessions.py
@@ -0,0 +1,770 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.sessions
+~~~~~~~~~~~~~~~~~
+
+This module provides a Session object to manage and persist settings across
+requests (cookies, auth, proxies).
+"""
+import os
+import sys
+import time
+from datetime import timedelta
+
+from .auth import _basic_auth_str
+from .compat import cookielib, is_py3, OrderedDict, urljoin, urlparse, Mapping
+from .cookies import (
+    cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
+from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
+from .hooks import default_hooks, dispatch_hook
+from ._internal_utils import to_native_string
+from .utils import to_key_val_list, default_headers, DEFAULT_PORTS
+from .exceptions import (
+    TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
+
+from .structures import CaseInsensitiveDict
+from .adapters import HTTPAdapter
+
+from .utils import (
+    requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
+    get_auth_from_url, rewind_body
+)
+
+from .status_codes import codes
+
+# formerly defined here, reexposed here for backward compatibility
+from .models import REDIRECT_STATI
+
+# Preferred clock, based on which one is more accurate on a given system.
+if sys.platform == 'win32':
+    try:  # Python 3.4+
+        preferred_clock = time.perf_counter
+    except AttributeError:  # Earlier than Python 3.
+        preferred_clock = time.clock
+else:
+    preferred_clock = time.time
+
+
+def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
+    """Determines appropriate setting for a given request, taking into account
+    the explicit setting on that request, and the setting in the session. If a
+    setting is a dictionary, they will be merged together using `dict_class`
+    """
+
+    if session_setting is None:
+        return request_setting
+
+    if request_setting is None:
+        return session_setting
+
+    # Bypass if not a dictionary (e.g. verify)
+    if not (
+            isinstance(session_setting, Mapping) and
+            isinstance(request_setting, Mapping)
+    ):
+        return request_setting
+
+    merged_setting = dict_class(to_key_val_list(session_setting))
+    merged_setting.update(to_key_val_list(request_setting))
+
+    # Remove keys that are set to None. Extract keys first to avoid altering
+    # the dictionary during iteration.
+    none_keys = [k for (k, v) in merged_setting.items() if v is None]
+    for key in none_keys:
+        del merged_setting[key]
+
+    return merged_setting
+
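+# Sketch (editor's addition, not upstream code): request-level values win,
+# and a request value of None removes the session-level key.
+#
+#   >>> merge_setting({'A': '1', 'B': None}, {'B': '2', 'C': '3'})
+#   OrderedDict([('C', '3'), ('A', '1')])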
+
+def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
+    """Properly merges both requests and session hooks.
+
+    This is necessary because when request_hooks == {'response': []}, the
+    merge breaks Session hooks entirely.
+    """
+    if session_hooks is None or session_hooks.get('response') == []:
+        return request_hooks
+
+    if request_hooks is None or request_hooks.get('response') == []:
+        return session_hooks
+
+    return merge_setting(request_hooks, session_hooks, dict_class)
+
+
+class SessionRedirectMixin(object):
+
+    def get_redirect_target(self, resp):
+        """Receives a Response. Returns a redirect URI or ``None``"""
+        # Due to the nature of how requests processes redirects this method will
+        # be called at least once upon the original response and at least twice
+        # on each subsequent redirect response (if any).
+        # If a custom mixin is used to handle this logic, it may be advantageous
+        # to cache the redirect location onto the response object as a private
+        # attribute.
+        if resp.is_redirect:
+            location = resp.headers['location']
+            # Currently the underlying http module on py3 decodes headers
+            # in latin1, but empirical evidence suggests that latin1 is very
+            # rarely used with non-ASCII characters in HTTP headers.
+            # A UTF-8 encoded header is far more likely than a latin1 one,
+            # which causes incorrect handling of UTF-8 encoded location
+            # headers. To solve this, we re-encode the location in latin1.
+            if is_py3:
+                location = location.encode('latin1')
+            return to_native_string(location, 'utf8')
+        return None
+
+    def should_strip_auth(self, old_url, new_url):
+        """Decide whether Authorization header should be removed when redirecting"""
+        old_parsed = urlparse(old_url)
+        new_parsed = urlparse(new_url)
+        if old_parsed.hostname != new_parsed.hostname:
+            return True
+        # Special case: allow http -> https redirect when using the standard
+        # ports. This isn't specified by RFC 7235, but is kept to avoid
+        # breaking backwards compatibility with older versions of requests
+        # that allowed any redirects on the same host.
+        if (old_parsed.scheme == 'http' and old_parsed.port in (80, None)
+                and new_parsed.scheme == 'https' and new_parsed.port in (443, None)):
+            return False
+
+        # Handle default port usage corresponding to scheme.
+        changed_port = old_parsed.port != new_parsed.port
+        changed_scheme = old_parsed.scheme != new_parsed.scheme
+        default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
+        if (not changed_scheme and old_parsed.port in default_port
+                and new_parsed.port in default_port):
+            return False
+
+        # Standard case: root URI must match
+        return changed_port or changed_scheme
+
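+    # Illustrative behaviour (``s`` is any Session, which mixes this in):
+    # auth survives an http -> https upgrade on the default ports, but is
+    # stripped as soon as the host changes.
+    #
+    #     >>> s.should_strip_auth('http://example.com/', 'https://example.com/')
+    #     False
+    #     >>> s.should_strip_auth('http://example.com/', 'http://other.example/')
+    #     True
+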
+    def resolve_redirects(self, resp, req, stream=False, timeout=None,
+                          verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
+        """Receives a Response. Returns a generator of Responses or Requests."""
+
+        hist = []  # keep track of history
+
+        url = self.get_redirect_target(resp)
+        previous_fragment = urlparse(req.url).fragment
+        while url:
+            prepared_request = req.copy()
+
+            # Update history and keep track of redirects.
+            # resp.history must ignore the original request in this loop
+            hist.append(resp)
+            resp.history = hist[1:]
+
+            try:
+                resp.content  # Consume socket so it can be released
+            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
+                resp.raw.read(decode_content=False)
+
+            if len(resp.history) >= self.max_redirects:
+                raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)
+
+            # Release the connection back into the pool.
+            resp.close()
+
+            # Handle redirection without scheme (see: RFC 1808 Section 4)
+            if url.startswith('//'):
+                parsed_rurl = urlparse(resp.url)
+                url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url)
+
+            # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
+            parsed = urlparse(url)
+            if parsed.fragment == '' and previous_fragment:
+                parsed = parsed._replace(fragment=previous_fragment)
+            elif parsed.fragment:
+                previous_fragment = parsed.fragment
+            url = parsed.geturl()
+
+            # Facilitate relative 'location' headers, as allowed by RFC 7231.
+            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
+            # Compliant with RFC3986, we percent encode the url.
+            if not parsed.netloc:
+                url = urljoin(resp.url, requote_uri(url))
+            else:
+                url = requote_uri(url)
+
+            prepared_request.url = to_native_string(url)
+
+            self.rebuild_method(prepared_request, resp)
+
+            # https://github.com/requests/requests/issues/1084
+            if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
+                # https://github.com/requests/requests/issues/3490
+                purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
+                for header in purged_headers:
+                    prepared_request.headers.pop(header, None)
+                prepared_request.body = None
+
+            headers = prepared_request.headers
+            try:
+                del headers['Cookie']
+            except KeyError:
+                pass
+
+            # Extract any cookies sent on the response to the cookiejar
+            # in the new request. Because we've mutated our copied prepared
+            # request, use the old one that we haven't yet touched.
+            extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
+            merge_cookies(prepared_request._cookies, self.cookies)
+            prepared_request.prepare_cookies(prepared_request._cookies)
+
+            # Rebuild auth and proxy information.
+            proxies = self.rebuild_proxies(prepared_request, proxies)
+            self.rebuild_auth(prepared_request, resp)
+
+            # A failed tell() sets `_body_position` to `object()`. This non-None
+            # value ensures `rewindable` will be True, allowing us to raise an
+            # UnrewindableBodyError, instead of hanging the connection.
+            rewindable = (
+                prepared_request._body_position is not None and
+                ('Content-Length' in headers or 'Transfer-Encoding' in headers)
+            )
+
+            # Attempt to rewind consumed file-like object.
+            if rewindable:
+                rewind_body(prepared_request)
+
+            # Override the original request.
+            req = prepared_request
+
+            if yield_requests:
+                yield req
+            else:
+
+                resp = self.send(
+                    req,
+                    stream=stream,
+                    timeout=timeout,
+                    verify=verify,
+                    cert=cert,
+                    proxies=proxies,
+                    allow_redirects=False,
+                    **adapter_kwargs
+                )
+
+                extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
+
+                # extract redirect url, if any, for the next loop
+                url = self.get_redirect_target(resp)
+                yield resp
+
+    def rebuild_auth(self, prepared_request, response):
+        """When being redirected we may want to strip authentication from the
+        request to avoid leaking credentials. This method intelligently removes
+        and reapplies authentication where possible to avoid credential loss.
+        """
+        headers = prepared_request.headers
+        url = prepared_request.url
+
+        if 'Authorization' in headers and self.should_strip_auth(response.request.url, url):
+            # If we get redirected to a new host, we should strip out any
+            # authentication headers.
+            del headers['Authorization']
+
+        # .netrc might have more auth for us on our new host.
+        new_auth = get_netrc_auth(url) if self.trust_env else None
+        if new_auth is not None:
+            prepared_request.prepare_auth(new_auth)
+
+        return
+
+    def rebuild_proxies(self, prepared_request, proxies):
+        """This method re-evaluates the proxy configuration by considering the
+        environment variables. If we are redirected to a URL covered by
+        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
+        proxy keys for this URL (in case they were stripped by a previous
+        redirect).
+
+        This method also replaces the Proxy-Authorization header where
+        necessary.
+
+        :rtype: dict
+        """
+        proxies = proxies if proxies is not None else {}
+        headers = prepared_request.headers
+        url = prepared_request.url
+        scheme = urlparse(url).scheme
+        new_proxies = proxies.copy()
+        no_proxy = proxies.get('no_proxy')
+
+        bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy)
+        if self.trust_env and not bypass_proxy:
+            environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
+
+            proxy = environ_proxies.get(scheme, environ_proxies.get('all'))
+
+            if proxy:
+                new_proxies.setdefault(scheme, proxy)
+
+        if 'Proxy-Authorization' in headers:
+            del headers['Proxy-Authorization']
+
+        try:
+            username, password = get_auth_from_url(new_proxies[scheme])
+        except KeyError:
+            username, password = None, None
+
+        if username and password:
+            headers['Proxy-Authorization'] = _basic_auth_str(username, password)
+
+        return new_proxies
+
+    def rebuild_method(self, prepared_request, response):
+        """When being redirected we may want to change the method of the request
+        based on certain specs or browser behavior.
+        """
+        method = prepared_request.method
+
+        # https://tools.ietf.org/html/rfc7231#section-6.4.4
+        if response.status_code == codes.see_other and method != 'HEAD':
+            method = 'GET'
+
+        # Do what the browsers do, despite standards...
+        # First, turn 302s into GETs.
+        if response.status_code == codes.found and method != 'HEAD':
+            method = 'GET'
+
+        # Second, if a POST is responded to with a 301, turn it into a GET.
+        # This bizarre behaviour is explained in Issue 1704.
+        if response.status_code == codes.moved and method == 'POST':
+            method = 'GET'
+
+        prepared_request.method = method
+
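+# Net effect of rebuild_method for a redirected POST (per the rules above):
+#
+#     301 Moved Permanently  -> GET   (browser behaviour, see issue 1704)
+#     302 Found              -> GET   (browser behaviour)
+#     303 See Other          -> GET   (RFC 7231, section 6.4.4)
+#     307 / 308              -> POST  (method preserved; resolve_redirects
+#                                      also keeps the body for these codes)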
+
+class Session(SessionRedirectMixin):
+    """A Requests session.
+
+    Provides cookie persistence, connection-pooling, and configuration.
+
+    Basic Usage::
+
+      >>> import requests
+      >>> s = requests.Session()
+      >>> s.get('https://httpbin.org/get')
+      <Response [200]>
+
+    Or as a context manager::
+
+      >>> with requests.Session() as s:
+      ...     s.get('https://httpbin.org/get')
+      <Response [200]>
+    """
+
+    __attrs__ = [
+        'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
+        'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
+        'max_redirects',
+    ]
+
+    def __init__(self):
+
+        #: A case-insensitive dictionary of headers to be sent on each
+        #: :class:`Request <Request>` sent from this
+        #: :class:`Session <Session>`.
+        self.headers = default_headers()
+
+        #: Default Authentication tuple or object to attach to
+        #: :class:`Request <Request>`.
+        self.auth = None
+
+        #: Dictionary mapping protocol or protocol and host to the URL of the proxy
+        #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
+        #: be used on each :class:`Request <Request>`.
+        self.proxies = {}
+
+        #: Event-handling hooks.
+        self.hooks = default_hooks()
+
+        #: Dictionary of querystring data to attach to each
+        #: :class:`Request <Request>`. The dictionary values may be lists for
+        #: representing multivalued query parameters.
+        self.params = {}
+
+        #: Stream response content default.
+        self.stream = False
+
+        #: SSL Verification default.
+        self.verify = True
+
+        #: SSL client certificate default, if String, path to ssl client
+        #: cert file (.pem). If Tuple, ('cert', 'key') pair.
+        self.cert = None
+
+        #: Maximum number of redirects allowed. If the request exceeds this
+        #: limit, a :class:`TooManyRedirects` exception is raised.
+        #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
+        #: 30.
+        self.max_redirects = DEFAULT_REDIRECT_LIMIT
+
+        #: Trust environment settings for proxy configuration, default
+        #: authentication and similar.
+        self.trust_env = True
+
+        #: A CookieJar containing all currently outstanding cookies set on this
+        #: session. By default it is a
+        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
+        #: may be any other ``cookielib.CookieJar`` compatible object.
+        self.cookies = cookiejar_from_dict({})
+
+        # Default connection adapters.
+        self.adapters = OrderedDict()
+        self.mount('https://', HTTPAdapter())
+        self.mount('http://', HTTPAdapter())
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.close()
+
+    def prepare_request(self, request):
+        """Constructs a :class:`PreparedRequest <PreparedRequest>` for
+        transmission and returns it. The :class:`PreparedRequest` has settings
+        merged from the :class:`Request <Request>` instance and those of the
+        :class:`Session`.
+
+        :param request: :class:`Request` instance to prepare with this
+            session's settings.
+        :rtype: requests.PreparedRequest
+        """
+        cookies = request.cookies or {}
+
+        # Bootstrap CookieJar.
+        if not isinstance(cookies, cookielib.CookieJar):
+            cookies = cookiejar_from_dict(cookies)
+
+        # Merge with session cookies
+        merged_cookies = merge_cookies(
+            merge_cookies(RequestsCookieJar(), self.cookies), cookies)
+
+        # Set environment's basic authentication if not explicitly set.
+        auth = request.auth
+        if self.trust_env and not auth and not self.auth:
+            auth = get_netrc_auth(request.url)
+
+        p = PreparedRequest()
+        p.prepare(
+            method=request.method.upper(),
+            url=request.url,
+            files=request.files,
+            data=request.data,
+            json=request.json,
+            headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
+            params=merge_setting(request.params, self.params),
+            auth=merge_setting(auth, self.auth),
+            cookies=merged_cookies,
+            hooks=merge_hooks(request.hooks, self.hooks),
+        )
+        return p
+
+    def request(self, method, url,
+            params=None, data=None, headers=None, cookies=None, files=None,
+            auth=None, timeout=None, allow_redirects=True, proxies=None,
+            hooks=None, stream=None, verify=None, cert=None, json=None):
+        """Constructs a :class:`Request <Request>`, prepares it and sends it.
+        Returns :class:`Response <Response>` object.
+
+        :param method: method for the new :class:`Request` object.
+        :param url: URL for the new :class:`Request` object.
+        :param params: (optional) Dictionary or bytes to be sent in the query
+            string for the :class:`Request`.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
+        :param json: (optional) json to send in the body of the
+            :class:`Request`.
+        :param headers: (optional) Dictionary of HTTP Headers to send with the
+            :class:`Request`.
+        :param cookies: (optional) Dict or CookieJar object to send with the
+            :class:`Request`.
+        :param files: (optional) Dictionary of ``'filename': file-like-objects``
+            for multipart encoding upload.
+        :param auth: (optional) Auth tuple or callable to enable
+            Basic/Digest/Custom HTTP Auth.
+        :param timeout: (optional) How long to wait for the server to send
+            data before giving up, as a float, or a :ref:`(connect timeout,
+            read timeout) <timeouts>` tuple.
+        :type timeout: float or tuple
+        :param allow_redirects: (optional) Whether to follow redirects. Defaults to ``True``.
+        :type allow_redirects: bool
+        :param proxies: (optional) Dictionary mapping protocol or protocol and
+            hostname to the URL of the proxy.
+        :param stream: (optional) whether to immediately download the response
+            content. Defaults to ``False``.
+        :param verify: (optional) Either a boolean, in which case it controls whether we verify
+            the server's TLS certificate, or a string, in which case it must be a path
+            to a CA bundle to use. Defaults to ``True``.
+        :param cert: (optional) if String, path to ssl client cert file (.pem).
+            If Tuple, ('cert', 'key') pair.
+        :rtype: requests.Response
+        """
+        # Create the Request.
+        req = Request(
+            method=method.upper(),
+            url=url,
+            headers=headers,
+            files=files,
+            data=data or {},
+            json=json,
+            params=params or {},
+            auth=auth,
+            cookies=cookies,
+            hooks=hooks,
+        )
+        prep = self.prepare_request(req)
+
+        proxies = proxies or {}
+
+        settings = self.merge_environment_settings(
+            prep.url, proxies, stream, verify, cert
+        )
+
+        # Send the request.
+        send_kwargs = {
+            'timeout': timeout,
+            'allow_redirects': allow_redirects,
+        }
+        send_kwargs.update(settings)
+        resp = self.send(prep, **send_kwargs)
+
+        return resp
+
+    def get(self, url, **kwargs):
+        r"""Sends a GET request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+
+        kwargs.setdefault('allow_redirects', True)
+        return self.request('GET', url, **kwargs)
+
+    def options(self, url, **kwargs):
+        r"""Sends a OPTIONS request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+
+        kwargs.setdefault('allow_redirects', True)
+        return self.request('OPTIONS', url, **kwargs)
+
+    def head(self, url, **kwargs):
+        r"""Sends a HEAD request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+
+        kwargs.setdefault('allow_redirects', False)
+        return self.request('HEAD', url, **kwargs)
+
+    def post(self, url, data=None, json=None, **kwargs):
+        r"""Sends a POST request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
+        :param json: (optional) json to send in the body of the :class:`Request`.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+
+        return self.request('POST', url, data=data, json=json, **kwargs)
+
+    def put(self, url, data=None, **kwargs):
+        r"""Sends a PUT request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+
+        return self.request('PUT', url, data=data, **kwargs)
+
+    def patch(self, url, data=None, **kwargs):
+        r"""Sends a PATCH request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+            object to send in the body of the :class:`Request`.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+
+        return self.request('PATCH', url, data=data, **kwargs)
+
+    def delete(self, url, **kwargs):
+        r"""Sends a DELETE request. Returns :class:`Response` object.
+
+        :param url: URL for the new :class:`Request` object.
+        :param \*\*kwargs: Optional arguments that ``request`` takes.
+        :rtype: requests.Response
+        """
+
+        return self.request('DELETE', url, **kwargs)
+
+    def send(self, request, **kwargs):
+        """Send a given PreparedRequest.
+
+        :rtype: requests.Response
+        """
+        # Set defaults that the hooks can utilize to ensure they always have
+        # the correct parameters to reproduce the previous request.
+        kwargs.setdefault('stream', self.stream)
+        kwargs.setdefault('verify', self.verify)
+        kwargs.setdefault('cert', self.cert)
+        kwargs.setdefault('proxies', self.proxies)
+
+        # It's possible that users might accidentally send a Request object.
+        # Guard against that specific failure case.
+        if isinstance(request, Request):
+            raise ValueError('You can only send PreparedRequests.')
+
+        # Set up variables needed for resolve_redirects and dispatching of hooks
+        allow_redirects = kwargs.pop('allow_redirects', True)
+        stream = kwargs.get('stream')
+        hooks = request.hooks
+
+        # Get the appropriate adapter to use
+        adapter = self.get_adapter(url=request.url)
+
+        # Start time (approximately) of the request
+        start = preferred_clock()
+
+        # Send the request
+        r = adapter.send(request, **kwargs)
+
+        # Total elapsed time of the request (approximately)
+        elapsed = preferred_clock() - start
+        r.elapsed = timedelta(seconds=elapsed)
+
+        # Response manipulation hooks
+        r = dispatch_hook('response', hooks, r, **kwargs)
+
+        # Persist cookies
+        if r.history:
+
+            # If the hooks create history then we want those cookies too
+            for resp in r.history:
+                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
+
+        extract_cookies_to_jar(self.cookies, request, r.raw)
+
+        # Redirect resolving generator.
+        gen = self.resolve_redirects(r, request, **kwargs)
+
+        # Resolve redirects if allowed.
+        history = [resp for resp in gen] if allow_redirects else []
+
+        # Shuffle things around if there's history.
+        if history:
+            # Insert the first (original) request at the start
+            history.insert(0, r)
+            # Get the last request made
+            r = history.pop()
+            r.history = history
+
+        # If redirects aren't being followed, store the next request of the
+        # redirect chain on the response, so ``Response.next`` can return it.
+        if not allow_redirects:
+            try:
+                r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs))
+            except StopIteration:
+                pass
+
+        if not stream:
+            r.content
+
+        return r
+
+    def merge_environment_settings(self, url, proxies, stream, verify, cert):
+        """
+        Check the environment and merge it with some settings.
+
+        :rtype: dict
+        """
+        # Gather clues from the surrounding environment.
+        if self.trust_env:
+            # Set environment's proxies.
+            no_proxy = proxies.get('no_proxy') if proxies is not None else None
+            env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
+            for (k, v) in env_proxies.items():
+                proxies.setdefault(k, v)
+
+            # Look for requests environment configuration and be compatible
+            # with cURL.
+            if verify is True or verify is None:
+                verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
+                          os.environ.get('CURL_CA_BUNDLE'))
+
+        # Merge all the kwargs.
+        proxies = merge_setting(proxies, self.proxies)
+        stream = merge_setting(stream, self.stream)
+        verify = merge_setting(verify, self.verify)
+        cert = merge_setting(cert, self.cert)
+
+        return {'verify': verify, 'proxies': proxies, 'stream': stream,
+                'cert': cert}
+
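+    # Hedged example of the precedence above ('/tmp/ca.pem' is a hypothetical
+    # bundle path): with ``trust_env`` set, a CA bundle from the environment
+    # wins over the bare ``verify=True`` default.
+    #
+    #     >>> os.environ['REQUESTS_CA_BUNDLE'] = '/tmp/ca.pem'  # hypothetical
+    #     >>> settings = Session().merge_environment_settings(
+    #     ...     'https://example.com', {}, None, True, None)
+    #     >>> settings['verify']
+    #     '/tmp/ca.pem'
+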
+    def get_adapter(self, url):
+        """
+        Returns the appropriate connection adapter for the given URL.
+
+        :rtype: requests.adapters.BaseAdapter
+        """
+        for (prefix, adapter) in self.adapters.items():
+
+            if url.lower().startswith(prefix.lower()):
+                return adapter
+
+        # Nothing matches :-/
+        raise InvalidSchema("No connection adapters were found for '%s'" % url)
+
+    def close(self):
+        """Closes all adapters and as such the session"""
+        for v in self.adapters.values():
+            v.close()
+
+    def mount(self, prefix, adapter):
+        """Registers a connection adapter to a prefix.
+
+        Adapters are sorted in descending order by prefix length.
+        """
+        self.adapters[prefix] = adapter
+        keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
+
+        for key in keys_to_move:
+            self.adapters[key] = self.adapters.pop(key)
+
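+    # Sketch of prefix resolution (``MyAdapter`` is a hypothetical
+    # ``HTTPAdapter`` subclass): ``mount`` keeps longer prefixes first, so
+    # ``get_adapter`` returns the most specific match.
+    #
+    #     >>> s = Session()
+    #     >>> s.mount('https://api.example.com', MyAdapter())
+    #     >>> type(s.get_adapter('https://api.example.com/v1')).__name__
+    #     'MyAdapter'
+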
+    def __getstate__(self):
+        state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
+        return state
+
+    def __setstate__(self, state):
+        for attr, value in state.items():
+            setattr(self, attr, value)
+
+
+def session():
+    """
+    Returns a :class:`Session` for context-management.
+
+    .. deprecated:: 1.0.0
+
+        This method has been deprecated since version 1.0.0 and is only kept for
+        backwards compatibility. New code should use :class:`~requests.sessions.Session`
+        to create a session. This may be removed at a future date.
+
+    :rtype: Session
+    """
+    return Session()
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/status_codes.py b/lib/python3.7/site-packages/pip/_vendor/requests/status_codes.py
new file mode 100644
index 0000000000000000000000000000000000000000..813e8c4e62fdcbbfcb73e2f52e187e0c3c5b84d7
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/status_codes.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+
+r"""
+The ``codes`` object defines a mapping from common names for HTTP statuses
+to their numerical codes, accessible either as attributes or as dictionary
+items.
+
+>>> requests.codes['temporary_redirect']
+307
+>>> requests.codes.teapot
+418
+>>> requests.codes['\o/']
+200
+
+Some codes have multiple names, and both upper- and lower-case versions of
+the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
+``codes.okay`` all correspond to the HTTP status code 200.
+"""
+
+from .structures import LookupDict
+
+_codes = {
+
+    # Informational.
+    100: ('continue',),
+    101: ('switching_protocols',),
+    102: ('processing',),
+    103: ('checkpoint',),
+    122: ('uri_too_long', 'request_uri_too_long'),
+    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
+    201: ('created',),
+    202: ('accepted',),
+    203: ('non_authoritative_info', 'non_authoritative_information'),
+    204: ('no_content',),
+    205: ('reset_content', 'reset'),
+    206: ('partial_content', 'partial'),
+    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
+    208: ('already_reported',),
+    226: ('im_used',),
+
+    # Redirection.
+    300: ('multiple_choices',),
+    301: ('moved_permanently', 'moved', '\\o-'),
+    302: ('found',),
+    303: ('see_other', 'other'),
+    304: ('not_modified',),
+    305: ('use_proxy',),
+    306: ('switch_proxy',),
+    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
+    308: ('permanent_redirect',
+          'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0
+
+    # Client Error.
+    400: ('bad_request', 'bad'),
+    401: ('unauthorized',),
+    402: ('payment_required', 'payment'),
+    403: ('forbidden',),
+    404: ('not_found', '-o-'),
+    405: ('method_not_allowed', 'not_allowed'),
+    406: ('not_acceptable',),
+    407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
+    408: ('request_timeout', 'timeout'),
+    409: ('conflict',),
+    410: ('gone',),
+    411: ('length_required',),
+    412: ('precondition_failed', 'precondition'),
+    413: ('request_entity_too_large',),
+    414: ('request_uri_too_large',),
+    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
+    416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
+    417: ('expectation_failed',),
+    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
+    421: ('misdirected_request',),
+    422: ('unprocessable_entity', 'unprocessable'),
+    423: ('locked',),
+    424: ('failed_dependency', 'dependency'),
+    425: ('unordered_collection', 'unordered'),
+    426: ('upgrade_required', 'upgrade'),
+    428: ('precondition_required', 'precondition'),
+    429: ('too_many_requests', 'too_many'),
+    431: ('header_fields_too_large', 'fields_too_large'),
+    444: ('no_response', 'none'),
+    449: ('retry_with', 'retry'),
+    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
+    451: ('unavailable_for_legal_reasons', 'legal_reasons'),
+    499: ('client_closed_request',),
+
+    # Server Error.
+    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
+    501: ('not_implemented',),
+    502: ('bad_gateway',),
+    503: ('service_unavailable', 'unavailable'),
+    504: ('gateway_timeout',),
+    505: ('http_version_not_supported', 'http_version'),
+    506: ('variant_also_negotiates',),
+    507: ('insufficient_storage',),
+    509: ('bandwidth_limit_exceeded', 'bandwidth'),
+    510: ('not_extended',),
+    511: ('network_authentication_required', 'network_auth', 'network_authentication'),
+}
+
+codes = LookupDict(name='status_codes')
+
+def _init():
+    for code, titles in _codes.items():
+        for title in titles:
+            setattr(codes, title, code)
+            if not title.startswith(('\\', '/')):
+                setattr(codes, title.upper(), code)
+
+    def doc(code):
+        names = ', '.join('``%s``' % n for n in _codes[code])
+        return '* %d: %s' % (code, names)
+
+    global __doc__
+    __doc__ = (__doc__ + '\n' +
+               '\n'.join(doc(code) for code in sorted(_codes))
+               if __doc__ is not None else None)
+
+_init()
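+
+# Illustrative lookups (comments only): attribute access, item access, and
+# the upper-case aliases created by _init() are interchangeable.
+#
+#     >>> codes.ok == codes.OK == codes['okay'] == 200
+#     True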
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/structures.py b/lib/python3.7/site-packages/pip/_vendor/requests/structures.py
new file mode 100644
index 0000000000000000000000000000000000000000..da930e2852049feb2b6e262d442b9cc25ecb2d94
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/structures.py
@@ -0,0 +1,103 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.structures
+~~~~~~~~~~~~~~~~~~~
+
+Data structures that power Requests.
+"""
+
+from .compat import OrderedDict, Mapping, MutableMapping
+
+
+class CaseInsensitiveDict(MutableMapping):
+    """A case-insensitive ``dict``-like object.
+
+    Implements all methods and operations of
+    ``MutableMapping`` as well as dict's ``copy``. Also
+    provides ``lower_items``.
+
+    All keys are expected to be strings. The structure remembers the
+    case of the last key to be set, and ``iter(instance)``,
+    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
+    will contain case-sensitive keys. However, querying and membership
+    testing are case-insensitive::
+
+        cid = CaseInsensitiveDict()
+        cid['Accept'] = 'application/json'
+        cid['aCCEPT'] == 'application/json'  # True
+        list(cid) == ['Accept']  # True
+
+    For example, ``headers['content-encoding']`` will return the
+    value of a ``'Content-Encoding'`` response header, regardless
+    of how the header name was originally stored.
+
+    If the constructor, ``.update``, or equality comparison
+    operations are given keys that have equal ``.lower()``s, the
+    behavior is undefined.
+    """
+
+    def __init__(self, data=None, **kwargs):
+        self._store = OrderedDict()
+        if data is None:
+            data = {}
+        self.update(data, **kwargs)
+
+    def __setitem__(self, key, value):
+        # Use the lowercased key for lookups, but store the actual
+        # key alongside the value.
+        self._store[key.lower()] = (key, value)
+
+    def __getitem__(self, key):
+        return self._store[key.lower()][1]
+
+    def __delitem__(self, key):
+        del self._store[key.lower()]
+
+    def __iter__(self):
+        return (casedkey for casedkey, mappedvalue in self._store.values())
+
+    def __len__(self):
+        return len(self._store)
+
+    def lower_items(self):
+        """Like iteritems(), but with all lowercase keys."""
+        return (
+            (lowerkey, keyval[1])
+            for (lowerkey, keyval)
+            in self._store.items()
+        )
+
+    def __eq__(self, other):
+        if isinstance(other, Mapping):
+            other = CaseInsensitiveDict(other)
+        else:
+            return NotImplemented
+        # Compare insensitively
+        return dict(self.lower_items()) == dict(other.lower_items())
+
+    # Copy is required
+    def copy(self):
+        return CaseInsensitiveDict(self._store.values())
+
+    def __repr__(self):
+        return str(dict(self.items()))
+
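+# Equality is also case-insensitive against any Mapping, per __eq__ above
+# (illustrative only):
+#
+#     >>> CaseInsensitiveDict({'Accept': 'json'}) == {'ACCEPT': 'json'}
+#     True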
+
+class LookupDict(dict):
+    """Dictionary lookup object."""
+
+    def __init__(self, name=None):
+        self.name = name
+        super(LookupDict, self).__init__()
+
+    def __repr__(self):
+        return '<lookup \'%s\'>' % (self.name)
+
+    def __getitem__(self, key):
+        # We allow fall-through here, so values default to None
+
+        return self.__dict__.get(key, None)
+
+    def get(self, key, default=None):
+        return self.__dict__.get(key, default)
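+
+# Minimal sketch of the fall-through behaviour (illustrative only): values
+# live in the instance __dict__, and missing keys default to None.
+#
+#     >>> d = LookupDict(name='demo')
+#     >>> d.found = 302
+#     >>> d['found'], d['missing']
+#     (302, None)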
diff --git a/lib/python3.7/site-packages/pip/_vendor/requests/utils.py b/lib/python3.7/site-packages/pip/_vendor/requests/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8170a8d2c45dc6d9dbb44abc4d5ce4d7c548463d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/requests/utils.py
@@ -0,0 +1,977 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.utils
+~~~~~~~~~~~~~~
+
+This module provides utility functions that are used within Requests
+that are also useful for external consumption.
+"""
+
+import codecs
+import contextlib
+import io
+import os
+import re
+import socket
+import struct
+import sys
+import tempfile
+import warnings
+import zipfile
+
+from .__version__ import __version__
+from . import certs
+# to_native_string is unused here, but imported here for backwards compatibility
+from ._internal_utils import to_native_string
+from .compat import parse_http_list as _parse_list_header
+from .compat import (
+    quote, urlparse, bytes, str, OrderedDict, unquote, getproxies,
+    proxy_bypass, urlunparse, basestring, integer_types, is_py3,
+    proxy_bypass_environment, getproxies_environment, Mapping)
+from .cookies import cookiejar_from_dict
+from .structures import CaseInsensitiveDict
+from .exceptions import (
+    InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError)
+
+NETRC_FILES = ('.netrc', '_netrc')
+
+DEFAULT_CA_BUNDLE_PATH = certs.where()
+
+DEFAULT_PORTS = {'http': 80, 'https': 443}
+
+
+if sys.platform == 'win32':
+    # provide a proxy_bypass version on Windows without DNS lookups
+
+    def proxy_bypass_registry(host):
+        try:
+            if is_py3:
+                import winreg
+            else:
+                import _winreg as winreg
+        except ImportError:
+            return False
+
+        try:
+            internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
+                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
+            # ProxyEnable could be REG_SZ or REG_DWORD; normalize it to an int
+            proxyEnable = int(winreg.QueryValueEx(internetSettings,
+                                              'ProxyEnable')[0])
+            # ProxyOverride is almost always a string
+            proxyOverride = winreg.QueryValueEx(internetSettings,
+                                                'ProxyOverride')[0]
+        except OSError:
+            return False
+        if not proxyEnable or not proxyOverride:
+            return False
+
+        # make a check value list from the registry entry: replace the
+        # '<local>' string by the localhost entry and the corresponding
+        # canonical entry.
+        proxyOverride = proxyOverride.split(';')
+        # now check if we match one of the registry values.
+        for test in proxyOverride:
+            if test == '<local>':
+                if '.' not in host:
+                    return True
+            test = test.replace(".", r"\.")     # mask dots
+            test = test.replace("*", r".*")     # change glob sequence
+            test = test.replace("?", r".")      # change glob char
+            if re.match(test, host, re.I):
+                return True
+        return False
+
+    def proxy_bypass(host):  # noqa
+        """Return True, if the host should be bypassed.
+
+        Checks proxy settings gathered from the environment, if specified,
+        or the registry.
+        """
+        if getproxies_environment():
+            return proxy_bypass_environment(host)
+        else:
+            return proxy_bypass_registry(host)
+
+
+def dict_to_sequence(d):
+    """Returns an internal sequence dictionary update."""
+
+    if hasattr(d, 'items'):
+        d = d.items()
+
+    return d
+
+
+def super_len(o):
+    total_length = None
+    current_position = 0
+
+    if hasattr(o, '__len__'):
+        total_length = len(o)
+
+    elif hasattr(o, 'len'):
+        total_length = o.len
+
+    elif hasattr(o, 'fileno'):
+        try:
+            fileno = o.fileno()
+        except io.UnsupportedOperation:
+            pass
+        else:
+            total_length = os.fstat(fileno).st_size
+
+            # Having used fstat to determine the file length, we need to
+            # confirm that this file was opened up in binary mode.
+            if 'b' not in o.mode:
+                warnings.warn((
+                    "Requests has determined the content-length for this "
+                    "request using the binary size of the file: however, the "
+                    "file has been opened in text mode (i.e. without the 'b' "
+                    "flag in the mode). This may lead to an incorrect "
+                    "content-length. In Requests 3.0, support will be removed "
+                    "for files in text mode."),
+                    FileModeWarning
+                )
+
+    if hasattr(o, 'tell'):
+        try:
+            current_position = o.tell()
+        except (OSError, IOError):
+            # This can happen in some weird situations, such as when the file
+            # is actually a special file descriptor like stdin. In this
+            # instance, we don't know what the length is, so set it to zero and
+            # let requests chunk it instead.
+            if total_length is not None:
+                current_position = total_length
+        else:
+            if hasattr(o, 'seek') and total_length is None:
+                # StringIO and BytesIO have seek but no usable fileno
+                try:
+                    # seek to end of file
+                    o.seek(0, 2)
+                    total_length = o.tell()
+
+                    # seek back to current position to support
+                    # partially read file-like objects
+                    o.seek(current_position or 0)
+                except (OSError, IOError):
+                    total_length = 0
+
+    if total_length is None:
+        total_length = 0
+
+    return max(0, total_length - current_position)
+
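+# Hedged sketch: super_len reports the bytes *remaining* from the current
+# position, which is what matters for a partially consumed body.
+#
+#     >>> buf = io.BytesIO(b'hello')
+#     >>> super_len(buf)
+#     5
+#     >>> buf.read(2) and super_len(buf)
+#     3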
+
+def get_netrc_auth(url, raise_errors=False):
+    """Returns the Requests tuple auth for a given url from netrc."""
+
+    try:
+        from netrc import netrc, NetrcParseError
+
+        netrc_path = None
+
+        for f in NETRC_FILES:
+            try:
+                loc = os.path.expanduser('~/{}'.format(f))
+            except KeyError:
+                # os.path.expanduser can fail when $HOME is undefined and
+                # getpwuid fails. See https://bugs.python.org/issue20164 &
+                # https://github.com/requests/requests/issues/1846
+                return
+
+            if os.path.exists(loc):
+                netrc_path = loc
+                break
+
+        # Abort early if there isn't one.
+        if netrc_path is None:
+            return
+
+        ri = urlparse(url)
+
+        # Strip port numbers from netloc. This weird ``if...encode`` dance is
+        # used for Python 3.2, which doesn't support unicode literals.
+        splitstr = b':'
+        if isinstance(url, str):
+            splitstr = splitstr.decode('ascii')
+        host = ri.netloc.split(splitstr)[0]
+
+        try:
+            _netrc = netrc(netrc_path).authenticators(host)
+            if _netrc:
+                # Return with login / password
+                login_i = (0 if _netrc[0] else 1)
+                return (_netrc[login_i], _netrc[2])
+        except (NetrcParseError, IOError):
+            # If there was a parsing error or a permissions issue reading the file,
+            # we'll just skip netrc auth unless explicitly asked to raise errors.
+            if raise_errors:
+                raise
+
+    # AppEngine hackiness.
+    except (ImportError, AttributeError):
+        pass
+
+
+def guess_filename(obj):
+    """Tries to guess the filename of the given object."""
+    name = getattr(obj, 'name', None)
+    if (name and isinstance(name, basestring) and name[0] != '<' and
+            name[-1] != '>'):
+        return os.path.basename(name)
+
+
+def extract_zipped_paths(path):
+    """Replace nonexistent paths that look like they refer to a member of a zip
+    archive with the location of an extracted copy of the target, or else
+    just return the provided path unchanged.
+    """
+    if os.path.exists(path):
+        # this is already a valid path, no need to do anything further
+        return path
+
+    # find the first valid part of the provided path and treat that as a zip archive
+    # assume the rest of the path is the name of a member in the archive
+    archive, member = os.path.split(path)
+    while archive and not os.path.exists(archive):
+        archive, prefix = os.path.split(archive)
+        member = '/'.join([prefix, member])
+
+    if not zipfile.is_zipfile(archive):
+        return path
+
+    zip_file = zipfile.ZipFile(archive)
+    if member not in zip_file.namelist():
+        return path
+
+    # we have a valid zip archive and a valid member of that archive
+    tmp = tempfile.gettempdir()
+    extracted_path = os.path.join(tmp, *member.split('/'))
+    if not os.path.exists(extracted_path):
+        extracted_path = zip_file.extract(member, path=tmp)
+
+    return extracted_path
+
+
+def from_key_val_list(value):
+    """Take an object and test to see if it can be represented as a
+    dictionary. If it can be, return an
+    OrderedDict, e.g.,
+
+    ::
+
+        >>> from_key_val_list([('key', 'val')])
+        OrderedDict([('key', 'val')])
+        >>> from_key_val_list('string')
+        ValueError: cannot encode objects that are not 2-tuples
+        >>> from_key_val_list({'key': 'val'})
+        OrderedDict([('key', 'val')])
+
+    :rtype: OrderedDict
+    """
+    if value is None:
+        return None
+
+    if isinstance(value, (str, bytes, bool, int)):
+        raise ValueError('cannot encode objects that are not 2-tuples')
+
+    return OrderedDict(value)
+
+
+def to_key_val_list(value):
+    """Take an object and test to see if it can be represented as a
+    dictionary. If it can be, return a list of tuples, e.g.,
+
+    ::
+
+        >>> to_key_val_list([('key', 'val')])
+        [('key', 'val')]
+        >>> to_key_val_list({'key': 'val'})
+        [('key', 'val')]
+        >>> to_key_val_list('string')
+        ValueError: cannot encode objects that are not 2-tuples.
+
+    :rtype: list
+    """
+    if value is None:
+        return None
+
+    if isinstance(value, (str, bytes, bool, int)):
+        raise ValueError('cannot encode objects that are not 2-tuples')
+
+    if isinstance(value, Mapping):
+        value = value.items()
+
+    return list(value)
+
+
+# From mitsuhiko/werkzeug (used with permission).
+def parse_list_header(value):
+    """Parse lists as described by RFC 2068 Section 2.
+
+    In particular, parse comma-separated lists where the elements of
+    the list may include quoted-strings.  A quoted-string could
+    contain a comma.  A non-quoted string could have quotes in the
+    middle.  Quotes are removed automatically after parsing.
+
+    It basically works like :func:`parse_set_header`, except that items
+    may appear multiple times and case sensitivity is preserved.
+
+    The return value is a standard :class:`list`:
+
+    >>> parse_list_header('token, "quoted value"')
+    ['token', 'quoted value']
+
+    To create a header from the :class:`list` again, use the
+    :func:`dump_header` function.
+
+    :param value: a string with a list header.
+    :return: :class:`list`
+    :rtype: list
+    """
+    result = []
+    for item in _parse_list_header(value):
+        if item[:1] == item[-1:] == '"':
+            item = unquote_header_value(item[1:-1])
+        result.append(item)
+    return result
+
+
+# From mitsuhiko/werkzeug (used with permission).
+def parse_dict_header(value):
+    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
+    convert them into a python dict:
+
+    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
+    >>> type(d) is dict
+    True
+    >>> sorted(d.items())
+    [('bar', 'as well'), ('foo', 'is a fish')]
+
+    If there is no value for a key it will be `None`:
+
+    >>> parse_dict_header('key_without_value')
+    {'key_without_value': None}
+
+    To create a header from the :class:`dict` again, use the
+    :func:`dump_header` function.
+
+    :param value: a string with a dict header.
+    :return: :class:`dict`
+    :rtype: dict
+    """
+    result = {}
+    for item in _parse_list_header(value):
+        if '=' not in item:
+            result[item] = None
+            continue
+        name, value = item.split('=', 1)
+        if value[:1] == value[-1:] == '"':
+            value = unquote_header_value(value[1:-1])
+        result[name] = value
+    return result
+
+
+# From mitsuhiko/werkzeug (used with permission).
+def unquote_header_value(value, is_filename=False):
+    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
+    This does not use the real unquoting, but rather what browsers
+    actually use when quoting.
+
+    :param value: the header value to unquote.
+    :rtype: str
+    """
+    if value and value[0] == value[-1] == '"':
+        # this is not the real unquoting, but fixing this so that the
+        # RFC is met will result in bugs with internet explorer and
+        # probably some other browsers as well.  IE for example is
+        # uploading files with "C:\foo\bar.txt" as filename
+        value = value[1:-1]
+
+        # if this is a filename and the starting characters look like
+        # a UNC path, then just return the value without quotes.  Using the
+        # replace sequence below on a UNC path has the effect of turning
+        # the leading double slash into a single slash and then
+        # _fix_ie_filename() doesn't work correctly.  See #458.
+        if not is_filename or value[:2] != '\\\\':
+            return value.replace('\\\\', '\\').replace('\\"', '"')
+    return value
+
+
+def dict_from_cookiejar(cj):
+    """Returns a key/value dictionary from a CookieJar.
+
+    :param cj: CookieJar object to extract cookies from.
+    :rtype: dict
+    """
+
+    cookie_dict = {}
+
+    for cookie in cj:
+        cookie_dict[cookie.name] = cookie.value
+
+    return cookie_dict
+
+
+def add_dict_to_cookiejar(cj, cookie_dict):
+    """Returns a CookieJar from a key/value dictionary.
+
+    :param cj: CookieJar to insert cookies into.
+    :param cookie_dict: Dict of key/values to insert into CookieJar.
+    :rtype: CookieJar
+    """
+
+    return cookiejar_from_dict(cookie_dict, cj)
+
+
+def get_encodings_from_content(content):
+    """Returns encodings from given content string.
+
+    :param content: bytestring to extract encodings from.
+    """
+    warnings.warn((
+        'In requests 3.0, get_encodings_from_content will be removed. For '
+        'more information, please see the discussion on issue #2266. (This'
+        ' warning should only appear once.)'),
+        DeprecationWarning)
+
+    charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
+    pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
+    xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
+
+    return (charset_re.findall(content) +
+            pragma_re.findall(content) +
+            xml_re.findall(content))
+
+
+def _parse_content_type_header(header):
+    """Returns content type and parameters from given header
+
+    :param header: string
+    :return: tuple containing content type and dictionary of
+         parameters
+    """
+
+    tokens = header.split(';')
+    content_type, params = tokens[0].strip(), tokens[1:]
+    params_dict = {}
+    items_to_strip = "\"' "
+
+    for param in params:
+        param = param.strip()
+        if param:
+            key, value = param, True
+            index_of_equals = param.find("=")
+            if index_of_equals != -1:
+                key = param[:index_of_equals].strip(items_to_strip)
+                value = param[index_of_equals + 1:].strip(items_to_strip)
+            params_dict[key.lower()] = value
+    return content_type, params_dict
+
+
+def get_encoding_from_headers(headers):
+    """Returns encodings from given HTTP Header Dict.
+
+    :param headers: dictionary to extract encoding from.
+    :rtype: str
+    """
+
+    content_type = headers.get('content-type')
+
+    if not content_type:
+        return None
+
+    content_type, params = _parse_content_type_header(content_type)
+
+    if 'charset' in params:
+        return params['charset'].strip("'\"")
+
+    if 'text' in content_type:
+        return 'ISO-8859-1'
+
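+# Illustrative lookups (comments only):
+#
+#     >>> get_encoding_from_headers({'content-type': 'text/html; charset=UTF-8'})
+#     'UTF-8'
+#     >>> get_encoding_from_headers({'content-type': 'text/html'})
+#     'ISO-8859-1'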
+
+def stream_decode_response_unicode(iterator, r):
+    """Stream decodes a iterator."""
+
+    if r.encoding is None:
+        for item in iterator:
+            yield item
+        return
+
+    decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
+    for chunk in iterator:
+        rv = decoder.decode(chunk)
+        if rv:
+            yield rv
+    rv = decoder.decode(b'', final=True)
+    if rv:
+        yield rv
+
+
+def iter_slices(string, slice_length):
+    """Iterate over slices of a string."""
+    pos = 0
+    if slice_length is None or slice_length <= 0:
+        slice_length = len(string)
+    while pos < len(string):
+        yield string[pos:pos + slice_length]
+        pos += slice_length
+
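+# Illustrative: slices of at most ``slice_length`` characters, in order.
+#
+#     >>> list(iter_slices('abcdef', 4))
+#     ['abcd', 'ef']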
+
+def get_unicode_from_response(r):
+    """Returns the requested content back in unicode.
+
+    :param r: Response object to get unicode content from.
+
+    Tried:
+
+    1. charset from content-type
+    2. fall back and replace all unicode characters
+
+    :rtype: str
+    """
+    warnings.warn((
+        'In requests 3.0, get_unicode_from_response will be removed. For '
+        'more information, please see the discussion on issue #2266. (This'
+        ' warning should only appear once.)'),
+        DeprecationWarning)
+
+    tried_encodings = []
+
+    # Try charset from content-type
+    encoding = get_encoding_from_headers(r.headers)
+
+    if encoding:
+        try:
+            return str(r.content, encoding)
+        except UnicodeError:
+            tried_encodings.append(encoding)
+
+    # Fall back:
+    try:
+        return str(r.content, encoding, errors='replace')
+    except TypeError:
+        return r.content
+
+
+# The unreserved URI characters (RFC 3986)
+UNRESERVED_SET = frozenset(
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~")
+
+
+def unquote_unreserved(uri):
+    """Un-escape any percent-escape sequences in a URI that are unreserved
+    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
+
+    :rtype: str
+    """
+    parts = uri.split('%')
+    for i in range(1, len(parts)):
+        h = parts[i][0:2]
+        if len(h) == 2 and h.isalnum():
+            try:
+                c = chr(int(h, 16))
+            except ValueError:
+                raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
+
+            if c in UNRESERVED_SET:
+                parts[i] = c + parts[i][2:]
+            else:
+                parts[i] = '%' + parts[i]
+        else:
+            parts[i] = '%' + parts[i]
+    return ''.join(parts)
+
+
+def requote_uri(uri):
+    """Re-quote the given URI.
+
+    This function passes the given URI through an unquote/quote cycle to
+    ensure that it is fully and consistently quoted.
+
+    :rtype: str
+    """
+    safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
+    safe_without_percent = "!#$&'()*+,/:;=?@[]~"
+    try:
+        # Unquote only the unreserved characters
+        # Then quote only illegal characters (do not quote reserved,
+        # unreserved, or '%')
+        return quote(unquote_unreserved(uri), safe=safe_with_percent)
+    except InvalidURL:
+        # We couldn't unquote the given URI, so let's try quoting it, but
+        # there may be unquoted '%'s in the URI. We need to make sure they're
+        # properly quoted so they do not cause issues elsewhere.
+        return quote(uri, safe=safe_without_percent)
+
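+# Hedged sketch of the unquote/quote cycle: raw spaces get percent-encoded,
+# while already-encoded reserved bytes are left untouched.
+#
+#     >>> requote_uri('http://example.com/a b')
+#     'http://example.com/a%20b'
+#     >>> requote_uri('http://example.com/a%2Fb')
+#     'http://example.com/a%2Fb'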
+
+def address_in_network(ip, net):
+    """This function allows you to check if an IP belongs to a network subnet
+
+    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
+             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
+
+    :rtype: bool
+    """
+    ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
+    netaddr, bits = net.split('/')
+    netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
+    network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
+    return (ipaddr & netmask) == (network & netmask)
+
+
+def dotted_netmask(mask):
+    """Converts mask from /xx format to xxx.xxx.xxx.xxx
+
+    Example: if mask is 24 function returns 255.255.255.0
+
+    :rtype: str
+    """
+    bits = 0xffffffff ^ (1 << 32 - mask) - 1
+    return socket.inet_ntoa(struct.pack('>I', bits))
+
+
+def is_ipv4_address(string_ip):
+    """
+    :rtype: bool
+    """
+    try:
+        socket.inet_aton(string_ip)
+    except socket.error:
+        return False
+    return True
+
+
+def is_valid_cidr(string_network):
+    """
+    Very simple check of the cidr format in no_proxy variable.
+
+    :rtype: bool
+    """
+    if string_network.count('/') == 1:
+        try:
+            mask = int(string_network.split('/')[1])
+        except ValueError:
+            return False
+
+        if mask < 1 or mask > 32:
+            return False
+
+        try:
+            socket.inet_aton(string_network.split('/')[0])
+        except socket.error:
+            return False
+    else:
+        return False
+    return True
+
+
+@contextlib.contextmanager
+def set_environ(env_name, value):
+    """Set the environment variable 'env_name' to 'value'
+
+    Save previous value, yield, and then restore the previous value stored in
+    the environment variable 'env_name'.
+
+    If 'value' is None, do nothing"""
+    value_changed = value is not None
+    if value_changed:
+        old_value = os.environ.get(env_name)
+        os.environ[env_name] = value
+    try:
+        yield
+    finally:
+        if value_changed:
+            if old_value is None:
+                del os.environ[env_name]
+            else:
+                os.environ[env_name] = old_value
+
+
+def should_bypass_proxies(url, no_proxy):
+    """
+    Returns whether we should bypass proxies or not.
+
+    :rtype: bool
+    """
+    # Prioritize lowercase environment variables over uppercase
+    # to keep a consistent behaviour with other http projects (curl, wget).
+    def get_proxy(key):
+        return os.environ.get(key) or os.environ.get(key.upper())
+
+    # First check whether no_proxy is defined. If it is, check that the URL
+    # we're getting isn't in the no_proxy list.
+    no_proxy_arg = no_proxy
+    if no_proxy is None:
+        no_proxy = get_proxy('no_proxy')
+    parsed = urlparse(url)
+
+    if parsed.hostname is None:
+        # URLs don't always have hostnames, e.g. file:/// urls.
+        return True
+
+    if no_proxy:
+        # We need to check whether we match here: does the hostname, with
+        # or without the port, end with an entry from no_proxy?
+        no_proxy = (
+            host for host in no_proxy.replace(' ', '').split(',') if host
+        )
+
+        if is_ipv4_address(parsed.hostname):
+            for proxy_ip in no_proxy:
+                if is_valid_cidr(proxy_ip):
+                    if address_in_network(parsed.hostname, proxy_ip):
+                        return True
+                elif parsed.hostname == proxy_ip:
+                    # If the no_proxy entry was given in plain IP notation
+                    # instead of CIDR notation and matches the request's host IP
+                    return True
+        else:
+            host_with_port = parsed.hostname
+            if parsed.port:
+                host_with_port += ':{}'.format(parsed.port)
+
+            for host in no_proxy:
+                if parsed.hostname.endswith(host) or host_with_port.endswith(host):
+                    # The URL does match something in no_proxy, so we don't want
+                    # to apply the proxies on this URL.
+                    return True
+
+    with set_environ('no_proxy', no_proxy_arg):
+        # proxy_bypass may choke on unusual hostnames (TypeError) or fail
+        # DNS resolution (socket.gaierror); treat both as "no bypass".
+        try:
+            bypass = proxy_bypass(parsed.hostname)
+        except (TypeError, socket.gaierror):
+            bypass = False
+
+    if bypass:
+        return True
+
+    return False
+
+
+def get_environ_proxies(url, no_proxy=None):
+    """
+    Return a dict of environment proxies.
+
+    :rtype: dict
+    """
+    if should_bypass_proxies(url, no_proxy=no_proxy):
+        return {}
+    else:
+        return getproxies()
+
+
+def select_proxy(url, proxies):
+    """Select a proxy for the url, if applicable.
+
+    :param url: The URL being requested
+    :param proxies: A dictionary mapping schemes (or scheme://host pairs) to proxy URLs
+    """
+    proxies = proxies or {}
+    urlparts = urlparse(url)
+    if urlparts.hostname is None:
+        return proxies.get(urlparts.scheme, proxies.get('all'))
+
+    proxy_keys = [
+        urlparts.scheme + '://' + urlparts.hostname,
+        urlparts.scheme,
+        'all://' + urlparts.hostname,
+        'all',
+    ]
+    proxy = None
+    for proxy_key in proxy_keys:
+        if proxy_key in proxies:
+            proxy = proxies[proxy_key]
+            break
+
+    return proxy
+
+
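+# Hedged example of the resolution order encoded in proxy_keys above
+# (the most specific scheme://host key wins, 'all' is the fallback):
+#
+#     >>> select_proxy('http://example.com/', {'all': 'socks5://p:1080',
+#     ...                                      'http': 'http://p:8080'})
+#     'http://p:8080'
+
+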
+def default_user_agent(name="python-requests"):
+    """
+    Return a string representing the default user agent.
+
+    :rtype: str
+    """
+    return '%s/%s' % (name, __version__)
+
+
+def default_headers():
+    """
+    :rtype: requests.structures.CaseInsensitiveDict
+    """
+    return CaseInsensitiveDict({
+        'User-Agent': default_user_agent(),
+        'Accept-Encoding': ', '.join(('gzip', 'deflate')),
+        'Accept': '*/*',
+        'Connection': 'keep-alive',
+    })
+
+
+def parse_header_links(value):
+    """Return a list of parsed link headers proxies.
+
+    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
+
+    :rtype: list
+    """
+
+    links = []
+
+    replace_chars = ' \'"'
+
+    value = value.strip(replace_chars)
+    if not value:
+        return links
+
+    for val in re.split(', *<', value):
+        try:
+            url, params = val.split(';', 1)
+        except ValueError:
+            url, params = val, ''
+
+        link = {'url': url.strip('<> \'"')}
+
+        for param in params.split(';'):
+            try:
+                key, value = param.split('=')
+            except ValueError:
+                break
+
+            link[key.strip(replace_chars)] = value.strip(replace_chars)
+
+        links.append(link)
+
+    return links
+
+
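+# Worked example (output traced by hand through the parsing above;
+# illustrative only):
+#
+#     >>> parse_header_links('<http://example.com/page2>; rel="next"')
+#     [{'url': 'http://example.com/page2', 'rel': 'next'}]
+
+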
+# Null bytes; no need to recreate these on each call to guess_json_utf
+_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
+_null2 = _null * 2
+_null3 = _null * 3
+
+
+def guess_json_utf(data):
+    """
+    Guess the JSON text encoding from its first four bytes.
+
+    :rtype: str or None
+    """
+    # JSON always starts with two ASCII characters, so detection is as
+    # easy as counting the null bytes: their count and position determine
+    # the encoding. Also detect a BOM, if present.
+    sample = data[:4]
+    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
+        return 'utf-32'     # BOM included
+    if sample[:3] == codecs.BOM_UTF8:
+        return 'utf-8-sig'  # BOM included, MS style (discouraged)
+    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
+        return 'utf-16'     # BOM included
+    nullcount = sample.count(_null)
+    if nullcount == 0:
+        return 'utf-8'
+    if nullcount == 2:
+        if sample[::2] == _null2:   # 1st and 3rd are null
+            return 'utf-16-be'
+        if sample[1::2] == _null2:  # 2nd and 4th are null
+            return 'utf-16-le'
+        # Did not detect 2 valid UTF-16 ascii-range characters
+    if nullcount == 3:
+        if sample[:3] == _null3:
+            return 'utf-32-be'
+        if sample[1:] == _null3:
+            return 'utf-32-le'
+        # Did not detect a valid UTF-32 ascii-range character
+    return None
+
+
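+# Hedged examples (the BOM-less case follows from the null-byte counting
+# above; the second relies on Python's utf-16 codec prepending a BOM):
+#
+#     >>> guess_json_utf(b'{"a": 1}')
+#     'utf-8'
+#     >>> guess_json_utf('{"a": 1}'.encode('utf-16'))
+#     'utf-16'
+
+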
+def prepend_scheme_if_needed(url, new_scheme):
+    """Given a URL that may or may not have a scheme, prepend the given scheme.
+    Does not replace a present scheme with the one provided as an argument.
+
+    :rtype: str
+    """
+    scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
+
+    # urlparse is a finicky beast, and sometimes decides that there isn't a
+    # netloc present. Assume that it's being over-cautious, and switch netloc
+    # and path if urlparse decided there was no netloc.
+    if not netloc:
+        netloc, path = path, netloc
+
+    return urlunparse((scheme, netloc, path, params, query, fragment))
+
+
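+# Illustrative (urlparse puts 'example.com/pub' in the path, and the swap
+# above moves it into the netloc):
+#
+#     >>> prepend_scheme_if_needed('example.com/pub', 'http')
+#     'http://example.com/pub'
+
+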
+def get_auth_from_url(url):
+    """Given a url with authentication components, extract them into a tuple of
+    username,password.
+
+    :rtype: (str,str)
+    """
+    parsed = urlparse(url)
+
+    try:
+        auth = (unquote(parsed.username), unquote(parsed.password))
+    except (AttributeError, TypeError):
+        auth = ('', '')
+
+    return auth
+
+
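+# Illustrative behaviour (the fallback to ('', '') comes from unquote(None)
+# raising TypeError when no auth component is present):
+#
+#     >>> get_auth_from_url('http://user:p%40ss@host.example/')
+#     ('user', 'p@ss')
+#     >>> get_auth_from_url('http://host.example/')
+#     ('', '')
+
+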
+# Moved outside of function to avoid recompile every call
+_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
+_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
+
+
+def check_header_validity(header):
+    """Verifies that header value is a string which doesn't contain
+    leading whitespace or return characters. This prevents unintended
+    header injection.
+
+    :param header: tuple, in the format (name, value).
+    """
+    name, value = header
+
+    if isinstance(value, bytes):
+        pat = _CLEAN_HEADER_REGEX_BYTE
+    else:
+        pat = _CLEAN_HEADER_REGEX_STR
+    try:
+        if not pat.match(value):
+            raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
+    except TypeError:
+        raise InvalidHeader("Value for header {%s: %s} must be of type str or "
+                            "bytes, not %s" % (name, value, type(value)))
+
+
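+# Illustrative: a leading space (or any \r/\n) in the value fails the regex
+# above and raises InvalidHeader:
+#
+#     >>> check_header_validity(('X-Token', 'abc'))    # passes silently
+#     >>> check_header_validity(('X-Token', ' abc'))   # raises InvalidHeader
+
+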
+def urldefragauth(url):
+    """
+    Given a URL, remove the fragment and the authentication part.
+
+    :rtype: str
+    """
+    scheme, netloc, path, params, query, fragment = urlparse(url)
+
+    # see func:`prepend_scheme_if_needed`
+    if not netloc:
+        netloc, path = path, netloc
+
+    netloc = netloc.rsplit('@', 1)[-1]
+
+    return urlunparse((scheme, netloc, path, params, query, ''))
+
+
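+# Hedged example (auth is dropped by the rsplit('@', 1) above, the fragment
+# by rebuilding the URL with an empty final component):
+#
+#     >>> urldefragauth('http://user:pass@example.com/path#frag')
+#     'http://example.com/path'
+
+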
+def rewind_body(prepared_request):
+    """Move file pointer back to its recorded starting position
+    so it can be read again on redirect.
+    """
+    body_seek = getattr(prepared_request.body, 'seek', None)
+    if body_seek is not None and isinstance(prepared_request._body_position, integer_types):
+        try:
+            body_seek(prepared_request._body_position)
+        except (IOError, OSError):
+            raise UnrewindableBodyError("An error occurred when rewinding request "
+                                        "body for redirect.")
+    else:
+        raise UnrewindableBodyError("Unable to rewind request body for redirect.")
diff --git a/lib/python3.7/site-packages/pip/_vendor/retrying.py b/lib/python3.7/site-packages/pip/_vendor/retrying.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d1e627aae8130f76781f9a2861a90a22329c930
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/retrying.py
@@ -0,0 +1,267 @@
+## Copyright 2013-2014 Ray Holder
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+
+import random
+from pip._vendor import six
+import sys
+import time
+import traceback
+
+
+# roughly sys.maxint / 2 on 32-bit builds, hard-coded because Python 3 has no sys.maxint
+MAX_WAIT = 1073741823
+
+
+def retry(*dargs, **dkw):
+    """
+    Decorator function that instantiates the Retrying object
+    @param *dargs: positional arguments passed to the Retrying object
+    @param **dkw: keyword arguments passed to the Retrying object
+    """
+    # support both @retry and @retry() as valid syntax
+    if len(dargs) == 1 and callable(dargs[0]):
+        def wrap_simple(f):
+
+            @six.wraps(f)
+            def wrapped_f(*args, **kw):
+                return Retrying().call(f, *args, **kw)
+
+            return wrapped_f
+
+        return wrap_simple(dargs[0])
+
+    else:
+        def wrap(f):
+
+            @six.wraps(f)
+            def wrapped_f(*args, **kw):
+                return Retrying(*dargs, **dkw).call(f, *args, **kw)
+
+            return wrapped_f
+
+        return wrap
+
+
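+# Hedged usage sketch (the keyword names come from Retrying.__init__ below;
+# wait values are in milliseconds, as used by call()):
+#
+#     @retry(stop_max_attempt_number=3, wait_fixed=200)
+#     def flaky():
+#         ...  # retried up to 3 times, sleeping ~200 ms between attempts
+
+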
+class Retrying(object):
+
+    def __init__(self,
+                 stop=None, wait=None,
+                 stop_max_attempt_number=None,
+                 stop_max_delay=None,
+                 wait_fixed=None,
+                 wait_random_min=None, wait_random_max=None,
+                 wait_incrementing_start=None, wait_incrementing_increment=None,
+                 wait_exponential_multiplier=None, wait_exponential_max=None,
+                 retry_on_exception=None,
+                 retry_on_result=None,
+                 wrap_exception=False,
+                 stop_func=None,
+                 wait_func=None,
+                 wait_jitter_max=None):
+
+        self._stop_max_attempt_number = 5 if stop_max_attempt_number is None else stop_max_attempt_number
+        self._stop_max_delay = 100 if stop_max_delay is None else stop_max_delay
+        self._wait_fixed = 1000 if wait_fixed is None else wait_fixed
+        self._wait_random_min = 0 if wait_random_min is None else wait_random_min
+        self._wait_random_max = 1000 if wait_random_max is None else wait_random_max
+        self._wait_incrementing_start = 0 if wait_incrementing_start is None else wait_incrementing_start
+        self._wait_incrementing_increment = 100 if wait_incrementing_increment is None else wait_incrementing_increment
+        self._wait_exponential_multiplier = 1 if wait_exponential_multiplier is None else wait_exponential_multiplier
+        self._wait_exponential_max = MAX_WAIT if wait_exponential_max is None else wait_exponential_max
+        self._wait_jitter_max = 0 if wait_jitter_max is None else wait_jitter_max
+
+        # TODO add chaining of stop behaviors
+        # stop behavior
+        stop_funcs = []
+        if stop_max_attempt_number is not None:
+            stop_funcs.append(self.stop_after_attempt)
+
+        if stop_max_delay is not None:
+            stop_funcs.append(self.stop_after_delay)
+
+        if stop_func is not None:
+            self.stop = stop_func
+
+        elif stop is None:
+            self.stop = lambda attempts, delay: any(f(attempts, delay) for f in stop_funcs)
+
+        else:
+            self.stop = getattr(self, stop)
+
+        # TODO add chaining of wait behaviors
+        # wait behavior
+        wait_funcs = [lambda *args, **kwargs: 0]
+        if wait_fixed is not None:
+            wait_funcs.append(self.fixed_sleep)
+
+        if wait_random_min is not None or wait_random_max is not None:
+            wait_funcs.append(self.random_sleep)
+
+        if wait_incrementing_start is not None or wait_incrementing_increment is not None:
+            wait_funcs.append(self.incrementing_sleep)
+
+        if wait_exponential_multiplier is not None or wait_exponential_max is not None:
+            wait_funcs.append(self.exponential_sleep)
+
+        if wait_func is not None:
+            self.wait = wait_func
+
+        elif wait is None:
+            self.wait = lambda attempts, delay: max(f(attempts, delay) for f in wait_funcs)
+
+        else:
+            self.wait = getattr(self, wait)
+
+        # retry on exception filter
+        if retry_on_exception is None:
+            self._retry_on_exception = self.always_reject
+        else:
+            self._retry_on_exception = retry_on_exception
+
+        # TODO simplify retrying by Exception types
+        # retry on result filter
+        if retry_on_result is None:
+            self._retry_on_result = self.never_reject
+        else:
+            self._retry_on_result = retry_on_result
+
+        self._wrap_exception = wrap_exception
+
+    def stop_after_attempt(self, previous_attempt_number, delay_since_first_attempt_ms):
+        """Stop after the previous attempt >= stop_max_attempt_number."""
+        return previous_attempt_number >= self._stop_max_attempt_number
+
+    def stop_after_delay(self, previous_attempt_number, delay_since_first_attempt_ms):
+        """Stop after the time from the first attempt >= stop_max_delay."""
+        return delay_since_first_attempt_ms >= self._stop_max_delay
+
+    def no_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
+        """Don't sleep at all before retrying."""
+        return 0
+
+    def fixed_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
+        """Sleep a fixed amount of time between each retry."""
+        return self._wait_fixed
+
+    def random_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
+        """Sleep a random amount of time between wait_random_min and wait_random_max"""
+        return random.randint(self._wait_random_min, self._wait_random_max)
+
+    def incrementing_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
+        """
+        Sleep an incremental amount of time after each attempt, starting at
+        wait_incrementing_start and incrementing by wait_incrementing_increment
+        """
+        result = self._wait_incrementing_start + (self._wait_incrementing_increment * (previous_attempt_number - 1))
+        if result < 0:
+            result = 0
+        return result
+
+    def exponential_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
+        exp = 2 ** previous_attempt_number
+        result = self._wait_exponential_multiplier * exp
+        if result > self._wait_exponential_max:
+            result = self._wait_exponential_max
+        if result < 0:
+            result = 0
+        return result
+
+    def never_reject(self, result):
+        return False
+
+    def always_reject(self, result):
+        return True
+
+    def should_reject(self, attempt):
+        reject = False
+        if attempt.has_exception:
+            reject |= self._retry_on_exception(attempt.value[1])
+        else:
+            reject |= self._retry_on_result(attempt.value)
+
+        return reject
+
+    def call(self, fn, *args, **kwargs):
+        start_time = int(round(time.time() * 1000))
+        attempt_number = 1
+        while True:
+            try:
+                attempt = Attempt(fn(*args, **kwargs), attempt_number, False)
+            except:  # any exception (even BaseException) marks the attempt as failed
+                tb = sys.exc_info()
+                attempt = Attempt(tb, attempt_number, True)
+
+            if not self.should_reject(attempt):
+                return attempt.get(self._wrap_exception)
+
+            delay_since_first_attempt_ms = int(round(time.time() * 1000)) - start_time
+            if self.stop(attempt_number, delay_since_first_attempt_ms):
+                if not self._wrap_exception and attempt.has_exception:
+                    # get() re-raises the stored exception itself; the explicit raise is a safeguard
+                    raise attempt.get()
+                else:
+                    raise RetryError(attempt)
+            else:
+                sleep = self.wait(attempt_number, delay_since_first_attempt_ms)
+                if self._wait_jitter_max:
+                    jitter = random.random() * self._wait_jitter_max
+                    sleep = sleep + max(0, jitter)
+                time.sleep(sleep / 1000.0)
+
+            attempt_number += 1
+
+
+class Attempt(object):
+    """
+    An Attempt encapsulates a call to a target function that may end as a
+    normal return value from the function or an Exception depending on what
+    occurred during the execution.
+    """
+
+    def __init__(self, value, attempt_number, has_exception):
+        self.value = value
+        self.attempt_number = attempt_number
+        self.has_exception = has_exception
+
+    def get(self, wrap_exception=False):
+        """
+        Return the return value of this Attempt instance or raise an Exception.
+        If wrap_exception is true, this Attempt is wrapped inside a
+        RetryError before being raised.
+        """
+        if self.has_exception:
+            if wrap_exception:
+                raise RetryError(self)
+            else:
+                six.reraise(self.value[0], self.value[1], self.value[2])
+        else:
+            return self.value
+
+    def __repr__(self):
+        if self.has_exception:
+            return "Attempts: {0}, Error:\n{1}".format(self.attempt_number, "".join(traceback.format_tb(self.value[2])))
+        else:
+            return "Attempts: {0}, Value: {1}".format(self.attempt_number, self.value)
+
+
+class RetryError(Exception):
+    """
+    A RetryError encapsulates the last Attempt instance right before giving up.
+    """
+
+    def __init__(self, last_attempt):
+        self.last_attempt = last_attempt
+
+    def __str__(self):
+        return "RetryError[{0}]".format(self.last_attempt)
diff --git a/lib/python3.7/site-packages/pip/_vendor/six.py b/lib/python3.7/site-packages/pip/_vendor/six.py
new file mode 100644
index 0000000000000000000000000000000000000000..89b2188fd63f6f95f1c4500f476a8e6adeaabdd0
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/six.py
@@ -0,0 +1,952 @@
+# Copyright (c) 2010-2018 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.12.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    if sys.platform.startswith("java"):
+        # Jython always uses 32 bits.
+        MAXSIZE = int((1 << 31) - 1)
+    else:
+        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+        class X(object):
+
+            def __len__(self):
+                return 1 << 31
+        try:
+            len(X())
+        except OverflowError:
+            # 32-bit
+            MAXSIZE = int((1 << 31) - 1)
+        else:
+            # 64-bit
+            MAXSIZE = int((1 << 63) - 1)
+        del X
+
+
+def _add_doc(func, doc):
+    """Add documentation to a function."""
+    func.__doc__ = doc
+
+
+def _import_module(name):
+    """Import module, returning the module after the last dot."""
+    __import__(name)
+    return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, tp):
+        result = self._resolve()
+        setattr(obj, self.name, result)  # Invokes __set__.
+        try:
+            # This is a bit ugly, but it avoids running this again by
+            # removing this descriptor.
+            delattr(obj.__class__, self.name)
+        except AttributeError:
+            pass
+        return result
+
+
+class MovedModule(_LazyDescr):
+
+    def __init__(self, name, old, new=None):
+        super(MovedModule, self).__init__(name)
+        if PY3:
+            if new is None:
+                new = name
+            self.mod = new
+        else:
+            self.mod = old
+
+    def _resolve(self):
+        return _import_module(self.mod)
+
+    def __getattr__(self, attr):
+        _module = self._resolve()
+        value = getattr(_module, attr)
+        setattr(self, attr, value)
+        return value
+
+
+class _LazyModule(types.ModuleType):
+
+    def __init__(self, name):
+        super(_LazyModule, self).__init__(name)
+        self.__doc__ = self.__class__.__doc__
+
+    def __dir__(self):
+        attrs = ["__doc__", "__name__"]
+        attrs += [attr.name for attr in self._moved_attributes]
+        return attrs
+
+    # Subclasses should override this
+    _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+        super(MovedAttribute, self).__init__(name)
+        if PY3:
+            if new_mod is None:
+                new_mod = name
+            self.mod = new_mod
+            if new_attr is None:
+                if old_attr is None:
+                    new_attr = name
+                else:
+                    new_attr = old_attr
+            self.attr = new_attr
+        else:
+            self.mod = old_mod
+            if old_attr is None:
+                old_attr = name
+            self.attr = old_attr
+
+    def _resolve(self):
+        module = _import_module(self.mod)
+        return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+    """
+    A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP 302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python 3.
+    """
+
+    def __init__(self, six_module_name):
+        self.name = six_module_name
+        self.known_modules = {}
+
+    def _add_module(self, mod, *fullnames):
+        for fullname in fullnames:
+            self.known_modules[self.name + "." + fullname] = mod
+
+    def _get_module(self, fullname):
+        return self.known_modules[self.name + "." + fullname]
+
+    def find_module(self, fullname, path=None):
+        if fullname in self.known_modules:
+            return self
+        return None
+
+    def __get_module(self, fullname):
+        try:
+            return self.known_modules[fullname]
+        except KeyError:
+            raise ImportError("This loader does not know module " + fullname)
+
+    def load_module(self, fullname):
+        try:
+            # in case of a reload
+            return sys.modules[fullname]
+        except KeyError:
+            pass
+        mod = self.__get_module(fullname)
+        if isinstance(mod, MovedModule):
+            mod = mod._resolve()
+        else:
+            mod.__loader__ = self
+        sys.modules[fullname] = mod
+        return mod
+
+    def is_package(self, fullname):
+        """
+        Return true if the named module is a package.
+
+        We need this method to get correct spec objects with
+        Python 3.4 (see PEP 451).
+        """
+        return hasattr(self.__get_module(fullname), "__path__")
+
+    def get_code(self, fullname):
+        """Return None
+
+        Required, if is_package is implemented"""
+        self.__get_module(fullname)  # eventually raises ImportError
+        return None
+    get_source = get_code  # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+    """Lazy loading of moved objects"""
+    __path__ = []  # mark as package
+
+
+_moved_attributes = [
+    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("intern", "__builtin__", "sys"),
+    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+    MovedAttribute("getoutput", "commands", "subprocess"),
+    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+    MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+    MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("UserDict", "UserDict", "collections"),
+    MovedAttribute("UserList", "UserList", "collections"),
+    MovedAttribute("UserString", "UserString", "collections"),
+    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+    MovedModule("builtins", "__builtin__"),
+    MovedModule("configparser", "ConfigParser"),
+    MovedModule("copyreg", "copy_reg"),
+    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+    MovedModule("http_cookies", "Cookie", "http.cookies"),
+    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+    MovedModule("html_parser", "HTMLParser", "html.parser"),
+    MovedModule("http_client", "httplib", "http.client"),
+    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+    MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+    MovedModule("cPickle", "cPickle", "pickle"),
+    MovedModule("queue", "Queue"),
+    MovedModule("reprlib", "repr"),
+    MovedModule("socketserver", "SocketServer"),
+    MovedModule("_thread", "thread", "_thread"),
+    MovedModule("tkinter", "Tkinter"),
+    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+    MovedModule("tkinter_colorchooser", "tkColorChooser",
+                "tkinter.colorchooser"),
+    MovedModule("tkinter_commondialog", "tkCommonDialog",
+                "tkinter.commondialog"),
+    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+                "tkinter.simpledialog"),
+    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+    _moved_attributes += [
+        MovedModule("winreg", "_winreg"),
+    ]
+
+for attr in _moved_attributes:
+    setattr(_MovedItems, attr.name, attr)
+    if isinstance(attr, MovedModule):
+        _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
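+# Hedged example: attributes on six.moves resolve lazily through the
+# descriptors registered above, e.g.
+#
+#     from pip._vendor.six import moves
+#     moves.range   # xrange on Python 2, the range builtin on Python 3
+
+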
+class Module_six_moves_urllib_parse(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("quote", "urllib", "urllib.parse"),
+    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
+    MovedAttribute("urlencode", "urllib", "urllib.parse"),
+    MovedAttribute("splitquery", "urllib", "urllib.parse"),
+    MovedAttribute("splittag", "urllib", "urllib.parse"),
+    MovedAttribute("splituser", "urllib", "urllib.parse"),
+    MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+    setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+                      "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+    MovedAttribute("URLError", "urllib2", "urllib.error"),
+    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+    setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+                      "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+    MovedAttribute("urlopen", "urllib2", "urllib.request"),
+    MovedAttribute("install_opener", "urllib2", "urllib.request"),
+    MovedAttribute("build_opener", "urllib2", "urllib.request"),
+    MovedAttribute("pathname2url", "urllib", "urllib.request"),
+    MovedAttribute("url2pathname", "urllib", "urllib.request"),
+    MovedAttribute("getproxies", "urllib", "urllib.request"),
+    MovedAttribute("Request", "urllib2", "urllib.request"),
+    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+    MovedAttribute("URLopener", "urllib", "urllib.request"),
+    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+    MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+    MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+    setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+                      "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+    MovedAttribute("addbase", "urllib", "urllib.response"),
+    MovedAttribute("addclosehook", "urllib", "urllib.response"),
+    MovedAttribute("addinfo", "urllib", "urllib.response"),
+    MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+    setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+                      "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+                      "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+    __path__ = []  # mark as package
+    parse = _importer._get_module("moves.urllib_parse")
+    error = _importer._get_module("moves.urllib_error")
+    request = _importer._get_module("moves.urllib_request")
+    response = _importer._get_module("moves.urllib_response")
+    robotparser = _importer._get_module("moves.urllib_robotparser")
+
+    def __dir__(self):
+        return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+                      "moves.urllib")
+
+
+def add_move(move):
+    """Add an item to six.moves."""
+    setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+    """Remove item from six.moves."""
+    try:
+        delattr(_MovedItems, name)
+    except AttributeError:
+        try:
+            del moves.__dict__[name]
+        except KeyError:
+            raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+    _meth_func = "__func__"
+    _meth_self = "__self__"
+
+    _func_closure = "__closure__"
+    _func_code = "__code__"
+    _func_defaults = "__defaults__"
+    _func_globals = "__globals__"
+else:
+    _meth_func = "im_func"
+    _meth_self = "im_self"
+
+    _func_closure = "func_closure"
+    _func_code = "func_code"
+    _func_defaults = "func_defaults"
+    _func_globals = "func_globals"
+
+
+try:
+    advance_iterator = next
+except NameError:
+    def advance_iterator(it):
+        return it.next()
+next = advance_iterator
+
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+    def get_unbound_function(unbound):
+        return unbound
+
+    create_bound_method = types.MethodType
+
+    def create_unbound_method(func, cls):
+        return func
+
+    Iterator = object
+else:
+    def get_unbound_function(unbound):
+        return unbound.im_func
+
+    def create_bound_method(func, obj):
+        return types.MethodType(func, obj, obj.__class__)
+
+    def create_unbound_method(func, cls):
+        return types.MethodType(func, None, cls)
+
+    class Iterator(object):
+
+        def next(self):
+            return type(self).__next__(self)
+
+    callable = callable
+_add_doc(get_unbound_function,
+         """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+    def iterkeys(d, **kw):
+        return iter(d.keys(**kw))
+
+    def itervalues(d, **kw):
+        return iter(d.values(**kw))
+
+    def iteritems(d, **kw):
+        return iter(d.items(**kw))
+
+    def iterlists(d, **kw):
+        return iter(d.lists(**kw))
+
+    viewkeys = operator.methodcaller("keys")
+
+    viewvalues = operator.methodcaller("values")
+
+    viewitems = operator.methodcaller("items")
+else:
+    def iterkeys(d, **kw):
+        return d.iterkeys(**kw)
+
+    def itervalues(d, **kw):
+        return d.itervalues(**kw)
+
+    def iteritems(d, **kw):
+        return d.iteritems(**kw)
+
+    def iterlists(d, **kw):
+        return d.iterlists(**kw)
+
+    viewkeys = operator.methodcaller("viewkeys")
+
+    viewvalues = operator.methodcaller("viewvalues")
+
+    viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+         "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+         "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+    def b(s):
+        return s.encode("latin-1")
+
+    def u(s):
+        return s
+    unichr = chr
+    import struct
+    int2byte = struct.Struct(">B").pack
+    del struct
+    byte2int = operator.itemgetter(0)
+    indexbytes = operator.getitem
+    iterbytes = iter
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+    _assertCountEqual = "assertCountEqual"
+    if sys.version_info[1] <= 1:
+        _assertRaisesRegex = "assertRaisesRegexp"
+        _assertRegex = "assertRegexpMatches"
+    else:
+        _assertRaisesRegex = "assertRaisesRegex"
+        _assertRegex = "assertRegex"
+else:
+    def b(s):
+        return s
+    # Workaround for standalone backslash
+
+    def u(s):
+        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+    unichr = unichr
+    int2byte = chr
+
+    def byte2int(bs):
+        return ord(bs[0])
+
+    def indexbytes(buf, i):
+        return ord(buf[i])
+    iterbytes = functools.partial(itertools.imap, ord)
+    import StringIO
+    StringIO = BytesIO = StringIO.StringIO
+    _assertCountEqual = "assertItemsEqual"
+    _assertRaisesRegex = "assertRaisesRegexp"
+    _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+    return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+    return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+    exec_ = getattr(moves.builtins, "exec")
+
+    def reraise(tp, value, tb=None):
+        try:
+            if value is None:
+                value = tp()
+            if value.__traceback__ is not tb:
+                raise value.with_traceback(tb)
+            raise value
+        finally:
+            value = None
+            tb = None
+
+else:
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+
+    exec_("""def reraise(tp, value, tb=None):
+    try:
+        raise tp, value, tb
+    finally:
+        tb = None
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+    exec_("""def raise_from(value, from_value):
+    try:
+        if from_value is None:
+            raise value
+        raise value from from_value
+    finally:
+        value = None
+""")
+elif sys.version_info[:2] > (3, 2):
+    exec_("""def raise_from(value, from_value):
+    try:
+        raise value from from_value
+    finally:
+        value = None
+""")
+else:
+    def raise_from(value, from_value):
+        raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+    def print_(*args, **kwargs):
+        """The new-style print function for Python 2.4 and 2.5."""
+        fp = kwargs.pop("file", sys.stdout)
+        if fp is None:
+            return
+
+        def write(data):
+            if not isinstance(data, basestring):
+                data = str(data)
+            # If the file has an encoding, encode unicode with it.
+            if (isinstance(fp, file) and
+                    isinstance(data, unicode) and
+                    fp.encoding is not None):
+                errors = getattr(fp, "errors", None)
+                if errors is None:
+                    errors = "strict"
+                data = data.encode(fp.encoding, errors)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+if sys.version_info[:2] < (3, 3):
+    _print = print_
+
+    def print_(*args, **kwargs):
+        fp = kwargs.get("file", sys.stdout)
+        flush = kwargs.pop("flush", False)
+        _print(*args, **kwargs)
+        if flush and fp is not None:
+            fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+              updated=functools.WRAPPER_UPDATES):
+        def wrapper(f):
+            f = functools.wraps(wrapped, assigned, updated)(f)
+            f.__wrapped__ = wrapped
+            return f
+        return wrapper
+else:
+    wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(type):
+
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+
+        @classmethod
+        def __prepare__(cls, name, this_bases):
+            return meta.__prepare__(name, bases)
+    return type.__new__(metaclass, 'temporary_class', (), {})
+
+
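+# Illustrative use (the temporary metaclass replaces itself when 'Base' is
+# actually created, so type(Base) is Meta on both major versions):
+#
+#     class Meta(type):
+#         pass
+#
+#     class Base(with_metaclass(Meta, object)):
+#         pass
+
+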
+def add_metaclass(metaclass):
+    """Class decorator for creating a class with a metaclass."""
+    def wrapper(cls):
+        orig_vars = cls.__dict__.copy()
+        slots = orig_vars.get('__slots__')
+        if slots is not None:
+            if isinstance(slots, str):
+                slots = [slots]
+            for slots_var in slots:
+                orig_vars.pop(slots_var)
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
+        if hasattr(cls, '__qualname__'):
+            orig_vars['__qualname__'] = cls.__qualname__
+        return metaclass(cls.__name__, cls.__bases__, orig_vars)
+    return wrapper
+
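+# Illustrative decorator form (same effect as the with_metaclass sketch
+# above):
+#
+#     @add_metaclass(Meta)
+#     class Base(object):
+#         pass
+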
+
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+    """Coerce **s** to six.binary_type.
+
+    For Python 2:
+      - `unicode` -> encoded to `str`
+      - `str` -> `str`
+
+    For Python 3:
+      - `str` -> encoded to `bytes`
+      - `bytes` -> `bytes`
+    """
+    if isinstance(s, text_type):
+        return s.encode(encoding, errors)
+    elif isinstance(s, binary_type):
+        return s
+    else:
+        raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+    """Coerce *s* to `str`.
+
+    For Python 2:
+      - `unicode` -> encoded to `str`
+      - `str` -> `str`
+
+    For Python 3:
+      - `str` -> `str`
+      - `bytes` -> decoded to `str`
+    """
+    if not isinstance(s, (text_type, binary_type)):
+        raise TypeError("not expecting type '%s'" % type(s))
+    if PY2 and isinstance(s, text_type):
+        s = s.encode(encoding, errors)
+    elif PY3 and isinstance(s, binary_type):
+        s = s.decode(encoding, errors)
+    return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+    """Coerce *s* to six.text_type.
+
+    For Python 2:
+      - `unicode` -> `unicode`
+      - `str` -> `unicode`
+
+    For Python 3:
+      - `str` -> `str`
+      - `bytes` -> decoded to `str`
+    """
+    if isinstance(s, binary_type):
+        return s.decode(encoding, errors)
+    elif isinstance(s, text_type):
+        return s
+    else:
+        raise TypeError("not expecting type '%s'" % type(s))
+
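+# Hedged sketch of the ensure_* helpers above with the default utf-8 codec
+# (byte values spelled out explicitly; illustrative):
+#
+#     ensure_binary(u'caf\xe9')    # -> b'caf\xc3\xa9'
+#     ensure_text(b'caf\xc3\xa9')  # -> u'caf\xe9'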
+
+
+def python_2_unicode_compatible(klass):
+    """
+    A decorator that defines __unicode__ and __str__ methods under Python 2.
+    Under Python 3 it does nothing.
+
+    To support Python 2 and 3 with a single code base, define a __str__ method
+    returning text and apply this decorator to the class.
+    """
+    if PY2:
+        if '__str__' not in klass.__dict__:
+            raise ValueError("@python_2_unicode_compatible cannot be applied "
+                             "to %s because it doesn't define __str__()." %
+                             klass.__name__)
+        klass.__unicode__ = klass.__str__
+        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+    return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = []  # required for PEP 302 and PEP 451
+__package__ = __name__  # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+    for i, importer in enumerate(sys.meta_path):
+        # Here's some real nastiness: Another "instance" of the six module might
+        # be floating around. Therefore, we can't use isinstance() to check for
+        # the six meta path importer, since the other six instance will have
+        # inserted an importer with different class.
+        if (type(importer).__name__ == "_SixMetaPathImporter" and
+                importer.name == __name__):
+            del sys.meta_path[i]
+            break
+    del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/__init__.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..148a9c31a7a555ea302ca2edb3aa674bed897eab
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/__init__.py
@@ -0,0 +1,92 @@
+"""
+urllib3 - Thread-safe connection pooling and re-using.
+"""
+
+from __future__ import absolute_import
+import warnings
+
+from .connectionpool import (
+    HTTPConnectionPool,
+    HTTPSConnectionPool,
+    connection_from_url
+)
+
+from . import exceptions
+from .filepost import encode_multipart_formdata
+from .poolmanager import PoolManager, ProxyManager, proxy_from_url
+from .response import HTTPResponse
+from .util.request import make_headers
+from .util.url import get_host
+from .util.timeout import Timeout
+from .util.retry import Retry
+
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+from logging import NullHandler
+
+__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
+__license__ = 'MIT'
+__version__ = '1.24.1'
+
+__all__ = (
+    'HTTPConnectionPool',
+    'HTTPSConnectionPool',
+    'PoolManager',
+    'ProxyManager',
+    'HTTPResponse',
+    'Retry',
+    'Timeout',
+    'add_stderr_logger',
+    'connection_from_url',
+    'disable_warnings',
+    'encode_multipart_formdata',
+    'get_host',
+    'make_headers',
+    'proxy_from_url',
+)
+
+logging.getLogger(__name__).addHandler(NullHandler())
+
+
+def add_stderr_logger(level=logging.DEBUG):
+    """
+    Helper for quickly adding a StreamHandler to the logger. Useful for
+    debugging.
+
+    Returns the handler after adding it.
+    """
+    # This method needs to be in this __init__.py to get the __name__ correct
+    # even if urllib3 is vendored within another package.
+    logger = logging.getLogger(__name__)
+    handler = logging.StreamHandler()
+    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
+    logger.addHandler(handler)
+    logger.setLevel(level)
+    logger.debug('Added a stderr logging handler to logger: %s', __name__)
+    return handler
+
+
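+# Hedged example; logging is already imported at module level and
+# StreamHandler defaults to stderr:
+#
+#     handler = add_stderr_logger(logging.INFO)
+
+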
+# ... Clean up.
+del NullHandler
+
+
+# All warning filters *must* be appended unless you're really certain that they
+# shouldn't be: otherwise, it's very hard for users to use most Python
+# mechanisms to silence them.
+# SecurityWarnings always go off by default.
+warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
+# SubjectAltNameWarnings should go off once per host.
+warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
+# InsecurePlatformWarnings don't vary between requests, so we keep the filter at 'default'.
+warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
+                      append=True)
+# SNIMissingWarnings should go off only once.
+warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
+
+
+def disable_warnings(category=exceptions.HTTPWarning):
+    """
+    Helper for quickly disabling all urllib3 warnings.
+    """
+    warnings.simplefilter('ignore', category)
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3cf6a155725bc232e70adece9fdd0ab8a0b2b344
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d6da67c8b67170d41062d8bde3748737a5f5921
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/_collections.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fb2d10938af49b16235b796993f894fd0c8deefe
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/connection.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..51577fb1875a50839514f7ec028141a8a30a36f2
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/connectionpool.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f6851038376d977dce26cfe50931b389400f91c
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/exceptions.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5d202c0faf685a90ff8681242efe3a3943353057
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/fields.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c2147a11316be5f2fbf52d0652cae8a7e4ed4b46
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/filepost.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..acbdcb73124a1c1983b4a1fb6a8ba1b4d6085295
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/poolmanager.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bfa71716479d37278560d999c08c87a64423d22d
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/request.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..575ab1db90716c8bdb9506c59d06de3302b3cfe2
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/__pycache__/response.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/_collections.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/_collections.py
new file mode 100644
index 0000000000000000000000000000000000000000..34f23811c627d22254152a456c8636302e056ff9
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/_collections.py
@@ -0,0 +1,329 @@
+from __future__ import absolute_import
+try:
+    from collections.abc import Mapping, MutableMapping
+except ImportError:
+    from collections import Mapping, MutableMapping
+try:
+    from threading import RLock
+except ImportError:  # Platform-specific: No threads available
+    class RLock:
+        def __enter__(self):
+            pass
+
+        def __exit__(self, exc_type, exc_value, traceback):
+            pass
+
+
+from collections import OrderedDict
+from .exceptions import InvalidHeader
+from .packages.six import iterkeys, itervalues, PY3
+
+
+__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
+
+
+_Null = object()
+
+
+class RecentlyUsedContainer(MutableMapping):
+    """
+    Provides a thread-safe dict-like container which maintains up to
+    ``maxsize`` keys while throwing away the least-recently-used keys beyond
+    ``maxsize``.
+
+    :param maxsize:
+        Maximum number of recent elements to retain.
+
+    :param dispose_func:
+        Callback invoked as ``dispose_func(value)`` every time an item is
+        evicted from the container.
+    """
+
+    ContainerCls = OrderedDict
+
+    def __init__(self, maxsize=10, dispose_func=None):
+        self._maxsize = maxsize
+        self.dispose_func = dispose_func
+
+        self._container = self.ContainerCls()
+        self.lock = RLock()
+
+    def __getitem__(self, key):
+        # Re-insert the item, moving it to the end of the eviction line.
+        with self.lock:
+            item = self._container.pop(key)
+            self._container[key] = item
+            return item
+
+    def __setitem__(self, key, value):
+        evicted_value = _Null
+        with self.lock:
+            # Possibly evict the existing value of 'key'
+            evicted_value = self._container.get(key, _Null)
+            self._container[key] = value
+
+            # If we didn't evict an existing value, we might have to evict the
+            # least recently used item from the beginning of the container.
+            if len(self._container) > self._maxsize:
+                _key, evicted_value = self._container.popitem(last=False)
+
+        if self.dispose_func and evicted_value is not _Null:
+            self.dispose_func(evicted_value)
+
+    def __delitem__(self, key):
+        with self.lock:
+            value = self._container.pop(key)
+
+        if self.dispose_func:
+            self.dispose_func(value)
+
+    def __len__(self):
+        with self.lock:
+            return len(self._container)
+
+    def __iter__(self):
+        raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
+
+    def clear(self):
+        with self.lock:
+            # Copy pointers to all values, then wipe the mapping
+            values = list(itervalues(self._container))
+            self._container.clear()
+
+        if self.dispose_func:
+            for value in values:
+                self.dispose_func(value)
+
+    def keys(self):
+        with self.lock:
+            return list(iterkeys(self._container))
+
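+# Editor's sketch, not part of upstream urllib3: LRU eviction order and the
+# dispose_func callback, exercised when this module is run directly.
+if __name__ == '__main__':
+    closed = []
+    lru = RecentlyUsedContainer(maxsize=2, dispose_func=closed.append)
+    lru['a'] = 1
+    lru['b'] = 2
+    lru['a']               # touching 'a' makes 'b' the least recently used
+    lru['c'] = 3           # exceeds maxsize, so 'b' (value 2) is disposed
+    assert closed == [2] and sorted(lru.keys()) == ['a', 'c']
+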
+
+class HTTPHeaderDict(MutableMapping):
+    """
+    :param headers:
+        An iterable of field-value pairs. Must not contain multiple field names
+        when compared case-insensitively.
+
+    :param kwargs:
+        Additional field-value pairs to pass in to ``dict.update``.
+
+    A ``dict`` like container for storing HTTP Headers.
+
+    Field names are stored and compared case-insensitively in compliance with
+    RFC 7230. Iteration provides the first case-sensitive key seen for each
+    case-insensitive pair.
+
+    Using ``__setitem__`` syntax overwrites fields that compare equal
+    case-insensitively in order to maintain ``dict``'s API. For fields that
+    compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
+    in a loop.
+
+    If multiple fields that are equal case-insensitively are passed to the
+    constructor or ``.update``, the behavior is undefined and some will be
+    lost.
+
+    >>> headers = HTTPHeaderDict()
+    >>> headers.add('Set-Cookie', 'foo=bar')
+    >>> headers.add('set-cookie', 'baz=quxx')
+    >>> headers['content-length'] = '7'
+    >>> headers['SET-cookie']
+    'foo=bar, baz=quxx'
+    >>> headers['Content-Length']
+    '7'
+    """
+
+    def __init__(self, headers=None, **kwargs):
+        super(HTTPHeaderDict, self).__init__()
+        self._container = OrderedDict()
+        if headers is not None:
+            if isinstance(headers, HTTPHeaderDict):
+                self._copy_from(headers)
+            else:
+                self.extend(headers)
+        if kwargs:
+            self.extend(kwargs)
+
+    def __setitem__(self, key, val):
+        self._container[key.lower()] = [key, val]
+        return self._container[key.lower()]
+
+    def __getitem__(self, key):
+        val = self._container[key.lower()]
+        return ', '.join(val[1:])
+
+    def __delitem__(self, key):
+        del self._container[key.lower()]
+
+    def __contains__(self, key):
+        return key.lower() in self._container
+
+    def __eq__(self, other):
+        if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
+            return False
+        if not isinstance(other, type(self)):
+            other = type(self)(other)
+        return (dict((k.lower(), v) for k, v in self.itermerged()) ==
+                dict((k.lower(), v) for k, v in other.itermerged()))
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    if not PY3:  # Python 2
+        iterkeys = MutableMapping.iterkeys
+        itervalues = MutableMapping.itervalues
+
+    __marker = object()
+
+    def __len__(self):
+        return len(self._container)
+
+    def __iter__(self):
+        # Only provide the originally cased names
+        for vals in self._container.values():
+            yield vals[0]
+
+    def pop(self, key, default=__marker):
+        """D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+        If key is not found, d is returned if given, otherwise KeyError is raised.
+        """
+        # Using the MutableMapping function directly fails due to the private marker.
+        # Using ordinary dict.pop would expose the internal structures.
+        # So let's reinvent the wheel.
+        try:
+            value = self[key]
+        except KeyError:
+            if default is self.__marker:
+                raise
+            return default
+        else:
+            del self[key]
+            return value
+
+    def discard(self, key):
+        try:
+            del self[key]
+        except KeyError:
+            pass
+
+    def add(self, key, val):
+        """Adds a (name, value) pair, doesn't overwrite the value if it already
+        exists.
+
+        >>> headers = HTTPHeaderDict(foo='bar')
+        >>> headers.add('Foo', 'baz')
+        >>> headers['foo']
+        'bar, baz'
+        """
+        key_lower = key.lower()
+        new_vals = [key, val]
+        # Keep the common case aka no item present as fast as possible
+        vals = self._container.setdefault(key_lower, new_vals)
+        if new_vals is not vals:
+            vals.append(val)
+
+    def extend(self, *args, **kwargs):
+        """Generic import function for any type of header-like object.
+        Adapted version of MutableMapping.update in order to insert items
+        with self.add instead of self.__setitem__
+        """
+        if len(args) > 1:
+            raise TypeError("extend() takes at most 1 positional "
+                            "argument ({0} given)".format(len(args)))
+        other = args[0] if len(args) >= 1 else ()
+
+        if isinstance(other, HTTPHeaderDict):
+            for key, val in other.iteritems():
+                self.add(key, val)
+        elif isinstance(other, Mapping):
+            for key in other:
+                self.add(key, other[key])
+        elif hasattr(other, "keys"):
+            for key in other.keys():
+                self.add(key, other[key])
+        else:
+            for key, value in other:
+                self.add(key, value)
+
+        for key, value in kwargs.items():
+            self.add(key, value)
+
+    def getlist(self, key, default=__marker):
+        """Returns a list of all the values for the named field. Returns an
+        empty list if the key doesn't exist."""
+        try:
+            vals = self._container[key.lower()]
+        except KeyError:
+            if default is self.__marker:
+                return []
+            return default
+        else:
+            return vals[1:]
+
+    # Backwards compatibility for httplib
+    getheaders = getlist
+    getallmatchingheaders = getlist
+    iget = getlist
+
+    # Backwards compatibility for http.cookiejar
+    get_all = getlist
+
+    def __repr__(self):
+        return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
+
+    def _copy_from(self, other):
+        for key in other:
+            val = other.getlist(key)
+            if isinstance(val, list):
+                # Don't need to convert tuples
+                val = list(val)
+            self._container[key.lower()] = [key] + val
+
+    def copy(self):
+        clone = type(self)()
+        clone._copy_from(self)
+        return clone
+
+    def iteritems(self):
+        """Iterate over all header lines, including duplicate ones."""
+        for key in self:
+            vals = self._container[key.lower()]
+            for val in vals[1:]:
+                yield vals[0], val
+
+    def itermerged(self):
+        """Iterate over all headers, merging duplicate ones together."""
+        for key in self:
+            val = self._container[key.lower()]
+            yield val[0], ', '.join(val[1:])
+
+    def items(self):
+        return list(self.iteritems())
+
+    @classmethod
+    def from_httplib(cls, message):  # Python 2
+        """Read headers from a Python 2 httplib message object."""
+        # python2.7 does not expose a proper API for exporting multiheaders
+        # efficiently. This function re-reads raw lines from the message
+        # object and extracts the multiheaders properly.
+        obs_fold_continued_leaders = (' ', '\t')
+        headers = []
+
+        for line in message.headers:
+            if line.startswith(obs_fold_continued_leaders):
+                if not headers:
+                    # We received a header line that starts with OWS as described
+                    # in RFC-7230 S3.2.4. This indicates a multiline header, but
+                    # there exists no previous header to which we can attach it.
+                    raise InvalidHeader(
+                        'Header continuation with no previous header: %s' % line
+                    )
+                else:
+                    key, value = headers[-1]
+                    headers[-1] = (key, value + ' ' + line.strip())
+                    continue
+
+            key, value = line.split(':', 1)
+            headers.append((key, value.strip()))
+
+        return cls(headers)
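+
+
+# Editor's sketch, not part of upstream urllib3: multi-valued fields via
+# add()/extend() and the list-returning accessor getlist().
+if __name__ == '__main__':
+    h = HTTPHeaderDict()
+    h.add('Set-Cookie', 'a=1')
+    h.add('set-cookie', 'b=2')               # same field, case-insensitively
+    h.extend({'Accept': 'text/html'})
+    assert h['SET-COOKIE'] == 'a=1, b=2'     # merged, comma-joined view
+    assert h.getlist('Set-Cookie') == ['a=1', 'b=2']
+    assert h.getlist('X-Missing') == []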
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/connection.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..02b36654bd7c9d9ebe4dc612715c0f1a1c1273d0
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/connection.py
@@ -0,0 +1,391 @@
+from __future__ import absolute_import
+import datetime
+import logging
+import os
+import socket
+from socket import error as SocketError, timeout as SocketTimeout
+import warnings
+from .packages import six
+from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
+from .packages.six.moves.http_client import HTTPException  # noqa: F401
+
+try:  # Compiled with SSL?
+    import ssl
+    BaseSSLError = ssl.SSLError
+except (ImportError, AttributeError):  # Platform-specific: No SSL.
+    ssl = None
+
+    class BaseSSLError(BaseException):
+        pass
+
+
+try:  # Python 3:
+    # Not a no-op, we're adding this to the namespace so it can be imported.
+    ConnectionError = ConnectionError
+except NameError:  # Python 2:
+    class ConnectionError(Exception):
+        pass
+
+
+from .exceptions import (
+    NewConnectionError,
+    ConnectTimeoutError,
+    SubjectAltNameWarning,
+    SystemTimeWarning,
+)
+from .packages.ssl_match_hostname import match_hostname, CertificateError
+
+from .util.ssl_ import (
+    resolve_cert_reqs,
+    resolve_ssl_version,
+    assert_fingerprint,
+    create_urllib3_context,
+    ssl_wrap_socket
+)
+
+
+from .util import connection
+
+from ._collections import HTTPHeaderDict
+
+log = logging.getLogger(__name__)
+
+port_by_scheme = {
+    'http': 80,
+    'https': 443,
+}
+
+# When updating RECENT_DATE, move it to within two years of the current date,
+# and not less than 6 months ago.
+# Example: if Today is 2018-01-01, then RECENT_DATE should be any date on or
+# after 2016-01-01 (today - 2 years) AND before 2017-07-01 (today - 6 months)
+RECENT_DATE = datetime.date(2017, 6, 30)
+
+
+class DummyConnection(object):
+    """Used to detect a failed ConnectionCls import."""
+    pass
+
+
+class HTTPConnection(_HTTPConnection, object):
+    """
+    Based on httplib.HTTPConnection but provides an extra constructor
+    backwards-compatibility layer between older and newer Pythons.
+
+    Additional keyword parameters are used to configure attributes of the connection.
+    Accepted parameters include:
+
+      - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
+      - ``source_address``: Set the source address for the current connection.
+      - ``socket_options``: Set specific options on the underlying socket. If not specified, then
+        defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
+        Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
+
+        For example, if you wish to enable TCP Keep Alive in addition to the defaults,
+        you might pass::
+
+            HTTPConnection.default_socket_options + [
+                (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+            ]
+
+        Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
+    """
+
+    default_port = port_by_scheme['http']
+
+    #: Disable Nagle's algorithm by default.
+    #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
+    default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
+
+    #: Whether this connection verifies the host's certificate.
+    is_verified = False
+
+    def __init__(self, *args, **kw):
+        if six.PY3:  # Python 3
+            kw.pop('strict', None)
+
+        # Pre-set source_address.
+        self.source_address = kw.get('source_address')
+
+        #: The socket options provided by the user. If no options are
+        #: provided, we use the default options.
+        self.socket_options = kw.pop('socket_options', self.default_socket_options)
+
+        _HTTPConnection.__init__(self, *args, **kw)
+
+    @property
+    def host(self):
+        """
+        Getter method to remove any trailing dots that indicate the hostname is an FQDN.
+
+        In general, SSL certificates don't include the trailing dot indicating a
+        fully-qualified domain name, and thus, they don't validate properly when
+        checked against a domain name that includes the dot. In addition, some
+        servers may not expect to receive the trailing dot when provided.
+
+        However, the hostname with the trailing dot is critical to DNS resolution; a
+        lookup with the trailing dot resolves only the appropriate FQDN,
+        whereas a lookup without it also searches the system's search domain
+        list. Thus, it's important to keep the original host around for use only in
+        those cases where it's appropriate (i.e., when doing DNS lookup to establish the
+        actual TCP connection across which we're going to send HTTP requests).
+        """
+        return self._dns_host.rstrip('.')
+
+    @host.setter
+    def host(self, value):
+        """
+        Setter for the `host` property.
+
+        We assume that only urllib3 uses the _dns_host attribute; httplib itself
+        only uses `host`, and it seems reasonable that other libraries follow suit.
+        """
+        self._dns_host = value
+
+    def _new_conn(self):
+        """ Establish a socket connection and set nodelay settings on it.
+
+        :return: New socket connection.
+        """
+        extra_kw = {}
+        if self.source_address:
+            extra_kw['source_address'] = self.source_address
+
+        if self.socket_options:
+            extra_kw['socket_options'] = self.socket_options
+
+        try:
+            conn = connection.create_connection(
+                (self._dns_host, self.port), self.timeout, **extra_kw)
+
+        except SocketTimeout:
+            raise ConnectTimeoutError(
+                self, "Connection to %s timed out. (connect timeout=%s)" %
+                (self.host, self.timeout))
+
+        except SocketError as e:
+            raise NewConnectionError(
+                self, "Failed to establish a new connection: %s" % e)
+
+        return conn
+
+    def _prepare_conn(self, conn):
+        self.sock = conn
+        if self._tunnel_host:
+            # TODO: Fix tunnel so it doesn't depend on self.sock state.
+            self._tunnel()
+            # Mark this connection as not reusable
+            self.auto_open = 0
+
+    def connect(self):
+        conn = self._new_conn()
+        self._prepare_conn(conn)
+
+    def request_chunked(self, method, url, body=None, headers=None):
+        """
+        Alternative to the common request() method: sends the body with
+        chunked transfer encoding rather than as a single block.
+        """
+        headers = HTTPHeaderDict(headers if headers is not None else {})
+        skip_accept_encoding = 'accept-encoding' in headers
+        skip_host = 'host' in headers
+        self.putrequest(
+            method,
+            url,
+            skip_accept_encoding=skip_accept_encoding,
+            skip_host=skip_host
+        )
+        for header, value in headers.items():
+            self.putheader(header, value)
+        if 'transfer-encoding' not in headers:
+            self.putheader('Transfer-Encoding', 'chunked')
+        self.endheaders()
+
+        if body is not None:
+            stringish_types = six.string_types + (bytes,)
+            if isinstance(body, stringish_types):
+                body = (body,)
+            for chunk in body:
+                if not chunk:
+                    continue
+                if not isinstance(chunk, bytes):
+                    chunk = chunk.encode('utf8')
+                len_str = hex(len(chunk))[2:]
+                self.send(len_str.encode('utf-8'))
+                self.send(b'\r\n')
+                self.send(chunk)
+                self.send(b'\r\n')
+
+        # After the if clause, to always have a closed body
+        self.send(b'0\r\n\r\n')
+
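+# Editor's sketch, not part of upstream urllib3: the wire framing that
+# request_chunked() above produces for a single chunk.
+if __name__ == '__main__':
+    chunk = b'hello'
+    framed = hex(len(chunk))[2:].encode('utf-8') + b'\r\n' + chunk + b'\r\n'
+    assert framed == b'5\r\nhello\r\n'  # the body then ends with b'0\r\n\r\n'
+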
+
+class HTTPSConnection(HTTPConnection):
+    default_port = port_by_scheme['https']
+
+    ssl_version = None
+
+    def __init__(self, host, port=None, key_file=None, cert_file=None,
+                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+                 ssl_context=None, server_hostname=None, **kw):
+
+        HTTPConnection.__init__(self, host, port, strict=strict,
+                                timeout=timeout, **kw)
+
+        self.key_file = key_file
+        self.cert_file = cert_file
+        self.ssl_context = ssl_context
+        self.server_hostname = server_hostname
+
+        # Required property for Google AppEngine 1.9.0 which otherwise causes
+        # HTTPS requests to go out as HTTP. (See Issue #356)
+        self._protocol = 'https'
+
+    def connect(self):
+        conn = self._new_conn()
+        self._prepare_conn(conn)
+
+        if self.ssl_context is None:
+            self.ssl_context = create_urllib3_context(
+                ssl_version=resolve_ssl_version(None),
+                cert_reqs=resolve_cert_reqs(None),
+            )
+
+        self.sock = ssl_wrap_socket(
+            sock=conn,
+            keyfile=self.key_file,
+            certfile=self.cert_file,
+            ssl_context=self.ssl_context,
+            server_hostname=self.server_hostname
+        )
+
+
+class VerifiedHTTPSConnection(HTTPSConnection):
+    """
+    Based on httplib.HTTPSConnection but wraps the socket with
+    SSL certification.
+    """
+    cert_reqs = None
+    ca_certs = None
+    ca_cert_dir = None
+    ssl_version = None
+    assert_fingerprint = None
+
+    def set_cert(self, key_file=None, cert_file=None,
+                 cert_reqs=None, ca_certs=None,
+                 assert_hostname=None, assert_fingerprint=None,
+                 ca_cert_dir=None):
+        """
+        This method should only be called once, before the connection is used.
+        """
+        # If cert_reqs is not provided, we can try to guess. If the user gave
+        # us a cert database, we assume they want to use it: otherwise, if
+        # they gave us an SSL Context object we should use whatever is set for
+        # it.
+        if cert_reqs is None:
+            if ca_certs or ca_cert_dir:
+                cert_reqs = 'CERT_REQUIRED'
+            elif self.ssl_context is not None:
+                cert_reqs = self.ssl_context.verify_mode
+
+        self.key_file = key_file
+        self.cert_file = cert_file
+        self.cert_reqs = cert_reqs
+        self.assert_hostname = assert_hostname
+        self.assert_fingerprint = assert_fingerprint
+        self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
+        self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
+
+    def connect(self):
+        # Add certificate verification
+        conn = self._new_conn()
+        hostname = self.host
+
+        if self._tunnel_host:
+            self.sock = conn
+            # Calls self._set_hostport(), so self.host is
+            # self._tunnel_host below.
+            self._tunnel()
+            # Mark this connection as not reusable
+            self.auto_open = 0
+
+            # Override the host with the one we're requesting data from.
+            hostname = self._tunnel_host
+
+        server_hostname = hostname
+        if self.server_hostname is not None:
+            server_hostname = self.server_hostname
+
+        is_time_off = datetime.date.today() < RECENT_DATE
+        if is_time_off:
+            warnings.warn((
+                'System time is way off (before {0}). This will probably '
+                'lead to SSL verification errors').format(RECENT_DATE),
+                SystemTimeWarning
+            )
+
+        # Wrap socket using verification with the root certs in
+        # trusted_root_certs
+        if self.ssl_context is None:
+            self.ssl_context = create_urllib3_context(
+                ssl_version=resolve_ssl_version(self.ssl_version),
+                cert_reqs=resolve_cert_reqs(self.cert_reqs),
+            )
+
+        context = self.ssl_context
+        context.verify_mode = resolve_cert_reqs(self.cert_reqs)
+        self.sock = ssl_wrap_socket(
+            sock=conn,
+            keyfile=self.key_file,
+            certfile=self.cert_file,
+            ca_certs=self.ca_certs,
+            ca_cert_dir=self.ca_cert_dir,
+            server_hostname=server_hostname,
+            ssl_context=context)
+
+        if self.assert_fingerprint:
+            assert_fingerprint(self.sock.getpeercert(binary_form=True),
+                               self.assert_fingerprint)
+        elif context.verify_mode != ssl.CERT_NONE \
+                and not getattr(context, 'check_hostname', False) \
+                and self.assert_hostname is not False:
+            # While urllib3 attempts to always turn off hostname matching from
+            # the TLS library, this cannot always be done. So we check whether
+            # the TLS library still thinks it's matching hostnames.
+            cert = self.sock.getpeercert()
+            if not cert.get('subjectAltName', ()):
+                warnings.warn((
+                    'Certificate for {0} has no `subjectAltName`, falling back to check for a '
+                    '`commonName` for now. This feature is being removed by major browsers and '
+                    'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
+                    'for details.)'.format(hostname)),
+                    SubjectAltNameWarning
+                )
+            _match_hostname(cert, self.assert_hostname or server_hostname)
+
+        self.is_verified = (
+            context.verify_mode == ssl.CERT_REQUIRED or
+            self.assert_fingerprint is not None
+        )
+
+
+def _match_hostname(cert, asserted_hostname):
+    try:
+        match_hostname(cert, asserted_hostname)
+    except CertificateError as e:
+        log.error(
+            'Certificate did not match expected hostname: %s. '
+            'Certificate: %s', asserted_hostname, cert
+        )
+        # Add cert to exception and reraise so client code can inspect
+        # the cert when catching the exception, if they want to
+        e._peer_cert = cert
+        raise
+
+
+if ssl:
+    # Make a copy for testing.
+    UnverifiedHTTPSConnection = HTTPSConnection
+    HTTPSConnection = VerifiedHTTPSConnection
+else:
+    HTTPSConnection = DummyConnection
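+
+
+# Editor's sketch, not part of upstream urllib3: layering TCP keep-alive on
+# top of the default socket options, as the HTTPConnection docstring above
+# suggests. No traffic is sent; the options are applied later by _new_conn().
+if __name__ == '__main__':
+    keepalive_options = HTTPConnection.default_socket_options + [
+        (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+    ]
+    conn = HTTPConnection('localhost', 80, socket_options=keepalive_options)
+    assert conn.socket_options == keepalive_options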
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/connectionpool.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/connectionpool.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7a8f193d1cc2a06550690e85ed69983dfa7d065
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/connectionpool.py
@@ -0,0 +1,896 @@
+from __future__ import absolute_import
+import errno
+import logging
+import sys
+import warnings
+
+from socket import error as SocketError, timeout as SocketTimeout
+import socket
+
+
+from .exceptions import (
+    ClosedPoolError,
+    ProtocolError,
+    EmptyPoolError,
+    HeaderParsingError,
+    HostChangedError,
+    LocationValueError,
+    MaxRetryError,
+    ProxyError,
+    ReadTimeoutError,
+    SSLError,
+    TimeoutError,
+    InsecureRequestWarning,
+    NewConnectionError,
+)
+from .packages.ssl_match_hostname import CertificateError
+from .packages import six
+from .packages.six.moves import queue
+from .connection import (
+    port_by_scheme,
+    DummyConnection,
+    HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
+    HTTPException, BaseSSLError,
+)
+from .request import RequestMethods
+from .response import HTTPResponse
+
+from .util.connection import is_connection_dropped
+from .util.request import set_file_position
+from .util.response import assert_header_parsing
+from .util.retry import Retry
+from .util.timeout import Timeout
+from .util.url import get_host, Url, NORMALIZABLE_SCHEMES
+from .util.queue import LifoQueue
+
+
+xrange = six.moves.xrange
+
+log = logging.getLogger(__name__)
+
+_Default = object()
+
+
+# Pool objects
+class ConnectionPool(object):
+    """
+    Base class for all connection pools, such as
+    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
+    """
+
+    scheme = None
+    QueueCls = LifoQueue
+
+    def __init__(self, host, port=None):
+        if not host:
+            raise LocationValueError("No host specified.")
+
+        self.host = _ipv6_host(host, self.scheme)
+        self._proxy_host = host.lower()
+        self.port = port
+
+    def __str__(self):
+        return '%s(host=%r, port=%r)' % (type(self).__name__,
+                                         self.host, self.port)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
+        # Return False to re-raise any potential exceptions
+        return False
+
+    def close(self):
+        """
+        Close all pooled connections and disable the pool.
+        """
+        pass
+
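+# Editor's note, not upstream code: the __enter__/__exit__ methods above make
+# every pool a context manager, so close() runs automatically, e.g. with the
+# HTTPConnectionPool subclass defined below:
+#
+#     with HTTPConnectionPool('example.com') as pool:
+#         ...  # pool.close() is called on exit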
+
+# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
+_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
+
+
+class HTTPConnectionPool(ConnectionPool, RequestMethods):
+    """
+    Thread-safe connection pool for one host.
+
+    :param host:
+        Host used for this HTTP Connection (e.g. "localhost"), passed into
+        :class:`httplib.HTTPConnection`.
+
+    :param port:
+        Port used for this HTTP Connection (None is equivalent to 80), passed
+        into :class:`httplib.HTTPConnection`.
+
+    :param strict:
+        Causes BadStatusLine to be raised if the status line can't be parsed
+        as a valid HTTP/1.0 or 1.1 status line, passed into
+        :class:`httplib.HTTPConnection`.
+
+        .. note::
+           Only works in Python 2. This parameter is ignored in Python 3.
+
+    :param timeout:
+        Socket timeout in seconds for each individual connection. This can
+        be a float or integer, which sets the timeout for the HTTP request,
+        or an instance of :class:`urllib3.util.Timeout` which gives you more
+        fine-grained control over request timeouts. After the constructor has
+        run, this is always a :class:`urllib3.util.Timeout` object.
+
+    :param maxsize:
+        Number of connections to save that can be reused. More than 1 is useful
+        in multithreaded situations. If ``block`` is set to False, more
+        connections will be created but they will not be saved once they've
+        been used.
+
+    :param block:
+        If set to True, no more than ``maxsize`` connections will be used at
+        a time. When no free connections are available, the call will block
+        until a connection has been released. This is a useful side effect in
+        multithreaded situations where one does not want to open more than
+        ``maxsize`` connections per host, e.g. to avoid flooding the server.
+
+    :param headers:
+        Headers to include with all requests, unless other headers are given
+        explicitly.
+
+    :param retries:
+        Retry configuration to use by default with requests in this pool.
+
+    :param _proxy:
+        Parsed proxy URL; should not be used directly. Instead, see
+        :class:`urllib3.connectionpool.ProxyManager`.
+
+    :param _proxy_headers:
+        A dictionary with proxy headers; should not be used directly.
+        Instead, see :class:`urllib3.connectionpool.ProxyManager`.
+
+    :param \\**conn_kw:
+        Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
+        :class:`urllib3.connection.HTTPSConnection` instances.
+    """
+
+    scheme = 'http'
+    ConnectionCls = HTTPConnection
+    ResponseCls = HTTPResponse
+
+    def __init__(self, host, port=None, strict=False,
+                 timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
+                 headers=None, retries=None,
+                 _proxy=None, _proxy_headers=None,
+                 **conn_kw):
+        ConnectionPool.__init__(self, host, port)
+        RequestMethods.__init__(self, headers)
+
+        self.strict = strict
+
+        if not isinstance(timeout, Timeout):
+            timeout = Timeout.from_float(timeout)
+
+        if retries is None:
+            retries = Retry.DEFAULT
+
+        self.timeout = timeout
+        self.retries = retries
+
+        self.pool = self.QueueCls(maxsize)
+        self.block = block
+
+        self.proxy = _proxy
+        self.proxy_headers = _proxy_headers or {}
+
+        # Fill the queue up so that doing get() on it will block properly
+        for _ in xrange(maxsize):
+            self.pool.put(None)
+
+        # These are mostly for testing and debugging purposes.
+        self.num_connections = 0
+        self.num_requests = 0
+        self.conn_kw = conn_kw
+
+        if self.proxy:
+            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
+            # We cannot know if the user has added default socket options, so we cannot replace the
+            # list.
+            self.conn_kw.setdefault('socket_options', [])
+
+    def _new_conn(self):
+        """
+        Return a fresh :class:`HTTPConnection`.
+        """
+        self.num_connections += 1
+        log.debug("Starting new HTTP connection (%d): %s:%s",
+                  self.num_connections, self.host, self.port or "80")
+
+        conn = self.ConnectionCls(host=self.host, port=self.port,
+                                  timeout=self.timeout.connect_timeout,
+                                  strict=self.strict, **self.conn_kw)
+        return conn
+
+    def _get_conn(self, timeout=None):
+        """
+        Get a connection. Will return a pooled connection if one is available.
+
+        If no connections are available and :prop:`.block` is ``False``, then a
+        fresh connection is returned.
+
+        :param timeout:
+            Seconds to wait before giving up and raising
+            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
+            :prop:`.block` is ``True``.
+        """
+        conn = None
+        try:
+            conn = self.pool.get(block=self.block, timeout=timeout)
+
+        except AttributeError:  # self.pool is None
+            raise ClosedPoolError(self, "Pool is closed.")
+
+        except queue.Empty:
+            if self.block:
+                raise EmptyPoolError(self,
+                                     "Pool reached maximum size and no more "
+                                     "connections are allowed.")
+            pass  # Oh well, we'll create a new connection then
+
+        # If this is a persistent connection, check if it got disconnected
+        if conn and is_connection_dropped(conn):
+            log.debug("Resetting dropped connection: %s", self.host)
+            conn.close()
+            if getattr(conn, 'auto_open', 1) == 0:
+                # This is a proxied connection that has been mutated by
+                # httplib._tunnel() and cannot be reused (since it would
+                # attempt to bypass the proxy)
+                conn = None
+
+        return conn or self._new_conn()
+
+    def _put_conn(self, conn):
+        """
+        Put a connection back into the pool.
+
+        :param conn:
+            Connection object for the current host and port as returned by
+            :meth:`._new_conn` or :meth:`._get_conn`.
+
+        If the pool is already full, the connection is closed and discarded
+        because we exceeded maxsize. If connections are discarded frequently,
+        then maxsize should be increased.
+
+        If the pool is closed, then the connection will be closed and discarded.
+        """
+        try:
+            self.pool.put(conn, block=False)
+            return  # Everything is dandy, done.
+        except AttributeError:
+            # self.pool is None.
+            pass
+        except queue.Full:
+            # This should never happen if self.block == True
+            log.warning(
+                "Connection pool is full, discarding connection: %s",
+                self.host)
+
+        # Connection never got put back into the pool, close it.
+        if conn:
+            conn.close()
+
+    def _validate_conn(self, conn):
+        """
+        Called right before a request is made, after the socket is created.
+        """
+        pass
+
+    def _prepare_proxy(self, conn):
+        # Nothing to do for HTTP connections.
+        pass
+
+    def _get_timeout(self, timeout):
+        """ Helper that always returns a :class:`urllib3.util.Timeout` """
+        if timeout is _Default:
+            return self.timeout.clone()
+
+        if isinstance(timeout, Timeout):
+            return timeout.clone()
+        else:
+            # User passed us an int/float. This is for backwards compatibility,
+            # can be removed later
+            return Timeout.from_float(timeout)
+
+    def _raise_timeout(self, err, url, timeout_value):
+        """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
+
+        if isinstance(err, SocketTimeout):
+            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+        # See the comment about EAGAIN in Python 3 in _make_request() below.
+        # In Python 2 we have to specifically catch it and throw the timeout
+        # error.
+        if hasattr(err, 'errno') and err.errno in _blocking_errnos:
+            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+        # Catch possible read timeouts thrown as SSL errors. If not the
+        # case, rethrow the original. We need to do this because of:
+        # http://bugs.python.org/issue10272
+        if 'timed out' in str(err) or 'did not complete (read)' in str(err):  # Python < 2.7.4
+            raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
+
+    def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
+                      **httplib_request_kw):
+        """
+        Perform a request on a given urllib connection object taken from our
+        pool.
+
+        :param conn:
+            a connection from one of our connection pools
+
+        :param timeout:
+            Socket timeout in seconds for the request. This can be a
+            float or integer, which will set the same timeout value for
+            the socket connect and the socket read, or an instance of
+            :class:`urllib3.util.Timeout`, which gives you more fine-grained
+            control over your timeouts.
+        """
+        self.num_requests += 1
+
+        timeout_obj = self._get_timeout(timeout)
+        timeout_obj.start_connect()
+        conn.timeout = timeout_obj.connect_timeout
+
+        # Trigger any extra validation we need to do.
+        try:
+            self._validate_conn(conn)
+        except (SocketTimeout, BaseSSLError) as e:
+            # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
+            self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
+            raise
+
+        # conn.request() calls httplib.*.request, not the method in
+        # urllib3.request. It also calls makefile (recv) on the socket.
+        if chunked:
+            conn.request_chunked(method, url, **httplib_request_kw)
+        else:
+            conn.request(method, url, **httplib_request_kw)
+
+        # Reset the timeout for the recv() on the socket
+        read_timeout = timeout_obj.read_timeout
+
+        # App Engine doesn't have a sock attr
+        if getattr(conn, 'sock', None):
+            # In Python 3 socket.py will catch EAGAIN and return None when you
+            # try and read into the file pointer created by http.client, which
+            # instead raises a BadStatusLine exception. Instead of catching
+            # the exception and assuming all BadStatusLine exceptions are read
+            # timeouts, check for a zero timeout before making the request.
+            if read_timeout == 0:
+                raise ReadTimeoutError(
+                    self, url, "Read timed out. (read timeout=%s)" % read_timeout)
+            if read_timeout is Timeout.DEFAULT_TIMEOUT:
+                conn.sock.settimeout(socket.getdefaulttimeout())
+            else:  # None or a value
+                conn.sock.settimeout(read_timeout)
+
+        # Receive the response from the server
+        try:
+            try:  # Python 2.7, use buffering of HTTP responses
+                httplib_response = conn.getresponse(buffering=True)
+            except TypeError:  # Python 3
+                try:
+                    httplib_response = conn.getresponse()
+                except Exception as e:
+                    # Remove the TypeError from the exception chain in Python 3;
+                    # otherwise it looks like a programming error was the cause.
+                    six.raise_from(e, None)
+        except (SocketTimeout, BaseSSLError, SocketError) as e:
+            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
+            raise
+
+        # AppEngine doesn't have a version attr.
+        http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
+        log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
+                  method, url, http_version, httplib_response.status,
+                  httplib_response.length)
+
+        try:
+            assert_header_parsing(httplib_response.msg)
+        except (HeaderParsingError, TypeError) as hpe:  # Platform-specific: Python 3
+            log.warning(
+                'Failed to parse headers (url=%s): %s',
+                self._absolute_url(url), hpe, exc_info=True)
+
+        return httplib_response
+
+    def _absolute_url(self, path):
+        return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
+
+    def close(self):
+        """
+        Close all pooled connections and disable the pool.
+        """
+        if self.pool is None:
+            return
+        # Disable access to the pool
+        old_pool, self.pool = self.pool, None
+
+        try:
+            while True:
+                conn = old_pool.get(block=False)
+                if conn:
+                    conn.close()
+
+        except queue.Empty:
+            pass  # Done.
+
+    def is_same_host(self, url):
+        """
+        Check if the given ``url`` is a member of the same host as this
+        connection pool.
+        """
+        if url.startswith('/'):
+            return True
+
+        # TODO: Add optional support for socket.gethostbyname checking.
+        scheme, host, port = get_host(url)
+
+        host = _ipv6_host(host, self.scheme)
+
+        # Use explicit default port for comparison when none is given
+        if self.port and not port:
+            port = port_by_scheme.get(scheme)
+        elif not self.port and port == port_by_scheme.get(scheme):
+            port = None
+
+        return (scheme, host, port) == (self.scheme, self.host, self.port)
+
+    def urlopen(self, method, url, body=None, headers=None, retries=None,
+                redirect=True, assert_same_host=True, timeout=_Default,
+                pool_timeout=None, release_conn=None, chunked=False,
+                body_pos=None, **response_kw):
+        """
+        Get a connection from the pool and perform an HTTP request. This is the
+        lowest level call for making a request, so you'll need to specify all
+        the raw details.
+
+        .. note::
+
+           More commonly, it's appropriate to use a convenience method provided
+           by :class:`.RequestMethods`, such as :meth:`request`.
+
+        .. note::
+
+           `release_conn` will only behave as expected if
+           `preload_content=False` because we want to make
+           `preload_content=False` the default behaviour someday soon without
+           breaking backwards compatibility.
+
+        :param method:
+            HTTP request method (such as GET, POST, PUT, etc.)
+
+        :param body:
+            Data to send in the request body (useful for creating
+            POST requests, see HTTPConnectionPool.post_url for
+            more convenience).
+
+        :param headers:
+            Dictionary of custom headers to send, such as User-Agent,
+            If-None-Match, etc. If None, pool headers are used. If provided,
+            these headers completely replace any pool-specific headers.
+
+        :param retries:
+            Configure the number of retries to allow before raising a
+            :class:`~urllib3.exceptions.MaxRetryError` exception.
+
+            Pass ``None`` to retry until you receive a response. Pass a
+            :class:`~urllib3.util.retry.Retry` object for fine-grained control
+            over different types of retries.
+            Pass an integer number to retry connection errors that many times,
+            but no other types of errors. Pass zero to never retry.
+
+            If ``False``, then retries are disabled and any exception is raised
+            immediately. Also, instead of raising a MaxRetryError on redirects,
+            the redirect response will be returned.
+
+        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
+
+        :param redirect:
+            If True, automatically handle redirects (status codes 301, 302,
+            303, 307, 308). Each redirect counts as a retry. Disabling retries
+            will disable redirect, too.
+
+        :param assert_same_host:
+            If ``True``, will make sure that the host of the pool requests is
+            consistent, and will raise HostChangedError otherwise. When
+            ``False``, you can use the pool on an HTTP proxy and request
+            foreign hosts.
+
+        :param timeout:
+            If specified, overrides the default timeout for this one
+            request. It may be a float (in seconds) or an instance of
+            :class:`urllib3.util.Timeout`.
+
+        :param pool_timeout:
+            If set and the pool is set to block=True, then this method will
+            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
+            connection is available within the time period.
+
+        :param release_conn:
+            If False, then the urlopen call will not release the connection
+            back into the pool once a response is received (but will release if
+            you read the entire contents of the response such as when
+            `preload_content=True`). This is useful if you're not preloading
+            the response's content immediately. You will need to call
+            ``r.release_conn()`` on the response ``r`` to return the connection
+            back into the pool. If None, it takes the value of
+            ``response_kw.get('preload_content', True)``.
+
+        :param chunked:
+            If True, urllib3 will send the body using chunked transfer
+            encoding. Otherwise, urllib3 will send the body using the standard
+            content-length form. Defaults to False.
+
+        :param int body_pos:
+            Position to seek to in file-like body in the event of a retry or
+            redirect. Typically this won't need to be set because urllib3 will
+            auto-populate the value when needed.
+
+        :param \\**response_kw:
+            Additional parameters are passed to
+            :meth:`urllib3.response.HTTPResponse.from_httplib`
+        """
+        if headers is None:
+            headers = self.headers
+
+        if not isinstance(retries, Retry):
+            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
+
+        if release_conn is None:
+            release_conn = response_kw.get('preload_content', True)
+
+        # Check host
+        if assert_same_host and not self.is_same_host(url):
+            raise HostChangedError(self, url, retries)
+
+        conn = None
+
+        # Track whether `conn` needs to be released before
+        # returning/raising/recursing. Update this variable if necessary, and
+        # leave `release_conn` constant throughout the function. That way, if
+        # the function recurses, the original value of `release_conn` will be
+        # passed down into the recursive call, and its value will be respected.
+        #
+        # See issue #651 [1] for details.
+        #
+        # [1] <https://github.com/shazow/urllib3/issues/651>
+        release_this_conn = release_conn
+
+        # Merge the proxy headers. Only do this in HTTP. We have to copy the
+        # headers dict so we can safely change it without those changes being
+        # reflected in anyone else's copy.
+        if self.scheme == 'http':
+            headers = headers.copy()
+            headers.update(self.proxy_headers)
+
+        # Must keep the exception bound to a separate variable or else Python 3
+        # complains about UnboundLocalError.
+        err = None
+
+        # Keep track of whether we cleanly exited the except block. This
+        # ensures we do proper cleanup in finally.
+        clean_exit = False
+
+        # Rewind body position, if needed. Record current position
+        # for future rewinds in the event of a redirect/retry.
+        body_pos = set_file_position(body, body_pos)
+
+        try:
+            # Request a connection from the queue.
+            timeout_obj = self._get_timeout(timeout)
+            conn = self._get_conn(timeout=pool_timeout)
+
+            conn.timeout = timeout_obj.connect_timeout
+
+            is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
+            if is_new_proxy_conn:
+                self._prepare_proxy(conn)
+
+            # Make the request on the httplib connection object.
+            httplib_response = self._make_request(conn, method, url,
+                                                  timeout=timeout_obj,
+                                                  body=body, headers=headers,
+                                                  chunked=chunked)
+
+            # If we're going to release the connection in ``finally:``, then
+            # the response doesn't need to know about the connection. Otherwise
+            # it will also try to release it and we'll have a double-release
+            # mess.
+            response_conn = conn if not release_conn else None
+
+            # Pass method to Response for length checking
+            response_kw['request_method'] = method
+
+            # Import httplib's response into our own wrapper object
+            response = self.ResponseCls.from_httplib(httplib_response,
+                                                     pool=self,
+                                                     connection=response_conn,
+                                                     retries=retries,
+                                                     **response_kw)
+
+            # Everything went great!
+            clean_exit = True
+
+        except queue.Empty:
+            # Timed out by queue.
+            raise EmptyPoolError(self, "No pool connections are available.")
+
+        except (TimeoutError, HTTPException, SocketError, ProtocolError,
+                BaseSSLError, SSLError, CertificateError) as e:
+            # Discard the connection for these exceptions. It will be
+            # replaced during the next _get_conn() call.
+            clean_exit = False
+            if isinstance(e, (BaseSSLError, CertificateError)):
+                e = SSLError(e)
+            elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
+                e = ProxyError('Cannot connect to proxy.', e)
+            elif isinstance(e, (SocketError, HTTPException)):
+                e = ProtocolError('Connection aborted.', e)
+
+            retries = retries.increment(method, url, error=e, _pool=self,
+                                        _stacktrace=sys.exc_info()[2])
+            retries.sleep()
+
+            # Keep track of the error for the retry warning.
+            err = e
+
+        finally:
+            if not clean_exit:
+                # We hit some kind of exception, handled or otherwise. We need
+                # to throw the connection away unless explicitly told not to.
+                # Close the connection, set the variable to None, and make sure
+                # we put the None back in the pool to avoid leaking it.
+                conn = conn and conn.close()
+                release_this_conn = True
+
+            if release_this_conn:
+                # Put the connection back to be reused. If the connection is
+                # expired then it will be None, which will get replaced with a
+                # fresh connection during _get_conn.
+                self._put_conn(conn)
+
+        if not conn:
+            # Try again
+            log.warning("Retrying (%r) after connection "
+                        "broken by '%r': %s", retries, err, url)
+            return self.urlopen(method, url, body, headers, retries,
+                                redirect, assert_same_host,
+                                timeout=timeout, pool_timeout=pool_timeout,
+                                release_conn=release_conn, body_pos=body_pos,
+                                **response_kw)
+
+        def drain_and_release_conn(response):
+            try:
+                # discard any remaining response body, the connection will be
+                # released back to the pool once the entire response is read
+                response.read()
+            except (TimeoutError, HTTPException, SocketError, ProtocolError,
+                    BaseSSLError, SSLError):
+                pass
+
+        # Handle redirect?
+        redirect_location = redirect and response.get_redirect_location()
+        if redirect_location:
+            if response.status == 303:
+                method = 'GET'
+
+            try:
+                retries = retries.increment(method, url, response=response, _pool=self)
+            except MaxRetryError:
+                if retries.raise_on_redirect:
+                    # Drain and release the connection for this response, since
+                    # we're not returning it to be released manually.
+                    drain_and_release_conn(response)
+                    raise
+                return response
+
+            # drain and return the connection to the pool before recursing
+            drain_and_release_conn(response)
+
+            retries.sleep_for_retry(response)
+            log.debug("Redirecting %s -> %s", url, redirect_location)
+            return self.urlopen(
+                method, redirect_location, body, headers,
+                retries=retries, redirect=redirect,
+                assert_same_host=assert_same_host,
+                timeout=timeout, pool_timeout=pool_timeout,
+                release_conn=release_conn, body_pos=body_pos,
+                **response_kw)
+
+        # Check if we should retry the HTTP response.
+        has_retry_after = bool(response.getheader('Retry-After'))
+        if retries.is_retry(method, response.status, has_retry_after):
+            try:
+                retries = retries.increment(method, url, response=response, _pool=self)
+            except MaxRetryError:
+                if retries.raise_on_status:
+                    # Drain and release the connection for this response, since
+                    # we're not returning it to be released manually.
+                    drain_and_release_conn(response)
+                    raise
+                return response
+
+            # drain and return the connection to the pool before recursing
+            drain_and_release_conn(response)
+
+            retries.sleep(response)
+            log.debug("Retry: %s", url)
+            return self.urlopen(
+                method, url, body, headers,
+                retries=retries, redirect=redirect,
+                assert_same_host=assert_same_host,
+                timeout=timeout, pool_timeout=pool_timeout,
+                release_conn=release_conn,
+                body_pos=body_pos, **response_kw)
+
+        return response
+
+
+class HTTPSConnectionPool(HTTPConnectionPool):
+    """
+    Same as :class:`.HTTPConnectionPool`, but HTTPS.
+
+    When Python is compiled with the :mod:`ssl` module, then
+    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
+    instead of :class:`.HTTPSConnection`.
+
+    :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
+    ``assert_hostname`` and ``host`` in this order to verify connections.
+    If ``assert_hostname`` is False, no verification is done.
+
+    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
+    ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
+    available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
+    the connection socket into an SSL socket.
+    """
+
+    scheme = 'https'
+    ConnectionCls = HTTPSConnection
+
+    def __init__(self, host, port=None,
+                 strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
+                 block=False, headers=None, retries=None,
+                 _proxy=None, _proxy_headers=None,
+                 key_file=None, cert_file=None, cert_reqs=None,
+                 ca_certs=None, ssl_version=None,
+                 assert_hostname=None, assert_fingerprint=None,
+                 ca_cert_dir=None, **conn_kw):
+
+        HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
+                                    block, headers, retries, _proxy, _proxy_headers,
+                                    **conn_kw)
+
+        if ca_certs and cert_reqs is None:
+            cert_reqs = 'CERT_REQUIRED'
+
+        self.key_file = key_file
+        self.cert_file = cert_file
+        self.cert_reqs = cert_reqs
+        self.ca_certs = ca_certs
+        self.ca_cert_dir = ca_cert_dir
+        self.ssl_version = ssl_version
+        self.assert_hostname = assert_hostname
+        self.assert_fingerprint = assert_fingerprint
+
+    def _prepare_conn(self, conn):
+        """
+        Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
+        and establish the tunnel if a proxy is used.
+        """
+
+        if isinstance(conn, VerifiedHTTPSConnection):
+            conn.set_cert(key_file=self.key_file,
+                          cert_file=self.cert_file,
+                          cert_reqs=self.cert_reqs,
+                          ca_certs=self.ca_certs,
+                          ca_cert_dir=self.ca_cert_dir,
+                          assert_hostname=self.assert_hostname,
+                          assert_fingerprint=self.assert_fingerprint)
+            conn.ssl_version = self.ssl_version
+        return conn
+
+    def _prepare_proxy(self, conn):
+        """
+        Establish the tunnel connection early, because otherwise httplib
+        would improperly set the Host: header to the proxy's IP:port.
+        """
+        conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
+        conn.connect()
+
+    def _new_conn(self):
+        """
+        Return a fresh :class:`httplib.HTTPSConnection`.
+        """
+        self.num_connections += 1
+        log.debug("Starting new HTTPS connection (%d): %s:%s",
+                  self.num_connections, self.host, self.port or "443")
+
+        if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
+            raise SSLError("Can't connect to HTTPS URL because the SSL "
+                           "module is not available.")
+
+        actual_host = self.host
+        actual_port = self.port
+        if self.proxy is not None:
+            actual_host = self.proxy.host
+            actual_port = self.proxy.port
+
+        conn = self.ConnectionCls(host=actual_host, port=actual_port,
+                                  timeout=self.timeout.connect_timeout,
+                                  strict=self.strict, **self.conn_kw)
+
+        return self._prepare_conn(conn)
+
+    def _validate_conn(self, conn):
+        """
+        Called right before a request is made, after the socket is created.
+        """
+        super(HTTPSConnectionPool, self)._validate_conn(conn)
+
+        # Force connect early to allow us to validate the connection.
+        if not getattr(conn, 'sock', None):  # AppEngine might not have `.sock`
+            conn.connect()
+
+        if not conn.is_verified:
+            warnings.warn((
+                'Unverified HTTPS request is being made. '
+                'Adding certificate verification is strongly advised. See: '
+                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+                '#ssl-warnings'),
+                InsecureRequestWarning)
+
+
+def connection_from_url(url, **kw):
+    """
+    Given a URL, return a :class:`.ConnectionPool` instance of its host.
+
+    This is a shortcut for not having to parse out the scheme, host, and port
+    of the URL before creating a :class:`.ConnectionPool` instance.
+
+    :param url:
+        Absolute URL string that must include the scheme. Port is optional.
+
+    :param \\**kw:
+        Passes additional parameters to the constructor of the appropriate
+        :class:`.ConnectionPool`. Useful for specifying things like
+        timeout, maxsize, headers, etc.
+
+    Example::
+
+        >>> conn = connection_from_url('http://google.com/')
+        >>> r = conn.request('GET', '/')
+    """
+    scheme, host, port = get_host(url)
+    port = port or port_by_scheme.get(scheme, 80)
+    if scheme == 'https':
+        return HTTPSConnectionPool(host, port=port, **kw)
+    else:
+        return HTTPConnectionPool(host, port=port, **kw)
+
+
+def _ipv6_host(host, scheme):
+    """
+    Process IPv6 address literals
+    """
+
+    # httplib doesn't like it when we include brackets in IPv6 addresses
+    # Specifically, if we include brackets but also pass the port then
+    # httplib crazily doubles up the square brackets on the Host header.
+    # Instead, we need to make sure we never pass ``None`` as the port.
+    # However, for backward compatibility reasons we can't actually
+    # *assert* that.  See http://bugs.python.org/issue28539
+    #
+    # Also, if an IPv6 address literal has a zone identifier, the
+    # percent sign might be URI-encoded; convert it back into ASCII.
+    if host.startswith('[') and host.endswith(']'):
+        host = host.replace('%25', '%').strip('[]')
+    if scheme in NORMALIZABLE_SCHEMES:
+        host = host.lower()
+    return host
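+
+
+# A brief illustration (hypothetical values, not part of the original
+# module), assuming 'http' is in NORMALIZABLE_SCHEMES:
+#
+#     _ipv6_host('[2001:DB8::1%25eth0]', 'http')  ->  '2001:db8::1%eth0'
+#
+# The brackets are stripped, the percent-encoded zone identifier is decoded,
+# and the host is lowercased because the scheme is normalizable.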
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__init__.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aaf1e7877c7b3f3aa1fac9cd4a69d6dd142f02b5
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d54d74db116fa974484f4ef7946c3e0c228e7647
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/_appengine_environ.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b15502ee90d42a770b29ea616671b27daa19a310
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/appengine.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99bf123acddf310022b4cb43025f48f94524903b
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/ntlmpool.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ca67c95f18f7554f0ed7eed386e221da717195eb
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/pyopenssl.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d7655c63a94772d07598a0b0bd198fa73c0efbc
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/securetransport.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..912cf72e40850d4926ff98c1cc83b460da333950
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/__pycache__/socks.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3e00942cb9f9d6c6a179b1b06f2df07b7b8463c
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_appengine_environ.py
@@ -0,0 +1,30 @@
+"""
+This module provides means to detect the App Engine environment.
+"""
+
+import os
+
+
+def is_appengine():
+    return (is_local_appengine() or
+            is_prod_appengine() or
+            is_prod_appengine_mvms())
+
+
+def is_appengine_sandbox():
+    return is_appengine() and not is_prod_appengine_mvms()
+
+
+def is_local_appengine():
+    # SERVER_SOFTWARE may be absent even when APPENGINE_RUNTIME is set,
+    # so fall back to an empty string instead of risking a KeyError.
+    return ('APPENGINE_RUNTIME' in os.environ and
+            'Development/' in os.environ.get('SERVER_SOFTWARE', ''))
+
+
+def is_prod_appengine():
+    return ('APPENGINE_RUNTIME' in os.environ and
+            'Google App Engine/' in os.environ.get('SERVER_SOFTWARE', '') and
+            not is_prod_appengine_mvms())
+
+
+def is_prod_appengine_mvms():
+    return os.environ.get('GAE_VM', False) == 'true'
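+
+
+# Usage sketch (illustrative only, not part of the original module): pick a
+# transport based on the detected environment.
+#
+#     if is_appengine_sandbox():
+#         # Sandboxed runtime: socket access is restricted, so a
+#         # URLFetch-based transport is the safer choice.
+#         ...
+#     elif is_prod_appengine_mvms():
+#         # Managed VMs have full socket support; a normal PoolManager works.
+#         ...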
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..022d5ff93462d417cc80543add0589b45a1147d6
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3480b8a938d149570dacb9b3cdd0b9187a1b6020
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/bindings.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d9004d43aa9501c71d5b66165d95d18943c62c0b
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/__pycache__/low_level.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcf41c02b2a3570002f0bb48c610109bba248ab8
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/bindings.py
@@ -0,0 +1,593 @@
+"""
+This module uses ctypes to bind a whole bunch of functions and constants from
+SecureTransport. The goal here is to provide the low-level API to
+SecureTransport. These are essentially the C-level functions and constants, and
+they're pretty gross to work with.
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us. For
+that reason, this code should be considered to be covered both by urllib3's
+license and by oscrypto's:
+
+    Copyright (c) 2015-2016 Will Bond <will@wbond.net>
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in
+    all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import absolute_import
+
+import platform
+from ctypes.util import find_library
+from ctypes import (
+    c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long,
+    c_bool
+)
+from ctypes import CDLL, POINTER, CFUNCTYPE
+
+
+security_path = find_library('Security')
+if not security_path:
+    raise ImportError('The library Security could not be found')
+
+
+core_foundation_path = find_library('CoreFoundation')
+if not core_foundation_path:
+    raise ImportError('The library CoreFoundation could not be found')
+
+
+version = platform.mac_ver()[0]
+version_info = tuple(map(int, version.split('.')))
+if version_info < (10, 8):
+    raise OSError(
+        'Only OS X 10.8 and newer are supported, not %s.%s' % (
+            version_info[0], version_info[1]
+        )
+    )
+
+Security = CDLL(security_path, use_errno=True)
+CoreFoundation = CDLL(core_foundation_path, use_errno=True)
+
+Boolean = c_bool
+CFIndex = c_long
+CFStringEncoding = c_uint32
+CFData = c_void_p
+CFString = c_void_p
+CFArray = c_void_p
+CFMutableArray = c_void_p
+CFDictionary = c_void_p
+CFError = c_void_p
+CFType = c_void_p
+CFTypeID = c_ulong
+
+CFTypeRef = POINTER(CFType)
+CFAllocatorRef = c_void_p
+
+OSStatus = c_int32
+
+CFDataRef = POINTER(CFData)
+CFStringRef = POINTER(CFString)
+CFArrayRef = POINTER(CFArray)
+CFMutableArrayRef = POINTER(CFMutableArray)
+CFDictionaryRef = POINTER(CFDictionary)
+CFArrayCallBacks = c_void_p
+CFDictionaryKeyCallBacks = c_void_p
+CFDictionaryValueCallBacks = c_void_p
+
+SecCertificateRef = POINTER(c_void_p)
+SecExternalFormat = c_uint32
+SecExternalItemType = c_uint32
+SecIdentityRef = POINTER(c_void_p)
+SecItemImportExportFlags = c_uint32
+SecItemImportExportKeyParameters = c_void_p
+SecKeychainRef = POINTER(c_void_p)
+SSLProtocol = c_uint32
+SSLCipherSuite = c_uint32
+SSLContextRef = POINTER(c_void_p)
+SecTrustRef = POINTER(c_void_p)
+SSLConnectionRef = c_uint32
+SecTrustResultType = c_uint32
+SecTrustOptionFlags = c_uint32
+SSLProtocolSide = c_uint32
+SSLConnectionType = c_uint32
+SSLSessionOption = c_uint32
+
+
+try:
+    Security.SecItemImport.argtypes = [
+        CFDataRef,
+        CFStringRef,
+        POINTER(SecExternalFormat),
+        POINTER(SecExternalItemType),
+        SecItemImportExportFlags,
+        POINTER(SecItemImportExportKeyParameters),
+        SecKeychainRef,
+        POINTER(CFArrayRef),
+    ]
+    Security.SecItemImport.restype = OSStatus
+
+    Security.SecCertificateGetTypeID.argtypes = []
+    Security.SecCertificateGetTypeID.restype = CFTypeID
+
+    Security.SecIdentityGetTypeID.argtypes = []
+    Security.SecIdentityGetTypeID.restype = CFTypeID
+
+    Security.SecKeyGetTypeID.argtypes = []
+    Security.SecKeyGetTypeID.restype = CFTypeID
+
+    Security.SecCertificateCreateWithData.argtypes = [
+        CFAllocatorRef,
+        CFDataRef
+    ]
+    Security.SecCertificateCreateWithData.restype = SecCertificateRef
+
+    Security.SecCertificateCopyData.argtypes = [
+        SecCertificateRef
+    ]
+    Security.SecCertificateCopyData.restype = CFDataRef
+
+    Security.SecCopyErrorMessageString.argtypes = [
+        OSStatus,
+        c_void_p
+    ]
+    Security.SecCopyErrorMessageString.restype = CFStringRef
+
+    Security.SecIdentityCreateWithCertificate.argtypes = [
+        CFTypeRef,
+        SecCertificateRef,
+        POINTER(SecIdentityRef)
+    ]
+    Security.SecIdentityCreateWithCertificate.restype = OSStatus
+
+    Security.SecKeychainCreate.argtypes = [
+        c_char_p,
+        c_uint32,
+        c_void_p,
+        Boolean,
+        c_void_p,
+        POINTER(SecKeychainRef)
+    ]
+    Security.SecKeychainCreate.restype = OSStatus
+
+    Security.SecKeychainDelete.argtypes = [
+        SecKeychainRef
+    ]
+    Security.SecKeychainDelete.restype = OSStatus
+
+    Security.SecPKCS12Import.argtypes = [
+        CFDataRef,
+        CFDictionaryRef,
+        POINTER(CFArrayRef)
+    ]
+    Security.SecPKCS12Import.restype = OSStatus
+
+    SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
+    SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t))
+
+    Security.SSLSetIOFuncs.argtypes = [
+        SSLContextRef,
+        SSLReadFunc,
+        SSLWriteFunc
+    ]
+    Security.SSLSetIOFuncs.restype = OSStatus
+
+    Security.SSLSetPeerID.argtypes = [
+        SSLContextRef,
+        c_char_p,
+        c_size_t
+    ]
+    Security.SSLSetPeerID.restype = OSStatus
+
+    Security.SSLSetCertificate.argtypes = [
+        SSLContextRef,
+        CFArrayRef
+    ]
+    Security.SSLSetCertificate.restype = OSStatus
+
+    Security.SSLSetCertificateAuthorities.argtypes = [
+        SSLContextRef,
+        CFTypeRef,
+        Boolean
+    ]
+    Security.SSLSetCertificateAuthorities.restype = OSStatus
+
+    Security.SSLSetConnection.argtypes = [
+        SSLContextRef,
+        SSLConnectionRef
+    ]
+    Security.SSLSetConnection.restype = OSStatus
+
+    Security.SSLSetPeerDomainName.argtypes = [
+        SSLContextRef,
+        c_char_p,
+        c_size_t
+    ]
+    Security.SSLSetPeerDomainName.restype = OSStatus
+
+    Security.SSLHandshake.argtypes = [
+        SSLContextRef
+    ]
+    Security.SSLHandshake.restype = OSStatus
+
+    Security.SSLRead.argtypes = [
+        SSLContextRef,
+        c_char_p,
+        c_size_t,
+        POINTER(c_size_t)
+    ]
+    Security.SSLRead.restype = OSStatus
+
+    Security.SSLWrite.argtypes = [
+        SSLContextRef,
+        c_char_p,
+        c_size_t,
+        POINTER(c_size_t)
+    ]
+    Security.SSLWrite.restype = OSStatus
+
+    Security.SSLClose.argtypes = [
+        SSLContextRef
+    ]
+    Security.SSLClose.restype = OSStatus
+
+    Security.SSLGetNumberSupportedCiphers.argtypes = [
+        SSLContextRef,
+        POINTER(c_size_t)
+    ]
+    Security.SSLGetNumberSupportedCiphers.restype = OSStatus
+
+    Security.SSLGetSupportedCiphers.argtypes = [
+        SSLContextRef,
+        POINTER(SSLCipherSuite),
+        POINTER(c_size_t)
+    ]
+    Security.SSLGetSupportedCiphers.restype = OSStatus
+
+    Security.SSLSetEnabledCiphers.argtypes = [
+        SSLContextRef,
+        POINTER(SSLCipherSuite),
+        c_size_t
+    ]
+    Security.SSLSetEnabledCiphers.restype = OSStatus
+
+    Security.SSLGetNumberEnabledCiphers.argtypes = [
+        SSLContextRef,
+        POINTER(c_size_t)
+    ]
+    Security.SSLGetNumberEnabledCiphers.restype = OSStatus
+
+    Security.SSLGetEnabledCiphers.argtypes = [
+        SSLContextRef,
+        POINTER(SSLCipherSuite),
+        POINTER(c_size_t)
+    ]
+    Security.SSLGetEnabledCiphers.restype = OSStatus
+
+    Security.SSLGetNegotiatedCipher.argtypes = [
+        SSLContextRef,
+        POINTER(SSLCipherSuite)
+    ]
+    Security.SSLGetNegotiatedCipher.restype = OSStatus
+
+    Security.SSLGetNegotiatedProtocolVersion.argtypes = [
+        SSLContextRef,
+        POINTER(SSLProtocol)
+    ]
+    Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
+
+    Security.SSLCopyPeerTrust.argtypes = [
+        SSLContextRef,
+        POINTER(SecTrustRef)
+    ]
+    Security.SSLCopyPeerTrust.restype = OSStatus
+
+    Security.SecTrustSetAnchorCertificates.argtypes = [
+        SecTrustRef,
+        CFArrayRef
+    ]
+    Security.SecTrustSetAnchorCertificates.restype = OSStatus
+
+    Security.SecTrustSetAnchorCertificatesOnly.argtypes = [
+        SecTrustRef,
+        Boolean
+    ]
+    Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
+
+    Security.SecTrustEvaluate.argtypes = [
+        SecTrustRef,
+        POINTER(SecTrustResultType)
+    ]
+    Security.SecTrustEvaluate.restype = OSStatus
+
+    Security.SecTrustGetCertificateCount.argtypes = [
+        SecTrustRef
+    ]
+    Security.SecTrustGetCertificateCount.restype = CFIndex
+
+    Security.SecTrustGetCertificateAtIndex.argtypes = [
+        SecTrustRef,
+        CFIndex
+    ]
+    Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
+
+    Security.SSLCreateContext.argtypes = [
+        CFAllocatorRef,
+        SSLProtocolSide,
+        SSLConnectionType
+    ]
+    Security.SSLCreateContext.restype = SSLContextRef
+
+    Security.SSLSetSessionOption.argtypes = [
+        SSLContextRef,
+        SSLSessionOption,
+        Boolean
+    ]
+    Security.SSLSetSessionOption.restype = OSStatus
+
+    Security.SSLSetProtocolVersionMin.argtypes = [
+        SSLContextRef,
+        SSLProtocol
+    ]
+    Security.SSLSetProtocolVersionMin.restype = OSStatus
+
+    Security.SSLSetProtocolVersionMax.argtypes = [
+        SSLContextRef,
+        SSLProtocol
+    ]
+    Security.SSLSetProtocolVersionMax.restype = OSStatus
+
+    Security.SecCopyErrorMessageString.argtypes = [
+        OSStatus,
+        c_void_p
+    ]
+    Security.SecCopyErrorMessageString.restype = CFStringRef
+
+    Security.SSLReadFunc = SSLReadFunc
+    Security.SSLWriteFunc = SSLWriteFunc
+    Security.SSLContextRef = SSLContextRef
+    Security.SSLProtocol = SSLProtocol
+    Security.SSLCipherSuite = SSLCipherSuite
+    Security.SecIdentityRef = SecIdentityRef
+    Security.SecKeychainRef = SecKeychainRef
+    Security.SecTrustRef = SecTrustRef
+    Security.SecTrustResultType = SecTrustResultType
+    Security.SecExternalFormat = SecExternalFormat
+    Security.OSStatus = OSStatus
+
+    Security.kSecImportExportPassphrase = CFStringRef.in_dll(
+        Security, 'kSecImportExportPassphrase'
+    )
+    Security.kSecImportItemIdentity = CFStringRef.in_dll(
+        Security, 'kSecImportItemIdentity'
+    )
+
+    # CoreFoundation time!
+    CoreFoundation.CFRetain.argtypes = [
+        CFTypeRef
+    ]
+    CoreFoundation.CFRetain.restype = CFTypeRef
+
+    CoreFoundation.CFRelease.argtypes = [
+        CFTypeRef
+    ]
+    CoreFoundation.CFRelease.restype = None
+
+    CoreFoundation.CFGetTypeID.argtypes = [
+        CFTypeRef
+    ]
+    CoreFoundation.CFGetTypeID.restype = CFTypeID
+
+    CoreFoundation.CFStringCreateWithCString.argtypes = [
+        CFAllocatorRef,
+        c_char_p,
+        CFStringEncoding
+    ]
+    CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
+
+    CoreFoundation.CFStringGetCStringPtr.argtypes = [
+        CFStringRef,
+        CFStringEncoding
+    ]
+    CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
+
+    CoreFoundation.CFStringGetCString.argtypes = [
+        CFStringRef,
+        c_char_p,
+        CFIndex,
+        CFStringEncoding
+    ]
+    CoreFoundation.CFStringGetCString.restype = c_bool
+
+    CoreFoundation.CFDataCreate.argtypes = [
+        CFAllocatorRef,
+        c_char_p,
+        CFIndex
+    ]
+    CoreFoundation.CFDataCreate.restype = CFDataRef
+
+    CoreFoundation.CFDataGetLength.argtypes = [
+        CFDataRef
+    ]
+    CoreFoundation.CFDataGetLength.restype = CFIndex
+
+    CoreFoundation.CFDataGetBytePtr.argtypes = [
+        CFDataRef
+    ]
+    CoreFoundation.CFDataGetBytePtr.restype = c_void_p
+
+    CoreFoundation.CFDictionaryCreate.argtypes = [
+        CFAllocatorRef,
+        POINTER(CFTypeRef),
+        POINTER(CFTypeRef),
+        CFIndex,
+        CFDictionaryKeyCallBacks,
+        CFDictionaryValueCallBacks
+    ]
+    CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
+
+    CoreFoundation.CFDictionaryGetValue.argtypes = [
+        CFDictionaryRef,
+        CFTypeRef
+    ]
+    CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
+
+    CoreFoundation.CFArrayCreate.argtypes = [
+        CFAllocatorRef,
+        POINTER(CFTypeRef),
+        CFIndex,
+        CFArrayCallBacks,
+    ]
+    CoreFoundation.CFArrayCreate.restype = CFArrayRef
+
+    CoreFoundation.CFArrayCreateMutable.argtypes = [
+        CFAllocatorRef,
+        CFIndex,
+        CFArrayCallBacks
+    ]
+    CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
+
+    CoreFoundation.CFArrayAppendValue.argtypes = [
+        CFMutableArrayRef,
+        c_void_p
+    ]
+    CoreFoundation.CFArrayAppendValue.restype = None
+
+    CoreFoundation.CFArrayGetCount.argtypes = [
+        CFArrayRef
+    ]
+    CoreFoundation.CFArrayGetCount.restype = CFIndex
+
+    CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
+        CFArrayRef,
+        CFIndex
+    ]
+    CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
+
+    CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
+        CoreFoundation, 'kCFAllocatorDefault'
+    )
+    CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(
+        CoreFoundation, 'kCFTypeArrayCallBacks'
+    )
+    CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
+        CoreFoundation, 'kCFTypeDictionaryKeyCallBacks'
+    )
+    CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
+        CoreFoundation, 'kCFTypeDictionaryValueCallBacks'
+    )
+
+    CoreFoundation.CFTypeRef = CFTypeRef
+    CoreFoundation.CFArrayRef = CFArrayRef
+    CoreFoundation.CFStringRef = CFStringRef
+    CoreFoundation.CFDictionaryRef = CFDictionaryRef
+
+except AttributeError:
+    raise ImportError('Error initializing ctypes')
+
+
+class CFConst(object):
+    """
+    A class object that acts as essentially a namespace for CoreFoundation
+    constants.
+    """
+    kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
+
+
+class SecurityConst(object):
+    """
+    A class object that acts as essentially a namespace for Security constants.
+    """
+    kSSLSessionOptionBreakOnServerAuth = 0
+
+    kSSLProtocol2 = 1
+    kSSLProtocol3 = 2
+    kTLSProtocol1 = 4
+    kTLSProtocol11 = 7
+    kTLSProtocol12 = 8
+
+    kSSLClientSide = 1
+    kSSLStreamType = 0
+
+    kSecFormatPEMSequence = 10
+
+    kSecTrustResultInvalid = 0
+    kSecTrustResultProceed = 1
+    # This gap is present on purpose: this was kSecTrustResultConfirm, which
+    # is deprecated.
+    kSecTrustResultDeny = 3
+    kSecTrustResultUnspecified = 4
+    kSecTrustResultRecoverableTrustFailure = 5
+    kSecTrustResultFatalTrustFailure = 6
+    kSecTrustResultOtherError = 7
+
+    errSSLProtocol = -9800
+    errSSLWouldBlock = -9803
+    errSSLClosedGraceful = -9805
+    errSSLClosedNoNotify = -9816
+    errSSLClosedAbort = -9806
+
+    errSSLXCertChainInvalid = -9807
+    errSSLCrypto = -9809
+    errSSLInternal = -9810
+    errSSLCertExpired = -9814
+    errSSLCertNotYetValid = -9815
+    errSSLUnknownRootCert = -9812
+    errSSLNoRootCert = -9813
+    errSSLHostNameMismatch = -9843
+    errSSLPeerHandshakeFail = -9824
+    errSSLPeerUserCancelled = -9839
+    errSSLWeakPeerEphemeralDHKey = -9850
+    errSSLServerAuthCompleted = -9841
+    errSSLRecordOverflow = -9847
+
+    errSecVerifyFailed = -67808
+    errSecNoTrustSettings = -25263
+    errSecItemNotFound = -25300
+    errSecInvalidTrustSettings = -25262
+
+    # Cipher suites. We only pick the ones our default cipher string allows.
+    TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
+    TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
+    TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
+    TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
+    TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
+    TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
+    TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
+    TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
+    TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
+    TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
+    TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
+    TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
+    TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
+    TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
+    TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
+    TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
+    TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
+    TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
+    TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
+    TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
+    TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
+    TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
+    TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
+    TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
+    TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
+    TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
+    TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
+    TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
+    TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
+    TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
+    TLS_AES_128_GCM_SHA256 = 0x1301
+    TLS_AES_256_GCM_SHA384 = 0x1302
+    TLS_CHACHA20_POLY1305_SHA256 = 0x1303
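+
+
+# Illustrative sketch (an assumption, not part of the vendored file): the
+# high-level SecureTransport code reads the negotiated suite roughly like
+# this and compares the result against the TLS_* constants above.
+#
+#     from ctypes import byref
+#     cipher = SSLCipherSuite()
+#     status = Security.SSLGetNegotiatedCipher(context, byref(cipher))
+#     # a non-zero OSStatus signals failure; cipher.value holds the suite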
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py
new file mode 100644
index 0000000000000000000000000000000000000000..b13cd9e72c66abdc4a3bb47ab422495e9bba82fb
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/_securetransport/low_level.py
@@ -0,0 +1,346 @@
+"""
+Low-level helpers for the SecureTransport bindings.
+
+These are Python functions that are not directly related to the high-level APIs
+but are necessary to get them to work. They include a whole bunch of low-level
+CoreFoundation messing about and memory management. The concerns in this module
+are almost entirely about trying to avoid memory leaks and providing
+appropriate and useful assistance to the higher-level code.
+"""
+import base64
+import ctypes
+import itertools
+import re
+import os
+import ssl
+import tempfile
+
+from .bindings import Security, CoreFoundation, CFConst
+
+
+# This regular expression is used to grab PEM data out of a PEM bundle.
+_PEM_CERTS_RE = re.compile(
+    b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
+)
+
+
+def _cf_data_from_bytes(bytestring):
+    """
+    Given a bytestring, create a CFData object from it. This CFData object must
+    be CFReleased by the caller.
+    """
+    return CoreFoundation.CFDataCreate(
+        CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
+    )
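+
+# Ownership sketch (illustrative, not part of the original module): the
+# caller owns the returned reference and must release it.
+#
+#     cf_data = _cf_data_from_bytes(b"...")
+#     try:
+#         ...  # hand cf_data to a Security/CoreFoundation call
+#     finally:
+#         CoreFoundation.CFRelease(cf_data)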
+
+
+def _cf_dictionary_from_tuples(tuples):
+    """
+    Given a list of Python tuples, create an associated CFDictionary.
+    """
+    dictionary_size = len(tuples)
+
+    # We need to get the dictionary keys and values out in the same order.
+    keys = (t[0] for t in tuples)
+    values = (t[1] for t in tuples)
+    cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
+    cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
+
+    return CoreFoundation.CFDictionaryCreate(
+        CoreFoundation.kCFAllocatorDefault,
+        cf_keys,
+        cf_values,
+        dictionary_size,
+        CoreFoundation.kCFTypeDictionaryKeyCallBacks,
+        CoreFoundation.kCFTypeDictionaryValueCallBacks,
+    )
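+
+# Example sketch (hypothetical values): pairing the passphrase key bound in
+# ``bindings`` with a CFString value, e.g. for a SecPKCS12Import options
+# dictionary.
+#
+#     options = _cf_dictionary_from_tuples(
+#         [(Security.kSecImportExportPassphrase, passphrase_cfstring)]
+#     )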
+
+
+def _cf_string_to_unicode(value):
+    """
+    Creates a Unicode string from a CFString object. Used entirely for error
+    reporting.
+
+    Yes, it annoys me quite a lot that this function is this complex.
+    """
+    value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
+
+    string = CoreFoundation.CFStringGetCStringPtr(
+        value_as_void_p,
+        CFConst.kCFStringEncodingUTF8
+    )
+    if string is None:
+        buffer = ctypes.create_string_buffer(1024)
+        result = CoreFoundation.CFStringGetCString(
+            value_as_void_p,
+            buffer,
+            1024,
+            CFConst.kCFStringEncodingUTF8
+        )
+        if not result:
+            raise OSError('Error copying C string from CFStringRef')
+        string = buffer.value
+    if string is not None:
+        string = string.decode('utf-8')
+    return string
+
+
+def _assert_no_error(error, exception_class=None):
+    """
+    Checks the return code and throws an exception if there is an error to
+    report.
+    """
+    if error == 0:
+        return
+
+    cf_error_string = Security.SecCopyErrorMessageString(error, None)
+    output = _cf_string_to_unicode(cf_error_string)
+    CoreFoundation.CFRelease(cf_error_string)
+
+    if output is None or output == u'':
+        output = u'OSStatus %s' % error
+
+    if exception_class is None:
+        exception_class = ssl.SSLError
+
+    raise exception_class(output)
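+
+# Typical call pattern (illustrative): check the OSStatus immediately after
+# each Security call.
+#
+#     status = Security.SSLHandshake(context)
+#     _assert_no_error(status)  # raises ssl.SSLError for a non-zero status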
+
+
+def _cert_array_from_pem(pem_bundle):
+    """
+    Given a bundle of certs in PEM format, turns them into a CFArray of certs
+    that can be used to validate a cert chain.
+    """
+    # Normalize the PEM bundle's line endings.
+    pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
+
+    der_certs = [
+        base64.b64decode(match.group(1))
+        for match in _PEM_CERTS_RE.finditer(pem_bundle)
+    ]
+    if not der_certs:
+        raise ssl.SSLError("No root certificates specified")
+
+    cert_array = CoreFoundation.CFArrayCreateMutable(
+        CoreFoundation.kCFAllocatorDefault,
+        0,
+        ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
+    )
+    if not cert_array:
+        raise ssl.SSLError("Unable to allocate memory!")
+
+    try:
+        for der_bytes in der_certs:
+            certdata = _cf_data_from_bytes(der_bytes)
+            if not certdata:
+                raise ssl.SSLError("Unable to allocate memory!")
+            cert = Security.SecCertificateCreateWithData(
+                CoreFoundation.kCFAllocatorDefault, certdata
+            )
+            CoreFoundation.CFRelease(certdata)
+            if not cert:
+                raise ssl.SSLError("Unable to build cert object!")
+
+            CoreFoundation.CFArrayAppendValue(cert_array, cert)
+            CoreFoundation.CFRelease(cert)
+    except Exception:
+        # We need to free the array before the exception bubbles further.
+        # We only want to do that if an error occurs: otherwise, the caller
+        # should free.
+        CoreFoundation.CFRelease(cert_array)
+        raise
+
+    return cert_array
+
+
+def _is_cert(item):
+    """
+    Returns True if a given CFTypeRef is a certificate.
+    """
+    expected = Security.SecCertificateGetTypeID()
+    return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _is_identity(item):
+    """
+    Returns True if a given CFTypeRef is an identity.
+    """
+    expected = Security.SecIdentityGetTypeID()
+    return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _temporary_keychain():
+    """
+    This function creates a temporary Mac keychain that we can use to work with
+    credentials. This keychain uses a one-time password and a temporary file to
+    store the data. We expect to have one keychain per socket. The returned
+    SecKeychainRef must be freed by the caller, including calling
+    SecKeychainDelete.
+
+    Returns a tuple of the SecKeychainRef and the path to the temporary
+    directory that contains it.
+    """
+    # Unfortunately, SecKeychainCreate requires a path to a keychain. This
+    # means we cannot use mkstemp to use a generic temporary file. Instead,
+    # we're going to create a temporary directory and a filename to use there.
+    # This filename will be 8 random bytes expanded into base16. We also need
+    # some random bytes to password-protect the keychain we're creating, so we
+    # ask for 40 random bytes.
+    random_bytes = os.urandom(40)
+    filename = base64.b16encode(random_bytes[:8]).decode('utf-8')
+    password = base64.b16encode(random_bytes[8:])  # Must be valid UTF-8
+    tempdirectory = tempfile.mkdtemp()
+
+    keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
+
+    # We now want to create the keychain itself.
+    keychain = Security.SecKeychainRef()
+    status = Security.SecKeychainCreate(
+        keychain_path,
+        len(password),
+        password,
+        False,
+        None,
+        ctypes.byref(keychain)
+    )
+    _assert_no_error(status)
+
+    # Having created the keychain, we want to pass it off to the caller.
+    return keychain, tempdirectory
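+
+# Cleanup sketch (illustrative; ``shutil`` is an assumption and is not
+# imported by this module):
+#
+#     keychain, tempdir = _temporary_keychain()
+#     try:
+#         ...
+#     finally:
+#         Security.SecKeychainDelete(keychain)
+#         CoreFoundation.CFRelease(keychain)
+#         shutil.rmtree(tempdir)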
+
+
+def _load_items_from_file(keychain, path):
+    """
+    Given a single file, loads all the trust objects from it into arrays and
+    the keychain.
+    Returns a tuple of lists: the first list is a list of identities, the
+    second a list of certs.
+    """
+    certificates = []
+    identities = []
+    result_array = None
+
+    with open(path, 'rb') as f:
+        raw_filedata = f.read()
+
+    try:
+        filedata = CoreFoundation.CFDataCreate(
+            CoreFoundation.kCFAllocatorDefault,
+            raw_filedata,
+            len(raw_filedata)
+        )
+        result_array = CoreFoundation.CFArrayRef()
+        result = Security.SecItemImport(
+            filedata,  # cert data
+            None,  # Filename, leaving it out for now
+            None,  # What the type of the file is, we don't care
+            None,  # what's in the file, we don't care
+            0,  # import flags
+            None,  # key params, can include passphrase in the future
+            keychain,  # The keychain to insert into
+            ctypes.byref(result_array)  # Results
+        )
+        _assert_no_error(result)
+
+        # A CFArray is not very useful to us as an intermediary
+        # representation, so we are going to extract the objects we want
+        # and then free the array. We don't need to keep hold of keys: the
+        # keychain already has them!
+        result_count = CoreFoundation.CFArrayGetCount(result_array)
+        for index in range(result_count):
+            item = CoreFoundation.CFArrayGetValueAtIndex(
+                result_array, index
+            )
+            item = ctypes.cast(item, CoreFoundation.CFTypeRef)
+
+            if _is_cert(item):
+                CoreFoundation.CFRetain(item)
+                certificates.append(item)
+            elif _is_identity(item):
+                CoreFoundation.CFRetain(item)
+                identities.append(item)
+    finally:
+        if result_array:
+            CoreFoundation.CFRelease(result_array)
+
+        CoreFoundation.CFRelease(filedata)
+
+    return (identities, certificates)
+
+
+def _load_client_cert_chain(keychain, *paths):
+    """
+    Load certificates and maybe keys from a number of files. Has the end goal
+    of returning a CFArray containing one SecIdentityRef, and then zero or more
+    SecCertificateRef objects, suitable for use as a client certificate trust
+    chain.
+    """
+    # Ok, the strategy.
+    #
+    # This relies on knowing that macOS will not give you a SecIdentityRef
+    # unless you have imported a key into a keychain. This is a somewhat
+    # artificial limitation of macOS (for example, it doesn't necessarily
+    # affect iOS), but there is nothing inside Security.framework that lets you
+    # get a SecIdentityRef without having a key in a keychain.
+    #
+    # So the policy here is we take all the files and iterate them in order.
+    # Each one will use SecItemImport to have one or more objects loaded from
+    # it. We will also point at a keychain that macOS can use to work with the
+    # private key.
+    #
+    # Once we have all the objects, we'll check what we actually have. If we
+    # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
+    # we'll take the first certificate (which we assume to be our leaf) and
+    # ask the keychain to give us a SecIdentityRef with that cert's associated
+    # key.
+    #
+    # We'll then return a CFArray containing the trust chain: one
+    # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
+    # responsibility for freeing this CFArray will be with the caller. This
+    # CFArray must remain alive for the entire connection, so in practice it
+    # will be stored with a single SSLSocket, along with the reference to the
+    # keychain.
+    certificates = []
+    identities = []
+
+    # Filter out bad paths.
+    paths = (path for path in paths if path)
+
+    try:
+        for file_path in paths:
+            new_identities, new_certs = _load_items_from_file(
+                keychain, file_path
+            )
+            identities.extend(new_identities)
+            certificates.extend(new_certs)
+
+        # Ok, we have everything. The question is: do we have an identity? If
+        # not, we want to grab one from the first cert we have.
+        if not identities:
+            new_identity = Security.SecIdentityRef()
+            status = Security.SecIdentityCreateWithCertificate(
+                keychain,
+                certificates[0],
+                ctypes.byref(new_identity)
+            )
+            _assert_no_error(status)
+            identities.append(new_identity)
+
+            # We now want to release the original certificate, as we no longer
+            # need it.
+            CoreFoundation.CFRelease(certificates.pop(0))
+
+        # We now need to build a new CFArray that holds the trust chain.
+        trust_chain = CoreFoundation.CFArrayCreateMutable(
+            CoreFoundation.kCFAllocatorDefault,
+            0,
+            ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+        )
+        for item in itertools.chain(identities, certificates):
+            # ArrayAppendValue does a CFRetain on the item. That's fine,
+            # because the finally block will release our other refs to them.
+            CoreFoundation.CFArrayAppendValue(trust_chain, item)
+
+        return trust_chain
+    finally:
+        for obj in itertools.chain(identities, certificates):
+            CoreFoundation.CFRelease(obj)
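+
+
+# End-to-end sketch (hypothetical paths, illustrative only):
+#
+#     keychain, tempdir = _temporary_keychain()
+#     trust_chain = _load_client_cert_chain(keychain, "client.pem")
+#     # ... pass trust_chain to Security.SSLSetCertificate, then
+#     # CFRelease(trust_chain) and clean up the keychain as shown above.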
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/appengine.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/appengine.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b42952d7bf6b5b157bcc1ec3380193ecae6dade
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/appengine.py
@@ -0,0 +1,289 @@
+"""
+This module provides a pool manager that uses Google App Engine's
+`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+Example usage::
+
+    from pip._vendor.urllib3 import PoolManager
+    from pip._vendor.urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
+
+    if is_appengine_sandbox():
+        # AppEngineManager uses AppEngine's URLFetch API behind the scenes
+        http = AppEngineManager()
+    else:
+        # PoolManager uses a socket-level API behind the scenes
+        http = PoolManager()
+
+    r = http.request('GET', 'https://google.com/')
+
+There are `limitations <https://cloud.google.com/appengine/docs/python/\
+urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
+the best choice for your application. There are three options for using
+urllib3 on Google App Engine:
+
+1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
+   cost-effective in many circumstances as long as your usage is within the
+   limitations.
+2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
+   Sockets also have `limitations and restrictions
+   <https://cloud.google.com/appengine/docs/python/sockets/\
+   #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
+   To use sockets, be sure to specify the following in your ``app.yaml``::
+
+        env_variables:
+            GAE_USE_SOCKETS_HTTPLIB : 'true'
+
+3. If you are using `App Engine Flexible
+<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
+:class:`PoolManager` without any configuration or special environment variables.
+"""
+
+from __future__ import absolute_import
+import io
+import logging
+import warnings
+from ..packages.six.moves.urllib.parse import urljoin
+
+from ..exceptions import (
+    HTTPError,
+    HTTPWarning,
+    MaxRetryError,
+    ProtocolError,
+    TimeoutError,
+    SSLError
+)
+
+from ..request import RequestMethods
+from ..response import HTTPResponse
+from ..util.timeout import Timeout
+from ..util.retry import Retry
+from . import _appengine_environ
+
+try:
+    from google.appengine.api import urlfetch
+except ImportError:
+    urlfetch = None
+
+
+log = logging.getLogger(__name__)
+
+
+class AppEnginePlatformWarning(HTTPWarning):
+    pass
+
+
+class AppEnginePlatformError(HTTPError):
+    pass
+
+
+class AppEngineManager(RequestMethods):
+    """
+    Connection manager for Google App Engine sandbox applications.
+
+    This manager uses the URLFetch service directly instead of using the
+    emulated httplib, and is subject to URLFetch limitations as described in
+    the App Engine documentation `here
+    <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+    Notably it will raise an :class:`AppEnginePlatformError` if:
+        * URLFetch is not available.
+        * you attempt to use this on App Engine Flexible, as full socket
+          support is available there.
+        * a request size is more than 10 megabytes.
+        * a response size is more than 32 megabytes.
+        * you use an unsupported request method such as OPTIONS.
+
+    Beyond those cases, it will raise normal urllib3 errors.
+    """
+
+    def __init__(self, headers=None, retries=None, validate_certificate=True,
+                 urlfetch_retries=True):
+        if not urlfetch:
+            raise AppEnginePlatformError(
+                "URLFetch is not available in this environment.")
+
+        if is_prod_appengine_mvms():
+            raise AppEnginePlatformError(
+                "Use normal urllib3.PoolManager instead of AppEngineManager"
+                "on Managed VMs, as using URLFetch is not necessary in "
+                "this environment.")
+
+        warnings.warn(
+            "urllib3 is using URLFetch on Google App Engine sandbox instead "
+            "of sockets. To use sockets directly instead of URLFetch see "
+            "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
+            AppEnginePlatformWarning)
+
+        RequestMethods.__init__(self, headers)
+        self.validate_certificate = validate_certificate
+        self.urlfetch_retries = urlfetch_retries
+
+        self.retries = retries or Retry.DEFAULT
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        # Return False to re-raise any potential exceptions
+        return False
+
+    def urlopen(self, method, url, body=None, headers=None,
+                retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
+                **response_kw):
+
+        retries = self._get_retries(retries, redirect)
+
+        try:
+            follow_redirects = (
+                redirect and
+                retries.redirect != 0 and
+                retries.total)
+            response = urlfetch.fetch(
+                url,
+                payload=body,
+                method=method,
+                headers=headers or {},
+                allow_truncated=False,
+                follow_redirects=self.urlfetch_retries and follow_redirects,
+                deadline=self._get_absolute_timeout(timeout),
+                validate_certificate=self.validate_certificate,
+            )
+        except urlfetch.DeadlineExceededError as e:
+            raise TimeoutError(self, e)
+
+        except urlfetch.InvalidURLError as e:
+            if 'too large' in str(e):
+                raise AppEnginePlatformError(
+                    "URLFetch request too large, URLFetch only "
+                    "supports requests up to 10mb in size.", e)
+            raise ProtocolError(e)
+
+        except urlfetch.DownloadError as e:
+            if 'Too many redirects' in str(e):
+                raise MaxRetryError(self, url, reason=e)
+            raise ProtocolError(e)
+
+        except urlfetch.ResponseTooLargeError as e:
+            raise AppEnginePlatformError(
+                "URLFetch response too large, URLFetch only supports"
+                "responses up to 32mb in size.", e)
+
+        except urlfetch.SSLCertificateError as e:
+            raise SSLError(e)
+
+        except urlfetch.InvalidMethodError as e:
+            raise AppEnginePlatformError(
+                "URLFetch does not support method: %s" % method, e)
+
+        http_response = self._urlfetch_response_to_http_response(
+            response, retries=retries, **response_kw)
+
+        # Handle redirect?
+        redirect_location = redirect and http_response.get_redirect_location()
+        if redirect_location:
+            # Check for redirect response
+            if self.urlfetch_retries and retries.raise_on_redirect:
+                raise MaxRetryError(self, url, "too many redirects")
+            else:
+                if http_response.status == 303:
+                    method = 'GET'
+
+                try:
+                    retries = retries.increment(method, url, response=http_response, _pool=self)
+                except MaxRetryError:
+                    if retries.raise_on_redirect:
+                        raise MaxRetryError(self, url, "too many redirects")
+                    return http_response
+
+                retries.sleep_for_retry(http_response)
+                log.debug("Redirecting %s -> %s", url, redirect_location)
+                redirect_url = urljoin(url, redirect_location)
+                return self.urlopen(
+                    method, redirect_url, body, headers,
+                    retries=retries, redirect=redirect,
+                    timeout=timeout, **response_kw)
+
+        # Check if we should retry the HTTP response.
+        has_retry_after = bool(http_response.getheader('Retry-After'))
+        if retries.is_retry(method, http_response.status, has_retry_after):
+            retries = retries.increment(
+                method, url, response=http_response, _pool=self)
+            log.debug("Retry: %s", url)
+            retries.sleep(http_response)
+            return self.urlopen(
+                method, url,
+                body=body, headers=headers,
+                retries=retries, redirect=redirect,
+                timeout=timeout, **response_kw)
+
+        return http_response
+
+    def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
+
+        if is_prod_appengine():
+            # Production GAE handles deflate encoding automatically, but does
+            # not remove the encoding header.
+            content_encoding = urlfetch_resp.headers.get('content-encoding')
+
+            if content_encoding == 'deflate':
+                del urlfetch_resp.headers['content-encoding']
+
+        transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
+        # We have a full response's content,
+        # so let's make sure we don't report ourselves as chunked data.
+        if transfer_encoding == 'chunked':
+            encodings = transfer_encoding.split(",")
+            encodings.remove('chunked')
+            urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
+
+        original_response = HTTPResponse(
+            # In order for decoding to work, we must present the content as
+            # a file-like object.
+            body=io.BytesIO(urlfetch_resp.content),
+            msg=urlfetch_resp.header_msg,
+            headers=urlfetch_resp.headers,
+            status=urlfetch_resp.status_code,
+            **response_kw
+        )
+
+        return HTTPResponse(
+            body=io.BytesIO(urlfetch_resp.content),
+            headers=urlfetch_resp.headers,
+            status=urlfetch_resp.status_code,
+            original_response=original_response,
+            **response_kw
+        )
+
+    def _get_absolute_timeout(self, timeout):
+        if timeout is Timeout.DEFAULT_TIMEOUT:
+            return None  # Defer to URLFetch's default.
+        if isinstance(timeout, Timeout):
+            if timeout._read is not None or timeout._connect is not None:
+                warnings.warn(
+                    "URLFetch does not support granular timeout settings, "
+                    "reverting to total or default URLFetch timeout.",
+                    AppEnginePlatformWarning)
+            return timeout.total
+        return timeout
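+    # A quick sketch of the coercion above (illustrative values):
+    #
+    #     _get_absolute_timeout(Timeout.DEFAULT_TIMEOUT)    -> None
+    #     _get_absolute_timeout(5.0)                        -> 5.0
+    #     _get_absolute_timeout(Timeout(total=10))          -> 10
+    #     _get_absolute_timeout(Timeout(connect=2, read=5)) -> warns, returns
+    #                                                          timeout.total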
+
+    def _get_retries(self, retries, redirect):
+        if not isinstance(retries, Retry):
+            retries = Retry.from_int(
+                retries, redirect=redirect, default=self.retries)
+
+        if retries.connect or retries.read or retries.redirect:
+            warnings.warn(
+                "URLFetch only supports total retries and does not "
+                "recognize connect, read, or redirect retry parameters.",
+                AppEnginePlatformWarning)
+
+        return retries
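+    # For example (illustrative): _get_retries(3, redirect=True) wraps the int
+    # via Retry.from_int(), while _get_retries(Retry(connect=2), True) emits
+    # AppEnginePlatformWarning because URLFetch only honours the total count.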
+
+
+# Alias methods from _appengine_environ to maintain public API interface.
+
+is_appengine = _appengine_environ.is_appengine
+is_appengine_sandbox = _appengine_environ.is_appengine_sandbox
+is_local_appengine = _appengine_environ.is_local_appengine
+is_prod_appengine = _appengine_environ.is_prod_appengine
+is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms
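+
+# Usage sketch (illustrative): AppEngineManager, defined earlier in this
+# module, is the entry point for URLFetch-backed requests:
+#
+#     http = AppEngineManager()
+#     response = http.request('GET', 'https://example.com/')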
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ea127c58338217fdcedb13feab027987d280e44
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py
@@ -0,0 +1,111 @@
+"""
+NTLM authenticating pool, contributed by erikcederstran
+
+Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
+"""
+from __future__ import absolute_import
+
+from logging import getLogger
+from ntlm import ntlm
+
+from .. import HTTPSConnectionPool
+from ..packages.six.moves.http_client import HTTPSConnection
+
+
+log = getLogger(__name__)
+
+
+class NTLMConnectionPool(HTTPSConnectionPool):
+    """
+    Implements an NTLM-authenticating version of an urllib3 connection pool.
+    """
+
+    scheme = 'https'
+
+    def __init__(self, user, pw, authurl, *args, **kwargs):
+        """
+        authurl is a random URL on the server that is protected by NTLM.
+        user is the Windows user, probably in the DOMAIN\\username format.
+        pw is the password for the user.
+        """
+        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
+        self.authurl = authurl
+        self.rawuser = user
+        user_parts = user.split('\\', 1)
+        self.domain = user_parts[0].upper()
+        self.user = user_parts[1]
+        self.pw = pw
+
+    def _new_conn(self):
+        # Performs the NTLM handshake that secures the connection. The socket
+        # must be kept open while requests are performed.
+        self.num_connections += 1
+        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
+                  self.num_connections, self.host, self.authurl)
+
+        headers = {'Connection': 'Keep-Alive'}
+        req_header = 'Authorization'
+        resp_header = 'www-authenticate'
+
+        conn = HTTPSConnection(host=self.host, port=self.port)
+
+        # Send negotiation message
+        headers[req_header] = (
+            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
+        log.debug('Request headers: %s', headers)
+        conn.request('GET', self.authurl, None, headers)
+        res = conn.getresponse()
+        reshdr = dict(res.getheaders())
+        log.debug('Response status: %s %s', res.status, res.reason)
+        log.debug('Response headers: %s', reshdr)
+        log.debug('Response data: %s [...]', res.read(100))
+
+        # Remove the reference to the socket, so that it can not be closed by
+        # the response object (we want to keep the socket open)
+        res.fp = None
+
+        # Server should respond with a challenge message
+        auth_header_values = reshdr[resp_header].split(', ')
+        auth_header_value = None
+        for s in auth_header_values:
+            if s[:5] == 'NTLM ':
+                auth_header_value = s[5:]
+        if auth_header_value is None:
+            raise Exception('Unexpected %s response header: %s' %
+                            (resp_header, reshdr[resp_header]))
+
+        # Send authentication message
+        ServerChallenge, NegotiateFlags = \
+            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
+        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
+                                                         self.user,
+                                                         self.domain,
+                                                         self.pw,
+                                                         NegotiateFlags)
+        headers[req_header] = 'NTLM %s' % auth_msg
+        log.debug('Request headers: %s', headers)
+        conn.request('GET', self.authurl, None, headers)
+        res = conn.getresponse()
+        log.debug('Response status: %s %s', res.status, res.reason)
+        log.debug('Response headers: %s', dict(res.getheaders()))
+        log.debug('Response data: %s [...]', res.read()[:100])
+        if res.status != 200:
+            if res.status == 401:
+                raise Exception('Server rejected request: wrong '
+                                'username or password')
+            raise Exception('Wrong server response: %s %s' %
+                            (res.status, res.reason))
+
+        res.fp = None
+        log.debug('Connection established')
+        return conn
+
+    def urlopen(self, method, url, body=None, headers=None, retries=3,
+                redirect=True, assert_same_host=True):
+        if headers is None:
+            headers = {}
+        headers['Connection'] = 'Keep-Alive'
+        return super(NTLMConnectionPool, self).urlopen(method, url, body,
+                                                       headers, retries,
+                                                       redirect,
+                                                       assert_same_host)
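+
+# A minimal usage sketch (hypothetical host and credentials):
+#
+#     pool = NTLMConnectionPool('DOMAIN\\user', 'secret', authurl='/',
+#                               host='intranet.example.com', port=443)
+#     response = pool.urlopen('GET', '/')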
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
new file mode 100644
index 0000000000000000000000000000000000000000..363667cb5658676924edf34e8c509704c2c28180
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/pyopenssl.py
@@ -0,0 +1,466 @@
+"""
+SSL with SNI_-support for Python 2. Follow these instructions if you would
+like to verify SSL certificates in Python 2. Note, the default libraries do
+*not* do certificate checking; you need to do additional work to validate
+certificates yourself.
+
+This needs the following packages installed:
+
+* pyOpenSSL (tested with 16.0.0)
+* cryptography (minimum 1.3.4, from pyopenssl)
+* idna (minimum 2.0, from cryptography)
+
+However, pyopenssl depends on cryptography, which depends on idna, so while we
+use all three directly here we end up having relatively few packages required.
+
+You can install them with the following command:
+
+    pip install pyopenssl cryptography idna
+
+To activate certificate checking, call
+:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
+before you begin making HTTP requests. This can be done in a ``sitecustomize``
+module, or at any other time before your application begins using ``urllib3``,
+like this::
+
+    try:
+        import urllib3.contrib.pyopenssl
+        urllib3.contrib.pyopenssl.inject_into_urllib3()
+    except ImportError:
+        pass
+
+Now you can use :mod:`urllib3` as you normally would, and it will support SNI
+when the required modules are installed.
+
+Activating this module also has the positive side effect of disabling SSL/TLS
+compression in Python 2 (see `CRIME attack`_).
+
+If you want to configure the default list of supported cipher suites, you can
+set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
+
+.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
+.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
+"""
+from __future__ import absolute_import
+
+import OpenSSL.SSL
+from cryptography import x509
+from cryptography.hazmat.backends.openssl import backend as openssl_backend
+from cryptography.hazmat.backends.openssl.x509 import _Certificate
+try:
+    from cryptography.x509 import UnsupportedExtension
+except ImportError:
+    # UnsupportedExtension is gone in cryptography >= 2.1.0
+    class UnsupportedExtension(Exception):
+        pass
+
+from socket import timeout, error as SocketError
+from io import BytesIO
+
+try:  # Platform-specific: Python 2
+    from socket import _fileobject
+except ImportError:  # Platform-specific: Python 3
+    _fileobject = None
+    from ..packages.backports.makefile import backport_makefile
+
+import logging
+import ssl
+from ..packages import six
+import sys
+
+from .. import util
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works.
+HAS_SNI = True
+
+# Map from urllib3 to PyOpenSSL compatible parameter-values.
+_openssl_versions = {
+    ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
+    ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
+}
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
+    _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
+
+if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
+    _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
+
+try:
+    _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
+except AttributeError:
+    pass
+
+_stdlib_to_openssl_verify = {
+    ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
+    ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
+    ssl.CERT_REQUIRED:
+        OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+}
+_openssl_to_stdlib_verify = dict(
+    (v, k) for k, v in _stdlib_to_openssl_verify.items()
+)
+
+# OpenSSL will only write 16K at a time
+SSL_WRITE_BLOCKSIZE = 16384
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+
+log = logging.getLogger(__name__)
+
+
+def inject_into_urllib3():
+    'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
+
+    _validate_dependencies_met()
+
+    util.ssl_.SSLContext = PyOpenSSLContext
+    util.HAS_SNI = HAS_SNI
+    util.ssl_.HAS_SNI = HAS_SNI
+    util.IS_PYOPENSSL = True
+    util.ssl_.IS_PYOPENSSL = True
+
+
+def extract_from_urllib3():
+    'Undo monkey-patching by :func:`inject_into_urllib3`.'
+
+    util.ssl_.SSLContext = orig_util_SSLContext
+    util.HAS_SNI = orig_util_HAS_SNI
+    util.ssl_.HAS_SNI = orig_util_HAS_SNI
+    util.IS_PYOPENSSL = False
+    util.ssl_.IS_PYOPENSSL = False
+
+
+def _validate_dependencies_met():
+    """
+    Verifies that PyOpenSSL's package-level dependencies have been met.
+    Throws `ImportError` if they are not met.
+    """
+    # Method added in `cryptography==1.1`; not available in older versions
+    from cryptography.x509.extensions import Extensions
+    if getattr(Extensions, "get_extension_for_class", None) is None:
+        raise ImportError("'cryptography' module missing required functionality.  "
+                          "Try upgrading to v1.3.4 or newer.")
+
+    # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
+    # attribute is only present on those versions.
+    from OpenSSL.crypto import X509
+    x509 = X509()
+    if getattr(x509, "_x509", None) is None:
+        raise ImportError("'pyOpenSSL' module missing required functionality. "
+                          "Try upgrading to v0.14 or newer.")
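+
+# In practice (illustrative): an environment with cryptography < 1.1 or
+# pyOpenSSL < 0.14 fails fast here with ImportError instead of breaking
+# obscurely at handshake time.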
+
+
+def _dnsname_to_stdlib(name):
+    """
+    Converts a dNSName SubjectAlternativeName field to the form used by the
+    standard library on the given Python version.
+
+    Cryptography produces a dNSName as a unicode string that was idna-decoded
+    from ASCII bytes. We need to idna-encode that string to get it back, and
+    then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
+    uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
+
+    If the name cannot be idna-encoded then we return None signalling that
+    the name given should be skipped.
+    """
+    def idna_encode(name):
+        """
+        Borrowed wholesale from the Python Cryptography Project. It turns out
+        that we can't just safely call `idna.encode`: it can explode for
+        wildcard names. This avoids that problem.
+        """
+        from pip._vendor import idna
+
+        try:
+            for prefix in [u'*.', u'.']:
+                if name.startswith(prefix):
+                    name = name[len(prefix):]
+                    return prefix.encode('ascii') + idna.encode(name)
+            return idna.encode(name)
+        except idna.core.IDNAError:
+            return None
+
+    name = idna_encode(name)
+    if name is None:
+        return None
+    elif sys.version_info >= (3, 0):
+        name = name.decode('utf-8')
+    return name
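+
+# For instance (illustrative): _dnsname_to_stdlib(u'*.bücher.example')
+# idna-encodes the name and, on Python 3, decodes it back to
+# '*.xn--bcher-kva.example'; names that cannot be idna-encoded become
+# None and are skipped by the caller.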
+
+
+def get_subj_alt_name(peer_cert):
+    """
+    Given a PyOpenSSL certificate, provides all the subject alternative names.
+    """
+    # Pass the cert to cryptography, which has much better APIs for this.
+    if hasattr(peer_cert, "to_cryptography"):
+        cert = peer_cert.to_cryptography()
+    else:
+        # This is technically using private APIs, but should work across all
+        # relevant versions before PyOpenSSL got a proper API for this.
+        cert = _Certificate(openssl_backend, peer_cert._x509)
+
+    # We want to find the SAN extension. Ask Cryptography to locate it (it's
+    # faster than looping in Python)
+    try:
+        ext = cert.extensions.get_extension_for_class(
+            x509.SubjectAlternativeName
+        ).value
+    except x509.ExtensionNotFound:
+        # No such extension, return the empty list.
+        return []
+    except (x509.DuplicateExtension, UnsupportedExtension,
+            x509.UnsupportedGeneralNameType, UnicodeError) as e:
+        # A problem has been found with the quality of the certificate. Assume
+        # no SAN field is present.
+        log.warning(
+            "A problem was encountered with the certificate that prevented "
+            "urllib3 from finding the SubjectAlternativeName field. This can "
+            "affect certificate validation. The error was %s",
+            e,
+        )
+        return []
+
+    # We want to return dNSName and iPAddress fields. We need to cast the IPs
+    # back to strings because the match_hostname function wants them as
+    # strings.
+    # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
+    # decoded. This is pretty frustrating, but that's what the standard library
+    # does with certificates, and so we need to attempt to do the same.
+    # We also want to skip over names which cannot be idna encoded.
+    names = [
+        ('DNS', name) for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
+        if name is not None
+    ]
+    names.extend(
+        ('IP Address', str(name))
+        for name in ext.get_values_for_type(x509.IPAddress)
+    )
+
+    return names
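+
+# The returned list mirrors ssl.SSLSocket.getpeercert()['subjectAltName'],
+# e.g. (illustrative): [('DNS', 'example.com'), ('IP Address', '93.184.216.34')].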
+
+
+class WrappedSocket(object):
+    '''API-compatibility wrapper for PyOpenSSL's Connection class.
+
+    Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
+    collector of pypy.
+    '''
+
+    def __init__(self, connection, socket, suppress_ragged_eofs=True):
+        self.connection = connection
+        self.socket = socket
+        self.suppress_ragged_eofs = suppress_ragged_eofs
+        self._makefile_refs = 0
+        self._closed = False
+
+    def fileno(self):
+        return self.socket.fileno()
+
+    # Copy-pasted from Python 3.5 source code
+    def _decref_socketios(self):
+        if self._makefile_refs > 0:
+            self._makefile_refs -= 1
+        if self._closed:
+            self.close()
+
+    def recv(self, *args, **kwargs):
+        try:
+            data = self.connection.recv(*args, **kwargs)
+        except OpenSSL.SSL.SysCallError as e:
+            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+                return b''
+            else:
+                raise SocketError(str(e))
+        except OpenSSL.SSL.ZeroReturnError as e:
+            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+                return b''
+            else:
+                raise
+        except OpenSSL.SSL.WantReadError:
+            if not util.wait_for_read(self.socket, self.socket.gettimeout()):
+                raise timeout('The read operation timed out')
+            else:
+                return self.recv(*args, **kwargs)
+        else:
+            return data
+
+    def recv_into(self, *args, **kwargs):
+        try:
+            return self.connection.recv_into(*args, **kwargs)
+        except OpenSSL.SSL.SysCallError as e:
+            if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
+                return 0
+            else:
+                raise SocketError(str(e))
+        except OpenSSL.SSL.ZeroReturnError as e:
+            if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+                return 0
+            else:
+                raise
+        except OpenSSL.SSL.WantReadError:
+            if not util.wait_for_read(self.socket, self.socket.gettimeout()):
+                raise timeout('The read operation timed out')
+            else:
+                return self.recv_into(*args, **kwargs)
+
+    def settimeout(self, timeout):
+        return self.socket.settimeout(timeout)
+
+    def _send_until_done(self, data):
+        while True:
+            try:
+                return self.connection.send(data)
+            except OpenSSL.SSL.WantWriteError:
+                if not util.wait_for_write(self.socket, self.socket.gettimeout()):
+                    raise timeout()
+                continue
+            except OpenSSL.SSL.SysCallError as e:
+                raise SocketError(str(e))
+
+    def sendall(self, data):
+        total_sent = 0
+        while total_sent < len(data):
+            sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
+            total_sent += sent
+
+    def shutdown(self):
+        # FIXME rethrow compatible exceptions should we ever use this
+        self.connection.shutdown()
+
+    def close(self):
+        if self._makefile_refs < 1:
+            try:
+                self._closed = True
+                return self.connection.close()
+            except OpenSSL.SSL.Error:
+                return
+        else:
+            self._makefile_refs -= 1
+
+    def getpeercert(self, binary_form=False):
+        x509 = self.connection.get_peer_certificate()
+
+        if not x509:
+            return x509
+
+        if binary_form:
+            return OpenSSL.crypto.dump_certificate(
+                OpenSSL.crypto.FILETYPE_ASN1,
+                x509)
+
+        return {
+            'subject': (
+                (('commonName', x509.get_subject().CN),),
+            ),
+            'subjectAltName': get_subj_alt_name(x509)
+        }
+
+    def _reuse(self):
+        self._makefile_refs += 1
+
+    def _drop(self):
+        if self._makefile_refs < 1:
+            self.close()
+        else:
+            self._makefile_refs -= 1
+
+
+if _fileobject:  # Platform-specific: Python 2
+    def makefile(self, mode, bufsize=-1):
+        self._makefile_refs += 1
+        return _fileobject(self, mode, bufsize, close=True)
+else:  # Platform-specific: Python 3
+    makefile = backport_makefile
+
+WrappedSocket.makefile = makefile
+
+
+class PyOpenSSLContext(object):
+    """
+    I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
+    for translating the interface of the standard library ``SSLContext`` object
+    to calls into PyOpenSSL.
+    """
+    def __init__(self, protocol):
+        self.protocol = _openssl_versions[protocol]
+        self._ctx = OpenSSL.SSL.Context(self.protocol)
+        self._options = 0
+        self.check_hostname = False
+
+    @property
+    def options(self):
+        return self._options
+
+    @options.setter
+    def options(self, value):
+        self._options = value
+        self._ctx.set_options(value)
+
+    @property
+    def verify_mode(self):
+        return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
+
+    @verify_mode.setter
+    def verify_mode(self, value):
+        self._ctx.set_verify(
+            _stdlib_to_openssl_verify[value],
+            _verify_callback
+        )
+
+    def set_default_verify_paths(self):
+        self._ctx.set_default_verify_paths()
+
+    def set_ciphers(self, ciphers):
+        if isinstance(ciphers, six.text_type):
+            ciphers = ciphers.encode('utf-8')
+        self._ctx.set_cipher_list(ciphers)
+
+    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+        if cafile is not None:
+            cafile = cafile.encode('utf-8')
+        if capath is not None:
+            capath = capath.encode('utf-8')
+        self._ctx.load_verify_locations(cafile, capath)
+        if cadata is not None:
+            self._ctx.load_verify_locations(BytesIO(cadata))
+
+    def load_cert_chain(self, certfile, keyfile=None, password=None):
+        self._ctx.use_certificate_chain_file(certfile)
+        if password is not None:
+            self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
+        self._ctx.use_privatekey_file(keyfile or certfile)
+
+    def wrap_socket(self, sock, server_side=False,
+                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
+                    server_hostname=None):
+        cnx = OpenSSL.SSL.Connection(self._ctx, sock)
+
+        if isinstance(server_hostname, six.text_type):  # Platform-specific: Python 3
+            server_hostname = server_hostname.encode('utf-8')
+
+        if server_hostname is not None:
+            cnx.set_tlsext_host_name(server_hostname)
+
+        cnx.set_connect_state()
+
+        while True:
+            try:
+                cnx.do_handshake()
+            except OpenSSL.SSL.WantReadError:
+                if not util.wait_for_read(sock, sock.gettimeout()):
+                    raise timeout('select timed out')
+                continue
+            except OpenSSL.SSL.Error as e:
+                raise ssl.SSLError('bad handshake: %r' % e)
+            break
+
+        return WrappedSocket(cnx, sock)
+
+
+def _verify_callback(cnx, x509, err_no, err_depth, return_code):
+    return err_no == 0
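+
+
+# A short sketch of exercising the shim directly (illustrative; in normal use
+# inject_into_urllib3() wires PyOpenSSLContext in for you):
+#
+#     import socket
+#     ctx = PyOpenSSLContext(ssl.PROTOCOL_SSLv23)
+#     ctx.verify_mode = ssl.CERT_REQUIRED
+#     ctx.set_default_verify_paths()
+#     raw = socket.create_connection(('example.com', 443))
+#     tls = ctx.wrap_socket(raw, server_hostname='example.com')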
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/securetransport.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/securetransport.py
new file mode 100644
index 0000000000000000000000000000000000000000..77cb59ed71460a777621e3cd0ee60c97613668e4
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/securetransport.py
@@ -0,0 +1,804 @@
+"""
+SecureTransport support for urllib3 via ctypes.
+
+This makes platform-native TLS available to urllib3 users on macOS without the
+use of a compiler. This is an important feature because the Python Package
+Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
+that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
+this is to give macOS users an alternative solution to the problem, and that
+solution is to use SecureTransport.
+
+We use ctypes here because this solution must not require a compiler. That's
+because pip is not allowed to require a compiler either.
+
+This is not intended to be a seriously long-term solution to this problem.
+The hope is that PEP 543 will eventually solve this issue for us, at which
+point we can retire this contrib module. But in the short term, we need to
+solve the impending tire fire that is Python on Mac without this kind of
+contrib module. So...here we are.
+
+To use this module, simply import and inject it::
+
+    import urllib3.contrib.securetransport
+    urllib3.contrib.securetransport.inject_into_urllib3()
+
+Happy TLSing!
+"""
+from __future__ import absolute_import
+
+import contextlib
+import ctypes
+import errno
+import os.path
+import shutil
+import socket
+import ssl
+import threading
+import weakref
+
+from .. import util
+from ._securetransport.bindings import (
+    Security, SecurityConst, CoreFoundation
+)
+from ._securetransport.low_level import (
+    _assert_no_error, _cert_array_from_pem, _temporary_keychain,
+    _load_client_cert_chain
+)
+
+try:  # Platform-specific: Python 2
+    from socket import _fileobject
+except ImportError:  # Platform-specific: Python 3
+    _fileobject = None
+    from ..packages.backports.makefile import backport_makefile
+
+__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
+
+# SNI always works
+HAS_SNI = True
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+# This dictionary is used by the read callback to obtain a handle to the
+# calling wrapped socket. This is a pretty silly approach, but for now it'll
+# do. I feel like I should be able to smuggle a handle to the wrapped socket
+# directly in the SSLConnectionRef, but for now this approach will work I
+# guess.
+#
+# We need to lock around this structure for inserts, but we don't do it for
+# reads/writes in the callbacks. The reasoning here goes as follows:
+#
+#    1. It is not possible to call into the callbacks before the dictionary is
+#       populated, so once in the callback the id must be in the dictionary.
+#    2. The callbacks don't mutate the dictionary, they only read from it, and
+#       so cannot conflict with any of the insertions.
+#
+# This is good: if we had to lock in the callbacks we'd drastically slow down
+# the performance of this code.
+_connection_refs = weakref.WeakValueDictionary()
+_connection_ref_lock = threading.Lock()
+
+# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
+# for no better reason than we need *a* limit, and this one is right there.
+SSL_WRITE_BLOCKSIZE = 16384
+
+# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
+# individual cipher suites. We need to do this because this is how
+# SecureTransport wants them.
+CIPHER_SUITES = [
+    SecurityConst.TLS_AES_256_GCM_SHA384,
+    SecurityConst.TLS_CHACHA20_POLY1305_SHA256,
+    SecurityConst.TLS_AES_128_GCM_SHA256,
+    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+    SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+    SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,
+    SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
+    SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
+    SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
+    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+    SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+    SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+    SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+    SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+    SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+    SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+    SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+    SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+    SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+    SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+    SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+    SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+    SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
+    SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
+    SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
+    SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
+    SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
+    SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
+]
+
+# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
+# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
+_protocol_to_min_max = {
+    ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
+}
+
+if hasattr(ssl, "PROTOCOL_SSLv2"):
+    _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
+        SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2
+    )
+if hasattr(ssl, "PROTOCOL_SSLv3"):
+    _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
+        SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3
+    )
+if hasattr(ssl, "PROTOCOL_TLSv1"):
+    _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
+        SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1
+    )
+if hasattr(ssl, "PROTOCOL_TLSv1_1"):
+    _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
+        SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11
+    )
+if hasattr(ssl, "PROTOCOL_TLSv1_2"):
+    _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
+        SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12
+    )
+if hasattr(ssl, "PROTOCOL_TLS"):
+    _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
+
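+# So, for example (illustrative), ssl.PROTOCOL_TLSv1_2 (where available) pins
+# both the minimum and maximum to kTLSProtocol12, while
+# PROTOCOL_SSLv23/PROTOCOL_TLS negotiate anywhere from TLSv1 up to TLSv1.2.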
+
+def inject_into_urllib3():
+    """
+    Monkey-patch urllib3 with SecureTransport-backed SSL-support.
+    """
+    util.ssl_.SSLContext = SecureTransportContext
+    util.HAS_SNI = HAS_SNI
+    util.ssl_.HAS_SNI = HAS_SNI
+    util.IS_SECURETRANSPORT = True
+    util.ssl_.IS_SECURETRANSPORT = True
+
+
+def extract_from_urllib3():
+    """
+    Undo monkey-patching by :func:`inject_into_urllib3`.
+    """
+    util.ssl_.SSLContext = orig_util_SSLContext
+    util.HAS_SNI = orig_util_HAS_SNI
+    util.ssl_.HAS_SNI = orig_util_HAS_SNI
+    util.IS_SECURETRANSPORT = False
+    util.ssl_.IS_SECURETRANSPORT = False
+
+
+def _read_callback(connection_id, data_buffer, data_length_pointer):
+    """
+    SecureTransport read callback. This is called by ST to request that data
+    be returned from the socket.
+    """
+    wrapped_socket = None
+    try:
+        wrapped_socket = _connection_refs.get(connection_id)
+        if wrapped_socket is None:
+            return SecurityConst.errSSLInternal
+        base_socket = wrapped_socket.socket
+
+        requested_length = data_length_pointer[0]
+
+        timeout = wrapped_socket.gettimeout()
+        error = None
+        read_count = 0
+
+        try:
+            while read_count < requested_length:
+                if timeout is None or timeout >= 0:
+                    if not util.wait_for_read(base_socket, timeout):
+                        raise socket.error(errno.EAGAIN, 'timed out')
+
+                remaining = requested_length - read_count
+                buffer = (ctypes.c_char * remaining).from_address(
+                    data_buffer + read_count
+                )
+                chunk_size = base_socket.recv_into(buffer, remaining)
+                read_count += chunk_size
+                if not chunk_size:
+                    if not read_count:
+                        return SecurityConst.errSSLClosedGraceful
+                    break
+        except socket.error as e:
+            error = e.errno
+
+            if error is not None and error != errno.EAGAIN:
+                data_length_pointer[0] = read_count
+                if error == errno.ECONNRESET or error == errno.EPIPE:
+                    return SecurityConst.errSSLClosedAbort
+                raise
+
+        data_length_pointer[0] = read_count
+
+        if read_count != requested_length:
+            return SecurityConst.errSSLWouldBlock
+
+        return 0
+    except Exception as e:
+        if wrapped_socket is not None:
+            wrapped_socket._exception = e
+        return SecurityConst.errSSLInternal
+
+
+def _write_callback(connection_id, data_buffer, data_length_pointer):
+    """
+    SecureTransport write callback. This is called by ST to request that data
+    actually be sent on the network.
+    """
+    wrapped_socket = None
+    try:
+        wrapped_socket = _connection_refs.get(connection_id)
+        if wrapped_socket is None:
+            return SecurityConst.errSSLInternal
+        base_socket = wrapped_socket.socket
+
+        bytes_to_write = data_length_pointer[0]
+        data = ctypes.string_at(data_buffer, bytes_to_write)
+
+        timeout = wrapped_socket.gettimeout()
+        error = None
+        sent = 0
+
+        try:
+            while sent < bytes_to_write:
+                if timeout is None or timeout >= 0:
+                    if not util.wait_for_write(base_socket, timeout):
+                        raise socket.error(errno.EAGAIN, 'timed out')
+                chunk_sent = base_socket.send(data)
+                sent += chunk_sent
+
+                # This has some needless copying here, but I'm not sure there's
+                # much value in optimising this data path.
+                data = data[chunk_sent:]
+        except socket.error as e:
+            error = e.errno
+
+            if error is not None and error != errno.EAGAIN:
+                data_length_pointer[0] = sent
+                if error == errno.ECONNRESET or error == errno.EPIPE:
+                    return SecurityConst.errSSLClosedAbort
+                raise
+
+        data_length_pointer[0] = sent
+
+        if sent != bytes_to_write:
+            return SecurityConst.errSSLWouldBlock
+
+        return 0
+    except Exception as e:
+        if wrapped_socket is not None:
+            wrapped_socket._exception = e
+        return SecurityConst.errSSLInternal
+
+
+# We need to keep these two objects references alive: if they get GC'd while
+# in use then SecureTransport could attempt to call a function that is in freed
+# memory. That would be...uh...bad. Yeah, that's the word. Bad.
+_read_callback_pointer = Security.SSLReadFunc(_read_callback)
+_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
+
+
+class WrappedSocket(object):
+    """
+    API-compatibility wrapper that mimics Python's ssl-wrapped socket object.
+
+    Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
+    collector of PyPy.
+    """
+    def __init__(self, socket):
+        self.socket = socket
+        self.context = None
+        self._makefile_refs = 0
+        self._closed = False
+        self._exception = None
+        self._keychain = None
+        self._keychain_dir = None
+        self._client_cert_chain = None
+
+        # We save off the previously-configured timeout and then set it to
+        # zero. This is done because we use select and friends to handle the
+        # timeouts, but if we leave the timeout set on the lower socket then
+        # Python will "kindly" call select on that socket again for us. Avoid
+        # that by forcing the timeout to zero.
+        self._timeout = self.socket.gettimeout()
+        self.socket.settimeout(0)
+
+    @contextlib.contextmanager
+    def _raise_on_error(self):
+        """
+        A context manager that can be used to wrap calls that do I/O from
+        SecureTransport. If any of the I/O callbacks hit an exception, this
+        context manager will correctly propagate the exception after the fact.
+        This avoids silently swallowing those exceptions.
+
+        It also correctly forces the socket closed.
+        """
+        self._exception = None
+
+        # We explicitly don't catch around this yield because in the unlikely
+        # event that an exception was hit in the block we don't want to swallow
+        # it.
+        yield
+        if self._exception is not None:
+            exception, self._exception = self._exception, None
+            self.close()
+            raise exception
+
+    def _set_ciphers(self):
+        """
+        Sets up the allowed ciphers. By default this matches the set in
+        util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
+        custom and doesn't allow changing at this time, mostly because parsing
+        OpenSSL cipher strings is going to be a freaking nightmare.
+        """
+        ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
+        result = Security.SSLSetEnabledCiphers(
+            self.context, ciphers, len(CIPHER_SUITES)
+        )
+        _assert_no_error(result)
+
+    def _custom_validate(self, verify, trust_bundle):
+        """
+        Called when we have set custom validation. We do this in two cases:
+        first, when cert validation is entirely disabled; and second, when
+        using a custom trust DB.
+        """
+        # If we disabled cert validation, just say: cool.
+        if not verify:
+            return
+
+        # We want data in memory, so load it up.
+        if os.path.isfile(trust_bundle):
+            with open(trust_bundle, 'rb') as f:
+                trust_bundle = f.read()
+
+        cert_array = None
+        trust = Security.SecTrustRef()
+
+        try:
+            # Get a CFArray that contains the certs we want.
+            cert_array = _cert_array_from_pem(trust_bundle)
+
+            # Ok, now the hard part. We want to get the SecTrustRef that ST has
+            # created for this connection, shove our CAs into it, tell ST to
+            # ignore everything else it knows, and then ask if it can build a
+            # chain. This is a buuuunch of code.
+            result = Security.SSLCopyPeerTrust(
+                self.context, ctypes.byref(trust)
+            )
+            _assert_no_error(result)
+            if not trust:
+                raise ssl.SSLError("Failed to copy trust reference")
+
+            result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
+            _assert_no_error(result)
+
+            result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
+            _assert_no_error(result)
+
+            trust_result = Security.SecTrustResultType()
+            result = Security.SecTrustEvaluate(
+                trust, ctypes.byref(trust_result)
+            )
+            _assert_no_error(result)
+        finally:
+            if trust:
+                CoreFoundation.CFRelease(trust)
+
+            if cert_array is not None:
+                CoreFoundation.CFRelease(cert_array)
+
+        # Ok, now we can look at what the result was.
+        successes = (
+            SecurityConst.kSecTrustResultUnspecified,
+            SecurityConst.kSecTrustResultProceed
+        )
+        if trust_result.value not in successes:
+            raise ssl.SSLError(
+                "certificate verify failed, error code: %d" %
+                trust_result.value
+            )
+
+    def handshake(self,
+                  server_hostname,
+                  verify,
+                  trust_bundle,
+                  min_version,
+                  max_version,
+                  client_cert,
+                  client_key,
+                  client_key_passphrase):
+        """
+        Actually performs the TLS handshake. This is run automatically by
+        wrapped socket, and shouldn't be needed in user code.
+        """
+        # First, we do the initial bits of connection setup. We need to create
+        # a context, set its I/O funcs, and set the connection reference.
+        self.context = Security.SSLCreateContext(
+            None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
+        )
+        result = Security.SSLSetIOFuncs(
+            self.context, _read_callback_pointer, _write_callback_pointer
+        )
+        _assert_no_error(result)
+
+        # Here we need to compute the handle to use. We do this by taking the
+        # id of self modulo 2**31 - 1. If this is already in the dictionary, we
+        # just keep incrementing by one until we find a free space.
+        with _connection_ref_lock:
+            handle = id(self) % 2147483647
+            while handle in _connection_refs:
+                handle = (handle + 1) % 2147483647
+            _connection_refs[handle] = self
+
+        result = Security.SSLSetConnection(self.context, handle)
+        _assert_no_error(result)
+
+        # If we have a server hostname, we should set that too.
+        if server_hostname:
+            if not isinstance(server_hostname, bytes):
+                server_hostname = server_hostname.encode('utf-8')
+
+            result = Security.SSLSetPeerDomainName(
+                self.context, server_hostname, len(server_hostname)
+            )
+            _assert_no_error(result)
+
+        # Setup the ciphers.
+        self._set_ciphers()
+
+        # Set the minimum and maximum TLS versions.
+        result = Security.SSLSetProtocolVersionMin(self.context, min_version)
+        _assert_no_error(result)
+        result = Security.SSLSetProtocolVersionMax(self.context, max_version)
+        _assert_no_error(result)
+
+        # If there's a trust DB, we need to use it. We do that by telling
+        # SecureTransport to break on server auth. We also do that if we don't
+        # want to validate the certs at all: we just won't actually do any
+        # authing in that case.
+        if not verify or trust_bundle is not None:
+            result = Security.SSLSetSessionOption(
+                self.context,
+                SecurityConst.kSSLSessionOptionBreakOnServerAuth,
+                True
+            )
+            _assert_no_error(result)
+
+        # If there's a client cert, we need to use it.
+        if client_cert:
+            self._keychain, self._keychain_dir = _temporary_keychain()
+            self._client_cert_chain = _load_client_cert_chain(
+                self._keychain, client_cert, client_key
+            )
+            result = Security.SSLSetCertificate(
+                self.context, self._client_cert_chain
+            )
+            _assert_no_error(result)
+
+        while True:
+            with self._raise_on_error():
+                result = Security.SSLHandshake(self.context)
+
+                if result == SecurityConst.errSSLWouldBlock:
+                    raise socket.timeout("handshake timed out")
+                elif result == SecurityConst.errSSLServerAuthCompleted:
+                    self._custom_validate(verify, trust_bundle)
+                    continue
+                else:
+                    _assert_no_error(result)
+                    break
+
+    def fileno(self):
+        return self.socket.fileno()
+
+    # Copy-pasted from Python 3.5 source code
+    def _decref_socketios(self):
+        if self._makefile_refs > 0:
+            self._makefile_refs -= 1
+        if self._closed:
+            self.close()
+
+    def recv(self, bufsiz):
+        buffer = ctypes.create_string_buffer(bufsiz)
+        bytes_read = self.recv_into(buffer, bufsiz)
+        data = buffer[:bytes_read]
+        return data
+
+    def recv_into(self, buffer, nbytes=None):
+        # Read short on EOF.
+        if self._closed:
+            return 0
+
+        if nbytes is None:
+            nbytes = len(buffer)
+
+        buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
+        processed_bytes = ctypes.c_size_t(0)
+
+        with self._raise_on_error():
+            result = Security.SSLRead(
+                self.context, buffer, nbytes, ctypes.byref(processed_bytes)
+            )
+
+        # There are some result codes that we want to treat as "not always
+        # errors". Specifically, those are errSSLWouldBlock,
+        # errSSLClosedGraceful, and errSSLClosedNoNotify.
+        if result == SecurityConst.errSSLWouldBlock:
+            # If we didn't process any bytes, then this was just a time out.
+            # However, we can get errSSLWouldBlock in situations when we *did*
+            # read some data, and in those cases we should just read "short"
+            # and return.
+            if processed_bytes.value == 0:
+                # Timed out, no data read.
+                raise socket.timeout("recv timed out")
+        elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify):
+            # The remote peer has closed this connection. We should do so as
+            # well. Note that we don't actually return here because in
+            # principle this could actually be fired along with return data.
+            # It's unlikely though.
+            self.close()
+        else:
+            _assert_no_error(result)
+
+        # Ok, we read and probably succeeded. We should return whatever data
+        # was actually read.
+        return processed_bytes.value
+
+    def settimeout(self, timeout):
+        self._timeout = timeout
+
+    def gettimeout(self):
+        return self._timeout
+
+    def send(self, data):
+        processed_bytes = ctypes.c_size_t(0)
+
+        with self._raise_on_error():
+            result = Security.SSLWrite(
+                self.context, data, len(data), ctypes.byref(processed_bytes)
+            )
+
+        if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
+            # Timed out
+            raise socket.timeout("send timed out")
+        else:
+            _assert_no_error(result)
+
+        # We sent, and probably succeeded. Tell them how much we sent.
+        return processed_bytes.value
+
+    def sendall(self, data):
+        total_sent = 0
+        while total_sent < len(data):
+            sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
+            total_sent += sent
+
+    def shutdown(self):
+        with self._raise_on_error():
+            Security.SSLClose(self.context)
+
+    def close(self):
+        # TODO: should I do clean shutdown here? Do I have to?
+        if self._makefile_refs < 1:
+            self._closed = True
+            if self.context:
+                CoreFoundation.CFRelease(self.context)
+                self.context = None
+            if self._client_cert_chain:
+                CoreFoundation.CFRelease(self._client_cert_chain)
+                self._client_cert_chain = None
+            if self._keychain:
+                Security.SecKeychainDelete(self._keychain)
+                CoreFoundation.CFRelease(self._keychain)
+                shutil.rmtree(self._keychain_dir)
+                self._keychain = self._keychain_dir = None
+            return self.socket.close()
+        else:
+            self._makefile_refs -= 1
+
+    def getpeercert(self, binary_form=False):
+        # Urgh, annoying.
+        #
+        # Here's how we do this:
+        #
+        # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
+        #    connection.
+        # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
+        # 3. To get the CN, call SecCertificateCopyCommonName and process that
+        #    string so that it's of the appropriate type.
+        # 4. To get the SAN, we need to do something a bit more complex:
+        #    a. Call SecCertificateCopyValues to get the data, requesting
+        #       kSecOIDSubjectAltName.
+        #    b. Mess about with this dictionary to try to get the SANs out.
+        #
+        # This is gross. Really gross. It's going to be a few hundred LoC extra
+        # just to repeat something that SecureTransport can *already do*. So my
+        # operating assumption at this time is that what we want to do is
+        # instead to just flag to urllib3 that it shouldn't do its own hostname
+        # validation when using SecureTransport.
+        if not binary_form:
+            raise ValueError(
+                "SecureTransport only supports dumping binary certs"
+            )
+        trust = Security.SecTrustRef()
+        certdata = None
+        der_bytes = None
+
+        try:
+            # Grab the trust store.
+            result = Security.SSLCopyPeerTrust(
+                self.context, ctypes.byref(trust)
+            )
+            _assert_no_error(result)
+            if not trust:
+                # Probably we haven't done the handshake yet. No biggie.
+                return None
+
+            cert_count = Security.SecTrustGetCertificateCount(trust)
+            if not cert_count:
+                # Also a case that might happen if we haven't handshaked.
+                # Handshook? Handshaken?
+                return None
+
+            leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
+            assert leaf
+
+            # Ok, now we want the DER bytes.
+            certdata = Security.SecCertificateCopyData(leaf)
+            assert certdata
+
+            data_length = CoreFoundation.CFDataGetLength(certdata)
+            data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
+            der_bytes = ctypes.string_at(data_buffer, data_length)
+        finally:
+            if certdata:
+                CoreFoundation.CFRelease(certdata)
+            if trust:
+                CoreFoundation.CFRelease(trust)
+
+        return der_bytes
+
+    def _reuse(self):
+        self._makefile_refs += 1
+
+    def _drop(self):
+        if self._makefile_refs < 1:
+            self.close()
+        else:
+            self._makefile_refs -= 1
+
+
+if _fileobject:  # Platform-specific: Python 2
+    def makefile(self, mode, bufsize=-1):
+        self._makefile_refs += 1
+        return _fileobject(self, mode, bufsize, close=True)
+else:  # Platform-specific: Python 3
+    def makefile(self, mode="r", buffering=None, *args, **kwargs):
+        # We disable buffering with SecureTransport because it conflicts with
+        # the buffering that ST does internally (see issue #1153 for more).
+        buffering = 0
+        return backport_makefile(self, mode, buffering, *args, **kwargs)
+
+WrappedSocket.makefile = makefile
+
+
+class SecureTransportContext(object):
+    """
+    I am a wrapper class for the SecureTransport library, to translate the
+    interface of the standard library ``SSLContext`` object to calls into
+    SecureTransport.
+    """
+    def __init__(self, protocol):
+        self._min_version, self._max_version = _protocol_to_min_max[protocol]
+        self._options = 0
+        self._verify = False
+        self._trust_bundle = None
+        self._client_cert = None
+        self._client_key = None
+        self._client_key_passphrase = None
+
+    @property
+    def check_hostname(self):
+        """
+        SecureTransport cannot have its hostname checking disabled. For more,
+        see the comment on getpeercert() in this file.
+        """
+        return True
+
+    @check_hostname.setter
+    def check_hostname(self, value):
+        """
+        SecureTransport cannot have its hostname checking disabled. For more,
+        see the comment on getpeercert() in this file.
+        """
+        pass
+
+    @property
+    def options(self):
+        # TODO: Well, crap.
+        #
+        # So this is the bit of the code that is the most likely to cause us
+        # trouble. Essentially we need to enumerate all of the SSL options that
+        # users might want to use and try to see if we can sensibly translate
+        # them, or whether we should just ignore them.
+        return self._options
+
+    @options.setter
+    def options(self, value):
+        # TODO: Update in line with above.
+        self._options = value
+
+    @property
+    def verify_mode(self):
+        return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
+
+    @verify_mode.setter
+    def verify_mode(self, value):
+        self._verify = True if value == ssl.CERT_REQUIRED else False
+
+    def set_default_verify_paths(self):
+        # So, this has to do something a bit weird. Specifically, what it does
+        # is nothing.
+        #
+        # This means that, if we had previously had load_verify_locations
+        # called, this does not undo that. We need to do that because it turns
+        # out that the rest of the urllib3 code will attempt to load the
+        # default verify paths if it hasn't been told about any paths, even if
+        # the context itself was configured sometime earlier. We resolve that
+        # by just ignoring it.
+        pass
+
+    def load_default_certs(self):
+        return self.set_default_verify_paths()
+
+    def set_ciphers(self, ciphers):
+        # For now, we just require the default cipher string.
+        if ciphers != util.ssl_.DEFAULT_CIPHERS:
+            raise ValueError(
+                "SecureTransport doesn't support custom cipher strings"
+            )
+
+    def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+        # OK, we only really support cadata and cafile.
+        if capath is not None:
+            raise ValueError(
+                "SecureTransport does not support cert directories"
+            )
+
+        self._trust_bundle = cafile or cadata
+
+    def load_cert_chain(self, certfile, keyfile=None, password=None):
+        self._client_cert = certfile
+        self._client_key = keyfile
+        # Store under the name that wrap_socket()/handshake() actually read.
+        self._client_key_passphrase = password
+
+    def wrap_socket(self, sock, server_side=False,
+                    do_handshake_on_connect=True, suppress_ragged_eofs=True,
+                    server_hostname=None):
+        # So, what do we do here? Firstly, we assert some properties. This is a
+        # stripped down shim, so there is some functionality we don't support.
+        # See PEP 543 for the real deal.
+        assert not server_side
+        assert do_handshake_on_connect
+        assert suppress_ragged_eofs
+
+        # Ok, we're good to go. Now we want to create the wrapped socket object
+        # and store it in the appropriate place.
+        wrapped_socket = WrappedSocket(sock)
+
+        # Now we can handshake
+        wrapped_socket.handshake(
+            server_hostname, self._verify, self._trust_bundle,
+            self._min_version, self._max_version, self._client_cert,
+            self._client_key, self._client_key_passphrase
+        )
+        return wrapped_socket
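+
+
+# Sketch (illustrative): after inject_into_urllib3(), urllib3 constructs these
+# contexts itself; direct use would look roughly like
+#
+#     ctx = SecureTransportContext(ssl.PROTOCOL_SSLv23)
+#     tls_sock = ctx.wrap_socket(plain_sock, server_hostname='example.com')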
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/socks.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/socks.py
new file mode 100644
index 0000000000000000000000000000000000000000..811e312ec873707574d1046dd881c46deb2057f2
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/contrib/socks.py
@@ -0,0 +1,192 @@
+# -*- coding: utf-8 -*-
+"""
+This module contains provisional support for SOCKS proxies from within
+urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
+SOCKS5. To enable its functionality, either install PySocks or install this
+module with the ``socks`` extra.
+
+The SOCKS implementation supports the full range of urllib3 features. It also
+supports the following SOCKS features:
+
+- SOCKS4
+- SOCKS4a
+- SOCKS5
+- Usernames and passwords for the SOCKS proxy
+
+Known Limitations:
+
+- Currently PySocks does not support contacting remote websites via literal
+  IPv6 addresses. Any such connection attempt will fail. You must use a domain
+  name.
+- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
+  such connection attempt will fail.
+"""
+from __future__ import absolute_import
+
+try:
+    import socks
+except ImportError:
+    import warnings
+    from ..exceptions import DependencyWarning
+
+    warnings.warn((
+        'SOCKS support in urllib3 requires the installation of optional '
+        'dependencies: specifically, PySocks.  For more information, see '
+        'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
+        ),
+        DependencyWarning
+    )
+    raise
+
+from socket import error as SocketError, timeout as SocketTimeout
+
+from ..connection import (
+    HTTPConnection, HTTPSConnection
+)
+from ..connectionpool import (
+    HTTPConnectionPool, HTTPSConnectionPool
+)
+from ..exceptions import ConnectTimeoutError, NewConnectionError
+from ..poolmanager import PoolManager
+from ..util.url import parse_url
+
+try:
+    import ssl
+except ImportError:
+    ssl = None
+
+
+class SOCKSConnection(HTTPConnection):
+    """
+    A plain-text HTTP connection that connects via a SOCKS proxy.
+    """
+    def __init__(self, *args, **kwargs):
+        self._socks_options = kwargs.pop('_socks_options')
+        super(SOCKSConnection, self).__init__(*args, **kwargs)
+
+    def _new_conn(self):
+        """
+        Establish a new connection via the SOCKS proxy.
+        """
+        extra_kw = {}
+        if self.source_address:
+            extra_kw['source_address'] = self.source_address
+
+        if self.socket_options:
+            extra_kw['socket_options'] = self.socket_options
+
+        try:
+            conn = socks.create_connection(
+                (self.host, self.port),
+                proxy_type=self._socks_options['socks_version'],
+                proxy_addr=self._socks_options['proxy_host'],
+                proxy_port=self._socks_options['proxy_port'],
+                proxy_username=self._socks_options['username'],
+                proxy_password=self._socks_options['password'],
+                proxy_rdns=self._socks_options['rdns'],
+                timeout=self.timeout,
+                **extra_kw
+            )
+
+        except SocketTimeout as e:
+            raise ConnectTimeoutError(
+                self, "Connection to %s timed out. (connect timeout=%s)" %
+                (self.host, self.timeout))
+
+        except socks.ProxyError as e:
+            # This is fragile as hell, but it seems to be the only way to raise
+            # useful errors here.
+            if e.socket_err:
+                error = e.socket_err
+                if isinstance(error, SocketTimeout):
+                    raise ConnectTimeoutError(
+                        self,
+                        "Connection to %s timed out. (connect timeout=%s)" %
+                        (self.host, self.timeout)
+                    )
+                else:
+                    raise NewConnectionError(
+                        self,
+                        "Failed to establish a new connection: %s" % error
+                    )
+            else:
+                raise NewConnectionError(
+                    self,
+                    "Failed to establish a new connection: %s" % e
+                )
+
+        except SocketError as e:  # Defensive: PySocks should catch all these.
+            raise NewConnectionError(
+                self, "Failed to establish a new connection: %s" % e)
+
+        return conn
+
+
+# We don't need to duplicate the Verified/Unverified distinction from
+# urllib3/connection.py here because the HTTPSConnection will already have been
+# correctly set to either the Verified or Unverified form by that module. This
+# means the SOCKSHTTPSConnection will automatically be the correct type.
+class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
+    pass
+
+
+class SOCKSHTTPConnectionPool(HTTPConnectionPool):
+    ConnectionCls = SOCKSConnection
+
+
+class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
+    ConnectionCls = SOCKSHTTPSConnection
+
+
+class SOCKSProxyManager(PoolManager):
+    """
+    A version of the urllib3 ProxyManager that routes connections via the
+    defined SOCKS proxy.
+    """
+    pool_classes_by_scheme = {
+        'http': SOCKSHTTPConnectionPool,
+        'https': SOCKSHTTPSConnectionPool,
+    }
+
+    def __init__(self, proxy_url, username=None, password=None,
+                 num_pools=10, headers=None, **connection_pool_kw):
+        parsed = parse_url(proxy_url)
+
+        if username is None and password is None and parsed.auth is not None:
+            split = parsed.auth.split(':')
+            if len(split) == 2:
+                username, password = split
+        if parsed.scheme == 'socks5':
+            socks_version = socks.PROXY_TYPE_SOCKS5
+            rdns = False
+        elif parsed.scheme == 'socks5h':
+            socks_version = socks.PROXY_TYPE_SOCKS5
+            rdns = True
+        elif parsed.scheme == 'socks4':
+            socks_version = socks.PROXY_TYPE_SOCKS4
+            rdns = False
+        elif parsed.scheme == 'socks4a':
+            socks_version = socks.PROXY_TYPE_SOCKS4
+            rdns = True
+        else:
+            raise ValueError(
+                "Unable to determine SOCKS version from %s" % proxy_url
+            )
+
+        self.proxy_url = proxy_url
+
+        socks_options = {
+            'socks_version': socks_version,
+            'proxy_host': parsed.host,
+            'proxy_port': parsed.port,
+            'username': username,
+            'password': password,
+            'rdns': rdns
+        }
+        connection_pool_kw['_socks_options'] = socks_options
+
+        super(SOCKSProxyManager, self).__init__(
+            num_pools, headers, **connection_pool_kw
+        )
+
+        self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
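A quick usage sketch for the module above (proxy address, credentials, and target URL are placeholders): the URL scheme picks both the SOCKS version and where DNS resolution happens, so socks5h:// and socks4a:// resolve hostnames on the proxy (rdns=True), while socks5:// and socks4:// resolve them locally.

    from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager

    # socks5h:// -> SOCKS5 with remote DNS (rdns=True in the mapping above)
    proxy = SOCKSProxyManager('socks5h://user:pass@127.0.0.1:1080/')
    resp = proxy.request('GET', 'http://example.com/')
    print(resp.status)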
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/exceptions.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bbaa9871f5718ea1b70d0a31ba40bc4409f630b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/exceptions.py
@@ -0,0 +1,246 @@
+from __future__ import absolute_import
+from .packages.six.moves.http_client import (
+    IncompleteRead as httplib_IncompleteRead
+)
+# Base Exceptions
+
+
+class HTTPError(Exception):
+    "Base exception used by this module."
+    pass
+
+
+class HTTPWarning(Warning):
+    "Base warning used by this module."
+    pass
+
+
+class PoolError(HTTPError):
+    "Base exception for errors caused within a pool."
+    def __init__(self, pool, message):
+        self.pool = pool
+        HTTPError.__init__(self, "%s: %s" % (pool, message))
+
+    def __reduce__(self):
+        # For pickling purposes.
+        return self.__class__, (None, None)
+
+
+class RequestError(PoolError):
+    "Base exception for PoolErrors that have associated URLs."
+    def __init__(self, pool, url, message):
+        self.url = url
+        PoolError.__init__(self, pool, message)
+
+    def __reduce__(self):
+        # For pickling purposes.
+        return self.__class__, (None, self.url, None)
+
+
+class SSLError(HTTPError):
+    "Raised when SSL certificate fails in an HTTPS connection."
+    pass
+
+
+class ProxyError(HTTPError):
+    "Raised when the connection to a proxy fails."
+    pass
+
+
+class DecodeError(HTTPError):
+    "Raised when automatic decoding based on Content-Type fails."
+    pass
+
+
+class ProtocolError(HTTPError):
+    "Raised when something unexpected happens mid-request/response."
+    pass
+
+
+#: Renamed to ProtocolError but aliased for backwards compatibility.
+ConnectionError = ProtocolError
+
+
+# Leaf Exceptions
+
+class MaxRetryError(RequestError):
+    """Raised when the maximum number of retries is exceeded.
+
+    :param pool: The connection pool
+    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
+    :param string url: The requested URL
+    :param exceptions.Exception reason: The underlying error
+
+    """
+
+    def __init__(self, pool, url, reason=None):
+        self.reason = reason
+
+        message = "Max retries exceeded with url: %s (Caused by %r)" % (
+            url, reason)
+
+        RequestError.__init__(self, pool, url, message)
+
+
+class HostChangedError(RequestError):
+    "Raised when an existing pool gets a request for a foreign host."
+
+    def __init__(self, pool, url, retries=3):
+        message = "Tried to open a foreign host with url: %s" % url
+        RequestError.__init__(self, pool, url, message)
+        self.retries = retries
+
+
+class TimeoutStateError(HTTPError):
+    """ Raised when passing an invalid state to a timeout """
+    pass
+
+
+class TimeoutError(HTTPError):
+    """ Raised when a socket timeout error occurs.
+
+    Catching this error will catch both :exc:`ReadTimeoutErrors
+    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
+    """
+    pass
+
+
+class ReadTimeoutError(TimeoutError, RequestError):
+    "Raised when a socket timeout occurs while receiving data from a server"
+    pass
+
+
+# This timeout error does not have a URL attached and needs to inherit from the
+# base HTTPError
+class ConnectTimeoutError(TimeoutError):
+    "Raised when a socket timeout occurs while connecting to a server"
+    pass
+
+
+class NewConnectionError(ConnectTimeoutError, PoolError):
+    "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
+    pass
+
+
+class EmptyPoolError(PoolError):
+    "Raised when a pool runs out of connections and no more are allowed."
+    pass
+
+
+class ClosedPoolError(PoolError):
+    "Raised when a request enters a pool after the pool has been closed."
+    pass
+
+
+class LocationValueError(ValueError, HTTPError):
+    "Raised when there is something wrong with a given URL input."
+    pass
+
+
+class LocationParseError(LocationValueError):
+    "Raised when get_host or similar fails to parse the URL input."
+
+    def __init__(self, location):
+        message = "Failed to parse: %s" % location
+        HTTPError.__init__(self, message)
+
+        self.location = location
+
+
+class ResponseError(HTTPError):
+    "Used as a container for an error reason supplied in a MaxRetryError."
+    GENERIC_ERROR = 'too many error responses'
+    SPECIFIC_ERROR = 'too many {status_code} error responses'
+
+
+class SecurityWarning(HTTPWarning):
+    "Warned when performing security reducing actions"
+    pass
+
+
+class SubjectAltNameWarning(SecurityWarning):
+    "Warned when connecting to a host with a certificate missing a SAN."
+    pass
+
+
+class InsecureRequestWarning(SecurityWarning):
+    "Warned when making an unverified HTTPS request."
+    pass
+
+
+class SystemTimeWarning(SecurityWarning):
+    "Warned when system time is suspected to be wrong"
+    pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+    "Warned when certain SSL configuration is not available on a platform."
+    pass
+
+
+class SNIMissingWarning(HTTPWarning):
+    "Warned when making a HTTPS request without SNI available."
+    pass
+
+
+class DependencyWarning(HTTPWarning):
+    """
+    Warned when an attempt is made to import a module with missing optional
+    dependencies.
+    """
+    pass
+
+
+class ResponseNotChunked(ProtocolError, ValueError):
+    "Response needs to be chunked in order to read it as chunks."
+    pass
+
+
+class BodyNotHttplibCompatible(HTTPError):
+    """
+    Body should be httplib.HTTPResponse like (have an fp attribute which
+    returns raw chunks) for read_chunked().
+    """
+    pass
+
+
+class IncompleteRead(HTTPError, httplib_IncompleteRead):
+    """
+    Response length doesn't match expected Content-Length
+
+    Subclass of http_client.IncompleteRead to allow int value
+    for `partial` to avoid creating large objects on streamed
+    reads.
+    """
+    def __init__(self, partial, expected):
+        super(IncompleteRead, self).__init__(partial, expected)
+
+    def __repr__(self):
+        return ('IncompleteRead(%i bytes read, '
+                '%i more expected)' % (self.partial, self.expected))
+
+
+class InvalidHeader(HTTPError):
+    "The header provided was somehow invalid."
+    pass
+
+
+class ProxySchemeUnknown(AssertionError, ValueError):
+    "ProxyManager does not support the supplied scheme"
+    # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
+
+    def __init__(self, scheme):
+        message = "Not supported proxy scheme %s" % scheme
+        super(ProxySchemeUnknown, self).__init__(message)
+
+
+class HeaderParsingError(HTTPError):
+    "Raised by assert_header_parsing, but we convert it to a log.warning statement."
+    def __init__(self, defects, unparsed_data):
+        message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
+        super(HeaderParsingError, self).__init__(message)
+
+
+class UnrewindableBodyError(HTTPError):
+    "urllib3 encountered an error when trying to rewind a body"
+    pass
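Because everything above roots at HTTPError, callers can catch broadly or narrowly; a small sketch (the unreachable port is a placeholder):

    from pip._vendor import urllib3
    from pip._vendor.urllib3.exceptions import MaxRetryError

    http = urllib3.PoolManager()
    try:
        http.request('GET', 'http://localhost:9/', retries=2)
    except MaxRetryError as e:
        # e.reason carries the underlying error, e.g. a NewConnectionError
        print('gave up on %s: %r' % (e.url, e.reason))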
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/fields.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/fields.py
new file mode 100644
index 0000000000000000000000000000000000000000..37fe64a3e86b0e2d9c4bb64c5c054279698140db
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/fields.py
@@ -0,0 +1,178 @@
+from __future__ import absolute_import
+import email.utils
+import mimetypes
+
+from .packages import six
+
+
+def guess_content_type(filename, default='application/octet-stream'):
+    """
+    Guess the "Content-Type" of a file.
+
+    :param filename:
+        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
+    :param default:
+        If no "Content-Type" can be guessed, default to `default`.
+    """
+    if filename:
+        return mimetypes.guess_type(filename)[0] or default
+    return default
+
+
+def format_header_param(name, value):
+    """
+    Helper function to format and quote a single header parameter.
+
+    Particularly useful for header parameters which might contain
+    non-ASCII values, like file names. This follows RFC 2231, as
+    suggested by RFC 2388 Section 4.4.
+
+    :param name:
+        The name of the parameter, a string expected to be ASCII only.
+    :param value:
+        The value of the parameter, provided as a unicode string.
+    """
+    if not any(ch in value for ch in '"\\\r\n'):
+        result = '%s="%s"' % (name, value)
+        try:
+            result.encode('ascii')
+        except (UnicodeEncodeError, UnicodeDecodeError):
+            pass
+        else:
+            return result
+    if not six.PY3 and isinstance(value, six.text_type):  # Python 2:
+        value = value.encode('utf-8')
+    value = email.utils.encode_rfc2231(value, 'utf-8')
+    value = '%s*=%s' % (name, value)
+    return value
+
+
+class RequestField(object):
+    """
+    A data container for request body parameters.
+
+    :param name:
+        The name of this request field.
+    :param data:
+        The data/value body.
+    :param filename:
+        An optional filename of the request field.
+    :param headers:
+        An optional dict-like object of headers to initially use for the field.
+    """
+    def __init__(self, name, data, filename=None, headers=None):
+        self._name = name
+        self._filename = filename
+        self.data = data
+        self.headers = {}
+        if headers:
+            self.headers = dict(headers)
+
+    @classmethod
+    def from_tuples(cls, fieldname, value):
+        """
+        A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
+
+        Supports constructing :class:`~urllib3.fields.RequestField` from a
+        key/value string pair or from a key/filetuple pair. A filetuple is a
+        (filename, data, MIME type) tuple where the MIME type is optional.
+        For example::
+
+            'foo': 'bar',
+            'fakefile': ('foofile.txt', 'contents of foofile'),
+            'realfile': ('barfile.txt', open('realfile').read()),
+            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
+            'nonamefile': 'contents of nonamefile field',
+
+        Field names and filenames must be unicode.
+        """
+        if isinstance(value, tuple):
+            if len(value) == 3:
+                filename, data, content_type = value
+            else:
+                filename, data = value
+                content_type = guess_content_type(filename)
+        else:
+            filename = None
+            content_type = None
+            data = value
+
+        request_param = cls(fieldname, data, filename=filename)
+        request_param.make_multipart(content_type=content_type)
+
+        return request_param
+
+    def _render_part(self, name, value):
+        """
+        Overridable helper function to format a single header parameter.
+
+        :param name:
+            The name of the parameter, a string expected to be ASCII only.
+        :param value:
+            The value of the parameter, provided as a unicode string.
+        """
+        return format_header_param(name, value)
+
+    def _render_parts(self, header_parts):
+        """
+        Helper function to format and quote a single header.
+
+        Useful for single headers that are composed of multiple items. E.g.,
+        'Content-Disposition' fields.
+
+        :param header_parts:
+            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
+            as `k1="v1"; k2="v2"; ...`.
+        """
+        parts = []
+        iterable = header_parts
+        if isinstance(header_parts, dict):
+            iterable = header_parts.items()
+
+        for name, value in iterable:
+            if value is not None:
+                parts.append(self._render_part(name, value))
+
+        return '; '.join(parts)
+
+    def render_headers(self):
+        """
+        Renders the headers for this request field.
+        """
+        lines = []
+
+        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
+        for sort_key in sort_keys:
+            if self.headers.get(sort_key, False):
+                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
+
+        for header_name, header_value in self.headers.items():
+            if header_name not in sort_keys:
+                if header_value:
+                    lines.append('%s: %s' % (header_name, header_value))
+
+        lines.append('\r\n')
+        return '\r\n'.join(lines)
+
+    def make_multipart(self, content_disposition=None, content_type=None,
+                       content_location=None):
+        """
+        Makes this request field into a multipart request field.
+
+        This method sets the "Content-Disposition", "Content-Type" and
+        "Content-Location" headers on the request field.
+
+        :param content_disposition:
+            The 'Content-Disposition' of the request field; defaults to
+            'form-data'.
+        :param content_type:
+            The 'Content-Type' of the request body.
+        :param content_location:
+            The 'Content-Location' of the request body.
+
+        """
+        self.headers['Content-Disposition'] = content_disposition or 'form-data'
+        self.headers['Content-Disposition'] += '; '.join([
+            '', self._render_parts(
+                (('name', self._name), ('filename', self._filename))
+            )
+        ])
+        self.headers['Content-Type'] = content_type
+        self.headers['Content-Location'] = content_location
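A short sketch of the tuple forms from_tuples() accepts, mirroring its docstring (filenames and contents are placeholders):

    from pip._vendor.urllib3.fields import RequestField

    # Plain key/value becomes a simple form-data part.
    token = RequestField.from_tuples('token', 'abc123')

    # (filename, data[, content_type]) becomes a file part.
    upload = RequestField.from_tuples(
        'upload', ('report.txt', 'file contents', 'text/plain'))

    # Prints the Content-Disposition and Content-Type lines, then a blank line.
    print(upload.render_headers())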
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/filepost.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/filepost.py
new file mode 100644
index 0000000000000000000000000000000000000000..78f1e19b0ed8b579bd259508ddd38c918f78a041
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/filepost.py
@@ -0,0 +1,98 @@
+from __future__ import absolute_import
+import binascii
+import codecs
+import os
+
+from io import BytesIO
+
+from .packages import six
+from .packages.six import b
+from .fields import RequestField
+
+writer = codecs.lookup('utf-8')[3]
+
+
+def choose_boundary():
+    """
+    Our embarrassingly-simple replacement for mimetools.choose_boundary.
+    """
+    boundary = binascii.hexlify(os.urandom(16))
+    if six.PY3:
+        boundary = boundary.decode('ascii')
+    return boundary
+
+
+def iter_field_objects(fields):
+    """
+    Iterate over fields.
+
+    Supports list of (k, v) tuples and dicts, and lists of
+    :class:`~urllib3.fields.RequestField`.
+
+    """
+    if isinstance(fields, dict):
+        i = six.iteritems(fields)
+    else:
+        i = iter(fields)
+
+    for field in i:
+        if isinstance(field, RequestField):
+            yield field
+        else:
+            yield RequestField.from_tuples(*field)
+
+
+def iter_fields(fields):
+    """
+    .. deprecated:: 1.6
+
+    Iterate over fields.
+
+    The addition of :class:`~urllib3.fields.RequestField` makes this function
+    obsolete. Instead, use :func:`iter_field_objects`, which returns
+    :class:`~urllib3.fields.RequestField` objects.
+
+    Supports list of (k, v) tuples and dicts.
+    """
+    if isinstance(fields, dict):
+        return ((k, v) for k, v in six.iteritems(fields))
+
+    return ((k, v) for k, v in fields)
+
+
+def encode_multipart_formdata(fields, boundary=None):
+    """
+    Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
+
+    :param fields:
+        Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
+
+    :param boundary:
+        If not specified, then a random boundary will be generated using
+        :func:`urllib3.filepost.choose_boundary`.
+    """
+    body = BytesIO()
+    if boundary is None:
+        boundary = choose_boundary()
+
+    for field in iter_field_objects(fields):
+        body.write(b('--%s\r\n' % (boundary)))
+
+        writer(body).write(field.render_headers())
+        data = field.data
+
+        if isinstance(data, int):
+            data = str(data)  # Backwards compatibility
+
+        if isinstance(data, six.text_type):
+            writer(body).write(data)
+        else:
+            body.write(data)
+
+        body.write(b'\r\n')
+
+    body.write(b('--%s--\r\n' % (boundary)))
+
+    content_type = str('multipart/form-data; boundary=%s' % boundary)
+
+    return body.getvalue(), content_type
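And the encoder end to end; a fixed boundary is passed here only to keep the sketch deterministic (normally choose_boundary() picks a random one):

    from pip._vendor.urllib3.filepost import encode_multipart_formdata

    body, content_type = encode_multipart_formdata(
        {'field': 'value',
         'file': ('hello.txt', 'hello world', 'text/plain')},
        boundary='xxBOUNDARYxx')
    print(content_type)  # multipart/form-data; boundary=xxBOUNDARYxx
    print(body[:40])     # b'--xxBOUNDARYxx\r\nContent-Disposition: ...'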
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/__init__.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..170e974c15746c3aa6e960d3c7a53981459a528d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import absolute_import
+
+from . import ssl_match_hostname
+
+__all__ = ('ssl_match_hostname', )
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88a2b23e9ea2c7d5edf621d902930473e4e3634f
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11e82dbab1c698de60140a8546d883189e4af1fd
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/__pycache__/six.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/backports/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c75b22967ce175ae3c0fad97280f88fc238c2146
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae20e3932b11dc90ecca2f9959cc94f8f39742e5
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/backports/__pycache__/makefile.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py
new file mode 100644
index 0000000000000000000000000000000000000000..740db377d99a963dafa6789e8aed6c2effa5e97d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/backports/makefile.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*-
+"""
+backports.makefile
+~~~~~~~~~~~~~~~~~~
+
+Backports the Python 3 ``socket.makefile`` method for use with anything that
+wants to create a "fake" socket object.
+"""
+import io
+
+from socket import SocketIO
+
+
+def backport_makefile(self, mode="r", buffering=None, encoding=None,
+                      errors=None, newline=None):
+    """
+    Backport of ``socket.makefile`` from Python 3.5.
+    """
+    if not set(mode) <= {"r", "w", "b"}:
+        raise ValueError(
+            "invalid mode %r (only r, w, b allowed)" % (mode,)
+        )
+    writing = "w" in mode
+    reading = "r" in mode or not writing
+    assert reading or writing
+    binary = "b" in mode
+    rawmode = ""
+    if reading:
+        rawmode += "r"
+    if writing:
+        rawmode += "w"
+    raw = SocketIO(self, rawmode)
+    self._makefile_refs += 1
+    if buffering is None:
+        buffering = -1
+    if buffering < 0:
+        buffering = io.DEFAULT_BUFFER_SIZE
+    if buffering == 0:
+        if not binary:
+            raise ValueError("unbuffered streams must be binary")
+        return raw
+    if reading and writing:
+        buffer = io.BufferedRWPair(raw, raw, buffering)
+    elif reading:
+        buffer = io.BufferedReader(raw, buffering)
+    else:
+        assert writing
+        buffer = io.BufferedWriter(raw, buffering)
+    if binary:
+        return buffer
+    text = io.TextIOWrapper(buffer, encoding, errors, newline)
+    text.mode = mode
+    return text
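A hedged sketch of how this backport is typically wired up: the function is grafted onto a socket-like class as its makefile method. The FakeSocket class here is hypothetical; the only real requirements, per the body above, are a _makefile_refs counter and enough of the socket API for SocketIO.

    from pip._vendor.urllib3.packages.backports.makefile import (
        backport_makefile,
    )

    class FakeSocket(object):
        # Hypothetical socket wrapper; SocketIO reads via recv_into().
        def __init__(self, real_sock):
            self._sock = real_sock
            self._makefile_refs = 0  # incremented by backport_makefile

        def recv_into(self, buf):
            return self._sock.recv_into(buf)

    # Instances now support fake.makefile('rb') with Python 3 semantics.
    FakeSocket.makefile = backport_makefile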
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/six.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/six.py
new file mode 100644
index 0000000000000000000000000000000000000000..190c0239cd7d7af82a6e0cbc8d68053fa2e3dfaf
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/six.py
@@ -0,0 +1,868 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    if sys.platform.startswith("java"):
+        # Jython always uses 32 bits.
+        MAXSIZE = int((1 << 31) - 1)
+    else:
+        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+        class X(object):
+
+            def __len__(self):
+                return 1 << 31
+        try:
+            len(X())
+        except OverflowError:
+            # 32-bit
+            MAXSIZE = int((1 << 31) - 1)
+        else:
+            # 64-bit
+            MAXSIZE = int((1 << 63) - 1)
+        del X
+
+
+def _add_doc(func, doc):
+    """Add documentation to a function."""
+    func.__doc__ = doc
+
+
+def _import_module(name):
+    """Import module, returning the module after the last dot."""
+    __import__(name)
+    return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, tp):
+        result = self._resolve()
+        setattr(obj, self.name, result)  # Invokes __set__.
+        try:
+            # This is a bit ugly, but it avoids running this again by
+            # removing this descriptor.
+            delattr(obj.__class__, self.name)
+        except AttributeError:
+            pass
+        return result
+
+
+class MovedModule(_LazyDescr):
+
+    def __init__(self, name, old, new=None):
+        super(MovedModule, self).__init__(name)
+        if PY3:
+            if new is None:
+                new = name
+            self.mod = new
+        else:
+            self.mod = old
+
+    def _resolve(self):
+        return _import_module(self.mod)
+
+    def __getattr__(self, attr):
+        _module = self._resolve()
+        value = getattr(_module, attr)
+        setattr(self, attr, value)
+        return value
+
+
+class _LazyModule(types.ModuleType):
+
+    def __init__(self, name):
+        super(_LazyModule, self).__init__(name)
+        self.__doc__ = self.__class__.__doc__
+
+    def __dir__(self):
+        attrs = ["__doc__", "__name__"]
+        attrs += [attr.name for attr in self._moved_attributes]
+        return attrs
+
+    # Subclasses should override this
+    _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+        super(MovedAttribute, self).__init__(name)
+        if PY3:
+            if new_mod is None:
+                new_mod = name
+            self.mod = new_mod
+            if new_attr is None:
+                if old_attr is None:
+                    new_attr = name
+                else:
+                    new_attr = old_attr
+            self.attr = new_attr
+        else:
+            self.mod = old_mod
+            if old_attr is None:
+                old_attr = name
+            self.attr = old_attr
+
+    def _resolve(self):
+        module = _import_module(self.mod)
+        return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+    """
+    A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python 3.
+    """
+
+    def __init__(self, six_module_name):
+        self.name = six_module_name
+        self.known_modules = {}
+
+    def _add_module(self, mod, *fullnames):
+        for fullname in fullnames:
+            self.known_modules[self.name + "." + fullname] = mod
+
+    def _get_module(self, fullname):
+        return self.known_modules[self.name + "." + fullname]
+
+    def find_module(self, fullname, path=None):
+        if fullname in self.known_modules:
+            return self
+        return None
+
+    def __get_module(self, fullname):
+        try:
+            return self.known_modules[fullname]
+        except KeyError:
+            raise ImportError("This loader does not know module " + fullname)
+
+    def load_module(self, fullname):
+        try:
+            # in case of a reload
+            return sys.modules[fullname]
+        except KeyError:
+            pass
+        mod = self.__get_module(fullname)
+        if isinstance(mod, MovedModule):
+            mod = mod._resolve()
+        else:
+            mod.__loader__ = self
+        sys.modules[fullname] = mod
+        return mod
+
+    def is_package(self, fullname):
+        """
+        Return True if the named module is a package.
+
+        We need this method to get correct spec objects with
+        Python 3.4 (see PEP451)
+        """
+        return hasattr(self.__get_module(fullname), "__path__")
+
+    def get_code(self, fullname):
+        """Return None
+
+        Required if is_package is implemented."""
+        self.__get_module(fullname)  # eventually raises ImportError
+        return None
+    get_source = get_code  # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+    """Lazy loading of moved objects"""
+    __path__ = []  # mark as package
+
+
+_moved_attributes = [
+    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("intern", "__builtin__", "sys"),
+    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+    MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+    MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("UserDict", "UserDict", "collections"),
+    MovedAttribute("UserList", "UserList", "collections"),
+    MovedAttribute("UserString", "UserString", "collections"),
+    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+    MovedModule("builtins", "__builtin__"),
+    MovedModule("configparser", "ConfigParser"),
+    MovedModule("copyreg", "copy_reg"),
+    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+    MovedModule("http_cookies", "Cookie", "http.cookies"),
+    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+    MovedModule("html_parser", "HTMLParser", "html.parser"),
+    MovedModule("http_client", "httplib", "http.client"),
+    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+    MovedModule("cPickle", "cPickle", "pickle"),
+    MovedModule("queue", "Queue"),
+    MovedModule("reprlib", "repr"),
+    MovedModule("socketserver", "SocketServer"),
+    MovedModule("_thread", "thread", "_thread"),
+    MovedModule("tkinter", "Tkinter"),
+    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+    MovedModule("tkinter_colorchooser", "tkColorChooser",
+                "tkinter.colorchooser"),
+    MovedModule("tkinter_commondialog", "tkCommonDialog",
+                "tkinter.commondialog"),
+    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+                "tkinter.simpledialog"),
+    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+    _moved_attributes += [
+        MovedModule("winreg", "_winreg"),
+    ]
+
+for attr in _moved_attributes:
+    setattr(_MovedItems, attr.name, attr)
+    if isinstance(attr, MovedModule):
+        _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("quote", "urllib", "urllib.parse"),
+    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("urlencode", "urllib", "urllib.parse"),
+    MovedAttribute("splitquery", "urllib", "urllib.parse"),
+    MovedAttribute("splittag", "urllib", "urllib.parse"),
+    MovedAttribute("splituser", "urllib", "urllib.parse"),
+    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+    setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+                      "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+    MovedAttribute("URLError", "urllib2", "urllib.error"),
+    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+    setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+                      "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+    MovedAttribute("urlopen", "urllib2", "urllib.request"),
+    MovedAttribute("install_opener", "urllib2", "urllib.request"),
+    MovedAttribute("build_opener", "urllib2", "urllib.request"),
+    MovedAttribute("pathname2url", "urllib", "urllib.request"),
+    MovedAttribute("url2pathname", "urllib", "urllib.request"),
+    MovedAttribute("getproxies", "urllib", "urllib.request"),
+    MovedAttribute("Request", "urllib2", "urllib.request"),
+    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+    MovedAttribute("URLopener", "urllib", "urllib.request"),
+    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+    setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+                      "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+    MovedAttribute("addbase", "urllib", "urllib.response"),
+    MovedAttribute("addclosehook", "urllib", "urllib.response"),
+    MovedAttribute("addinfo", "urllib", "urllib.response"),
+    MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+    setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+                      "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+                      "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+    __path__ = []  # mark as package
+    parse = _importer._get_module("moves.urllib_parse")
+    error = _importer._get_module("moves.urllib_error")
+    request = _importer._get_module("moves.urllib_request")
+    response = _importer._get_module("moves.urllib_response")
+    robotparser = _importer._get_module("moves.urllib_robotparser")
+
+    def __dir__(self):
+        return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+                      "moves.urllib")
+
+
+def add_move(move):
+    """Add an item to six.moves."""
+    setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+    """Remove item from six.moves."""
+    try:
+        delattr(_MovedItems, name)
+    except AttributeError:
+        try:
+            del moves.__dict__[name]
+        except KeyError:
+            raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+    _meth_func = "__func__"
+    _meth_self = "__self__"
+
+    _func_closure = "__closure__"
+    _func_code = "__code__"
+    _func_defaults = "__defaults__"
+    _func_globals = "__globals__"
+else:
+    _meth_func = "im_func"
+    _meth_self = "im_self"
+
+    _func_closure = "func_closure"
+    _func_code = "func_code"
+    _func_defaults = "func_defaults"
+    _func_globals = "func_globals"
+
+
+try:
+    advance_iterator = next
+except NameError:
+    def advance_iterator(it):
+        return it.next()
+next = advance_iterator
+
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+    def get_unbound_function(unbound):
+        return unbound
+
+    create_bound_method = types.MethodType
+
+    def create_unbound_method(func, cls):
+        return func
+
+    Iterator = object
+else:
+    def get_unbound_function(unbound):
+        return unbound.im_func
+
+    def create_bound_method(func, obj):
+        return types.MethodType(func, obj, obj.__class__)
+
+    def create_unbound_method(func, cls):
+        return types.MethodType(func, None, cls)
+
+    class Iterator(object):
+
+        def next(self):
+            return type(self).__next__(self)
+
+    callable = callable
+_add_doc(get_unbound_function,
+         """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+    def iterkeys(d, **kw):
+        return iter(d.keys(**kw))
+
+    def itervalues(d, **kw):
+        return iter(d.values(**kw))
+
+    def iteritems(d, **kw):
+        return iter(d.items(**kw))
+
+    def iterlists(d, **kw):
+        return iter(d.lists(**kw))
+
+    viewkeys = operator.methodcaller("keys")
+
+    viewvalues = operator.methodcaller("values")
+
+    viewitems = operator.methodcaller("items")
+else:
+    def iterkeys(d, **kw):
+        return d.iterkeys(**kw)
+
+    def itervalues(d, **kw):
+        return d.itervalues(**kw)
+
+    def iteritems(d, **kw):
+        return d.iteritems(**kw)
+
+    def iterlists(d, **kw):
+        return d.iterlists(**kw)
+
+    viewkeys = operator.methodcaller("viewkeys")
+
+    viewvalues = operator.methodcaller("viewvalues")
+
+    viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+         "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+         "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+    def b(s):
+        return s.encode("latin-1")
+
+    def u(s):
+        return s
+    unichr = chr
+    import struct
+    int2byte = struct.Struct(">B").pack
+    del struct
+    byte2int = operator.itemgetter(0)
+    indexbytes = operator.getitem
+    iterbytes = iter
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+    _assertCountEqual = "assertCountEqual"
+    if sys.version_info[1] <= 1:
+        _assertRaisesRegex = "assertRaisesRegexp"
+        _assertRegex = "assertRegexpMatches"
+    else:
+        _assertRaisesRegex = "assertRaisesRegex"
+        _assertRegex = "assertRegex"
+else:
+    def b(s):
+        return s
+    # Workaround for standalone backslash
+
+    def u(s):
+        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+    unichr = unichr
+    int2byte = chr
+
+    def byte2int(bs):
+        return ord(bs[0])
+
+    def indexbytes(buf, i):
+        return ord(buf[i])
+    iterbytes = functools.partial(itertools.imap, ord)
+    import StringIO
+    StringIO = BytesIO = StringIO.StringIO
+    _assertCountEqual = "assertItemsEqual"
+    _assertRaisesRegex = "assertRaisesRegexp"
+    _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+    return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+    return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+    exec_ = getattr(moves.builtins, "exec")
+
+    def reraise(tp, value, tb=None):
+        if value is None:
+            value = tp()
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+else:
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+
+    exec_("""def reraise(tp, value, tb=None):
+    raise tp, value, tb
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+    exec_("""def raise_from(value, from_value):
+    if from_value is None:
+        raise value
+    raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+    exec_("""def raise_from(value, from_value):
+    raise value from from_value
+""")
+else:
+    def raise_from(value, from_value):
+        raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+    def print_(*args, **kwargs):
+        """The new-style print function for Python 2.4 and 2.5."""
+        fp = kwargs.pop("file", sys.stdout)
+        if fp is None:
+            return
+
+        def write(data):
+            if not isinstance(data, basestring):
+                data = str(data)
+            # If the file has an encoding, encode unicode with it.
+            if (isinstance(fp, file) and
+                    isinstance(data, unicode) and
+                    fp.encoding is not None):
+                errors = getattr(fp, "errors", None)
+                if errors is None:
+                    errors = "strict"
+                data = data.encode(fp.encoding, errors)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+if sys.version_info[:2] < (3, 3):
+    _print = print_
+
+    def print_(*args, **kwargs):
+        fp = kwargs.get("file", sys.stdout)
+        flush = kwargs.pop("flush", False)
+        _print(*args, **kwargs)
+        if flush and fp is not None:
+            fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+              updated=functools.WRAPPER_UPDATES):
+        def wrapper(f):
+            f = functools.wraps(wrapped, assigned, updated)(f)
+            f.__wrapped__ = wrapped
+            return f
+        return wrapper
+else:
+    wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(meta):
+
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+    return type.__new__(metaclass, 'temporary_class', (), {})
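+
+# Usage sketch: with_metaclass() lets a single class statement pick up a
+# metaclass under both Python 2 and 3:
+#
+#     class Meta(type):
+#         pass
+#
+#     class Base(with_metaclass(Meta, object)):
+#         pass
+#
+#     assert type(Base) is Meta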
+
+
+def add_metaclass(metaclass):
+    """Class decorator for creating a class with a metaclass."""
+    def wrapper(cls):
+        orig_vars = cls.__dict__.copy()
+        slots = orig_vars.get('__slots__')
+        if slots is not None:
+            if isinstance(slots, str):
+                slots = [slots]
+            for slots_var in slots:
+                orig_vars.pop(slots_var)
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
+        return metaclass(cls.__name__, cls.__bases__, orig_vars)
+    return wrapper
+
+
+def python_2_unicode_compatible(klass):
+    """
+    A decorator that defines __unicode__ and __str__ methods under Python 2.
+    Under Python 3 it does nothing.
+
+    To support Python 2 and 3 with a single code base, define a __str__ method
+    returning text and apply this decorator to the class.
+    """
+    if PY2:
+        if '__str__' not in klass.__dict__:
+            raise ValueError("@python_2_unicode_compatible cannot be applied "
+                             "to %s because it doesn't define __str__()." %
+                             klass.__name__)
+        klass.__unicode__ = klass.__str__
+        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+    return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = []  # required for PEP 302 and PEP 451
+__package__ = __name__  # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+    for i, importer in enumerate(sys.meta_path):
+        # Here's some real nastiness: Another "instance" of the six module might
+        # be floating around. Therefore, we can't use isinstance() to check for
+        # the six meta path importer, since the other six instance will have
+        # inserted an importer with different class.
+        if (type(importer).__name__ == "_SixMetaPathImporter" and
+                importer.name == __name__):
+            del sys.meta_path[i]
+            break
+    del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6594eb264d3bbe36b98209d77de55a806817751
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py
@@ -0,0 +1,19 @@
+import sys
+
+try:
+    # Our match_hostname function is the same as 3.5's, so we only want to
+    # import the match_hostname function if it's at least that good.
+    if sys.version_info < (3, 5):
+        raise ImportError("Fallback to vendored code")
+
+    from ssl import CertificateError, match_hostname
+except ImportError:
+    try:
+        # Backport of the function from a pypi module
+        from backports.ssl_match_hostname import CertificateError, match_hostname
+    except ImportError:
+        # Our vendored copy
+        from ._implementation import CertificateError, match_hostname
+
+# Not needed, but documenting what we provide.
+__all__ = ('CertificateError', 'match_hostname')
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8fb385c45d079e28ef244da714746dfff0311946
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0e93374a7b29661b04adcdc37725c25c84c12baa
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__pycache__/_implementation.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py
new file mode 100644
index 0000000000000000000000000000000000000000..970cf653e7f45f61936475f1b0bd368f1ddf4f8f
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/_implementation.py
@@ -0,0 +1,156 @@
+"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
+
+# Note: This file is under the PSF license as the code comes from the python
+# stdlib.   http://docs.python.org/3/license.html
+
+import re
+import sys
+
+# ipaddress has been backported to 2.6+ in pypi.  If it is installed on the
+# system, use it to handle IPAddress ServerAltnames (this was added in
+# Python 3.5); otherwise only do DNS matching.  This allows
+# backports.ssl_match_hostname to continue to be used in Python 2.7.
+try:
+    from pip._vendor import ipaddress
+except ImportError:
+    ipaddress = None
+
+__version__ = '3.5.0.1'
+
+
+class CertificateError(ValueError):
+    pass
+
+
+def _dnsname_match(dn, hostname, max_wildcards=1):
+    """Matching according to RFC 6125, section 6.4.3
+
+    http://tools.ietf.org/html/rfc6125#section-6.4.3
+    """
+    pats = []
+    if not dn:
+        return False
+
+    # Ported from python3-syntax:
+    # leftmost, *remainder = dn.split(r'.')
+    parts = dn.split(r'.')
+    leftmost = parts[0]
+    remainder = parts[1:]
+
+    wildcards = leftmost.count('*')
+    if wildcards > max_wildcards:
+        # Issue #17980: avoid denials of service by refusing more
+        # than one wildcard per fragment.  A survey of established
+        # policy among SSL implementations showed it to be a
+        # reasonable choice.
+        raise CertificateError(
+            "too many wildcards in certificate DNS name: " + repr(dn))
+
+    # speed up common case w/o wildcards
+    if not wildcards:
+        return dn.lower() == hostname.lower()
+
+    # RFC 6125, section 6.4.3, subitem 1.
+    # The client SHOULD NOT attempt to match a presented identifier in which
+    # the wildcard character comprises a label other than the left-most label.
+    if leftmost == '*':
+        # When '*' is a fragment by itself, it matches a non-empty dotless
+        # fragment.
+        pats.append('[^.]+')
+    elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+        # RFC 6125, section 6.4.3, subitem 3.
+        # The client SHOULD NOT attempt to match a presented identifier
+        # where the wildcard character is embedded within an A-label or
+        # U-label of an internationalized domain name.
+        pats.append(re.escape(leftmost))
+    else:
+        # Otherwise, '*' matches any dotless string, e.g. www*
+        pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+    # add the remaining fragments, ignore any wildcards
+    for frag in remainder:
+        pats.append(re.escape(frag))
+
+    pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+    return pat.match(hostname)
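+# Behaviour sketch (values chosen for illustration):
+#
+#     _dnsname_match('*.example.com', 'www.example.com')  # truthy match
+#     _dnsname_match('*.example.com', 'a.b.example.com')  # None: '*' covers
+#                                                         # one label only
+#     _dnsname_match('example.com', 'EXAMPLE.COM')        # True (case-folded)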
+
+
+def _to_unicode(obj):
+    if isinstance(obj, str) and sys.version_info < (3,):
+        obj = unicode(obj, encoding='ascii', errors='strict')
+    return obj
+
+
+def _ipaddress_match(ipname, host_ip):
+    """Exact matching of IP addresses.
+
+    RFC 6125 explicitly doesn't define an algorithm for this
+    (section 1.7.2 - "Out of Scope").
+    """
+    # OpenSSL may add a trailing newline to a subjectAltName's IP address
+    # Divergence from upstream: ipaddress can't handle byte str
+    ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
+    return ip == host_ip
+
+
+def match_hostname(cert, hostname):
+    """Verify that *cert* (in decoded format as returned by
+    SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
+    rules are followed, but IP addresses are not accepted for *hostname*.
+
+    CertificateError is raised on failure. On success, the function
+    returns nothing.
+    """
+    if not cert:
+        raise ValueError("empty or no certificate, match_hostname needs a "
+                         "SSL socket or SSL context with either "
+                         "CERT_OPTIONAL or CERT_REQUIRED")
+    try:
+        # Divergence from upstream: ipaddress can't handle byte str
+        host_ip = ipaddress.ip_address(_to_unicode(hostname))
+    except ValueError:
+        # Not an IP address (common case)
+        host_ip = None
+    except UnicodeError:
+        # Divergence from upstream: Have to deal with ipaddress not taking
+        # byte strings.  addresses should be all ascii, so we consider it not
+        # an ipaddress in this case
+        host_ip = None
+    except AttributeError:
+        # Divergence from upstream: Make ipaddress library optional
+        if ipaddress is None:
+            host_ip = None
+        else:
+            raise
+    dnsnames = []
+    san = cert.get('subjectAltName', ())
+    for key, value in san:
+        if key == 'DNS':
+            if host_ip is None and _dnsname_match(value, hostname):
+                return
+            dnsnames.append(value)
+        elif key == 'IP Address':
+            if host_ip is not None and _ipaddress_match(value, host_ip):
+                return
+            dnsnames.append(value)
+    if not dnsnames:
+        # The subject is only checked when there is no dNSName entry
+        # in subjectAltName
+        for sub in cert.get('subject', ()):
+            for key, value in sub:
+                # XXX according to RFC 2818, the most specific Common Name
+                # must be used.
+                if key == 'commonName':
+                    if _dnsname_match(value, hostname):
+                        return
+                    dnsnames.append(value)
+    if len(dnsnames) > 1:
+        raise CertificateError("hostname %r "
+            "doesn't match either of %s"
+            % (hostname, ', '.join(map(repr, dnsnames))))
+    elif len(dnsnames) == 1:
+        raise CertificateError("hostname %r "
+            "doesn't match %r"
+            % (hostname, dnsnames[0]))
+    else:
+        raise CertificateError("no appropriate commonName or "
+            "subjectAltName fields were found")
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/poolmanager.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/poolmanager.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe5491cfdadff57660b027d9fac634585676717a
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/poolmanager.py
@@ -0,0 +1,450 @@
+from __future__ import absolute_import
+import collections
+import functools
+import logging
+
+from ._collections import RecentlyUsedContainer
+from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
+from .connectionpool import port_by_scheme
+from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
+from .packages.six.moves.urllib.parse import urljoin
+from .request import RequestMethods
+from .util.url import parse_url
+from .util.retry import Retry
+
+
+__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
+
+
+log = logging.getLogger(__name__)
+
+SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
+                'ssl_version', 'ca_cert_dir', 'ssl_context')
+
+# All known keyword arguments that could be provided to the pool manager, its
+# pools, or the underlying connections. This is used to construct a pool key.
+_key_fields = (
+    'key_scheme',  # str
+    'key_host',  # str
+    'key_port',  # int
+    'key_timeout',  # int or float or Timeout
+    'key_retries',  # int or Retry
+    'key_strict',  # bool
+    'key_block',  # bool
+    'key_source_address',  # str
+    'key_key_file',  # str
+    'key_cert_file',  # str
+    'key_cert_reqs',  # str
+    'key_ca_certs',  # str
+    'key_ssl_version',  # str
+    'key_ca_cert_dir',  # str
+    'key_ssl_context',  # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
+    'key_maxsize',  # int
+    'key_headers',  # dict
+    'key__proxy',  # parsed proxy url
+    'key__proxy_headers',  # dict
+    'key_socket_options',  # list of (level (int), optname (int), value (int or str)) tuples
+    'key__socks_options',  # dict
+    'key_assert_hostname',  # bool or string
+    'key_assert_fingerprint',  # str
+    'key_server_hostname',  # str
+)
+
+#: The namedtuple class used to construct keys for the connection pool.
+#: All custom key schemes should include the fields in this key at a minimum.
+PoolKey = collections.namedtuple('PoolKey', _key_fields)
+
+
+def _default_key_normalizer(key_class, request_context):
+    """
+    Create a pool key out of a request context dictionary.
+
+    According to RFC 3986, both the scheme and host are case-insensitive.
+    Therefore, this function normalizes both before constructing the pool
+    key for an HTTPS request. If you wish to change this behaviour, provide
+    alternate callables to ``key_fn_by_scheme``.
+
+    :param key_class:
+        The class to use when constructing the key. This should be a namedtuple
+        with the ``scheme`` and ``host`` keys at a minimum.
+    :type  key_class: namedtuple
+    :param request_context:
+        A dictionary-like object that contains the context for a request.
+    :type  request_context: dict
+
+    :return: A namedtuple that can be used as a connection pool key.
+    :rtype:  PoolKey
+    """
+    # Since we mutate the dictionary, make a copy first
+    context = request_context.copy()
+    context['scheme'] = context['scheme'].lower()
+    context['host'] = context['host'].lower()
+
+    # These are both dictionaries and need to be transformed into frozensets
+    for key in ('headers', '_proxy_headers', '_socks_options'):
+        if key in context and context[key] is not None:
+            context[key] = frozenset(context[key].items())
+
+    # The socket_options key may be a list and needs to be transformed into a
+    # tuple.
+    socket_opts = context.get('socket_options')
+    if socket_opts is not None:
+        context['socket_options'] = tuple(socket_opts)
+
+    # Map the kwargs to the names in the namedtuple - this is necessary since
+    # namedtuples can't have fields starting with '_'.
+    for key in list(context.keys()):
+        context['key_' + key] = context.pop(key)
+
+    # Default to ``None`` for keys missing from the context
+    for field in key_class._fields:
+        if field not in context:
+            context[field] = None
+
+    return key_class(**context)
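+# Normalization sketch (the context keys below are illustrative):
+#
+#     ctx = {'scheme': 'HTTPS', 'host': 'Example.COM', 'port': 443}
+#     key = _default_key_normalizer(PoolKey, ctx)
+#     # key.key_scheme == 'https', key.key_host == 'example.com',
+#     # key.key_port == 443; every other PoolKey field defaults to None.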
+
+
+#: A dictionary that maps a scheme to a callable that creates a pool key.
+#: This can be used to alter the way pool keys are constructed, if desired.
+#: Each PoolManager makes a copy of this dictionary so they can be configured
+#: globally here, or individually on the instance.
+key_fn_by_scheme = {
+    'http': functools.partial(_default_key_normalizer, PoolKey),
+    'https': functools.partial(_default_key_normalizer, PoolKey),
+}
+
+pool_classes_by_scheme = {
+    'http': HTTPConnectionPool,
+    'https': HTTPSConnectionPool,
+}
+
+
+class PoolManager(RequestMethods):
+    """
+    Allows for arbitrary requests while transparently keeping track of
+    necessary connection pools for you.
+
+    :param num_pools:
+        Number of connection pools to cache before discarding the least
+        recently used pool.
+
+    :param headers:
+        Headers to include with all requests, unless other headers are given
+        explicitly.
+
+    :param \\**connection_pool_kw:
+        Additional parameters are used to create fresh
+        :class:`urllib3.connectionpool.ConnectionPool` instances.
+
+    Example::
+
+        >>> manager = PoolManager(num_pools=2)
+        >>> r = manager.request('GET', 'http://google.com/')
+        >>> r = manager.request('GET', 'http://google.com/mail')
+        >>> r = manager.request('GET', 'http://yahoo.com/')
+        >>> len(manager.pools)
+        2
+
+    """
+
+    proxy = None
+
+    def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
+        RequestMethods.__init__(self, headers)
+        self.connection_pool_kw = connection_pool_kw
+        self.pools = RecentlyUsedContainer(num_pools,
+                                           dispose_func=lambda p: p.close())
+
+        # Locally set the pool classes and keys so other PoolManagers can
+        # override them.
+        self.pool_classes_by_scheme = pool_classes_by_scheme
+        self.key_fn_by_scheme = key_fn_by_scheme.copy()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.clear()
+        # Return False to re-raise any potential exceptions
+        return False
+
+    def _new_pool(self, scheme, host, port, request_context=None):
+        """
+        Create a new :class:`ConnectionPool` based on host, port, scheme, and
+        any additional pool keyword arguments.
+
+        If ``request_context`` is provided, it is provided as keyword arguments
+        to the pool class used. This method is used to actually create the
+        connection pools handed out by :meth:`connection_from_url` and
+        companion methods. It is intended to be overridden for customization.
+        """
+        pool_cls = self.pool_classes_by_scheme[scheme]
+        if request_context is None:
+            request_context = self.connection_pool_kw.copy()
+
+        # Although the context has everything necessary to create the pool,
+        # this function has historically only used the scheme, host, and port
+        # in the positional args. When an API change is acceptable these can
+        # be removed.
+        for key in ('scheme', 'host', 'port'):
+            request_context.pop(key, None)
+
+        if scheme == 'http':
+            for kw in SSL_KEYWORDS:
+                request_context.pop(kw, None)
+
+        return pool_cls(host, port, **request_context)
+
+    def clear(self):
+        """
+        Empty our store of pools and direct them all to close.
+
+        This will not affect in-flight connections, but they will not be
+        re-used after completion.
+        """
+        self.pools.clear()
+
+    def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+        """
+        Get a :class:`ConnectionPool` based on the host, port, and scheme.
+
+        If ``port`` isn't given, it will be derived from the ``scheme`` using
+        ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
+        provided, it is merged with the instance's ``connection_pool_kw``
+        variable and used to create the new connection pool, if one is
+        needed.
+        """
+
+        if not host:
+            raise LocationValueError("No host specified.")
+
+        request_context = self._merge_pool_kwargs(pool_kwargs)
+        request_context['scheme'] = scheme or 'http'
+        if not port:
+            port = port_by_scheme.get(request_context['scheme'].lower(), 80)
+        request_context['port'] = port
+        request_context['host'] = host
+
+        return self.connection_from_context(request_context)
+
+    def connection_from_context(self, request_context):
+        """
+        Get a :class:`ConnectionPool` based on the request context.
+
+        ``request_context`` must at least contain the ``scheme`` key and its
+        value must be a key in ``key_fn_by_scheme`` instance variable.
+        """
+        scheme = request_context['scheme'].lower()
+        pool_key_constructor = self.key_fn_by_scheme[scheme]
+        pool_key = pool_key_constructor(request_context)
+
+        return self.connection_from_pool_key(pool_key, request_context=request_context)
+
+    def connection_from_pool_key(self, pool_key, request_context=None):
+        """
+        Get a :class:`ConnectionPool` based on the provided pool key.
+
+        ``pool_key`` should be a namedtuple that only contains immutable
+        objects. At a minimum it must have the ``scheme``, ``host``, and
+        ``port`` fields.
+        """
+        with self.pools.lock:
+            # If the scheme, host, or port doesn't match existing open
+            # connections, open a new ConnectionPool.
+            pool = self.pools.get(pool_key)
+            if pool:
+                return pool
+
+            # Make a fresh ConnectionPool of the desired type
+            scheme = request_context['scheme']
+            host = request_context['host']
+            port = request_context['port']
+            pool = self._new_pool(scheme, host, port, request_context=request_context)
+            self.pools[pool_key] = pool
+
+        return pool
+
+    def connection_from_url(self, url, pool_kwargs=None):
+        """
+        Similar to :func:`urllib3.connectionpool.connection_from_url`.
+
+        If ``pool_kwargs`` is not provided and a new pool needs to be
+        constructed, ``self.connection_pool_kw`` is used to initialize
+        the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
+        is provided, it is used instead. Note that if a new pool does not
+        need to be created for the request, the provided ``pool_kwargs`` are
+        not used.
+        """
+        u = parse_url(url)
+        return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
+                                         pool_kwargs=pool_kwargs)
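+    # e.g. ``pm.connection_from_url('https://example.com/a')`` and
+    # ``pm.connection_from_url('https://example.com/b')`` should hand back
+    # the same HTTPSConnectionPool, since only scheme/host/port (plus the
+    # pool kwargs) enter the pool key; the path does not.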
+
+    def _merge_pool_kwargs(self, override):
+        """
+        Merge a dictionary of override values for self.connection_pool_kw.
+
+        This does not modify self.connection_pool_kw and returns a new dict.
+        Any keys in the override dictionary with a value of ``None`` are
+        removed from the merged dictionary.
+        """
+        base_pool_kwargs = self.connection_pool_kw.copy()
+        if override:
+            for key, value in override.items():
+                if value is None:
+                    try:
+                        del base_pool_kwargs[key]
+                    except KeyError:
+                        pass
+                else:
+                    base_pool_kwargs[key] = value
+        return base_pool_kwargs
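+    # Merge semantics sketch (assuming ``pm = PoolManager(maxsize=4,
+    # block=True)``):
+    #
+    #     pm._merge_pool_kwargs({'maxsize': 10, 'block': None})
+    #     # -> {'maxsize': 10}; 'block' is dropped because its override is
+    #     #    None, and pm.connection_pool_kw itself is left untouched.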
+
+    def urlopen(self, method, url, redirect=True, **kw):
+        """
+        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`,
+        but with custom cross-host redirect logic; only the request-uri
+        portion of the ``url`` is sent to the pool.
+
+        The given ``url`` parameter must be absolute, such that an appropriate
+        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
+        """
+        u = parse_url(url)
+        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
+
+        kw['assert_same_host'] = False
+        kw['redirect'] = False
+
+        if 'headers' not in kw:
+            kw['headers'] = self.headers.copy()
+
+        if self.proxy is not None and u.scheme == "http":
+            response = conn.urlopen(method, url, **kw)
+        else:
+            response = conn.urlopen(method, u.request_uri, **kw)
+
+        redirect_location = redirect and response.get_redirect_location()
+        if not redirect_location:
+            return response
+
+        # Support relative URLs for redirecting.
+        redirect_location = urljoin(url, redirect_location)
+
+        # RFC 7231, Section 6.4.4
+        if response.status == 303:
+            method = 'GET'
+
+        retries = kw.get('retries')
+        if not isinstance(retries, Retry):
+            retries = Retry.from_int(retries, redirect=redirect)
+
+        # Strip headers marked as unsafe to forward to the redirected location.
+        # Check remove_headers_on_redirect to avoid a potential network call within
+        # conn.is_same_host() which may use socket.gethostbyname() in the future.
+        if (retries.remove_headers_on_redirect
+                and not conn.is_same_host(redirect_location)):
+            for header in retries.remove_headers_on_redirect:
+                kw['headers'].pop(header, None)
+
+        try:
+            retries = retries.increment(method, url, response=response, _pool=conn)
+        except MaxRetryError:
+            if retries.raise_on_redirect:
+                raise
+            return response
+
+        kw['retries'] = retries
+        kw['redirect'] = redirect
+
+        log.info("Redirecting %s -> %s", url, redirect_location)
+        return self.urlopen(method, redirect_location, **kw)
+
+
+class ProxyManager(PoolManager):
+    """
+    Behaves just like :class:`PoolManager`, but sends all requests through
+    the defined proxy, using the CONNECT method for HTTPS URLs.
+
+    :param proxy_url:
+        The URL of the proxy to be used.
+
+    :param proxy_headers:
+        A dictionary containing headers that will be sent to the proxy. For
+        HTTP they are sent with each request; in the HTTPS/CONNECT case they
+        are sent only once. This can be used for proxy authentication.
+
+    Example:
+        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
+        >>> r1 = proxy.request('GET', 'http://google.com/')
+        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
+        >>> len(proxy.pools)
+        1
+        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
+        >>> r4 = proxy.request('GET', 'https://twitter.com/')
+        >>> len(proxy.pools)
+        3
+
+    """
+
+    def __init__(self, proxy_url, num_pools=10, headers=None,
+                 proxy_headers=None, **connection_pool_kw):
+
+        if isinstance(proxy_url, HTTPConnectionPool):
+            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
+                                        proxy_url.port)
+        proxy = parse_url(proxy_url)
+        if not proxy.port:
+            port = port_by_scheme.get(proxy.scheme, 80)
+            proxy = proxy._replace(port=port)
+
+        if proxy.scheme not in ("http", "https"):
+            raise ProxySchemeUnknown(proxy.scheme)
+
+        self.proxy = proxy
+        self.proxy_headers = proxy_headers or {}
+
+        connection_pool_kw['_proxy'] = self.proxy
+        connection_pool_kw['_proxy_headers'] = self.proxy_headers
+
+        super(ProxyManager, self).__init__(
+            num_pools, headers, **connection_pool_kw)
+
+    def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
+        if scheme == "https":
+            return super(ProxyManager, self).connection_from_host(
+                host, port, scheme, pool_kwargs=pool_kwargs)
+
+        return super(ProxyManager, self).connection_from_host(
+            self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
+
+    def _set_proxy_headers(self, url, headers=None):
+        """
+        Sets headers needed by proxies: specifically, the Accept and Host
+        headers. Only sets headers not provided by the user.
+        """
+        headers_ = {'Accept': '*/*'}
+
+        netloc = parse_url(url).netloc
+        if netloc:
+            headers_['Host'] = netloc
+
+        if headers:
+            headers_.update(headers)
+        return headers_
+
+    def urlopen(self, method, url, redirect=True, **kw):
+        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
+        u = parse_url(url)
+
+        if u.scheme == "http":
+            # For proxied HTTPS requests, httplib sets the necessary headers
+            # on the CONNECT to the proxy. For HTTP, we'll definitely
+            # need to set 'Host' at the very least.
+            headers = kw.get('headers', self.headers)
+            kw['headers'] = self._set_proxy_headers(url, headers)
+
+        return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
+
+
+def proxy_from_url(url, **kw):
+    return ProxyManager(proxy_url=url, **kw)
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/request.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/request.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f2f44bb2110a0017a0baa10aed5d8e175b7f7db
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/request.py
@@ -0,0 +1,150 @@
+from __future__ import absolute_import
+
+from .filepost import encode_multipart_formdata
+from .packages.six.moves.urllib.parse import urlencode
+
+
+__all__ = ['RequestMethods']
+
+
+class RequestMethods(object):
+    """
+    Convenience mixin for classes who implement a :meth:`urlopen` method, such
+    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
+    :class:`~urllib3.poolmanager.PoolManager`.
+
+    Provides behavior for making common types of HTTP request methods and
+    decides which type of request field encoding to use.
+
+    Specifically,
+
+    :meth:`.request_encode_url` is for sending requests whose fields are
+    encoded in the URL (such as GET, HEAD, DELETE).
+
+    :meth:`.request_encode_body` is for sending requests whose fields are
+    encoded in the *body* of the request using multipart or www-form-urlencoded
+    (such as for POST, PUT, PATCH).
+
+    :meth:`.request` is for making any kind of request; it will look up the
+    appropriate encoding format and use one of the above two methods to make
+    the request.
+
+    Initializer parameters:
+
+    :param headers:
+        Headers to include with all requests, unless other headers are given
+        explicitly.
+    """
+
+    _encode_url_methods = {'DELETE', 'GET', 'HEAD', 'OPTIONS'}
+
+    def __init__(self, headers=None):
+        self.headers = headers or {}
+
+    def urlopen(self, method, url, body=None, headers=None,
+                encode_multipart=True, multipart_boundary=None,
+                **kw):  # Abstract
+        raise NotImplementedError("Classes extending RequestMethods must implement "
+                                  "their own ``urlopen`` method.")
+
+    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
+        """
+        Make a request using :meth:`urlopen` with the appropriate encoding of
+        ``fields`` based on the ``method`` used.
+
+        This is a convenience method that requires the least amount of manual
+        effort. It can be used in most situations, while still having the
+        option to drop down to more specific methods when necessary, such as
+        :meth:`request_encode_url`, :meth:`request_encode_body`,
+        or even the lowest level :meth:`urlopen`.
+        """
+        method = method.upper()
+
+        urlopen_kw['request_url'] = url
+
+        if method in self._encode_url_methods:
+            return self.request_encode_url(method, url, fields=fields,
+                                           headers=headers,
+                                           **urlopen_kw)
+        else:
+            return self.request_encode_body(method, url, fields=fields,
+                                            headers=headers,
+                                            **urlopen_kw)
+
+    def request_encode_url(self, method, url, fields=None, headers=None,
+                           **urlopen_kw):
+        """
+        Make a request using :meth:`urlopen` with the ``fields`` encoded in
+        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
+        """
+        if headers is None:
+            headers = self.headers
+
+        extra_kw = {'headers': headers}
+        extra_kw.update(urlopen_kw)
+
+        if fields:
+            url += '?' + urlencode(fields)
+
+        return self.urlopen(method, url, **extra_kw)
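+    # Sketch: the fields become a query string (hypothetical manager ``pm``):
+    #
+    #     pm.request_encode_url('GET', 'http://example.com/search',
+    #                           fields={'q': 'urllib3'})
+    #     # issues GET http://example.com/search?q=urllib3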
+
+    def request_encode_body(self, method, url, fields=None, headers=None,
+                            encode_multipart=True, multipart_boundary=None,
+                            **urlopen_kw):
+        """
+        Make a request using :meth:`urlopen` with the ``fields`` encoded in
+        the body. This is useful for request methods like POST, PUT, PATCH, etc.
+
+        When ``encode_multipart=True`` (default), then
+        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
+        the payload with the appropriate content type. Otherwise
+        :meth:`urllib.urlencode` is used with the
+        'application/x-www-form-urlencoded' content type.
+
+        Multipart encoding must be used when posting files, and it's reasonably
+        safe to use it at other times, too. However, it may break request
+        signing, such as with OAuth.
+
+        Supports an optional ``fields`` parameter of key/value strings AND
+        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
+        the MIME type is optional. For example::
+
+            fields = {
+                'foo': 'bar',
+                'fakefile': ('foofile.txt', 'contents of foofile'),
+                'realfile': ('barfile.txt', open('realfile').read()),
+                'typedfile': ('bazfile.bin', open('bazfile').read(),
+                              'image/jpeg'),
+                'nonamefile': 'contents of nonamefile field',
+            }
+
+        When uploading a file, providing a filename (the first parameter of the
+        tuple) is optional but recommended to best mimic behavior of browsers.
+
+        Note that if ``headers`` are supplied, the 'Content-Type' header will
+        be overwritten because it depends on the dynamic random boundary string
+        which is used to compose the body of the request. The random boundary
+        string can be explicitly set with the ``multipart_boundary`` parameter.
+        """
+        if headers is None:
+            headers = self.headers
+
+        extra_kw = {'headers': {}}
+
+        if fields:
+            if 'body' in urlopen_kw:
+                raise TypeError(
+                    "request got values for both 'fields' and 'body', can only specify one.")
+
+            if encode_multipart:
+                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
+            else:
+                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
+
+            extra_kw['body'] = body
+            extra_kw['headers'] = {'Content-Type': content_type}
+
+        extra_kw['headers'].update(headers)
+        extra_kw.update(urlopen_kw)
+
+        return self.urlopen(method, url, **extra_kw)
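+    # Sketch (hypothetical ``pm``): a multipart POST with a generated
+    # boundary and a matching Content-Type header:
+    #
+    #     pm.request_encode_body('POST', 'http://example.com/upload',
+    #                            fields={'file': ('report.txt', 'data')})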
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/response.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/response.py
new file mode 100644
index 0000000000000000000000000000000000000000..c112690b0a7af86bec04cb4a4f6ac8edb3cdaa3b
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/response.py
@@ -0,0 +1,705 @@
+from __future__ import absolute_import
+from contextlib import contextmanager
+import zlib
+import io
+import logging
+from socket import timeout as SocketTimeout
+from socket import error as SocketError
+
+from ._collections import HTTPHeaderDict
+from .exceptions import (
+    BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
+    ResponseNotChunked, IncompleteRead, InvalidHeader
+)
+from .packages.six import string_types as basestring, PY3
+from .packages.six.moves import http_client as httplib
+from .connection import HTTPException, BaseSSLError
+from .util.response import is_fp_closed, is_response_to_head
+
+log = logging.getLogger(__name__)
+
+
+class DeflateDecoder(object):
+
+    def __init__(self):
+        self._first_try = True
+        self._data = b''
+        self._obj = zlib.decompressobj()
+
+    def __getattr__(self, name):
+        return getattr(self._obj, name)
+
+    def decompress(self, data):
+        if not data:
+            return data
+
+        if not self._first_try:
+            return self._obj.decompress(data)
+
+        self._data += data
+        try:
+            decompressed = self._obj.decompress(data)
+            if decompressed:
+                self._first_try = False
+                self._data = None
+            return decompressed
+        except zlib.error:
+            self._first_try = False
+            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
+            try:
+                return self.decompress(self._data)
+            finally:
+                self._data = None
+
+
+class GzipDecoderState(object):
+
+    FIRST_MEMBER = 0
+    OTHER_MEMBERS = 1
+    SWALLOW_DATA = 2
+
+
+class GzipDecoder(object):
+
+    def __init__(self):
+        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+        self._state = GzipDecoderState.FIRST_MEMBER
+
+    def __getattr__(self, name):
+        return getattr(self._obj, name)
+
+    def decompress(self, data):
+        ret = bytearray()
+        if self._state == GzipDecoderState.SWALLOW_DATA or not data:
+            return bytes(ret)
+        while True:
+            try:
+                ret += self._obj.decompress(data)
+            except zlib.error:
+                previous_state = self._state
+                # Ignore data after the first error
+                self._state = GzipDecoderState.SWALLOW_DATA
+                if previous_state == GzipDecoderState.OTHER_MEMBERS:
+                    # Allow trailing garbage acceptable in other gzip clients
+                    return bytes(ret)
+                raise
+            data = self._obj.unused_data
+            if not data:
+                return bytes(ret)
+            self._state = GzipDecoderState.OTHER_MEMBERS
+            self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+
+class MultiDecoder(object):
+    """
+    From RFC 7231:
+        If one or more encodings have been applied to a representation, the
+        sender that applied the encodings MUST generate a Content-Encoding
+        header field that lists the content codings in the order in which
+        they were applied.
+    """
+
+    def __init__(self, modes):
+        self._decoders = [_get_decoder(m.strip()) for m in modes.split(',')]
+
+    def flush(self):
+        return self._decoders[0].flush()
+
+    def decompress(self, data):
+        for d in reversed(self._decoders):
+            data = d.decompress(data)
+        return data
+
+
+def _get_decoder(mode):
+    if ',' in mode:
+        return MultiDecoder(mode)
+
+    if mode == 'gzip':
+        return GzipDecoder()
+
+    return DeflateDecoder()
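+# Selection sketch:
+#
+#     _get_decoder('gzip')           # -> GzipDecoder
+#     _get_decoder('deflate')        # -> DeflateDecoder
+#     _get_decoder('gzip, deflate')  # -> MultiDecoder, which decompresses
+#                                    #    in reverse of the order the
+#                                    #    codings were applied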
+
+
+class HTTPResponse(io.IOBase):
+    """
+    HTTP Response container.
+
+    Backwards-compatible with httplib's HTTPResponse, but the response ``body`` is
+    loaded and decoded on-demand when the ``data`` property is accessed.  This
+    class is also compatible with the Python standard library's :mod:`io`
+    module, and can hence be treated as a readable object in the context of that
+    framework.
+
+    Extra parameters for behaviour not present in httplib.HTTPResponse:
+
+    :param preload_content:
+        If True, the response's body will be preloaded during construction.
+
+    :param decode_content:
+        If True, will attempt to decode the body based on the
+        'content-encoding' header.
+
+    :param original_response:
+        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
+        object, it's convenient to include the original for debug purposes. It's
+        otherwise unused.
+
+    :param retries:
+        The retries contains the last :class:`~urllib3.util.retry.Retry` that
+        was used during the request.
+
+    :param enforce_content_length:
+        Enforce content length checking. Body returned by server must match
+        value of Content-Length header, if present. Otherwise, raise error.
+    """
+
+    CONTENT_DECODERS = ['gzip', 'deflate']
+    REDIRECT_STATUSES = [301, 302, 303, 307, 308]
+
+    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
+                 strict=0, preload_content=True, decode_content=True,
+                 original_response=None, pool=None, connection=None, msg=None,
+                 retries=None, enforce_content_length=False,
+                 request_method=None, request_url=None):
+
+        if isinstance(headers, HTTPHeaderDict):
+            self.headers = headers
+        else:
+            self.headers = HTTPHeaderDict(headers)
+        self.status = status
+        self.version = version
+        self.reason = reason
+        self.strict = strict
+        self.decode_content = decode_content
+        self.retries = retries
+        self.enforce_content_length = enforce_content_length
+
+        self._decoder = None
+        self._body = None
+        self._fp = None
+        self._original_response = original_response
+        self._fp_bytes_read = 0
+        self.msg = msg
+        self._request_url = request_url
+
+        if body and isinstance(body, (basestring, bytes)):
+            self._body = body
+
+        self._pool = pool
+        self._connection = connection
+
+        if hasattr(body, 'read'):
+            self._fp = body
+
+        # Are we using the chunked-style of transfer encoding?
+        self.chunked = False
+        self.chunk_left = None
+        tr_enc = self.headers.get('transfer-encoding', '').lower()
+        # Don't incur the penalty of creating a list and then discarding it
+        encodings = (enc.strip() for enc in tr_enc.split(","))
+        if "chunked" in encodings:
+            self.chunked = True
+
+        # Determine length of response
+        self.length_remaining = self._init_length(request_method)
+
+        # If requested, preload the body.
+        if preload_content and not self._body:
+            self._body = self.read(decode_content=decode_content)
+
+    def get_redirect_location(self):
+        """
+        Should we redirect and where to?
+
+        :returns: Truthy redirect location string if we got a redirect status
+            code and valid location. ``None`` if redirect status and no
+            location. ``False`` if not a redirect status code.
+        """
+        if self.status in self.REDIRECT_STATUSES:
+            return self.headers.get('location')
+
+        return False
+
+    def release_conn(self):
+        if not self._pool or not self._connection:
+            return
+
+        self._pool._put_conn(self._connection)
+        self._connection = None
+
+    @property
+    def data(self):
+        # For backwards-compat with urllib3 0.4 and earlier.
+        if self._body:
+            return self._body
+
+        if self._fp:
+            return self.read(cache_content=True)
+
+    @property
+    def connection(self):
+        return self._connection
+
+    def isclosed(self):
+        return is_fp_closed(self._fp)
+
+    def tell(self):
+        """
+        Obtain the number of bytes pulled over the wire so far. May differ from
+        the amount of content returned by :meth:`HTTPResponse.read` if bytes
+        are encoded on the wire (e.g., compressed).
+        """
+        return self._fp_bytes_read
+
+    def _init_length(self, request_method):
+        """
+        Set initial length value for Response content if available.
+        """
+        length = self.headers.get('content-length')
+
+        if length is not None:
+            if self.chunked:
+                # This Response will fail with an IncompleteRead if it can't be
+                # received as chunked. This method falls back to attempt reading
+                # the response before raising an exception.
+                log.warning("Received response with both Content-Length and "
+                            "Transfer-Encoding set. This is expressly forbidden "
+                            "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
+                            "attempting to process response as Transfer-Encoding: "
+                            "chunked.")
+                return None
+
+            try:
+                # RFC 7230 section 3.3.2 specifies multiple content lengths can
+                # be sent in a single Content-Length header
+                # (e.g. Content-Length: 42, 42). This line ensures the values
+                # are all valid ints and that as long as the `set` length is 1,
+                # all values are the same. Otherwise, the header is invalid.
+                lengths = set([int(val) for val in length.split(',')])
+                if len(lengths) > 1:
+                    raise InvalidHeader("Content-Length contained multiple "
+                                        "mismatched values (%s)" % length)
+                length = lengths.pop()
+            except ValueError:
+                length = None
+            else:
+                if length < 0:
+                    length = None
+
+        # Convert status to int for comparison
+        # In some cases, httplib returns a status of "_UNKNOWN"
+        try:
+            status = int(self.status)
+        except ValueError:
+            status = 0
+
+        # Check for responses that shouldn't include a body
+        if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
+            length = 0
+
+        return length
+
+    def _init_decoder(self):
+        """
+        Set-up the _decoder attribute if necessary.
+        """
+        # Note: content-encoding value should be case-insensitive, per RFC 7230
+        # Section 3.2
+        content_encoding = self.headers.get('content-encoding', '').lower()
+        if self._decoder is None:
+            if content_encoding in self.CONTENT_DECODERS:
+                self._decoder = _get_decoder(content_encoding)
+            elif ',' in content_encoding:
+                encodings = [e.strip() for e in content_encoding.split(',') if e.strip() in self.CONTENT_DECODERS]
+                if len(encodings):
+                    self._decoder = _get_decoder(content_encoding)
+
+    def _decode(self, data, decode_content, flush_decoder):
+        """
+        Decode the data passed in and potentially flush the decoder.
+        """
+        try:
+            if decode_content and self._decoder:
+                data = self._decoder.decompress(data)
+        except (IOError, zlib.error) as e:
+            content_encoding = self.headers.get('content-encoding', '').lower()
+            raise DecodeError(
+                "Received response with content-encoding: %s, but "
+                "failed to decode it." % content_encoding, e)
+
+        if flush_decoder and decode_content:
+            data += self._flush_decoder()
+
+        return data
+
+    def _flush_decoder(self):
+        """
+        Flushes the decoder. Should only be called if the decoder is actually
+        being used.
+        """
+        if self._decoder:
+            buf = self._decoder.decompress(b'')
+            return buf + self._decoder.flush()
+
+        return b''
+
+    @contextmanager
+    def _error_catcher(self):
+        """
+        Catch low-level python exceptions, instead re-raising urllib3
+        variants, so that low-level exceptions are not leaked in the
+        high-level api.
+
+        On exit, release the connection back to the pool.
+        """
+        clean_exit = False
+
+        try:
+            try:
+                yield
+
+            except SocketTimeout:
+                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
+                # there is yet no clean way to get at it from this context.
+                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+            except BaseSSLError as e:
+                # FIXME: Is there a better way to differentiate between SSLErrors?
+                if 'read operation timed out' not in str(e):  # Defensive:
+                    # This shouldn't happen but just in case we're missing an edge
+                    # case, let's avoid swallowing SSL errors.
+                    raise
+
+                raise ReadTimeoutError(self._pool, None, 'Read timed out.')
+
+            except (HTTPException, SocketError) as e:
+                # This includes IncompleteRead.
+                raise ProtocolError('Connection broken: %r' % e, e)
+
+            # If no exception is thrown, we should avoid cleaning up
+            # unnecessarily.
+            clean_exit = True
+        finally:
+            # If we didn't terminate cleanly, we need to throw away our
+            # connection.
+            if not clean_exit:
+                # The response may not be closed but we're not going to use it
+                # anymore so close it now to ensure that the connection is
+                # released back to the pool.
+                if self._original_response:
+                    self._original_response.close()
+
+                # Closing the response may not actually be sufficient to close
+                # everything, so if we have a hold of the connection close that
+                # too.
+                if self._connection:
+                    self._connection.close()
+
+            # If we hold the original response but it's closed now, we should
+            # return the connection back to the pool.
+            if self._original_response and self._original_response.isclosed():
+                self.release_conn()
+
+    def read(self, amt=None, decode_content=None, cache_content=False):
+        """
+        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
+        parameters: ``decode_content`` and ``cache_content``.
+
+        :param amt:
+            How much of the content to read. If specified, caching is skipped
+            because it doesn't make sense to cache partial content as the full
+            response.
+
+        :param decode_content:
+            If True, will attempt to decode the body based on the
+            'content-encoding' header.
+
+        :param cache_content:
+            If True, will save the returned data such that the same result is
+            returned regardless of the state of the underlying file object. This
+            is useful if you want the ``.data`` property to continue working
+            after having ``.read()`` the file object. (Overridden if ``amt`` is
+            set.)
+        """
+        self._init_decoder()
+        if decode_content is None:
+            decode_content = self.decode_content
+
+        if self._fp is None:
+            return
+
+        flush_decoder = False
+        data = None
+
+        with self._error_catcher():
+            if amt is None:
+                # cStringIO doesn't like amt=None
+                data = self._fp.read()
+                flush_decoder = True
+            else:
+                cache_content = False
+                data = self._fp.read(amt)
+                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
+                    # Close the connection when no data is returned
+                    #
+                    # This is redundant to what httplib/http.client _should_
+                    # already do.  However, versions of python released before
+                    # December 15, 2012 (http://bugs.python.org/issue16298) do
+                    # not properly close the connection in all cases. There is
+                    # no harm in redundantly calling close.
+                    self._fp.close()
+                    flush_decoder = True
+                    if self.enforce_content_length and self.length_remaining not in (0, None):
+                        # This is an edge case that httplib failed to cover due
+                        # to concerns of backward compatibility. We're
+                        # addressing it here to make sure IncompleteRead is
+                        # raised during streaming, so all calls with incorrect
+                        # Content-Length are caught.
+                        raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
+
+        if data:
+            self._fp_bytes_read += len(data)
+            if self.length_remaining is not None:
+                self.length_remaining -= len(data)
+
+            data = self._decode(data, decode_content, flush_decoder)
+
+            if cache_content:
+                self._body = data
+
+        return data
+
+    def stream(self, amt=2**16, decode_content=None):
+        """
+        A generator wrapper for the read() method. A call will block until
+        ``amt`` bytes have been read from the connection or until the
+        connection is closed.
+
+        :param amt:
+            How much of the content to read. The generator will return up to
+            this much data per iteration, but may return less. This is particularly
+            likely when using compressed data. However, the empty string will
+            never be returned.
+
+        :param decode_content:
+            If True, will attempt to decode the body based on the
+            'content-encoding' header.
+        """
+        if self.chunked and self.supports_chunked_reads():
+            for line in self.read_chunked(amt, decode_content=decode_content):
+                yield line
+        else:
+            while not is_fp_closed(self._fp):
+                data = self.read(amt=amt, decode_content=decode_content)
+
+                if data:
+                    yield data
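+    # Streaming sketch (hedged; assumes the response was created with
+    # ``preload_content=False`` so the body is still on the socket):
+    #
+    #     with open('payload.bin', 'wb') as fh:
+    #         for chunk in resp.stream(2**16):
+    #             fh.write(chunk)
+    #     resp.release_conn()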
+
+    @classmethod
+    def from_httplib(ResponseCls, r, **response_kw):
+        """
+        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
+        corresponding :class:`urllib3.response.HTTPResponse` object.
+
+        Remaining parameters are passed to the HTTPResponse constructor, along
+        with ``original_response=r``.
+        """
+        headers = r.msg
+
+        if not isinstance(headers, HTTPHeaderDict):
+            if PY3:  # Python 3
+                headers = HTTPHeaderDict(headers.items())
+            else:  # Python 2
+                headers = HTTPHeaderDict.from_httplib(headers)
+
+        # HTTPResponse objects in Python 3 don't have a .strict attribute
+        strict = getattr(r, 'strict', 0)
+        resp = ResponseCls(body=r,
+                           headers=headers,
+                           status=r.status,
+                           version=r.version,
+                           reason=r.reason,
+                           strict=strict,
+                           original_response=r,
+                           **response_kw)
+        return resp
+
+    # Backwards-compatibility methods for httplib.HTTPResponse
+    def getheaders(self):
+        return self.headers
+
+    def getheader(self, name, default=None):
+        return self.headers.get(name, default)
+
+    # Backwards compatibility for http.cookiejar
+    def info(self):
+        return self.headers
+
+    # Overrides from io.IOBase
+    def close(self):
+        if not self.closed:
+            self._fp.close()
+
+        if self._connection:
+            self._connection.close()
+
+    @property
+    def closed(self):
+        if self._fp is None:
+            return True
+        elif hasattr(self._fp, 'isclosed'):
+            return self._fp.isclosed()
+        elif hasattr(self._fp, 'closed'):
+            return self._fp.closed
+        else:
+            return True
+
+    def fileno(self):
+        if self._fp is None:
+            raise IOError("HTTPResponse has no file to get a fileno from")
+        elif hasattr(self._fp, "fileno"):
+            return self._fp.fileno()
+        else:
+            raise IOError("The file-like object this HTTPResponse is wrapped "
+                          "around has no file descriptor")
+
+    def flush(self):
+        if self._fp is not None and hasattr(self._fp, 'flush'):
+            return self._fp.flush()
+
+    def readable(self):
+        # This method is required for `io` module compatibility.
+        return True
+
+    def readinto(self, b):
+        # This method is required for `io` module compatibility.
+        temp = self.read(len(b))
+        if len(temp) == 0:
+            return 0
+        else:
+            b[:len(temp)] = temp
+            return len(temp)
+
+    def supports_chunked_reads(self):
+        """
+        Checks if the underlying file-like object looks like an
+        httplib.HTTPResponse object. We do this by testing for the fp
+        attribute. If it is present we assume it returns raw chunks as
+        processed by read_chunked().
+        """
+        return hasattr(self._fp, 'fp')
+
+    def _update_chunk_length(self):
+        # First, we'll figure out length of a chunk and then
+        # we'll try to read it from socket.
+        if self.chunk_left is not None:
+            return
+        line = self._fp.fp.readline()
+        line = line.split(b';', 1)[0]
+        try:
+            self.chunk_left = int(line, 16)
+        except ValueError:
+            # Invalid chunked protocol response, abort.
+            self.close()
+            raise httplib.IncompleteRead(line)
+
+    def _handle_chunk(self, amt):
+        returned_chunk = None
+        if amt is None:
+            chunk = self._fp._safe_read(self.chunk_left)
+            returned_chunk = chunk
+            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
+            self.chunk_left = None
+        elif amt < self.chunk_left:
+            value = self._fp._safe_read(amt)
+            self.chunk_left = self.chunk_left - amt
+            returned_chunk = value
+        elif amt == self.chunk_left:
+            value = self._fp._safe_read(amt)
+            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
+            self.chunk_left = None
+            returned_chunk = value
+        else:  # amt > self.chunk_left
+            returned_chunk = self._fp._safe_read(self.chunk_left)
+            self._fp._safe_read(2)  # Toss the CRLF at the end of the chunk.
+            self.chunk_left = None
+        return returned_chunk
+
+    def read_chunked(self, amt=None, decode_content=None):
+        """
+        Similar to :meth:`HTTPResponse.read`, but with an additional
+        parameter: ``decode_content``.
+
+        :param amt:
+            How much of the content to read. If specified, caching is skipped
+            because it doesn't make sense to cache partial content as the full
+            response.
+
+        :param decode_content:
+            If True, will attempt to decode the body based on the
+            'content-encoding' header.
+        """
+        self._init_decoder()
+        # FIXME: Rewrite this method and make it a class with a better structured logic.
+        if not self.chunked:
+            raise ResponseNotChunked(
+                "Response is not chunked. "
+                "Header 'transfer-encoding: chunked' is missing.")
+        if not self.supports_chunked_reads():
+            raise BodyNotHttplibCompatible(
+                "Body should be httplib.HTTPResponse like. "
+                "It should have have an fp attribute which returns raw chunks.")
+
+        with self._error_catcher():
+            # Don't bother reading the body of a HEAD request.
+            if self._original_response and is_response_to_head(self._original_response):
+                self._original_response.close()
+                return
+
+            # If a response is already read and closed
+            # then return immediately.
+            if self._fp.fp is None:
+                return
+
+            while True:
+                self._update_chunk_length()
+                if self.chunk_left == 0:
+                    break
+                chunk = self._handle_chunk(amt)
+                decoded = self._decode(chunk, decode_content=decode_content,
+                                       flush_decoder=False)
+                if decoded:
+                    yield decoded
+
+            if decode_content:
+                # On CPython and PyPy, we should never need to flush the
+                # decoder. However, on Jython we *might* need to, so
+                # let's defensively do it anyway.
+                decoded = self._flush_decoder()
+                if decoded:  # Platform-specific: Jython.
+                    yield decoded
+
+            # Chunk content ends with \r\n: discard it.
+            while True:
+                line = self._fp.fp.readline()
+                if not line:
+                    # Some sites may not end with '\r\n'.
+                    break
+                if line == b'\r\n':
+                    break
+
+            # We read everything; close the "file".
+            if self._original_response:
+                self._original_response.close()
+
+    def geturl(self):
+        """
+        Returns the URL that was the source of this response.
+        If the request that generated this response redirected, this method
+        will return the final redirect location.
+        """
+        if self.retries is not None and len(self.retries.history):
+            return self.retries.history[-1].redirect_location
+        else:
+            return self._request_url
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__init__.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f2770b62277d59e83a0e696df370759e05a7b5c
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__init__.py
@@ -0,0 +1,54 @@
+from __future__ import absolute_import
+# For backwards compatibility, provide imports that used to be here.
+from .connection import is_connection_dropped
+from .request import make_headers
+from .response import is_fp_closed
+from .ssl_ import (
+    SSLContext,
+    HAS_SNI,
+    IS_PYOPENSSL,
+    IS_SECURETRANSPORT,
+    assert_fingerprint,
+    resolve_cert_reqs,
+    resolve_ssl_version,
+    ssl_wrap_socket,
+)
+from .timeout import (
+    current_time,
+    Timeout,
+)
+
+from .retry import Retry
+from .url import (
+    get_host,
+    parse_url,
+    split_first,
+    Url,
+)
+from .wait import (
+    wait_for_read,
+    wait_for_write
+)
+
+__all__ = (
+    'HAS_SNI',
+    'IS_PYOPENSSL',
+    'IS_SECURETRANSPORT',
+    'SSLContext',
+    'Retry',
+    'Timeout',
+    'Url',
+    'assert_fingerprint',
+    'current_time',
+    'is_connection_dropped',
+    'is_fp_closed',
+    'get_host',
+    'parse_url',
+    'make_headers',
+    'resolve_cert_reqs',
+    'resolve_ssl_version',
+    'split_first',
+    'ssl_wrap_socket',
+    'wait_for_read',
+    'wait_for_write'
+)
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4cd34911fa6bc801df0259fece328bb51e1ca9e7
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1cd0f6bfcfbb682ac44a63c37441fb273b61a609
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/connection.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9682cd4c813d71a2830db043fe5c9ee3dc7fbed
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/queue.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d6b50f34a89237f6d496b45ed0dc4a367e436cd5
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/request.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d4b6e419ea477eeb4fe5e3de80f893332b10384
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/response.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..83e11680c101e34a238d6dea62a2423710c9bfd8
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/retry.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d49ec1a4826575775e8384bee319fa5f98fabd28
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/ssl_.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..71839d2a10e9e86c1f76c8c5b56a981d9748f749
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/timeout.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2645755193815e693d36884aa321daabf34eae90
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/url.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d9a4a684b03bd54d4956a271d51de34db1f19734
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/__pycache__/wait.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/connection.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ad70b2f1ca8e575088626a345fd66fc3746f99c
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/connection.py
@@ -0,0 +1,134 @@
+from __future__ import absolute_import
+import socket
+from .wait import NoWayToWaitForSocketError, wait_for_read
+from ..contrib import _appengine_environ
+
+
+def is_connection_dropped(conn):  # Platform-specific
+    """
+    Returns True if the connection is dropped and should be closed.
+
+    :param conn:
+        :class:`httplib.HTTPConnection` object.
+
+    Note: For platforms like AppEngine, this will always return ``False`` to
+    let the platform handle connection recycling transparently for us.
+    """
+    sock = getattr(conn, 'sock', False)
+    if sock is False:  # Platform-specific: AppEngine
+        return False
+    if sock is None:  # Connection already closed (such as by httplib).
+        return True
+    try:
+        # Returns True if readable, which here means it's been dropped
+        return wait_for_read(sock, timeout=0.0)
+    except NoWayToWaitForSocketError:  # Platform-specific: AppEngine
+        return False
+
+
+# This function is copied from socket.py in the Python 2.7 standard
+# library test suite. The only addition to its signature is `socket_options`.
+# One additional modification is that we avoid binding to IPv6 servers
+# discovered in DNS if the system doesn't have IPv6 functionality.
+def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+                      source_address=None, socket_options=None):
+    """Connect to *address* and return the socket object.
+
+    Convenience function.  Connect to *address* (a 2-tuple ``(host,
+    port)``) and return the socket object.  Passing the optional
+    *timeout* parameter will set the timeout on the socket instance
+    before attempting to connect.  If no *timeout* is supplied, the
+    global default timeout setting returned by :func:`getdefaulttimeout`
+    is used.  If *source_address* is set it must be a tuple of (host, port)
+    for the socket to bind as a source address before making the connection.
+    A host of '' or port 0 tells the OS to use the default.
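+
+    Example (a sketch; the host, port and option values are illustrative)::
+
+        sock = create_connection(('example.com', 80), timeout=5.0,
+                                 socket_options=[(socket.IPPROTO_TCP,
+                                                  socket.TCP_NODELAY, 1)])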
+    """
+
+    host, port = address
+    if host.startswith('['):
+        host = host.strip('[]')
+    err = None
+
+    # Using the value from allowed_gai_family() in the context of getaddrinfo lets
+    # us select whether to work with IPv4 DNS records, IPv6 records, or both.
+    # The original create_connection function always returns all records.
+    family = allowed_gai_family()
+
+    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
+        af, socktype, proto, canonname, sa = res
+        sock = None
+        try:
+            sock = socket.socket(af, socktype, proto)
+
+            # If provided, set socket level options before connecting.
+            _set_socket_options(sock, socket_options)
+
+            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+                sock.settimeout(timeout)
+            if source_address:
+                sock.bind(source_address)
+            sock.connect(sa)
+            return sock
+
+        except socket.error as e:
+            err = e
+            if sock is not None:
+                sock.close()
+                sock = None
+
+    if err is not None:
+        raise err
+
+    raise socket.error("getaddrinfo returns an empty list")
+
+
+def _set_socket_options(sock, options):
+    if options is None:
+        return
+
+    for opt in options:
+        sock.setsockopt(*opt)
+
+
+def allowed_gai_family():
+    """This function is designed to work in the context of
+    getaddrinfo, where family=socket.AF_UNSPEC is the default and
+    will perform a DNS search for both IPv6 and IPv4 records."""
+
+    family = socket.AF_INET
+    if HAS_IPV6:
+        family = socket.AF_UNSPEC
+    return family
+
+
+def _has_ipv6(host):
+    """ Returns True if the system can bind an IPv6 address. """
+    sock = None
+    has_ipv6 = False
+
+    # App Engine doesn't support IPV6 sockets and actually has a quota on the
+    # number of sockets that can be used, so just early out here instead of
+    # creating a socket needlessly.
+    # See https://github.com/urllib3/urllib3/issues/1446
+    if _appengine_environ.is_appengine_sandbox():
+        return False
+
+    if socket.has_ipv6:
+        # has_ipv6 returns true if CPython was compiled with IPv6 support.
+        # It does not tell us if the system has IPv6 support enabled. To
+        # determine that we must bind to an IPv6 address.
+        # https://github.com/shazow/urllib3/pull/611
+        # https://bugs.python.org/issue658327
+        try:
+            sock = socket.socket(socket.AF_INET6)
+            sock.bind((host, 0))
+            has_ipv6 = True
+        except Exception:
+            pass
+
+    if sock:
+        sock.close()
+    return has_ipv6
+
+
+HAS_IPV6 = _has_ipv6('::1')
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/queue.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/queue.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3d379a19997b0c76c820a82e10b8fd36db9bea4
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/queue.py
@@ -0,0 +1,21 @@
+import collections
+from ..packages import six
+from ..packages.six.moves import queue
+
+if six.PY2:
+    # Queue is imported for side effects on MS Windows. See issue #229.
+    import Queue as _unused_module_Queue  # noqa: F401
+
+
+class LifoQueue(queue.Queue):
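+    """A last-in-first-out variant of ``queue.Queue`` backed by a
+    ``collections.deque``: ``get()`` returns the most recently ``put()``
+    item, so after ``q.put(1); q.put(2)``, ``q.get()`` returns ``2``.
+    """
+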
+    def _init(self, _):
+        self.queue = collections.deque()
+
+    def _qsize(self, len=len):
+        return len(self.queue)
+
+    def _put(self, item):
+        self.queue.append(item)
+
+    def _get(self):
+        return self.queue.pop()
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/request.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/request.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ddfcd559476bacf7d2471ee6f7b20a4b5e4dc0f
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/request.py
@@ -0,0 +1,118 @@
+from __future__ import absolute_import
+from base64 import b64encode
+
+from ..packages.six import b, integer_types
+from ..exceptions import UnrewindableBodyError
+
+ACCEPT_ENCODING = 'gzip,deflate'
+_FAILEDTELL = object()
+
+
+def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
+                 basic_auth=None, proxy_basic_auth=None, disable_cache=None):
+    """
+    Shortcuts for generating request headers.
+
+    :param keep_alive:
+        If ``True``, adds 'connection: keep-alive' header.
+
+    :param accept_encoding:
+        Can be a boolean, list, or string.
+        ``True`` translates to 'gzip,deflate'.
+        A list will be joined with commas.
+        A string will be used as provided.
+
+    :param user_agent:
+        String representing the user-agent you want, such as
+        "python-urllib3/0.6"
+
+    :param basic_auth:
+        Colon-separated username:password string for 'authorization: basic ...'
+        auth header.
+
+    :param proxy_basic_auth:
+        Colon-separated username:password string for 'proxy-authorization: basic ...'
+        auth header.
+
+    :param disable_cache:
+        If ``True``, adds 'cache-control: no-cache' header.
+
+    Example::
+
+        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
+        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
+        >>> make_headers(accept_encoding=True)
+        {'accept-encoding': 'gzip,deflate'}
+    """
+    headers = {}
+    if accept_encoding:
+        if isinstance(accept_encoding, str):
+            pass
+        elif isinstance(accept_encoding, list):
+            accept_encoding = ','.join(accept_encoding)
+        else:
+            accept_encoding = ACCEPT_ENCODING
+        headers['accept-encoding'] = accept_encoding
+
+    if user_agent:
+        headers['user-agent'] = user_agent
+
+    if keep_alive:
+        headers['connection'] = 'keep-alive'
+
+    if basic_auth:
+        headers['authorization'] = 'Basic ' + \
+            b64encode(b(basic_auth)).decode('utf-8')
+
+    if proxy_basic_auth:
+        headers['proxy-authorization'] = 'Basic ' + \
+            b64encode(b(proxy_basic_auth)).decode('utf-8')
+
+    if disable_cache:
+        headers['cache-control'] = 'no-cache'
+
+    return headers
+
+
+def set_file_position(body, pos):
+    """
+    If a position is provided, move file to that point.
+    Otherwise, we'll attempt to record a position for future use.
+    """
+    if pos is not None:
+        rewind_body(body, pos)
+    elif getattr(body, 'tell', None) is not None:
+        try:
+            pos = body.tell()
+        except (IOError, OSError):
+            # This differentiates from None, allowing us to catch
+            # a failed `tell()` later when trying to rewind the body.
+            pos = _FAILEDTELL
+
+    return pos
+
+
+def rewind_body(body, body_pos):
+    """
+    Attempt to rewind body to a certain position.
+    Primarily used for request redirects and retries.
+
+    :param body:
+        File-like object that supports seek.
+
+    :param int body_pos:
+        Position to seek to in file.
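+
+    Example (a sketch; ``body`` is any seekable file-like object)::
+
+        pos = set_file_position(body, None)  # record the position up front
+        # ... the request fails and is about to be retried ...
+        rewind_body(body, pos)               # seek back before resending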
+    """
+    body_seek = getattr(body, 'seek', None)
+    if body_seek is not None and isinstance(body_pos, integer_types):
+        try:
+            body_seek(body_pos)
+        except (IOError, OSError):
+            raise UnrewindableBodyError("An error occurred when rewinding request "
+                                        "body for redirect/retry.")
+    elif body_pos is _FAILEDTELL:
+        raise UnrewindableBodyError("Unable to record file position for rewinding "
+                                    "request body during a redirect/retry.")
+    else:
+        raise ValueError("body_pos must be of type integer, "
+                         "instead it was %s." % type(body_pos))
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/response.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/response.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d5486485a647bc7f3ed75a184117abdbe55d733
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/response.py
@@ -0,0 +1,87 @@
+from __future__ import absolute_import
+from ..packages.six.moves import http_client as httplib
+
+from ..exceptions import HeaderParsingError
+
+
+def is_fp_closed(obj):
+    """
+    Checks whether a given file-like object is closed.
+
+    :param obj:
+        The file-like object to check.
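+
+    Example (a sketch)::
+
+        import io
+        buf = io.BytesIO(b'data')
+        is_fp_closed(buf)  # False
+        buf.close()
+        is_fp_closed(buf)  # True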
+    """
+
+    try:
+        # Check `isclosed()` first, in case Python3 doesn't set `closed`.
+        # GH Issue #928
+        return obj.isclosed()
+    except AttributeError:
+        pass
+
+    try:
+        # Check via the official file-like-object way.
+        return obj.closed
+    except AttributeError:
+        pass
+
+    try:
+        # Check if the object is a container for another file-like object that
+        # gets released on exhaustion (e.g. HTTPResponse).
+        return obj.fp is None
+    except AttributeError:
+        pass
+
+    raise ValueError("Unable to determine whether fp is closed.")
+
+
+def assert_header_parsing(headers):
+    """
+    Asserts whether all headers have been successfully parsed.
+    Extracts encountered errors from the result of parsing headers.
+
+    Only works on Python 3.
+
+    :param headers: Headers to verify.
+    :type headers: `httplib.HTTPMessage`.
+
+    :raises urllib3.exceptions.HeaderParsingError:
+        If parsing errors are found.
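+
+    Example (a sketch; assumes ``resp`` is an ``httplib.HTTPResponse``)::
+
+        assert_header_parsing(resp.msg)  # raises HeaderParsingError on defects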
+    """
+
+    # This will fail silently if we pass in the wrong kind of parameter.
+    # To make debugging easier add an explicit check.
+    if not isinstance(headers, httplib.HTTPMessage):
+        raise TypeError('expected httplib.HTTPMessage, got {0}.'.format(
+            type(headers)))
+
+    defects = getattr(headers, 'defects', None)
+    get_payload = getattr(headers, 'get_payload', None)
+
+    unparsed_data = None
+    if get_payload:
+        # get_payload is actually email.message.Message.get_payload;
+        # we're only interested in the result if it's not a multipart message
+        if not headers.is_multipart():
+            payload = get_payload()
+
+            if isinstance(payload, (bytes, str)):
+                unparsed_data = payload
+
+    if defects or unparsed_data:
+        raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
+
+
+def is_response_to_head(response):
+    """
+    Checks whether the request that produced this response was a HEAD request.
+    Handles the quirks of AppEngine.
+
+    :param response: Response to check.
+    :type response: :class:`httplib.HTTPResponse`
+    """
+    # FIXME: Can we do this somehow without accessing private httplib _method?
+    method = response._method
+    if isinstance(method, int):  # Platform-specific: Appengine
+        return method == 3
+    return method.upper() == 'HEAD'
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/retry.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/retry.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7d0abd610b956235521e3dcf97c553155d18052
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/retry.py
@@ -0,0 +1,411 @@
+from __future__ import absolute_import
+import time
+import logging
+from collections import namedtuple
+from itertools import takewhile
+import email
+import re
+
+from ..exceptions import (
+    ConnectTimeoutError,
+    MaxRetryError,
+    ProtocolError,
+    ReadTimeoutError,
+    ResponseError,
+    InvalidHeader,
+)
+from ..packages import six
+
+
+log = logging.getLogger(__name__)
+
+
+# Data structure for representing the metadata of requests that result in a retry.
+RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
+                                               "status", "redirect_location"])
+
+
+class Retry(object):
+    """ Retry configuration.
+
+    Each retry attempt will create a new Retry object with updated values, so
+    they can be safely reused.
+
+    Retries can be defined as a default for a pool::
+
+        retries = Retry(connect=5, read=2, redirect=5)
+        http = PoolManager(retries=retries)
+        response = http.request('GET', 'http://example.com/')
+
+    Or per-request (which overrides the default for the pool)::
+
+        response = http.request('GET', 'http://example.com/', retries=Retry(10))
+
+    Retries can be disabled by passing ``False``::
+
+        response = http.request('GET', 'http://example.com/', retries=False)
+
+    Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
+    retries are disabled, in which case the causing exception will be raised.
+
+    :param int total:
+        Total number of retries to allow. Takes precedence over other counts.
+
+        Set to ``None`` to remove this constraint and fall back on other
+        counts. It's a good idea to set this to some sensibly-high value to
+        account for unexpected edge cases and avoid infinite retry loops.
+
+        Set to ``0`` to fail on the first retry.
+
+        Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+    :param int connect:
+        How many connection-related errors to retry on.
+
+        These are errors raised before the request is sent to the remote
+        server, so we assume the server has not begun processing the request.
+
+        Set to ``0`` to fail on the first retry of this type.
+
+    :param int read:
+        How many times to retry on read errors.
+
+        These errors are raised after the request was sent to the server, so the
+        request may have side-effects.
+
+        Set to ``0`` to fail on the first retry of this type.
+
+    :param int redirect:
+        How many redirects to perform. Limit this to avoid infinite redirect
+        loops.
+
+        A redirect is an HTTP response with a status code of 301, 302, 303,
+        307 or 308.
+
+        Set to ``0`` to fail on the first retry of this type.
+
+        Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+    :param int status:
+        How many times to retry on bad status codes.
+
+        These are retries made on responses, where status code matches
+        ``status_forcelist``.
+
+        Set to ``0`` to fail on the first retry of this type.
+
+    :param iterable method_whitelist:
+        Set of uppercased HTTP method verbs that we should retry on.
+
+        By default, we only retry on methods which are considered to be
+        idempotent (multiple requests with the same parameters end with the
+        same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
+
+        Set to a ``False`` value to retry on any verb.
+
+    :param iterable status_forcelist:
+        A set of integer HTTP status codes that we should force a retry on.
+        A retry is initiated if the request method is in ``method_whitelist``
+        and the response status code is in ``status_forcelist``.
+
+        By default, this is disabled with ``None``.
+
+    :param float backoff_factor:
+        A backoff factor to apply between attempts after the second try
+        (most errors are resolved immediately by a second try without a
+        delay). urllib3 will sleep for::
+
+            {backoff factor} * (2 ** ({number of total retries} - 1))
+
+        seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
+        for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
+        than :attr:`Retry.BACKOFF_MAX`.
+
+        By default, backoff is disabled (set to 0).
+
+    :param bool raise_on_redirect: Whether, if the number of redirects is
+        exhausted, to raise a MaxRetryError, or to return a response with a
+        response code in the 3xx range.
+
+    :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
+        whether we should raise an exception, or return a response,
+        if status falls in ``status_forcelist`` range and retries have
+        been exhausted.
+
+    :param tuple history: The history of the request encountered during
+        each call to :meth:`~Retry.increment`. The list is in the order
+        the requests occurred. Each list item is of class :class:`RequestHistory`.
+
+    :param bool respect_retry_after_header:
+        Whether to respect the Retry-After header on status codes defined in
+        :attr:`Retry.RETRY_AFTER_STATUS_CODES`.
+
+    :param iterable remove_headers_on_redirect:
+        Sequence of headers to remove from the request when a response
+        indicating a redirect is returned before firing off the redirected
+        request.
+    """
+
+    DEFAULT_METHOD_WHITELIST = frozenset([
+        'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
+
+    RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
+
+    DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(['Authorization'])
+
+    #: Maximum backoff time.
+    BACKOFF_MAX = 120
+
+    def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
+                 method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
+                 backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
+                 history=None, respect_retry_after_header=True,
+                 remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST):
+
+        self.total = total
+        self.connect = connect
+        self.read = read
+        self.status = status
+
+        if redirect is False or total is False:
+            redirect = 0
+            raise_on_redirect = False
+
+        self.redirect = redirect
+        self.status_forcelist = status_forcelist or set()
+        self.method_whitelist = method_whitelist
+        self.backoff_factor = backoff_factor
+        self.raise_on_redirect = raise_on_redirect
+        self.raise_on_status = raise_on_status
+        self.history = history or tuple()
+        self.respect_retry_after_header = respect_retry_after_header
+        self.remove_headers_on_redirect = remove_headers_on_redirect
+
+    def new(self, **kw):
+        params = dict(
+            total=self.total,
+            connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
+            method_whitelist=self.method_whitelist,
+            status_forcelist=self.status_forcelist,
+            backoff_factor=self.backoff_factor,
+            raise_on_redirect=self.raise_on_redirect,
+            raise_on_status=self.raise_on_status,
+            history=self.history,
+            remove_headers_on_redirect=self.remove_headers_on_redirect
+        )
+        params.update(kw)
+        return type(self)(**params)
+
+    @classmethod
+    def from_int(cls, retries, redirect=True, default=None):
+        """ Backwards-compatibility for the old retries format."""
+        if retries is None:
+            retries = default if default is not None else cls.DEFAULT
+
+        if isinstance(retries, Retry):
+            return retries
+
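+        # A truthy ``redirect`` collapses to None (no separate redirect
+        # limit; ``total`` still applies), while a falsy value passes
+        # through as False and disables redirects entirely.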
+        redirect = bool(redirect) and None
+        new_retries = cls(retries, redirect=redirect)
+        log.debug("Converted retries value: %r -> %r", retries, new_retries)
+        return new_retries
+
+    def get_backoff_time(self):
+        """ Formula for computing the current backoff
+
+        :rtype: float
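+
+        Example (a sketch): with ``backoff_factor=0.5`` and three
+        consecutive errors recorded in the history, this returns
+        ``0.5 * (2 ** (3 - 1)) == 2.0`` seconds, capped at
+        :attr:`BACKOFF_MAX`.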
+        """
+        # We want to consider only the last sequence of consecutive errors (ignoring redirects).
+        consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
+                                                    reversed(self.history))))
+        if consecutive_errors_len <= 1:
+            return 0
+
+        backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
+        return min(self.BACKOFF_MAX, backoff_value)
+
+    def parse_retry_after(self, retry_after):
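+        """ Parse a ``Retry-After`` header value into seconds (>= 0).
+
+        Accepts both forms allowed by RFC 7231: delta-seconds (e.g. "120")
+        and an HTTP-date, which is converted to seconds from now.
+        """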
+        # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
+        if re.match(r"^\s*[0-9]+\s*$", retry_after):
+            seconds = int(retry_after)
+        else:
+            retry_date_tuple = email.utils.parsedate(retry_after)
+            if retry_date_tuple is None:
+                raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
+            retry_date = time.mktime(retry_date_tuple)
+            seconds = retry_date - time.time()
+
+        if seconds < 0:
+            seconds = 0
+
+        return seconds
+
+    def get_retry_after(self, response):
+        """ Get the value of Retry-After in seconds. """
+
+        retry_after = response.getheader("Retry-After")
+
+        if retry_after is None:
+            return None
+
+        return self.parse_retry_after(retry_after)
+
+    def sleep_for_retry(self, response=None):
+        retry_after = self.get_retry_after(response)
+        if retry_after:
+            time.sleep(retry_after)
+            return True
+
+        return False
+
+    def _sleep_backoff(self):
+        backoff = self.get_backoff_time()
+        if backoff <= 0:
+            return
+        time.sleep(backoff)
+
+    def sleep(self, response=None):
+        """ Sleep between retry attempts.
+
+        This method will respect a server's ``Retry-After`` response header
+        and sleep the duration of the time requested. If that is not present, it
+        will use an exponential backoff. By default, the backoff factor is 0 and
+        this method will return immediately.
+        """
+
+        if response:
+            slept = self.sleep_for_retry(response)
+            if slept:
+                return
+
+        self._sleep_backoff()
+
+    def _is_connection_error(self, err):
+        """ Errors when we're fairly sure that the server did not receive the
+        request, so it should be safe to retry.
+        """
+        return isinstance(err, ConnectTimeoutError)
+
+    def _is_read_error(self, err):
+        """ Errors that occur after the request has been started, so we should
+        assume that the server began processing it.
+        """
+        return isinstance(err, (ReadTimeoutError, ProtocolError))
+
+    def _is_method_retryable(self, method):
+        """ Checks if a given HTTP method should be retried upon, depending if
+        it is included on the method whitelist.
+        """
+        if self.method_whitelist and method.upper() not in self.method_whitelist:
+            return False
+
+        return True
+
+    def is_retry(self, method, status_code, has_retry_after=False):
+        """ Is this method/status code retryable? (Based on whitelists and control
+        variables such as the number of total retries to allow, whether to
+        respect the Retry-After header, whether this header is present, and
+        whether the returned status code is on the list of status codes to
+        be retried upon on the presence of the aforementioned header)
+        """
+        if not self._is_method_retryable(method):
+            return False
+
+        if self.status_forcelist and status_code in self.status_forcelist:
+            return True
+
+        return (self.total and self.respect_retry_after_header and
+                has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
+
+    def is_exhausted(self):
+        """ Are we out of retries? """
+        retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
+        retry_counts = list(filter(None, retry_counts))
+        if not retry_counts:
+            return False
+
+        return min(retry_counts) < 0
+
+    def increment(self, method=None, url=None, response=None, error=None,
+                  _pool=None, _stacktrace=None):
+        """ Return a new Retry object with incremented retry counters.
+
+        :param response: A response object, or None, if the server did not
+            return a response.
+        :type response: :class:`~urllib3.response.HTTPResponse`
+        :param Exception error: An error encountered during the request, or
+            None if the response was received successfully.
+
+        :return: A new ``Retry`` object.
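+
+        Example (a sketch; ``err`` is a caught request error)::
+
+            retries = Retry(total=2)
+            retries = retries.increment(method='GET', url='/', error=err)
+            # retries.total is now 1; MaxRetryError is raised once exhausted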
+        """
+        if self.total is False and error:
+            # Disabled, indicate to re-raise the error.
+            raise six.reraise(type(error), error, _stacktrace)
+
+        total = self.total
+        if total is not None:
+            total -= 1
+
+        connect = self.connect
+        read = self.read
+        redirect = self.redirect
+        status_count = self.status
+        cause = 'unknown'
+        status = None
+        redirect_location = None
+
+        if error and self._is_connection_error(error):
+            # Connect retry?
+            if connect is False:
+                raise six.reraise(type(error), error, _stacktrace)
+            elif connect is not None:
+                connect -= 1
+
+        elif error and self._is_read_error(error):
+            # Read retry?
+            if read is False or not self._is_method_retryable(method):
+                raise six.reraise(type(error), error, _stacktrace)
+            elif read is not None:
+                read -= 1
+
+        elif response and response.get_redirect_location():
+            # Redirect retry?
+            if redirect is not None:
+                redirect -= 1
+            cause = 'too many redirects'
+            redirect_location = response.get_redirect_location()
+            status = response.status
+
+        else:
+            # Incrementing because of a server error, e.g. a 500 in
+            # status_forcelist while the given method is in the whitelist.
+            cause = ResponseError.GENERIC_ERROR
+            if response and response.status:
+                if status_count is not None:
+                    status_count -= 1
+                cause = ResponseError.SPECIFIC_ERROR.format(
+                    status_code=response.status)
+                status = response.status
+
+        history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
+
+        new_retry = self.new(
+            total=total,
+            connect=connect, read=read, redirect=redirect, status=status_count,
+            history=history)
+
+        if new_retry.is_exhausted():
+            raise MaxRetryError(_pool, url, error or ResponseError(cause))
+
+        log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
+
+        return new_retry
+
+    def __repr__(self):
+        return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
+                'read={self.read}, redirect={self.redirect}, status={self.status})').format(
+                    cls=type(self), self=self)
+
+
+# For backwards compatibility (equivalent to pre-v1.9):
+Retry.DEFAULT = Retry(3)
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/ssl_.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/ssl_.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfc553ff41eb16b2cb71a1568cd2541bd122ab38
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/ssl_.py
@@ -0,0 +1,381 @@
+from __future__ import absolute_import
+import errno
+import warnings
+import hmac
+import socket
+
+from binascii import hexlify, unhexlify
+from hashlib import md5, sha1, sha256
+
+from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
+from ..packages import six
+
+
+SSLContext = None
+HAS_SNI = False
+IS_PYOPENSSL = False
+IS_SECURETRANSPORT = False
+
+# Maps the length of a digest to a possible hash function producing this digest
+HASHFUNC_MAP = {
+    32: md5,
+    40: sha1,
+    64: sha256,
+}
+
+
+def _const_compare_digest_backport(a, b):
+    """
+    Compare two digests of equal length in constant time.
+
+    The digests must be of type str/bytes.
+    Returns True if the digests match, and False otherwise.
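+
+    Accumulating the XOR of each byte pair with ``|=`` keeps the running
+    time independent of where the first mismatch occurs.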
+    """
+    result = abs(len(a) - len(b))
+    for l, r in zip(bytearray(a), bytearray(b)):
+        result |= l ^ r
+    return result == 0
+
+
+_const_compare_digest = getattr(hmac, 'compare_digest',
+                                _const_compare_digest_backport)
+
+
+try:  # Test for SSL features
+    import ssl
+    from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
+    from ssl import HAS_SNI  # Has SNI?
+except ImportError:
+    pass
+
+
+try:
+    from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
+except ImportError:
+    OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
+    OP_NO_COMPRESSION = 0x20000
+
+
+# Python 2.7 doesn't have inet_pton on non-Linux, so we fall back on
+# inet_aton in those cases, which means we can only detect IPv4 addresses.
+if hasattr(socket, 'inet_pton'):
+    inet_pton = socket.inet_pton
+else:
+    # Maybe we can use ipaddress if the user has urllib3[secure]?
+    try:
+        from pip._vendor import ipaddress
+
+        def inet_pton(_, host):
+            if isinstance(host, bytes):
+                host = host.decode('ascii')
+            return ipaddress.ip_address(host)
+
+    except ImportError:  # Platform-specific: Non-Linux
+        def inet_pton(_, host):
+            return socket.inet_aton(host)
+
+
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - prefer TLS 1.3 cipher suites,
+# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
+#   security,
+# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
+# - disable NULL authentication, MD5 MACs and DSS for security reasons.
+DEFAULT_CIPHERS = ':'.join([
+    'TLS13-AES-256-GCM-SHA384',
+    'TLS13-CHACHA20-POLY1305-SHA256',
+    'TLS13-AES-128-GCM-SHA256',
+    'ECDH+AESGCM',
+    'ECDH+CHACHA20',
+    'DH+AESGCM',
+    'DH+CHACHA20',
+    'ECDH+AES256',
+    'DH+AES256',
+    'ECDH+AES128',
+    'DH+AES',
+    'RSA+AESGCM',
+    'RSA+AES',
+    '!aNULL',
+    '!eNULL',
+    '!MD5',
+])
+
+try:
+    from ssl import SSLContext  # Modern SSL?
+except ImportError:
+    import sys
+
+    class SSLContext(object):  # Platform-specific: Python 2
+        def __init__(self, protocol_version):
+            self.protocol = protocol_version
+            # Use default values from a real SSLContext
+            self.check_hostname = False
+            self.verify_mode = ssl.CERT_NONE
+            self.ca_certs = None
+            self.options = 0
+            self.certfile = None
+            self.keyfile = None
+            self.ciphers = None
+
+        def load_cert_chain(self, certfile, keyfile):
+            self.certfile = certfile
+            self.keyfile = keyfile
+
+        def load_verify_locations(self, cafile=None, capath=None):
+            self.ca_certs = cafile
+
+            if capath is not None:
+                raise SSLError("CA directories not supported in older Pythons")
+
+        def set_ciphers(self, cipher_suite):
+            self.ciphers = cipher_suite
+
+        def wrap_socket(self, socket, server_hostname=None, server_side=False):
+            warnings.warn(
+                'A true SSLContext object is not available. This prevents '
+                'urllib3 from configuring SSL appropriately and may cause '
+                'certain SSL connections to fail. You can upgrade to a newer '
+                'version of Python to solve this. For more information, see '
+                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+                '#ssl-warnings',
+                InsecurePlatformWarning
+            )
+            kwargs = {
+                'keyfile': self.keyfile,
+                'certfile': self.certfile,
+                'ca_certs': self.ca_certs,
+                'cert_reqs': self.verify_mode,
+                'ssl_version': self.protocol,
+                'server_side': server_side,
+            }
+            return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
+
+
+def assert_fingerprint(cert, fingerprint):
+    """
+    Checks if given fingerprint matches the supplied certificate.
+
+    :param cert:
+        Certificate as bytes object.
+    :param fingerprint:
+        Fingerprint as string of hexdigits, can be interspersed by colons.
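+
+    Note: the hash function is chosen by the fingerprint's length via
+    ``HASHFUNC_MAP``: 32 hex digits selects MD5, 40 SHA-1, 64 SHA-256.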
+    """
+
+    fingerprint = fingerprint.replace(':', '').lower()
+    digest_length = len(fingerprint)
+    hashfunc = HASHFUNC_MAP.get(digest_length)
+    if not hashfunc:
+        raise SSLError(
+            'Fingerprint of invalid length: {0}'.format(fingerprint))
+
+    # We need encode() here for py32; works on py2 and py3.
+    fingerprint_bytes = unhexlify(fingerprint.encode())
+
+    cert_digest = hashfunc(cert).digest()
+
+    if not _const_compare_digest(cert_digest, fingerprint_bytes):
+        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
+                       .format(fingerprint, hexlify(cert_digest)))
+
+
+def resolve_cert_reqs(candidate):
+    """
+    Resolves the argument to a numeric constant, which can be passed to
+    the wrap_socket function/method from the ssl module.
+    Defaults to :data:`ssl.CERT_NONE`.
+    If given a string it is assumed to be the name of the constant in the
+    :mod:`ssl` module or its abbreviation.
+    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
+    If it's neither `None` nor a string we assume it is already the numeric
+    constant which can directly be passed to wrap_socket.
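+
+    Example (a sketch)::
+
+        resolve_cert_reqs(None)             # ssl.CERT_NONE
+        resolve_cert_reqs('REQUIRED')       # ssl.CERT_REQUIRED
+        resolve_cert_reqs('CERT_REQUIRED')  # ssl.CERT_REQUIRED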
+    """
+    if candidate is None:
+        return CERT_NONE
+
+    if isinstance(candidate, str):
+        res = getattr(ssl, candidate, None)
+        if res is None:
+            res = getattr(ssl, 'CERT_' + candidate)
+        return res
+
+    return candidate
+
+
+def resolve_ssl_version(candidate):
+    """
+    Like resolve_cert_reqs, but for the SSL protocol version: resolves
+    ``None`` to :data:`ssl.PROTOCOL_SSLv23` and a string name to the
+    matching ``ssl.PROTOCOL_*`` constant.
+    """
+    if candidate is None:
+        return PROTOCOL_SSLv23
+
+    if isinstance(candidate, str):
+        res = getattr(ssl, candidate, None)
+        if res is None:
+            res = getattr(ssl, 'PROTOCOL_' + candidate)
+        return res
+
+    return candidate
+
+
+def create_urllib3_context(ssl_version=None, cert_reqs=None,
+                           options=None, ciphers=None):
+    """All arguments have the same meaning as ``ssl_wrap_socket``.
+
+    By default, this function does a lot of the same work that
+    ``ssl.create_default_context`` does on Python 3.4+. It:
+
+    - Disables SSLv2, SSLv3, and compression
+    - Sets a restricted set of server ciphers
+
+    If you wish to enable SSLv3, you can do::
+
+        from pip._vendor.urllib3.util import ssl_
+        context = ssl_.create_urllib3_context()
+        context.options &= ~ssl_.OP_NO_SSLv3
+
+    You can do the same to enable compression (substituting ``COMPRESSION``
+    for ``SSLv3`` in the last line above).
+
+    :param ssl_version:
+        The desired protocol version to use. This will default to
+        PROTOCOL_SSLv23 which will negotiate the highest protocol that both
+        the server and your installation of OpenSSL support.
+    :param cert_reqs:
+        Whether to require certificate verification. This defaults to
+        ``ssl.CERT_REQUIRED``.
+    :param options:
+        Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
+        ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
+    :param ciphers:
+        Which cipher suites to allow the server to select.
+    :returns:
+        Constructed SSLContext object with specified options
+    :rtype: SSLContext
+    """
+    context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
+
+    context.set_ciphers(ciphers or DEFAULT_CIPHERS)
+
+    # Setting the default here, as we may have no ssl module on import
+    cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
+
+    if options is None:
+        options = 0
+        # SSLv2 is easily broken and is considered harmful and dangerous
+        options |= OP_NO_SSLv2
+        # SSLv3 has several problems and is now dangerous
+        options |= OP_NO_SSLv3
+        # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
+        # (issue #309)
+        options |= OP_NO_COMPRESSION
+
+    context.options |= options
+
+    context.verify_mode = cert_reqs
+    if getattr(context, 'check_hostname', None) is not None:  # Platform-specific: Python 3.2
+        # We do our own verification, including fingerprints and alternative
+        # hostnames. So disable it here
+        context.check_hostname = False
+    return context
+
+
+def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
+                    ca_certs=None, server_hostname=None,
+                    ssl_version=None, ciphers=None, ssl_context=None,
+                    ca_cert_dir=None):
+    """
+    All arguments except for server_hostname, ssl_context, and ca_cert_dir have
+    the same meaning as they do when using :func:`ssl.wrap_socket`.
+
+    :param server_hostname:
+        When SNI is supported, the expected hostname of the certificate
+    :param ssl_context:
+        A pre-made :class:`SSLContext` object. If none is provided, one will
+        be created using :func:`create_urllib3_context`.
+    :param ciphers:
+        A string of ciphers we wish the client to support.
+    :param ca_cert_dir:
+        A directory containing CA certificates in multiple separate files, as
+        supported by OpenSSL's -CApath flag or the capath argument to
+        SSLContext.load_verify_locations().
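+
+    Example (a sketch; ``sock`` is a connected TCP socket, and the hostname
+    and CA bundle path are illustrative)::
+
+        tls_sock = ssl_wrap_socket(sock, ca_certs='/etc/ssl/certs/ca.pem',
+                                   server_hostname='example.com')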
+    """
+    context = ssl_context
+    if context is None:
+        # Note: This branch of code and all the variables in it are no longer
+        # used by urllib3 itself. We should consider deprecating and removing
+        # this code.
+        context = create_urllib3_context(ssl_version, cert_reqs,
+                                         ciphers=ciphers)
+
+    if ca_certs or ca_cert_dir:
+        try:
+            context.load_verify_locations(ca_certs, ca_cert_dir)
+        except IOError as e:  # Platform-specific: Python 2.7
+            raise SSLError(e)
+        # Py33 raises FileNotFoundError which subclasses OSError
+        # These are not equivalent unless we check the errno attribute
+        except OSError as e:  # Platform-specific: Python 3.3 and beyond
+            if e.errno == errno.ENOENT:
+                raise SSLError(e)
+            raise
+    elif getattr(context, 'load_default_certs', None) is not None:
+        # Try to load OS default certs; works well on Windows (requires Python 3.4+).
+        context.load_default_certs()
+
+    if certfile:
+        context.load_cert_chain(certfile, keyfile)
+
+    # If we detect server_hostname is an IP address then the SNI
+    # extension should not be used according to RFC3546 Section 3.1
+    # We shouldn't warn the user if SNI isn't available but we would
+    # not be using SNI anyways due to IP address for server_hostname.
+    if ((server_hostname is not None and not is_ipaddress(server_hostname))
+            or IS_SECURETRANSPORT):
+        if HAS_SNI and server_hostname is not None:
+            return context.wrap_socket(sock, server_hostname=server_hostname)
+
+        warnings.warn(
+            'An HTTPS request has been made, but the SNI (Server Name '
+            'Indication) extension to TLS is not available on this platform. '
+            'This may cause the server to present an incorrect TLS '
+            'certificate, which can cause validation failures. You can upgrade to '
+            'a newer version of Python to solve this. For more information, see '
+            'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
+            '#ssl-warnings',
+            SNIMissingWarning
+        )
+
+    return context.wrap_socket(sock)
+
+
+def is_ipaddress(hostname):
+    """Detects whether the hostname given is an IP address.
+
+    :param str hostname: Hostname to examine.
+    :return: True if the hostname is an IP address, False otherwise.
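+
+    Example (a sketch)::
+
+        is_ipaddress('10.0.0.1')     # True
+        is_ipaddress('2001:db8::1')  # True where AF_INET6 is available
+        is_ipaddress('example.com')  # False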
+    """
+    if six.PY3 and isinstance(hostname, bytes):
+        # IDN A-label bytes are ASCII compatible.
+        hostname = hostname.decode('ascii')
+
+    families = [socket.AF_INET]
+    if hasattr(socket, 'AF_INET6'):
+        families.append(socket.AF_INET6)
+
+    for af in families:
+        try:
+            inet_pton(af, hostname)
+        except (socket.error, ValueError, OSError):
+            pass
+        else:
+            return True
+    return False
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/timeout.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/timeout.py
new file mode 100644
index 0000000000000000000000000000000000000000..cec817e6efa53e830cb2b5877faa42013743d5db
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/timeout.py
@@ -0,0 +1,242 @@
+from __future__ import absolute_import
+# The default socket timeout, used by httplib to indicate that no timeout was
+# specified by the user
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+import time
+
+from ..exceptions import TimeoutStateError
+
+# A sentinel value to indicate that no timeout was specified by the user in
+# urllib3
+_Default = object()
+
+
+# Use time.monotonic if available.
+current_time = getattr(time, "monotonic", time.time)
+
+
+class Timeout(object):
+    """ Timeout configuration.
+
+    Timeouts can be defined as a default for a pool::
+
+        timeout = Timeout(connect=2.0, read=7.0)
+        http = PoolManager(timeout=timeout)
+        response = http.request('GET', 'http://example.com/')
+
+    Or per-request (which overrides the default for the pool)::
+
+        response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
+
+    Timeouts can be disabled by setting all the parameters to ``None``::
+
+        no_timeout = Timeout(connect=None, read=None)
+        response = http.request('GET', 'http://example.com/', timeout=no_timeout)
+
+
+    :param total:
+        This combines the connect and read timeouts into one; the read timeout
+        will be set to the time leftover from the connect attempt. In the
+        event that both a connect timeout and a total are specified, or a read
+        timeout and a total are specified, the shorter timeout will be applied.
+
+        Defaults to None.
+
+    :type total: integer, float, or None
+
+    :param connect:
+        The maximum amount of time to wait for a connection attempt to a server
+        to succeed. Omitting the parameter will default the connect timeout to
+        the system default, probably `the global default timeout in socket.py
+        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+        None will set an infinite timeout for connection attempts.
+
+    :type connect: integer, float, or None
+
+    :param read:
+        The maximum amount of time to wait between consecutive
+        read operations for a response from the server. Omitting
+        the parameter will default the read timeout to the system
+        default, probably `the global default timeout in socket.py
+        <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+        None will set an infinite timeout.
+
+    :type read: integer, float, or None
+
+    .. note::
+
+        Many factors can affect the total amount of time for urllib3 to return
+        an HTTP response.
+
+        For example, Python's DNS resolver does not obey the timeout specified
+        on the socket. Other factors that can affect total request time include
+        high CPU load, high swap, the program running at a low priority level,
+        or other behaviors.
+
+        In addition, the read and total timeouts only measure the time between
+        read operations on the socket connecting the client and the server,
+        not the total amount of time for the request to return a complete
+        response. For most requests, the timeout is raised because the server
+        has not sent the first byte in the specified time. This is not always
+        the case; if a server streams one byte every fifteen seconds, a timeout
+        of 20 seconds will not trigger, even though the request will take
+        several minutes to complete.
+
+        If your goal is to cut off any request after a set amount of wall clock
+        time, consider having a second "watcher" thread to cut off a slow
+        request.
+    """
+
+    #: A sentinel object representing the default timeout value
+    DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
+
+    def __init__(self, total=None, connect=_Default, read=_Default):
+        self._connect = self._validate_timeout(connect, 'connect')
+        self._read = self._validate_timeout(read, 'read')
+        self.total = self._validate_timeout(total, 'total')
+        self._start_connect = None
+
+    def __str__(self):
+        return '%s(connect=%r, read=%r, total=%r)' % (
+            type(self).__name__, self._connect, self._read, self.total)
+
+    @classmethod
+    def _validate_timeout(cls, value, name):
+        """ Check that a timeout attribute is valid.
+
+        :param value: The timeout value to validate
+        :param name: The name of the timeout attribute to validate. This is
+            used to specify in error messages.
+        :return: The validated and casted version of the given value.
+        :raises ValueError: If it is a numeric value less than or equal to
+            zero, or the type is not an integer, float, or None.
+        """
+        if value is _Default:
+            return cls.DEFAULT_TIMEOUT
+
+        if value is None or value is cls.DEFAULT_TIMEOUT:
+            return value
+
+        if isinstance(value, bool):
+            raise ValueError("Timeout cannot be a boolean value. It must "
+                             "be an int, float or None.")
+        try:
+            float(value)
+        except (TypeError, ValueError):
+            raise ValueError("Timeout value %s was %s, but it must be an "
+                             "int, float or None." % (name, value))
+
+        try:
+            if value <= 0:
+                raise ValueError("Attempted to set %s timeout to %s, but the "
+                                 "timeout cannot be set to a value less "
+                                 "than or equal to 0." % (name, value))
+        except TypeError:  # Python 3
+            raise ValueError("Timeout value %s was %s, but it must be an "
+                             "int, float or None." % (name, value))
+
+        return value
+
+    @classmethod
+    def from_float(cls, timeout):
+        """ Create a new Timeout from a legacy timeout value.
+
+        The timeout value used by httplib.py sets the same timeout on the
+        connect() and recv() socket operations. This creates a :class:`Timeout`
+        object that sets the individual timeouts to the ``timeout`` value
+        passed to this function.
+
+        :param timeout: The legacy timeout value.
+        :type timeout: integer, float, sentinel default object, or None
+        :return: Timeout object
+        :rtype: :class:`Timeout`
+        """
+        return Timeout(read=timeout, connect=timeout)
+
+    def clone(self):
+        """ Create a copy of the timeout object
+
+        Timeout properties are stored per-pool but each request needs a fresh
+        Timeout object to ensure each one has its own start/stop configured.
+
+        :return: a copy of the timeout object
+        :rtype: :class:`Timeout`
+        """
+        # We can't use copy.deepcopy because that will also create a new object
+        # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
+        # detect the user default.
+        return Timeout(connect=self._connect, read=self._read,
+                       total=self.total)
+
+    def start_connect(self):
+        """ Start the timeout clock, used during a connect() attempt
+
+        :raises urllib3.exceptions.TimeoutStateError: if you attempt
+            to start a timer that has been started already.
+        """
+        if self._start_connect is not None:
+            raise TimeoutStateError("Timeout timer has already been started.")
+        self._start_connect = current_time()
+        return self._start_connect
+
+    def get_connect_duration(self):
+        """ Gets the time elapsed since the call to :meth:`start_connect`.
+
+        :return: Elapsed time.
+        :rtype: float
+        :raises urllib3.exceptions.TimeoutStateError: if you attempt
+            to get duration for a timer that hasn't been started.
+        """
+        if self._start_connect is None:
+            raise TimeoutStateError("Can't get connect duration for timer "
+                                    "that has not started.")
+        return current_time() - self._start_connect
+
+    @property
+    def connect_timeout(self):
+        """ Get the value to use when setting a connection timeout.
+
+        This will be a positive float or integer, the value None
+        (never timeout), or the default system timeout.
+
+        :return: Connect timeout.
+        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+        """
+        if self.total is None:
+            return self._connect
+
+        if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
+            return self.total
+
+        return min(self._connect, self.total)
+
+    @property
+    def read_timeout(self):
+        """ Get the value for the read timeout.
+
+        This assumes some time has elapsed in the connection timeout and
+        computes the read timeout appropriately.
+
+        If self.total is set, the read timeout is dependent on the amount of
+        time taken by the connect timeout. If the connection time has not been
+        established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
+        raised.
+
+        :return: Value to use for the read timeout.
+        :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+        :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
+            has not yet been called on this object.
+        """
+        if (self.total is not None and
+                self.total is not self.DEFAULT_TIMEOUT and
+                self._read is not None and
+                self._read is not self.DEFAULT_TIMEOUT):
+            # In case the connect timeout has not yet been established.
+            if self._start_connect is None:
+                return self._read
+            return max(0, min(self.total - self.get_connect_duration(),
+                              self._read))
+        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+            return max(0, self.total - self.get_connect_duration())
+        else:
+            return self._read
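+
+
+# Editorial sketch, not part of upstream urllib3: a minimal demonstration of
+# how the Timeout object above behaves. The values (2s connect, 7s read, 10s
+# total) are arbitrary; guarded under __main__ so importing is unaffected.
+if __name__ == '__main__':
+    t = Timeout(connect=2.0, read=7.0, total=10.0)
+    print(t)                      # Timeout(connect=2.0, read=7.0, total=10.0)
+    print(t.connect_timeout)      # min(connect, total) -> 2.0
+    t.start_connect()             # start the clock used by read_timeout
+    print(t.read_timeout <= 7.0)  # True: the read budget shrinks as connect time elapses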
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/url.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/url.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b6f9968d7a106931ef315066d0ec156a1823112
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/url.py
@@ -0,0 +1,230 @@
+from __future__ import absolute_import
+from collections import namedtuple
+
+from ..exceptions import LocationParseError
+
+
+url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
+
+# We only want to normalize urls with an HTTP(S) scheme.
+# urllib3 infers URLs without a scheme (None) to be http.
+NORMALIZABLE_SCHEMES = ('http', 'https', None)
+
+
+class Url(namedtuple('Url', url_attrs)):
+    """
+    Data structure for representing an HTTP URL. Used as a return value for
+    :func:`parse_url`. Both the scheme and host are normalized as they are
+    both case-insensitive according to RFC 3986.
+    """
+    __slots__ = ()
+
+    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
+                query=None, fragment=None):
+        if path and not path.startswith('/'):
+            path = '/' + path
+        if scheme:
+            scheme = scheme.lower()
+        if host and scheme in NORMALIZABLE_SCHEMES:
+            host = host.lower()
+        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
+                                       query, fragment)
+
+    @property
+    def hostname(self):
+        """For backwards-compatibility with urlparse. We're nice like that."""
+        return self.host
+
+    @property
+    def request_uri(self):
+        """Absolute path including the query string."""
+        uri = self.path or '/'
+
+        if self.query is not None:
+            uri += '?' + self.query
+
+        return uri
+
+    @property
+    def netloc(self):
+        """Network location including host and port"""
+        if self.port:
+            return '%s:%d' % (self.host, self.port)
+        return self.host
+
+    @property
+    def url(self):
+        """
+        Convert self into a url
+
+        This function should more or less round-trip with :func:`.parse_url`. The
+        returned url may not be exactly the same as the url passed to
+        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
+        with a blank port will have : removed).
+
+        Example: ::
+
+            >>> U = parse_url('http://google.com/mail/')
+            >>> U.url
+            'http://google.com/mail/'
+            >>> Url('http', 'username:password', 'host.com', 80,
+            ... '/path', 'query', 'fragment').url
+            'http://username:password@host.com:80/path?query#fragment'
+        """
+        scheme, auth, host, port, path, query, fragment = self
+        url = ''
+
+        # We use "is not None" we want things to happen with empty strings (or 0 port)
+        if scheme is not None:
+            url += scheme + '://'
+        if auth is not None:
+            url += auth + '@'
+        if host is not None:
+            url += host
+        if port is not None:
+            url += ':' + str(port)
+        if path is not None:
+            url += path
+        if query is not None:
+            url += '?' + query
+        if fragment is not None:
+            url += '#' + fragment
+
+        return url
+
+    def __str__(self):
+        return self.url
+
+
+def split_first(s, delims):
+    """
+    Given a string and an iterable of delimiters, split on the first found
+    delimiter. Return two split parts and the matched delimiter.
+
+    If not found, then the first part is the full input string.
+
+    Example::
+
+        >>> split_first('foo/bar?baz', '?/=')
+        ('foo', 'bar?baz', '/')
+        >>> split_first('foo/bar?baz', '123')
+        ('foo/bar?baz', '', None)
+
+    Scales linearly with the number of delims; not ideal for a large number of them.
+    """
+    min_idx = None
+    min_delim = None
+    for d in delims:
+        idx = s.find(d)
+        if idx < 0:
+            continue
+
+        if min_idx is None or idx < min_idx:
+            min_idx = idx
+            min_delim = d
+
+    if min_idx is None or min_idx < 0:
+        return s, '', None
+
+    return s[:min_idx], s[min_idx + 1:], min_delim
+
+
+def parse_url(url):
+    """
+    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
+    performed to parse incomplete urls. Fields not provided will be None.
+
+    Partly backwards-compatible with :mod:`urlparse`.
+
+    Example::
+
+        >>> parse_url('http://google.com/mail/')
+        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
+        >>> parse_url('google.com:80')
+        Url(scheme=None, host='google.com', port=80, path=None, ...)
+        >>> parse_url('/foo?bar')
+        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
+    """
+
+    # While this code has overlap with stdlib's urlparse, it is much
+    # simplified for our needs and less annoying.
+    # Additionally, this implementation does silly things to be optimal
+    # on CPython.
+
+    if not url:
+        # Empty
+        return Url()
+
+    scheme = None
+    auth = None
+    host = None
+    port = None
+    path = None
+    fragment = None
+    query = None
+
+    # Scheme
+    if '://' in url:
+        scheme, url = url.split('://', 1)
+
+    # Find the earliest Authority Terminator
+    # (http://tools.ietf.org/html/rfc3986#section-3.2)
+    url, path_, delim = split_first(url, ['/', '?', '#'])
+
+    if delim:
+        # Reassemble the path
+        path = delim + path_
+
+    # Auth
+    if '@' in url:
+        # Last '@' denotes end of auth part
+        auth, url = url.rsplit('@', 1)
+
+    # IPv6
+    if url and url[0] == '[':
+        host, url = url.split(']', 1)
+        host += ']'
+
+    # Port
+    if ':' in url:
+        _host, port = url.split(':', 1)
+
+        if not host:
+            host = _host
+
+        if port:
+            # If given, ports must be integers. No whitespace, no plus or
+            # minus prefixes, no non-integer digits such as ^2 (superscript).
+            if not port.isdigit():
+                raise LocationParseError(url)
+            try:
+                port = int(port)
+            except ValueError:
+                raise LocationParseError(url)
+        else:
+            # Blank ports are cool, too. (rfc3986#section-3.2.3)
+            port = None
+
+    elif not host and url:
+        host = url
+
+    if not path:
+        return Url(scheme, auth, host, port, path, query, fragment)
+
+    # Fragment
+    if '#' in path:
+        path, fragment = path.split('#', 1)
+
+    # Query
+    if '?' in path:
+        path, query = path.split('?', 1)
+
+    return Url(scheme, auth, host, port, path, query, fragment)
+
+
+def get_host(url):
+    """
+    Deprecated. Use :func:`parse_url` instead.
+    """
+    p = parse_url(url)
+    return p.scheme or 'http', p.hostname, p.port
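+
+
+# Editorial sketch, not part of upstream urllib3: how parse_url() and the Url
+# properties above fit together. The example URL is arbitrary; guarded under
+# __main__ so importing is unaffected.
+if __name__ == '__main__':
+    u = parse_url('http://user:pw@example.com:8080/path?q=1#frag')
+    print(u.scheme, u.auth, u.host, u.port)  # http user:pw example.com 8080
+    print(u.request_uri)                     # /path?q=1
+    print(u.url)                             # reassembled, RFC-equivalent URL
+    print(parse_url('example.com:80').port)  # 80 (scheme stays None)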
diff --git a/lib/python3.7/site-packages/pip/_vendor/urllib3/util/wait.py b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/wait.py
new file mode 100644
index 0000000000000000000000000000000000000000..4db71bafd87b742f98683011df2fac2c73358d23
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/urllib3/util/wait.py
@@ -0,0 +1,150 @@
+import errno
+from functools import partial
+import select
+import sys
+try:
+    from time import monotonic
+except ImportError:
+    from time import time as monotonic
+
+__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
+
+
+class NoWayToWaitForSocketError(Exception):
+    pass
+
+
+# How should we wait on sockets?
+#
+# There are two types of APIs you can use for waiting on sockets: the fancy
+# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
+# select/poll. The stateful APIs are more efficient when you have lots of
+# sockets to keep track of, because you can set them up once and then use them
+# lots of times. But we only ever want to wait on a single socket at a time
+# and don't want to keep track of state, so the stateless APIs are actually
+# more efficient. So we want to use select() or poll().
+#
+# Now, how do we choose between select() and poll()? On traditional Unixes,
+# select() has a strange calling convention that makes it slow, or fail
+# altogether, for high-numbered file descriptors. The point of poll() is to fix
+# that, so on Unixes, we prefer poll().
+#
+# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
+# for it), but that's OK, because on Windows, select() doesn't have this
+# strange calling convention; plain select() works fine.
+#
+# So: on Windows we use select(), and everywhere else we use poll(). We also
+# fall back to select() in case poll() is somehow broken or missing.
+
+if sys.version_info >= (3, 5):
+    # Modern Python, which retries syscalls by default
+    def _retry_on_intr(fn, timeout):
+        return fn(timeout)
+else:
+    # Old and broken Pythons.
+    def _retry_on_intr(fn, timeout):
+        if timeout is None:
+            deadline = float("inf")
+        else:
+            deadline = monotonic() + timeout
+
+        while True:
+            try:
+                return fn(timeout)
+            # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
+            except (OSError, select.error) as e:
+                # 'e.args[0]' incantation works for both OSError and select.error
+                if e.args[0] != errno.EINTR:
+                    raise
+                else:
+                    timeout = deadline - monotonic()
+                    if timeout < 0:
+                        timeout = 0
+                    if timeout == float("inf"):
+                        timeout = None
+                    continue
+
+
+def select_wait_for_socket(sock, read=False, write=False, timeout=None):
+    if not read and not write:
+        raise RuntimeError("must specify at least one of read=True, write=True")
+    rcheck = []
+    wcheck = []
+    if read:
+        rcheck.append(sock)
+    if write:
+        wcheck.append(sock)
+    # When doing a non-blocking connect, most systems signal success by
+    # marking the socket writable. Windows, though, signals success by marking
+    # it as "exceptional". We paper over the difference by checking the write
+    # sockets for both conditions. (The stdlib selectors module does the same
+    # thing.)
+    fn = partial(select.select, rcheck, wcheck, wcheck)
+    rready, wready, xready = _retry_on_intr(fn, timeout)
+    return bool(rready or wready or xready)
+
+
+def poll_wait_for_socket(sock, read=False, write=False, timeout=None):
+    if not read and not write:
+        raise RuntimeError("must specify at least one of read=True, write=True")
+    mask = 0
+    if read:
+        mask |= select.POLLIN
+    if write:
+        mask |= select.POLLOUT
+    poll_obj = select.poll()
+    poll_obj.register(sock, mask)
+
+    # For some reason, poll() takes timeout in milliseconds
+    def do_poll(t):
+        if t is not None:
+            t *= 1000
+        return poll_obj.poll(t)
+
+    return bool(_retry_on_intr(do_poll, timeout))
+
+
+def null_wait_for_socket(*args, **kwargs):
+    raise NoWayToWaitForSocketError("no select-equivalent available")
+
+
+def _have_working_poll():
+    # Apparently some systems have a select.poll that fails as soon as you try
+    # to use it, either due to strange configuration or broken monkeypatching
+    # from libraries like eventlet/greenlet.
+    try:
+        poll_obj = select.poll()
+        _retry_on_intr(poll_obj.poll, 0)
+    except (AttributeError, OSError):
+        return False
+    else:
+        return True
+
+
+def wait_for_socket(*args, **kwargs):
+    # We delay choosing which implementation to use until the first time we're
+    # called. We could do it at import time, but then we might make the wrong
+    # decision if someone goes wild with monkeypatching select.poll after
+    # we're imported.
+    global wait_for_socket
+    if _have_working_poll():
+        wait_for_socket = poll_wait_for_socket
+    elif hasattr(select, "select"):
+        wait_for_socket = select_wait_for_socket
+    else:  # Platform-specific: Appengine.
+        wait_for_socket = null_wait_for_socket
+    return wait_for_socket(*args, **kwargs)
+
+
+def wait_for_read(sock, timeout=None):
+    """ Waits for reading to be available on a given socket.
+    Returns True if the socket is readable, or False if the timeout expired.
+    """
+    return wait_for_socket(sock, read=True, timeout=timeout)
+
+
+def wait_for_write(sock, timeout=None):
+    """ Waits for writing to be available on a given socket.
+    Returns True if the socket is writable, or False if the timeout expired.
+    """
+    return wait_for_socket(sock, write=True, timeout=timeout)
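+
+
+# Editorial sketch, not part of upstream urllib3: wait_for_read/wait_for_write
+# on a local socket pair. Guarded under __main__ so importing is unaffected.
+if __name__ == '__main__':
+    import socket
+    a, b = socket.socketpair()
+    print(wait_for_read(b, timeout=0.1))   # False: nothing sent yet
+    print(wait_for_write(a, timeout=0.1))  # True: the send buffer has room
+    a.sendall(b'ping')
+    print(wait_for_read(b, timeout=0.1))   # True: 4 bytes are pending
+    a.close()
+    b.close()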
diff --git a/lib/python3.7/site-packages/pip/_vendor/webencodings/__init__.py b/lib/python3.7/site-packages/pip/_vendor/webencodings/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d21d697c887bed1f8ab7f36d10185e986d9f1e54
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/webencodings/__init__.py
@@ -0,0 +1,342 @@
+# coding: utf-8
+"""
+
+    webencodings
+    ~~~~~~~~~~~~
+
+    This is a Python implementation of the `WHATWG Encoding standard
+    <http://encoding.spec.whatwg.org/>`_. See README for details.
+
+    :copyright: Copyright 2012 by Simon Sapin
+    :license: BSD, see LICENSE for details.
+
+"""
+
+from __future__ import unicode_literals
+
+import codecs
+
+from .labels import LABELS
+
+
+VERSION = '0.5.1'
+
+
+# Some names in Encoding are not valid Python aliases. Remap these.
+PYTHON_NAMES = {
+    'iso-8859-8-i': 'iso-8859-8',
+    'x-mac-cyrillic': 'mac-cyrillic',
+    'macintosh': 'mac-roman',
+    'windows-874': 'cp874'}
+
+CACHE = {}
+
+
+def ascii_lower(string):
+    r"""Transform (only) ASCII letters to lower case: A-Z is mapped to a-z.
+
+    :param string: A Unicode string.
+    :returns: A new Unicode string.
+
+    This is used for `ASCII case-insensitive
+    <http://encoding.spec.whatwg.org/#ascii-case-insensitive>`_
+    matching of encoding labels.
+    The same matching is also used, among other things,
+    for `CSS keywords <http://dev.w3.org/csswg/css-values/#keywords>`_.
+
+    This is different from the :meth:`~py:str.lower` method of Unicode strings
+    which also affects non-ASCII characters,
+    sometimes mapping them into the ASCII range:
+
+        >>> keyword = u'Bac\N{KELVIN SIGN}ground'
+        >>> assert keyword.lower() == u'background'
+        >>> assert ascii_lower(keyword) != keyword.lower()
+        >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground'
+
+    """
+    # This turns out to be faster than unicode.translate()
+    return string.encode('utf8').lower().decode('utf8')
+
+
+def lookup(label):
+    """
+    Look for an encoding by its label.
+    This is the spec’s `get an encoding
+    <http://encoding.spec.whatwg.org/#concept-encoding-get>`_ algorithm.
+    Supported labels are listed there.
+
+    :param label: A string.
+    :returns:
+        An :class:`Encoding` object, or :obj:`None` for an unknown label.
+
+    """
+    # Only strip ASCII whitespace: U+0009, U+000A, U+000C, U+000D, and U+0020.
+    label = ascii_lower(label.strip('\t\n\f\r '))
+    name = LABELS.get(label)
+    if name is None:
+        return None
+    encoding = CACHE.get(name)
+    if encoding is None:
+        if name == 'x-user-defined':
+            from .x_user_defined import codec_info
+        else:
+            python_name = PYTHON_NAMES.get(name, name)
+            # Any python_name value that gets to here should be valid.
+            codec_info = codecs.lookup(python_name)
+        encoding = Encoding(name, codec_info)
+        CACHE[name] = encoding
+    return encoding
+
+
+def _get_encoding(encoding_or_label):
+    """
+    Accept either an encoding object or label.
+
+    :param encoding_or_label: An :class:`Encoding` object or a label string.
+    :returns: An :class:`Encoding` object.
+    :raises: :exc:`~exceptions.LookupError` for an unknown label.
+
+    """
+    if hasattr(encoding_or_label, 'codec_info'):
+        return encoding_or_label
+
+    encoding = lookup(encoding_or_label)
+    if encoding is None:
+        raise LookupError('Unknown encoding label: %r' % encoding_or_label)
+    return encoding
+
+
+class Encoding(object):
+    """Reresents a character encoding such as UTF-8,
+    that can be used for decoding or encoding.
+
+    .. attribute:: name
+
+        Canonical name of the encoding
+
+    .. attribute:: codec_info
+
+        The actual implementation of the encoding,
+        a stdlib :class:`~codecs.CodecInfo` object.
+        See :func:`codecs.register`.
+
+    """
+    def __init__(self, name, codec_info):
+        self.name = name
+        self.codec_info = codec_info
+
+    def __repr__(self):
+        return '<Encoding %s>' % self.name
+
+
+#: The UTF-8 encoding. Should be used for new content and formats.
+UTF8 = lookup('utf-8')
+
+_UTF16LE = lookup('utf-16le')
+_UTF16BE = lookup('utf-16be')
+
+
+def decode(input, fallback_encoding, errors='replace'):
+    """
+    Decode a single string.
+
+    :param input: A byte string
+    :param fallback_encoding:
+        An :class:`Encoding` object or a label string.
+        The encoding to use if :obj:`input` does not have a BOM.
+    :param errors: Type of error handling. See :func:`codecs.register`.
+    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+    :return:
+        A ``(output, encoding)`` tuple of a Unicode string
+        and an :obj:`Encoding`.
+
+    """
+    # Fail early if `encoding` is an invalid label.
+    fallback_encoding = _get_encoding(fallback_encoding)
+    bom_encoding, input = _detect_bom(input)
+    encoding = bom_encoding or fallback_encoding
+    return encoding.codec_info.decode(input, errors)[0], encoding
+
+
+def _detect_bom(input):
+    """Return (bom_encoding, input), with any BOM removed from the input."""
+    if input.startswith(b'\xFF\xFE'):
+        return _UTF16LE, input[2:]
+    if input.startswith(b'\xFE\xFF'):
+        return _UTF16BE, input[2:]
+    if input.startswith(b'\xEF\xBB\xBF'):
+        return UTF8, input[3:]
+    return None, input
+
+
+def encode(input, encoding=UTF8, errors='strict'):
+    """
+    Encode a single string.
+
+    :param input: A Unicode string.
+    :param encoding: An :class:`Encoding` object or a label string.
+    :param errors: Type of error handling. See :func:`codecs.register`.
+    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+    :return: A byte string.
+
+    """
+    return _get_encoding(encoding).codec_info.encode(input, errors)[0]
+
+
+def iter_decode(input, fallback_encoding, errors='replace'):
+    """
+    "Pull"-based decoder.
+
+    :param input:
+        An iterable of byte strings.
+
+        The input is first consumed just enough to determine the encoding
+        based on the presence of a BOM,
+        then consumed on demand as the returned iterable is consumed.
+    :param fallback_encoding:
+        An :class:`Encoding` object or a label string.
+        The encoding to use if :obj:`input` does not have a BOM.
+    :param errors: Type of error handling. See :func:`codecs.register`.
+    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+    :returns:
+        An ``(output, encoding)`` tuple.
+        :obj:`output` is an iterable of Unicode strings,
+        :obj:`encoding` is the :obj:`Encoding` that is being used.
+
+    """
+
+    decoder = IncrementalDecoder(fallback_encoding, errors)
+    generator = _iter_decode_generator(input, decoder)
+    encoding = next(generator)
+    return generator, encoding
+
+
+def _iter_decode_generator(input, decoder):
+    """Return a generator that first yields the :obj:`Encoding`,
+    then yields output chunks as Unicode strings.
+
+    """
+    decode = decoder.decode
+    input = iter(input)
+    for chunk in input:
+        output = decode(chunk)
+        if output:
+            assert decoder.encoding is not None
+            yield decoder.encoding
+            yield output
+            break
+    else:
+        # Input exhausted without determining the encoding
+        output = decode(b'', final=True)
+        assert decoder.encoding is not None
+        yield decoder.encoding
+        if output:
+            yield output
+        return
+
+    for chunk in input:
+        output = decode(chunk)
+        if output:
+            yield output
+    output = decode(b'', final=True)
+    if output:
+        yield output
+
+
+def iter_encode(input, encoding=UTF8, errors='strict'):
+    """
+    “Pull”-based encoder.
+
+    :param input: An iterable of Unicode strings.
+    :param encoding: An :class:`Encoding` object or a label string.
+    :param errors: Type of error handling. See :func:`codecs.register`.
+    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+    :returns: An iterable of byte strings.
+
+    """
+    # Fail early if `encoding` is an invalid label.
+    encode = IncrementalEncoder(encoding, errors).encode
+    return _iter_encode_generator(input, encode)
+
+
+def _iter_encode_generator(input, encode):
+    for chunk in input:
+        output = encode(chunk)
+        if output:
+            yield output
+    output = encode('', final=True)
+    if output:
+        yield output
+
+
+class IncrementalDecoder(object):
+    """
+    “Push”-based decoder.
+
+    :param fallback_encoding:
+        An :class:`Encoding` object or a label string.
+        The encoding to use if :obj:`input` does not have a BOM.
+    :param errors: Type of error handling. See :func:`codecs.register`.
+    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+
+    """
+    def __init__(self, fallback_encoding, errors='replace'):
+        # Fail early if `encoding` is an invalid label.
+        self._fallback_encoding = _get_encoding(fallback_encoding)
+        self._errors = errors
+        self._buffer = b''
+        self._decoder = None
+        #: The actual :class:`Encoding` that is being used,
+        #: or :obj:`None` if that is not determined yet.
+        #: (I.e., if there is not enough input yet to determine
+        #: if there is a BOM.)
+        self.encoding = None  # Not known yet.
+
+    def decode(self, input, final=False):
+        """Decode one chunk of the input.
+
+        :param input: A byte string.
+        :param final:
+            Indicate that no more input is available.
+            Must be :obj:`True` if this is the last call.
+        :returns: A Unicode string.
+
+        """
+        decoder = self._decoder
+        if decoder is not None:
+            return decoder(input, final)
+
+        input = self._buffer + input
+        encoding, input = _detect_bom(input)
+        if encoding is None:
+            if len(input) < 3 and not final:  # Not enough data yet.
+                self._buffer = input
+                return ''
+            else:  # No BOM
+                encoding = self._fallback_encoding
+        decoder = encoding.codec_info.incrementaldecoder(self._errors).decode
+        self._decoder = decoder
+        self.encoding = encoding
+        return decoder(input, final)
+
+
+class IncrementalEncoder(object):
+    """
+    “Push”-based encoder.
+
+    :param encoding: An :class:`Encoding` object or a label string.
+    :param errors: Type of error handling. See :func:`codecs.register`.
+    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+
+    .. method:: encode(input, final=False)
+
+        :param input: A Unicode string.
+        :param final:
+            Indicate that no more input is available.
+            Must be :obj:`True` if this is the last call.
+        :returns: A byte string.
+
+    """
+    def __init__(self, encoding=UTF8, errors='strict'):
+        encoding = _get_encoding(encoding)
+        self.encode = encoding.codec_info.incrementalencoder(errors).encode
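+
+
+# Editorial sketch, not part of upstream webencodings: label lookup, BOM
+# detection and incremental decoding with the API defined above. Guarded
+# under __main__ so importing is unaffected.
+if __name__ == '__main__':
+    print(lookup('latin1').name)                # 'windows-1252' per WHATWG
+    print(decode(b'\xef\xbb\xbfhi', 'latin1'))  # ('hi', <Encoding utf-8>): the BOM wins
+    print(encode('caf\xe9'))                    # b'caf\xc3\xa9' (UTF-8 default)
+    d = IncrementalDecoder('utf-8')
+    print(d.decode(b'\xc3'), d.decode(b'\xa9', final=True))  # '' then 'é'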
diff --git a/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f2834234ead2a2e86c0d597800fff02755a328e
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/labels.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/labels.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26612b10237b3bd48918f220eef55b0086e3a091
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/labels.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/mklabels.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/mklabels.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64f6fa5ac2c4cbb6b5c4733e4cc97fc07245b825
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/mklabels.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/tests.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/tests.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c92f1da402bcb8d5dba20326d58e974956563425
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/tests.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/x_user_defined.cpython-37.pyc b/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/x_user_defined.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7276ed50d107b961131bed1e7a8ae2d74d8c2e95
Binary files /dev/null and b/lib/python3.7/site-packages/pip/_vendor/webencodings/__pycache__/x_user_defined.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pip/_vendor/webencodings/labels.py b/lib/python3.7/site-packages/pip/_vendor/webencodings/labels.py
new file mode 100644
index 0000000000000000000000000000000000000000..29cbf91ef79b89971e51db9ddfc3720d8b4db82a
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/webencodings/labels.py
@@ -0,0 +1,231 @@
+"""
+
+    webencodings.labels
+    ~~~~~~~~~~~~~~~~~~~
+
+    Map encoding labels to their name.
+
+    :copyright: Copyright 2012 by Simon Sapin
+    :license: BSD, see LICENSE for details.
+
+"""
+
+# XXX Do not edit!
+# This file is automatically generated by mklabels.py
+
+LABELS = {
+    'unicode-1-1-utf-8':   'utf-8',
+    'utf-8':               'utf-8',
+    'utf8':                'utf-8',
+    '866':                 'ibm866',
+    'cp866':               'ibm866',
+    'csibm866':            'ibm866',
+    'ibm866':              'ibm866',
+    'csisolatin2':         'iso-8859-2',
+    'iso-8859-2':          'iso-8859-2',
+    'iso-ir-101':          'iso-8859-2',
+    'iso8859-2':           'iso-8859-2',
+    'iso88592':            'iso-8859-2',
+    'iso_8859-2':          'iso-8859-2',
+    'iso_8859-2:1987':     'iso-8859-2',
+    'l2':                  'iso-8859-2',
+    'latin2':              'iso-8859-2',
+    'csisolatin3':         'iso-8859-3',
+    'iso-8859-3':          'iso-8859-3',
+    'iso-ir-109':          'iso-8859-3',
+    'iso8859-3':           'iso-8859-3',
+    'iso88593':            'iso-8859-3',
+    'iso_8859-3':          'iso-8859-3',
+    'iso_8859-3:1988':     'iso-8859-3',
+    'l3':                  'iso-8859-3',
+    'latin3':              'iso-8859-3',
+    'csisolatin4':         'iso-8859-4',
+    'iso-8859-4':          'iso-8859-4',
+    'iso-ir-110':          'iso-8859-4',
+    'iso8859-4':           'iso-8859-4',
+    'iso88594':            'iso-8859-4',
+    'iso_8859-4':          'iso-8859-4',
+    'iso_8859-4:1988':     'iso-8859-4',
+    'l4':                  'iso-8859-4',
+    'latin4':              'iso-8859-4',
+    'csisolatincyrillic':  'iso-8859-5',
+    'cyrillic':            'iso-8859-5',
+    'iso-8859-5':          'iso-8859-5',
+    'iso-ir-144':          'iso-8859-5',
+    'iso8859-5':           'iso-8859-5',
+    'iso88595':            'iso-8859-5',
+    'iso_8859-5':          'iso-8859-5',
+    'iso_8859-5:1988':     'iso-8859-5',
+    'arabic':              'iso-8859-6',
+    'asmo-708':            'iso-8859-6',
+    'csiso88596e':         'iso-8859-6',
+    'csiso88596i':         'iso-8859-6',
+    'csisolatinarabic':    'iso-8859-6',
+    'ecma-114':            'iso-8859-6',
+    'iso-8859-6':          'iso-8859-6',
+    'iso-8859-6-e':        'iso-8859-6',
+    'iso-8859-6-i':        'iso-8859-6',
+    'iso-ir-127':          'iso-8859-6',
+    'iso8859-6':           'iso-8859-6',
+    'iso88596':            'iso-8859-6',
+    'iso_8859-6':          'iso-8859-6',
+    'iso_8859-6:1987':     'iso-8859-6',
+    'csisolatingreek':     'iso-8859-7',
+    'ecma-118':            'iso-8859-7',
+    'elot_928':            'iso-8859-7',
+    'greek':               'iso-8859-7',
+    'greek8':              'iso-8859-7',
+    'iso-8859-7':          'iso-8859-7',
+    'iso-ir-126':          'iso-8859-7',
+    'iso8859-7':           'iso-8859-7',
+    'iso88597':            'iso-8859-7',
+    'iso_8859-7':          'iso-8859-7',
+    'iso_8859-7:1987':     'iso-8859-7',
+    'sun_eu_greek':        'iso-8859-7',
+    'csiso88598e':         'iso-8859-8',
+    'csisolatinhebrew':    'iso-8859-8',
+    'hebrew':              'iso-8859-8',
+    'iso-8859-8':          'iso-8859-8',
+    'iso-8859-8-e':        'iso-8859-8',
+    'iso-ir-138':          'iso-8859-8',
+    'iso8859-8':           'iso-8859-8',
+    'iso88598':            'iso-8859-8',
+    'iso_8859-8':          'iso-8859-8',
+    'iso_8859-8:1988':     'iso-8859-8',
+    'visual':              'iso-8859-8',
+    'csiso88598i':         'iso-8859-8-i',
+    'iso-8859-8-i':        'iso-8859-8-i',
+    'logical':             'iso-8859-8-i',
+    'csisolatin6':         'iso-8859-10',
+    'iso-8859-10':         'iso-8859-10',
+    'iso-ir-157':          'iso-8859-10',
+    'iso8859-10':          'iso-8859-10',
+    'iso885910':           'iso-8859-10',
+    'l6':                  'iso-8859-10',
+    'latin6':              'iso-8859-10',
+    'iso-8859-13':         'iso-8859-13',
+    'iso8859-13':          'iso-8859-13',
+    'iso885913':           'iso-8859-13',
+    'iso-8859-14':         'iso-8859-14',
+    'iso8859-14':          'iso-8859-14',
+    'iso885914':           'iso-8859-14',
+    'csisolatin9':         'iso-8859-15',
+    'iso-8859-15':         'iso-8859-15',
+    'iso8859-15':          'iso-8859-15',
+    'iso885915':           'iso-8859-15',
+    'iso_8859-15':         'iso-8859-15',
+    'l9':                  'iso-8859-15',
+    'iso-8859-16':         'iso-8859-16',
+    'cskoi8r':             'koi8-r',
+    'koi':                 'koi8-r',
+    'koi8':                'koi8-r',
+    'koi8-r':              'koi8-r',
+    'koi8_r':              'koi8-r',
+    'koi8-u':              'koi8-u',
+    'csmacintosh':         'macintosh',
+    'mac':                 'macintosh',
+    'macintosh':           'macintosh',
+    'x-mac-roman':         'macintosh',
+    'dos-874':             'windows-874',
+    'iso-8859-11':         'windows-874',
+    'iso8859-11':          'windows-874',
+    'iso885911':           'windows-874',
+    'tis-620':             'windows-874',
+    'windows-874':         'windows-874',
+    'cp1250':              'windows-1250',
+    'windows-1250':        'windows-1250',
+    'x-cp1250':            'windows-1250',
+    'cp1251':              'windows-1251',
+    'windows-1251':        'windows-1251',
+    'x-cp1251':            'windows-1251',
+    'ansi_x3.4-1968':      'windows-1252',
+    'ascii':               'windows-1252',
+    'cp1252':              'windows-1252',
+    'cp819':               'windows-1252',
+    'csisolatin1':         'windows-1252',
+    'ibm819':              'windows-1252',
+    'iso-8859-1':          'windows-1252',
+    'iso-ir-100':          'windows-1252',
+    'iso8859-1':           'windows-1252',
+    'iso88591':            'windows-1252',
+    'iso_8859-1':          'windows-1252',
+    'iso_8859-1:1987':     'windows-1252',
+    'l1':                  'windows-1252',
+    'latin1':              'windows-1252',
+    'us-ascii':            'windows-1252',
+    'windows-1252':        'windows-1252',
+    'x-cp1252':            'windows-1252',
+    'cp1253':              'windows-1253',
+    'windows-1253':        'windows-1253',
+    'x-cp1253':            'windows-1253',
+    'cp1254':              'windows-1254',
+    'csisolatin5':         'windows-1254',
+    'iso-8859-9':          'windows-1254',
+    'iso-ir-148':          'windows-1254',
+    'iso8859-9':           'windows-1254',
+    'iso88599':            'windows-1254',
+    'iso_8859-9':          'windows-1254',
+    'iso_8859-9:1989':     'windows-1254',
+    'l5':                  'windows-1254',
+    'latin5':              'windows-1254',
+    'windows-1254':        'windows-1254',
+    'x-cp1254':            'windows-1254',
+    'cp1255':              'windows-1255',
+    'windows-1255':        'windows-1255',
+    'x-cp1255':            'windows-1255',
+    'cp1256':              'windows-1256',
+    'windows-1256':        'windows-1256',
+    'x-cp1256':            'windows-1256',
+    'cp1257':              'windows-1257',
+    'windows-1257':        'windows-1257',
+    'x-cp1257':            'windows-1257',
+    'cp1258':              'windows-1258',
+    'windows-1258':        'windows-1258',
+    'x-cp1258':            'windows-1258',
+    'x-mac-cyrillic':      'x-mac-cyrillic',
+    'x-mac-ukrainian':     'x-mac-cyrillic',
+    'chinese':             'gbk',
+    'csgb2312':            'gbk',
+    'csiso58gb231280':     'gbk',
+    'gb2312':              'gbk',
+    'gb_2312':             'gbk',
+    'gb_2312-80':          'gbk',
+    'gbk':                 'gbk',
+    'iso-ir-58':           'gbk',
+    'x-gbk':               'gbk',
+    'gb18030':             'gb18030',
+    'hz-gb-2312':          'hz-gb-2312',
+    'big5':                'big5',
+    'big5-hkscs':          'big5',
+    'cn-big5':             'big5',
+    'csbig5':              'big5',
+    'x-x-big5':            'big5',
+    'cseucpkdfmtjapanese': 'euc-jp',
+    'euc-jp':              'euc-jp',
+    'x-euc-jp':            'euc-jp',
+    'csiso2022jp':         'iso-2022-jp',
+    'iso-2022-jp':         'iso-2022-jp',
+    'csshiftjis':          'shift_jis',
+    'ms_kanji':            'shift_jis',
+    'shift-jis':           'shift_jis',
+    'shift_jis':           'shift_jis',
+    'sjis':                'shift_jis',
+    'windows-31j':         'shift_jis',
+    'x-sjis':              'shift_jis',
+    'cseuckr':             'euc-kr',
+    'csksc56011987':       'euc-kr',
+    'euc-kr':              'euc-kr',
+    'iso-ir-149':          'euc-kr',
+    'korean':              'euc-kr',
+    'ks_c_5601-1987':      'euc-kr',
+    'ks_c_5601-1989':      'euc-kr',
+    'ksc5601':             'euc-kr',
+    'ksc_5601':            'euc-kr',
+    'windows-949':         'euc-kr',
+    'csiso2022kr':         'iso-2022-kr',
+    'iso-2022-kr':         'iso-2022-kr',
+    'utf-16be':            'utf-16be',
+    'utf-16':              'utf-16le',
+    'utf-16le':            'utf-16le',
+    'x-user-defined':      'x-user-defined',
+}
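+
+# Editorial note, not part of the generated file: many labels collapse onto a
+# single canonical name. For example, per the WHATWG spec:
+#
+#     >>> LABELS['ascii'], LABELS['latin1'], LABELS['iso-8859-1']
+#     ('windows-1252', 'windows-1252', 'windows-1252')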
diff --git a/lib/python3.7/site-packages/pip/_vendor/webencodings/mklabels.py b/lib/python3.7/site-packages/pip/_vendor/webencodings/mklabels.py
new file mode 100644
index 0000000000000000000000000000000000000000..295dc928ba71fc00caa52708ac70097abe6dc3e4
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/webencodings/mklabels.py
@@ -0,0 +1,59 @@
+"""
+
+    webencodings.mklabels
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    Regenerate the webencodings.labels module.
+
+    :copyright: Copyright 2012 by Simon Sapin
+    :license: BSD, see LICENSE for details.
+
+"""
+
+import json
+try:
+    from urllib import urlopen
+except ImportError:
+    from urllib.request import urlopen
+
+
+def assert_lower(string):
+    assert string == string.lower()
+    return string
+
+
+def generate(url):
+    parts = ['''\
+"""
+
+    webencodings.labels
+    ~~~~~~~~~~~~~~~~~~~
+
+    Map encoding labels to their name.
+
+    :copyright: Copyright 2012 by Simon Sapin
+    :license: BSD, see LICENSE for details.
+
+"""
+
+# XXX Do not edit!
+# This file is automatically generated by mklabels.py
+
+LABELS = {
+''']
+    labels = [
+        (repr(assert_lower(label)).lstrip('u'),
+         repr(encoding['name']).lstrip('u'))
+        for category in json.loads(urlopen(url).read().decode('ascii'))
+        for encoding in category['encodings']
+        for label in encoding['labels']]
+    max_len = max(len(label) for label, name in labels)
+    parts.extend(
+        '    %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name)
+        for label, name in labels)
+    parts.append('}')
+    return ''.join(parts)
+
+
+if __name__ == '__main__':
+    print(generate('http://encoding.spec.whatwg.org/encodings.json'))
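+
+# Editorial note, not part of upstream webencodings: labels.py is regenerated
+# by redirecting this script's stdout over the old file, e.g.:
+#
+#     python mklabels.py > labels.py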
diff --git a/lib/python3.7/site-packages/pip/_vendor/webencodings/tests.py b/lib/python3.7/site-packages/pip/_vendor/webencodings/tests.py
new file mode 100644
index 0000000000000000000000000000000000000000..e12c10d033026f09cf97b81d29555e12aae8c762
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/webencodings/tests.py
@@ -0,0 +1,153 @@
+# coding: utf-8
+"""
+
+    webencodings.tests
+    ~~~~~~~~~~~~~~~~~~
+
+    A basic test suite for Encoding.
+
+    :copyright: Copyright 2012 by Simon Sapin
+    :license: BSD, see LICENSE for details.
+
+"""
+
+from __future__ import unicode_literals
+
+from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode,
+               IncrementalDecoder, IncrementalEncoder, UTF8)
+
+
+def assert_raises(exception, function, *args, **kwargs):
+    try:
+        function(*args, **kwargs)
+    except exception:
+        return
+    else:  # pragma: no cover
+        raise AssertionError('Did not raise %s.' % exception)
+
+
+def test_labels():
+    assert lookup('utf-8').name == 'utf-8'
+    assert lookup('Utf-8').name == 'utf-8'
+    assert lookup('UTF-8').name == 'utf-8'
+    assert lookup('utf8').name == 'utf-8'
+    assert lookup('utf8').name == 'utf-8'
+    assert lookup('utf8 ').name == 'utf-8'
+    assert lookup(' \r\nutf8\t').name == 'utf-8'
+    assert lookup('u8') is None  # Python label.
+    assert lookup('utf-8 ') is None  # Non-ASCII white space.
+
+    assert lookup('US-ASCII').name == 'windows-1252'
+    assert lookup('iso-8859-1').name == 'windows-1252'
+    assert lookup('latin1').name == 'windows-1252'
+    assert lookup('LATIN1').name == 'windows-1252'
+    assert lookup('latin-1') is None
+    assert lookup('LATİN1') is None  # ASCII-only case insensitivity.
+
+
+def test_all_labels():
+    for label in LABELS:
+        assert decode(b'', label) == ('', lookup(label))
+        assert encode('', label) == b''
+        for repeat in [0, 1, 12]:
+            output, _ = iter_decode([b''] * repeat, label)
+            assert list(output) == []
+            assert list(iter_encode([''] * repeat, label)) == []
+        decoder = IncrementalDecoder(label)
+        assert decoder.decode(b'') == ''
+        assert decoder.decode(b'', final=True) == ''
+        encoder = IncrementalEncoder(label)
+        assert encoder.encode('') == b''
+        assert encoder.encode('', final=True) == b''
+    # All encoding names are valid labels too:
+    for name in set(LABELS.values()):
+        assert lookup(name).name == name
+
+
+def test_invalid_label():
+    assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid')
+    assert_raises(LookupError, encode, 'é', 'invalid')
+    assert_raises(LookupError, iter_decode, [], 'invalid')
+    assert_raises(LookupError, iter_encode, [], 'invalid')
+    assert_raises(LookupError, IncrementalDecoder, 'invalid')
+    assert_raises(LookupError, IncrementalEncoder, 'invalid')
+
+
+def test_decode():
+    assert decode(b'\x80', 'latin1') == ('€', lookup('latin1'))
+    assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1'))
+    assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8'))
+    assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8'))
+    assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii'))
+    assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8'))  # UTF-8 with BOM
+
+    assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be'))  # UTF-16-BE with BOM
+    assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le'))  # UTF-16-LE with BOM
+    assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be'))
+    assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le'))
+
+    assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be'))
+    assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le'))
+    assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le'))
+
+    assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be'))
+    assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le'))
+    assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le'))
+
+
+def test_encode():
+    assert encode('é', 'latin1') == b'\xe9'
+    assert encode('é', 'utf8') == b'\xc3\xa9'
+    assert encode('é', 'utf8') == b'\xc3\xa9'
+    assert encode('é', 'utf-16') == b'\xe9\x00'
+    assert encode('é', 'utf-16le') == b'\xe9\x00'
+    assert encode('é', 'utf-16be') == b'\x00\xe9'
+
+
+def test_iter_decode():
+    def iter_decode_to_string(input, fallback_encoding):
+        output, _encoding = iter_decode(input, fallback_encoding)
+        return ''.join(output)
+    assert iter_decode_to_string([], 'latin1') == ''
+    assert iter_decode_to_string([b''], 'latin1') == ''
+    assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é'
+    assert iter_decode_to_string([b'hello'], 'latin1') == 'hello'
+    assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello'
+    assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello'
+    assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é'
+    assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é'
+    assert iter_decode_to_string([
+        b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é'
+    assert iter_decode_to_string([
+        b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD'
+    assert iter_decode_to_string([
+        b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é'
+    assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == ''
+    assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»'
+    assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é'
+    assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é'
+    assert iter_decode_to_string([
+        b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é'
+    assert iter_decode_to_string([
+        b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo'
+
+
+def test_iter_encode():
+    assert b''.join(iter_encode([], 'latin1')) == b''
+    assert b''.join(iter_encode([''], 'latin1')) == b''
+    assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9'
+    assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9'
+    assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00'
+    assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00'
+    assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9'
+    assert b''.join(iter_encode([
+        '', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo'
+
+
+def test_x_user_defined():
+    encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca'
+    decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca'
+    assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined'))
+    assert encode(decoded, 'x-user-defined') == encoded
+    encoded = b'aa'
+    decoded = 'aa'
+    assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined'))
+    assert encode(decoded, 'x-user-defined') == encoded
diff --git a/lib/python3.7/site-packages/pip/_vendor/webencodings/x_user_defined.py b/lib/python3.7/site-packages/pip/_vendor/webencodings/x_user_defined.py
new file mode 100644
index 0000000000000000000000000000000000000000..d16e326024c05a59548619e13258acad781e0a6d
--- /dev/null
+++ b/lib/python3.7/site-packages/pip/_vendor/webencodings/x_user_defined.py
@@ -0,0 +1,325 @@
+# coding: utf-8
+"""
+
+    webencodings.x_user_defined
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    An implementation of the x-user-defined encoding.
+
+    :copyright: Copyright 2012 by Simon Sapin
+    :license: BSD, see LICENSE for details.
+
+"""
+
+from __future__ import unicode_literals
+
+import codecs
+
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+    def encode(self, input, errors='strict'):
+        return codecs.charmap_encode(input, errors, encoding_table)
+
+    def decode(self, input, errors='strict'):
+        return codecs.charmap_decode(input, errors, decoding_table)
+
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+    def encode(self, input, final=False):
+        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
+
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+    def decode(self, input, final=False):
+        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
+
+
+class StreamWriter(Codec, codecs.StreamWriter):
+    pass
+
+
+class StreamReader(Codec, codecs.StreamReader):
+    pass
+
+
+### encodings module API
+
+codec_info = codecs.CodecInfo(
+    name='x-user-defined',
+    encode=Codec().encode,
+    decode=Codec().decode,
+    incrementalencoder=IncrementalEncoder,
+    incrementaldecoder=IncrementalDecoder,
+    streamreader=StreamReader,
+    streamwriter=StreamWriter,
+)
+
+
+### Decoding Table
+
+# Python 3:
+# for c in range(256): print('    %r' % chr(c if c < 128 else c + 0xF700))
+decoding_table = (
+    '\x00'
+    '\x01'
+    '\x02'
+    '\x03'
+    '\x04'
+    '\x05'
+    '\x06'
+    '\x07'
+    '\x08'
+    '\t'
+    '\n'
+    '\x0b'
+    '\x0c'
+    '\r'
+    '\x0e'
+    '\x0f'
+    '\x10'
+    '\x11'
+    '\x12'
+    '\x13'
+    '\x14'
+    '\x15'
+    '\x16'
+    '\x17'
+    '\x18'
+    '\x19'
+    '\x1a'
+    '\x1b'
+    '\x1c'
+    '\x1d'
+    '\x1e'
+    '\x1f'
+    ' '
+    '!'
+    '"'
+    '#'
+    '$'
+    '%'
+    '&'
+    "'"
+    '('
+    ')'
+    '*'
+    '+'
+    ','
+    '-'
+    '.'
+    '/'
+    '0'
+    '1'
+    '2'
+    '3'
+    '4'
+    '5'
+    '6'
+    '7'
+    '8'
+    '9'
+    ':'
+    ';'
+    '<'
+    '='
+    '>'
+    '?'
+    '@'
+    'A'
+    'B'
+    'C'
+    'D'
+    'E'
+    'F'
+    'G'
+    'H'
+    'I'
+    'J'
+    'K'
+    'L'
+    'M'
+    'N'
+    'O'
+    'P'
+    'Q'
+    'R'
+    'S'
+    'T'
+    'U'
+    'V'
+    'W'
+    'X'
+    'Y'
+    'Z'
+    '['
+    '\\'
+    ']'
+    '^'
+    '_'
+    '`'
+    'a'
+    'b'
+    'c'
+    'd'
+    'e'
+    'f'
+    'g'
+    'h'
+    'i'
+    'j'
+    'k'
+    'l'
+    'm'
+    'n'
+    'o'
+    'p'
+    'q'
+    'r'
+    's'
+    't'
+    'u'
+    'v'
+    'w'
+    'x'
+    'y'
+    'z'
+    '{'
+    '|'
+    '}'
+    '~'
+    '\x7f'
+    '\uf780'
+    '\uf781'
+    '\uf782'
+    '\uf783'
+    '\uf784'
+    '\uf785'
+    '\uf786'
+    '\uf787'
+    '\uf788'
+    '\uf789'
+    '\uf78a'
+    '\uf78b'
+    '\uf78c'
+    '\uf78d'
+    '\uf78e'
+    '\uf78f'
+    '\uf790'
+    '\uf791'
+    '\uf792'
+    '\uf793'
+    '\uf794'
+    '\uf795'
+    '\uf796'
+    '\uf797'
+    '\uf798'
+    '\uf799'
+    '\uf79a'
+    '\uf79b'
+    '\uf79c'
+    '\uf79d'
+    '\uf79e'
+    '\uf79f'
+    '\uf7a0'
+    '\uf7a1'
+    '\uf7a2'
+    '\uf7a3'
+    '\uf7a4'
+    '\uf7a5'
+    '\uf7a6'
+    '\uf7a7'
+    '\uf7a8'
+    '\uf7a9'
+    '\uf7aa'
+    '\uf7ab'
+    '\uf7ac'
+    '\uf7ad'
+    '\uf7ae'
+    '\uf7af'
+    '\uf7b0'
+    '\uf7b1'
+    '\uf7b2'
+    '\uf7b3'
+    '\uf7b4'
+    '\uf7b5'
+    '\uf7b6'
+    '\uf7b7'
+    '\uf7b8'
+    '\uf7b9'
+    '\uf7ba'
+    '\uf7bb'
+    '\uf7bc'
+    '\uf7bd'
+    '\uf7be'
+    '\uf7bf'
+    '\uf7c0'
+    '\uf7c1'
+    '\uf7c2'
+    '\uf7c3'
+    '\uf7c4'
+    '\uf7c5'
+    '\uf7c6'
+    '\uf7c7'
+    '\uf7c8'
+    '\uf7c9'
+    '\uf7ca'
+    '\uf7cb'
+    '\uf7cc'
+    '\uf7cd'
+    '\uf7ce'
+    '\uf7cf'
+    '\uf7d0'
+    '\uf7d1'
+    '\uf7d2'
+    '\uf7d3'
+    '\uf7d4'
+    '\uf7d5'
+    '\uf7d6'
+    '\uf7d7'
+    '\uf7d8'
+    '\uf7d9'
+    '\uf7da'
+    '\uf7db'
+    '\uf7dc'
+    '\uf7dd'
+    '\uf7de'
+    '\uf7df'
+    '\uf7e0'
+    '\uf7e1'
+    '\uf7e2'
+    '\uf7e3'
+    '\uf7e4'
+    '\uf7e5'
+    '\uf7e6'
+    '\uf7e7'
+    '\uf7e8'
+    '\uf7e9'
+    '\uf7ea'
+    '\uf7eb'
+    '\uf7ec'
+    '\uf7ed'
+    '\uf7ee'
+    '\uf7ef'
+    '\uf7f0'
+    '\uf7f1'
+    '\uf7f2'
+    '\uf7f3'
+    '\uf7f4'
+    '\uf7f5'
+    '\uf7f6'
+    '\uf7f7'
+    '\uf7f8'
+    '\uf7f9'
+    '\uf7fa'
+    '\uf7fb'
+    '\uf7fc'
+    '\uf7fd'
+    '\uf7fe'
+    '\uf7ff'
+)
+
+### Encoding table
+encoding_table = codecs.charmap_build(decoding_table)
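+
+
+# Editorial sketch, not part of upstream webencodings: x-user-defined maps
+# bytes 0x00-0x7F to themselves and 0x80-0xFF to U+F780-U+F7FF, so arbitrary
+# bytes round-trip losslessly through the codec defined above. Guarded under
+# __main__ so importing is unaffected.
+if __name__ == '__main__':
+    raw = bytes(bytearray(range(256)))
+    text = Codec().decode(raw)[0]
+    assert text[0x41] == 'A' and text[0x80] == '\uf780'
+    assert Codec().encode(text)[0] == raw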
diff --git a/lib/python3.7/site-packages/pkg_resources/__init__.py b/lib/python3.7/site-packages/pkg_resources/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..97e08d6827eb213be621da730e97b2b339290ea2
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/__init__.py
@@ -0,0 +1,3286 @@
+# coding: utf-8
+"""
+Package resource API
+--------------------
+
+A resource is a logical file contained within a package, or a logical
+subdirectory thereof.  The package resource API expects resource names
+to have their path parts separated with ``/``, *not* whatever the local
+path separator is.  Do not use os.path operations to manipulate resource
+names being passed into the API.
+
+The package resource API is designed to work with normal filesystem packages,
+.egg files, and unpacked .egg files.  It can also work in a limited way with
+.zip files and with custom PEP 302 loaders that support the ``get_data()``
+method.
+"""
+
+from __future__ import absolute_import
+
+import sys
+import os
+import io
+import time
+import re
+import types
+import zipfile
+import zipimport
+import warnings
+import stat
+import functools
+import pkgutil
+import operator
+import platform
+import collections
+import plistlib
+import email.parser
+import errno
+import tempfile
+import textwrap
+import itertools
+import inspect
+import ntpath
+import posixpath
+from pkgutil import get_importer
+
+try:
+    import _imp
+except ImportError:
+    # Python 3.2 compatibility
+    import imp as _imp
+
+try:
+    FileExistsError
+except NameError:
+    FileExistsError = OSError
+
+from pkg_resources.extern import six
+from pkg_resources.extern.six.moves import urllib, map, filter
+
+# capture these to bypass sandboxing
+from os import utime
+try:
+    from os import mkdir, rename, unlink
+    WRITE_SUPPORT = True
+except ImportError:
+    # no write support, probably under GAE
+    WRITE_SUPPORT = False
+
+from os import open as os_open
+from os.path import isdir, split
+
+try:
+    import importlib.machinery as importlib_machinery
+    # access attribute to force import under delayed import mechanisms.
+    importlib_machinery.__name__
+except ImportError:
+    importlib_machinery = None
+
+from . import py31compat
+from pkg_resources.extern import appdirs
+from pkg_resources.extern import packaging
+__import__('pkg_resources.extern.packaging.version')
+__import__('pkg_resources.extern.packaging.specifiers')
+__import__('pkg_resources.extern.packaging.requirements')
+__import__('pkg_resources.extern.packaging.markers')
+
+
+__metaclass__ = type
+
+
+if (3, 0) < sys.version_info < (3, 4):
+    raise RuntimeError("Python 3.4 or later is required")
+
+if six.PY2:
+    # Those builtin exceptions are only defined in Python 3
+    PermissionError = None
+    NotADirectoryError = None
+
+# declare some globals that will be defined later to
+# satisfy the linters.
+require = None
+working_set = None
+add_activation_listener = None
+resources_stream = None
+cleanup_resources = None
+resource_dir = None
+resource_stream = None
+set_extraction_path = None
+resource_isdir = None
+resource_string = None
+iter_entry_points = None
+resource_listdir = None
+resource_filename = None
+resource_exists = None
+_distribution_finders = None
+_namespace_handlers = None
+_namespace_packages = None
+
+
+class PEP440Warning(RuntimeWarning):
+    """
+    Used when there is an issue with a version or specifier not complying with
+    PEP 440.
+    """
+
+
+def parse_version(v):
+    try:
+        return packaging.version.Version(v)
+    except packaging.version.InvalidVersion:
+        return packaging.version.LegacyVersion(v)
+
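+
+# A minimal illustrative sketch (not part of the upstream module): parsed
+# versions compare with PEP 440 semantics, so "1.10" is newer than "1.9"
+# even though it sorts earlier as a plain string, and unparseable inputs
+# fall back to LegacyVersion instead of raising.
+def _demo_parse_version():
+    assert parse_version('1.10') > parse_version('1.9')
+    assert '1.10' < '1.9'  # plain string comparison gets this wrong
+    assert parse_version('1.0rc1') < parse_version('1.0')  # pre-release
+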
+
+_state_vars = {}
+
+
+def _declare_state(vartype, **kw):
+    globals().update(kw)
+    _state_vars.update(dict.fromkeys(kw, vartype))
+
+
+def __getstate__():
+    state = {}
+    g = globals()
+    for k, v in _state_vars.items():
+        state[k] = g['_sget_' + v](g[k])
+    return state
+
+
+def __setstate__(state):
+    g = globals()
+    for k, v in state.items():
+        g['_sset_' + _state_vars[k]](k, g[k], v)
+    return state
+
+
+def _sget_dict(val):
+    return val.copy()
+
+
+def _sset_dict(key, ob, state):
+    ob.clear()
+    ob.update(state)
+
+
+def _sget_object(val):
+    return val.__getstate__()
+
+
+def _sset_object(key, ob, state):
+    ob.__setstate__(state)
+
+
+_sget_none = _sset_none = lambda *args: None
+
+
+def get_supported_platform():
+    """Return this platform's maximum compatible version.
+
+    distutils.util.get_platform() normally reports the minimum version
+    of Mac OS X that would be required to *use* extensions produced by
+    distutils.  But what we want when checking compatibility is to know the
+    version of Mac OS X that we are *running*.  To allow usage of packages that
+    explicitly require a newer version of Mac OS X, we must also know the
+    current version of the OS.
+
+    If this condition occurs for any other platform with a version in its
+    platform strings, this function should be extended accordingly.
+    """
+    plat = get_build_platform()
+    m = macosVersionString.match(plat)
+    if m is not None and sys.platform == "darwin":
+        try:
+            plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
+        except ValueError:
+            # not Mac OS X
+            pass
+    return plat
+
+
+__all__ = [
+    # Basic resource access and distribution/entry point discovery
+    'require', 'run_script', 'get_provider', 'get_distribution',
+    'load_entry_point', 'get_entry_map', 'get_entry_info',
+    'iter_entry_points',
+    'resource_string', 'resource_stream', 'resource_filename',
+    'resource_listdir', 'resource_exists', 'resource_isdir',
+
+    # Environmental control
+    'declare_namespace', 'working_set', 'add_activation_listener',
+    'find_distributions', 'set_extraction_path', 'cleanup_resources',
+    'get_default_cache',
+
+    # Primary implementation classes
+    'Environment', 'WorkingSet', 'ResourceManager',
+    'Distribution', 'Requirement', 'EntryPoint',
+
+    # Exceptions
+    'ResolutionError', 'VersionConflict', 'DistributionNotFound',
+    'UnknownExtra', 'ExtractionError',
+
+    # Warnings
+    'PEP440Warning',
+
+    # Parsing functions and string utilities
+    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
+    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
+    'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
+
+    # filesystem utilities
+    'ensure_directory', 'normalize_path',
+
+    # Distribution "precedence" constants
+    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
+
+    # "Provider" interfaces, implementations, and registration/lookup APIs
+    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
+    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
+    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
+    'register_finder', 'register_namespace_handler', 'register_loader_type',
+    'fixup_namespace_packages', 'get_importer',
+
+    # Warnings
+    'PkgResourcesDeprecationWarning',
+
+    # Deprecated/backward compatibility only
+    'run_main', 'AvailableDistributions',
+]
+
+
+class ResolutionError(Exception):
+    """Abstract base for dependency resolution errors"""
+
+    def __repr__(self):
+        return self.__class__.__name__ + repr(self.args)
+
+
+class VersionConflict(ResolutionError):
+    """
+    An already-installed version conflicts with the requested version.
+
+    Should be initialized with the installed Distribution and the requested
+    Requirement.
+    """
+
+    _template = "{self.dist} is installed but {self.req} is required"
+
+    @property
+    def dist(self):
+        return self.args[0]
+
+    @property
+    def req(self):
+        return self.args[1]
+
+    def report(self):
+        return self._template.format(**locals())
+
+    def with_context(self, required_by):
+        """
+        If required_by is non-empty, return a version of self that is a
+        ContextualVersionConflict.
+        """
+        if not required_by:
+            return self
+        args = self.args + (required_by,)
+        return ContextualVersionConflict(*args)
+
+
+class ContextualVersionConflict(VersionConflict):
+    """
+    A VersionConflict that accepts a third parameter, the set of the
+    requirements that required the installed Distribution.
+    """
+
+    _template = VersionConflict._template + ' by {self.required_by}'
+
+    @property
+    def required_by(self):
+        return self.args[2]
+
+
+class DistributionNotFound(ResolutionError):
+    """A requested distribution was not found"""
+
+    _template = ("The '{self.req}' distribution was not found "
+                 "and is required by {self.requirers_str}")
+
+    @property
+    def req(self):
+        return self.args[0]
+
+    @property
+    def requirers(self):
+        return self.args[1]
+
+    @property
+    def requirers_str(self):
+        if not self.requirers:
+            return 'the application'
+        return ', '.join(self.requirers)
+
+    def report(self):
+        return self._template.format(**locals())
+
+    def __str__(self):
+        return self.report()
+
+
+class UnknownExtra(ResolutionError):
+    """Distribution doesn't have an "extra feature" of the given name"""
+
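+
+# A short sketch of how these exceptions render; the args are normally a
+# Distribution and a Requirement, but plain strings keep the illustration
+# self-contained.
+def _demo_resolution_errors():
+    vc = VersionConflict('foo 1.0', 'foo>=2.0')
+    assert vc.report() == 'foo 1.0 is installed but foo>=2.0 is required'
+    # with_context() upgrades to ContextualVersionConflict once the set of
+    # requirers is known
+    assert isinstance(vc.with_context({'bar'}), ContextualVersionConflict)
+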
+
+_provider_factories = {}
+
+PY_MAJOR = sys.version[:3]
+EGG_DIST = 3
+BINARY_DIST = 2
+SOURCE_DIST = 1
+CHECKOUT_DIST = 0
+DEVELOP_DIST = -1
+
+
+def register_loader_type(loader_type, provider_factory):
+    """Register `provider_factory` to make providers for `loader_type`
+
+    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
+    and `provider_factory` is a function that, passed a *module* object,
+    returns an ``IResourceProvider`` for that module.
+    """
+    _provider_factories[loader_type] = provider_factory
+
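+
+# A minimal sketch of the registration pattern this enables; the loader
+# class and factory below are hypothetical (see the real registrations for
+# NullProvider and DefaultProvider further down in this module).
+def _demo_register_loader_type():
+    class HypotheticalLoader:
+        pass
+
+    def provider_factory(module):
+        # NullProvider is defined later in this module
+        return NullProvider(module)
+
+    register_loader_type(HypotheticalLoader, provider_factory)
+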
+
+def get_provider(moduleOrReq):
+    """Return an IResourceProvider for the named module or requirement"""
+    if isinstance(moduleOrReq, Requirement):
+        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
+    try:
+        module = sys.modules[moduleOrReq]
+    except KeyError:
+        __import__(moduleOrReq)
+        module = sys.modules[moduleOrReq]
+    loader = getattr(module, '__loader__', None)
+    return _find_adapter(_provider_factories, loader)(module)
+
+
+def _macosx_vers(_cache=[]):
+    if not _cache:
+        version = platform.mac_ver()[0]
+        # fallback for MacPorts
+        if version == '':
+            plist = '/System/Library/CoreServices/SystemVersion.plist'
+            if os.path.exists(plist):
+                if hasattr(plistlib, 'readPlist'):
+                    plist_content = plistlib.readPlist(plist)
+                    if 'ProductVersion' in plist_content:
+                        version = plist_content['ProductVersion']
+
+        _cache.append(version.split('.'))
+    return _cache[0]
+
+
+def _macosx_arch(machine):
+    return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
+
+
+def get_build_platform():
+    """Return this platform's string for platform-specific distributions
+
+    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
+    needs some hacks for Linux and Mac OS X.
+    """
+    from sysconfig import get_platform
+
+    plat = get_platform()
+    if sys.platform == "darwin" and not plat.startswith('macosx-'):
+        try:
+            version = _macosx_vers()
+            machine = os.uname()[4].replace(" ", "_")
+            return "macosx-%d.%d-%s" % (
+                int(version[0]), int(version[1]),
+                _macosx_arch(machine),
+            )
+        except ValueError:
+            # if someone is running a non-Mac darwin system, this will fall
+            # through to the default implementation
+            pass
+    return plat
+
+
+macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
+darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
+# XXX backward compat
+get_platform = get_build_platform
+
+
+def compatible_platforms(provided, required):
+    """Can code for the `provided` platform run on the `required` platform?
+
+    Returns true if either platform is ``None``, or the platforms are equal.
+
+    XXX Needs compatibility checks for Linux and other unixy OSes.
+    """
+    if provided is None or required is None or provided == required:
+        # easy case
+        return True
+
+    # Mac OS X special cases
+    reqMac = macosVersionString.match(required)
+    if reqMac:
+        provMac = macosVersionString.match(provided)
+
+        # is this a Mac package?
+        if not provMac:
+            # this is backwards compatibility for packages built before
+            # setuptools 0.6. All packages built after this point will
+            # use the new macosx designation.
+            provDarwin = darwinVersionString.match(provided)
+            if provDarwin:
+                dversion = int(provDarwin.group(1))
+                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
+                if dversion == 7 and macosversion >= "10.3" or \
+                        dversion == 8 and macosversion >= "10.4":
+                    return True
+            # egg isn't macosx or legacy darwin
+            return False
+
+        # are they the same major version and machine type?
+        if provMac.group(1) != reqMac.group(1) or \
+                provMac.group(3) != reqMac.group(3):
+            return False
+
+        # is the required OS major update >= the provided one?
+        if int(provMac.group(2)) > int(reqMac.group(2)):
+            return False
+
+        return True
+
+    # XXX Linux and other platforms' special cases should go here
+    return False
+
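+
+# A concrete sketch of the macosx rule above: same major version and
+# machine type, and the *required* minor version must be at least the
+# *provided* one.
+def _demo_compatible_platforms():
+    # an egg built on 10.9 can run on a 10.12 host ...
+    assert compatible_platforms('macosx-10.9-x86_64', 'macosx-10.12-x86_64')
+    # ... but not the other way around
+    assert not compatible_platforms('macosx-10.12-x86_64', 'macosx-10.9-x86_64')
+    # None on either side always matches
+    assert compatible_platforms(None, 'macosx-10.12-x86_64')
+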
+
+def run_script(dist_spec, script_name):
+    """Locate distribution `dist_spec` and run its `script_name` script"""
+    ns = sys._getframe(1).f_globals
+    name = ns['__name__']
+    ns.clear()
+    ns['__name__'] = name
+    require(dist_spec)[0].run_script(script_name, ns)
+
+
+# backward compatibility
+run_main = run_script
+
+
+def get_distribution(dist):
+    """Return a current distribution object for a Requirement or string"""
+    if isinstance(dist, six.string_types):
+        dist = Requirement.parse(dist)
+    if isinstance(dist, Requirement):
+        dist = get_provider(dist)
+    if not isinstance(dist, Distribution):
+        raise TypeError("Expected string, Requirement, or Distribution", dist)
+    return dist
+
+
+def load_entry_point(dist, group, name):
+    """Return `name` entry point of `group` for `dist` or raise ImportError"""
+    return get_distribution(dist).load_entry_point(group, name)
+
+
+def get_entry_map(dist, group=None):
+    """Return the entry point map for `group`, or the full entry map"""
+    return get_distribution(dist).get_entry_map(group)
+
+
+def get_entry_info(dist, group, name):
+    """Return the EntryPoint object for `group`+`name`, or ``None``"""
+    return get_distribution(dist).get_entry_info(group, name)
+
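+
+# A minimal usage sketch for the helpers above, assuming a distribution
+# named 'pip' with its usual 'pip' console script is installed; any
+# installed distribution with entry points would do.
+def _demo_entry_point_helpers():
+    dist = get_distribution('pip')
+    if get_entry_info(dist, 'console_scripts', 'pip') is not None:
+        main = load_entry_point(dist, 'console_scripts', 'pip')
+        assert callable(main)
+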
+
+class IMetadataProvider:
+    def has_metadata(name):
+        """Does the package's distribution contain the named metadata?"""
+
+    def get_metadata(name):
+        """The named metadata resource as a string"""
+
+    def get_metadata_lines(name):
+        """Yield named metadata resource as list of non-blank non-comment lines
+
+        Leading and trailing whitespace is stripped from each line, and lines
+        with ``#`` as the first non-blank character are omitted."""
+
+    def metadata_isdir(name):
+        """Is the named metadata a directory?  (like ``os.path.isdir()``)"""
+
+    def metadata_listdir(name):
+        """List of metadata names in the directory (like ``os.listdir()``)"""
+
+    def run_script(script_name, namespace):
+        """Execute the named script in the supplied namespace dictionary"""
+
+
+class IResourceProvider(IMetadataProvider):
+    """An object that provides access to package resources"""
+
+    def get_resource_filename(manager, resource_name):
+        """Return a true filesystem path for `resource_name`
+
+        `manager` must be an ``IResourceManager``"""
+
+    def get_resource_stream(manager, resource_name):
+        """Return a readable file-like object for `resource_name`
+
+        `manager` must be an ``IResourceManager``"""
+
+    def get_resource_string(manager, resource_name):
+        """Return a string containing the contents of `resource_name`
+
+        `manager` must be an ``IResourceManager``"""
+
+    def has_resource(resource_name):
+        """Does the package contain the named resource?"""
+
+    def resource_isdir(resource_name):
+        """Is the named resource a directory?  (like ``os.path.isdir()``)"""
+
+    def resource_listdir(resource_name):
+        """List of resource names in the directory (like ``os.listdir()``)"""
+
+
+class WorkingSet:
+    """A collection of active distributions on sys.path (or a similar list)"""
+
+    def __init__(self, entries=None):
+        """Create working set from list of path entries (default=sys.path)"""
+        self.entries = []
+        self.entry_keys = {}
+        self.by_key = {}
+        self.callbacks = []
+
+        if entries is None:
+            entries = sys.path
+
+        for entry in entries:
+            self.add_entry(entry)
+
+    @classmethod
+    def _build_master(cls):
+        """
+        Prepare the master working set.
+        """
+        ws = cls()
+        try:
+            from __main__ import __requires__
+        except ImportError:
+            # The main program does not list any requirements
+            return ws
+
+        # ensure the requirements are met
+        try:
+            ws.require(__requires__)
+        except VersionConflict:
+            return cls._build_from_requirements(__requires__)
+
+        return ws
+
+    @classmethod
+    def _build_from_requirements(cls, req_spec):
+        """
+        Build a working set from a requirement spec. Rewrites sys.path.
+        """
+        # try it without defaults already on sys.path
+        # by starting with an empty path
+        ws = cls([])
+        reqs = parse_requirements(req_spec)
+        dists = ws.resolve(reqs, Environment())
+        for dist in dists:
+            ws.add(dist)
+
+        # add any missing entries from sys.path
+        for entry in sys.path:
+            if entry not in ws.entries:
+                ws.add_entry(entry)
+
+        # then copy back to sys.path
+        sys.path[:] = ws.entries
+        return ws
+
+    def add_entry(self, entry):
+        """Add a path item to ``.entries``, finding any distributions on it
+
+        ``find_distributions(entry, True)`` is used to find distributions
+        corresponding to the path entry, and they are added.  `entry` is
+        always appended to ``.entries``, even if it is already present.
+        (This is because ``sys.path`` can contain the same value more than
+        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
+        equal ``sys.path``.)
+        """
+        self.entry_keys.setdefault(entry, [])
+        self.entries.append(entry)
+        for dist in find_distributions(entry, True):
+            self.add(dist, entry, False)
+
+    def __contains__(self, dist):
+        """True if `dist` is the active distribution for its project"""
+        return self.by_key.get(dist.key) == dist
+
+    def find(self, req):
+        """Find a distribution matching requirement `req`
+
+        If there is an active distribution for the requested project, this
+        returns it as long as it meets the version requirement specified by
+        `req`.  But, if there is an active distribution for the project and it
+        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
+        If there is no active distribution for the requested project, ``None``
+        is returned.
+        """
+        dist = self.by_key.get(req.key)
+        if dist is not None and dist not in req:
+            # XXX add more info
+            raise VersionConflict(dist, req)
+        return dist
+
+    def iter_entry_points(self, group, name=None):
+        """Yield entry point objects from `group` matching `name`
+
+        If `name` is None, yields all entry points in `group` from all
+        distributions in the working set, otherwise only ones matching
+        both `group` and `name` are yielded (in distribution order).
+        """
+        return (
+            entry
+            for dist in self
+            for entry in dist.get_entry_map(group).values()
+            if name is None or name == entry.name
+        )
+
+    def run_script(self, requires, script_name):
+        """Locate distribution for `requires` and run `script_name` script"""
+        ns = sys._getframe(1).f_globals
+        name = ns['__name__']
+        ns.clear()
+        ns['__name__'] = name
+        self.require(requires)[0].run_script(script_name, ns)
+
+    def __iter__(self):
+        """Yield distributions for non-duplicate projects in the working set
+
+        The yield order is the order in which the items' path entries were
+        added to the working set.
+        """
+        seen = {}
+        for item in self.entries:
+            if item not in self.entry_keys:
+                # workaround a cache issue
+                continue
+
+            for key in self.entry_keys[item]:
+                if key not in seen:
+                    seen[key] = 1
+                    yield self.by_key[key]
+
+    def add(self, dist, entry=None, insert=True, replace=False):
+        """Add `dist` to working set, associated with `entry`
+
+        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
+        On exit from this routine, `entry` is added to the end of the working
+        set's ``.entries`` (if it wasn't already present).
+
+        `dist` is only added to the working set if it's for a project that
+        doesn't already have a distribution in the set, unless `replace=True`.
+        If it's added, any callbacks registered with the ``subscribe()`` method
+        will be called.
+        """
+        if insert:
+            dist.insert_on(self.entries, entry, replace=replace)
+
+        if entry is None:
+            entry = dist.location
+        keys = self.entry_keys.setdefault(entry, [])
+        keys2 = self.entry_keys.setdefault(dist.location, [])
+        if not replace and dist.key in self.by_key:
+            # ignore hidden distros
+            return
+
+        self.by_key[dist.key] = dist
+        if dist.key not in keys:
+            keys.append(dist.key)
+        if dist.key not in keys2:
+            keys2.append(dist.key)
+        self._added_new(dist)
+
+    def resolve(self, requirements, env=None, installer=None,
+                replace_conflicting=False, extras=None):
+        """List all distributions needed to (recursively) meet `requirements`
+
+        `requirements` must be a sequence of ``Requirement`` objects.  `env`,
+        if supplied, should be an ``Environment`` instance.  If
+        not supplied, it defaults to all distributions available within any
+        entry or distribution in the working set.  `installer`, if supplied,
+        will be invoked with each requirement that cannot be met by an
+        already-installed distribution; it should return a ``Distribution`` or
+        ``None``.
+
+        Unless `replace_conflicting=True`, raises a VersionConflict exception
+        if any requirements are found on the path that have the correct name
+        but the wrong version.  Otherwise, if an `installer` is supplied it
+        will be invoked to obtain the correct version of the requirement and
+        activate it.
+
+        `extras` is a list of the extras to be used with these requirements.
+        This is important because extra requirements may look like `my_req;
+        extra = "my_extra"`, which would otherwise be interpreted as a purely
+        optional requirement.  Instead, we want to be able to assert that these
+        requirements are truly required.
+        """
+
+        # set up the stack
+        requirements = list(requirements)[::-1]
+        # set of processed requirements
+        processed = {}
+        # key -> dist
+        best = {}
+        to_activate = []
+
+        req_extras = _ReqExtras()
+
+        # Mapping of requirement to set of distributions that required it;
+        # useful for reporting info about conflicts.
+        required_by = collections.defaultdict(set)
+
+        while requirements:
+            # process dependencies breadth-first
+            req = requirements.pop(0)
+            if req in processed:
+                # Ignore cyclic or redundant dependencies
+                continue
+
+            if not req_extras.markers_pass(req, extras):
+                continue
+
+            dist = best.get(req.key)
+            if dist is None:
+                # Find the best distribution and add it to the map
+                dist = self.by_key.get(req.key)
+                if dist is None or (dist not in req and replace_conflicting):
+                    ws = self
+                    if env is None:
+                        if dist is None:
+                            env = Environment(self.entries)
+                        else:
+                            # Use an empty environment and workingset to avoid
+                            # any further conflicts with the conflicting
+                            # distribution
+                            env = Environment([])
+                            ws = WorkingSet([])
+                    dist = best[req.key] = env.best_match(
+                        req, ws, installer,
+                        replace_conflicting=replace_conflicting
+                    )
+                    if dist is None:
+                        requirers = required_by.get(req, None)
+                        raise DistributionNotFound(req, requirers)
+                to_activate.append(dist)
+            if dist not in req:
+                # Oops, the "best" so far conflicts with a dependency
+                dependent_req = required_by[req]
+                raise VersionConflict(dist, req).with_context(dependent_req)
+
+            # push the new requirements onto the stack
+            new_requirements = dist.requires(req.extras)[::-1]
+            requirements.extend(new_requirements)
+
+            # Register the new requirements needed by req
+            for new_requirement in new_requirements:
+                required_by[new_requirement].add(req.project_name)
+                req_extras[new_requirement] = req.extras
+
+            processed[req] = True
+
+        # return list of distros to activate
+        return to_activate
+
+    def find_plugins(
+            self, plugin_env, full_env=None, installer=None, fallback=True):
+        """Find all activatable distributions in `plugin_env`
+
+        Example usage::
+
+            distributions, errors = working_set.find_plugins(
+                Environment(plugin_dirlist)
+            )
+            # add plugins+libs to sys.path
+            map(working_set.add, distributions)
+            # display errors
+            print('Could not load', errors)
+
+        The `plugin_env` should be an ``Environment`` instance that contains
+        only distributions that are in the project's "plugin directory" or
+        directories. The `full_env`, if supplied, should be an ``Environment``
+        that contains all currently-available distributions.  If `full_env` is
+        not
+        supplied, one is created automatically from the ``WorkingSet`` this
+        method is called on, which will typically mean that every directory on
+        ``sys.path`` will be scanned for distributions.
+
+        `installer` is a standard installer callback as used by the
+        ``resolve()`` method. The `fallback` flag indicates whether we should
+        attempt to resolve older versions of a plugin if the newest version
+        cannot be resolved.
+
+        This method returns a 2-tuple: (`distributions`, `error_info`), where
+        `distributions` is a list of the distributions found in `plugin_env`
+        that were loadable, along with any other distributions that are needed
+        to resolve their dependencies.  `error_info` is a dictionary mapping
+        unloadable plugin distributions to an exception instance describing the
+        error that occurred. Usually this will be a ``DistributionNotFound`` or
+        ``VersionConflict`` instance.
+        """
+
+        plugin_projects = list(plugin_env)
+        # scan project names in alphabetic order
+        plugin_projects.sort()
+
+        error_info = {}
+        distributions = {}
+
+        if full_env is None:
+            env = Environment(self.entries)
+            env += plugin_env
+        else:
+            env = full_env + plugin_env
+
+        shadow_set = self.__class__([])
+        # put all our entries in shadow_set
+        list(map(shadow_set.add, self))
+
+        for project_name in plugin_projects:
+
+            for dist in plugin_env[project_name]:
+
+                req = [dist.as_requirement()]
+
+                try:
+                    resolvees = shadow_set.resolve(req, env, installer)
+
+                except ResolutionError as v:
+                    # save error info
+                    error_info[dist] = v
+                    if fallback:
+                        # try the next older version of project
+                        continue
+                    else:
+                        # give up on this project, keep going
+                        break
+
+                else:
+                    list(map(shadow_set.add, resolvees))
+                    distributions.update(dict.fromkeys(resolvees))
+
+                    # success, no need to try any more versions of this project
+                    break
+
+        distributions = list(distributions)
+        distributions.sort()
+
+        return distributions, error_info
+
+    def require(self, *requirements):
+        """Ensure that distributions matching `requirements` are activated
+
+        `requirements` must be a string or a (possibly-nested) sequence
+        thereof, specifying the distributions and versions required.  The
+        return value is a sequence of the distributions that needed to be
+        activated to fulfill the requirements; all relevant distributions are
+        included, even if they were already activated in this working set.
+        """
+        needed = self.resolve(parse_requirements(requirements))
+
+        for dist in needed:
+            self.add(dist)
+
+        return needed
+
+    def subscribe(self, callback, existing=True):
+        """Invoke `callback` for all distributions
+
+        If `existing=True` (the default), the callback is also invoked
+        immediately for each distribution already in the working set.
+        """
+        if callback in self.callbacks:
+            return
+        self.callbacks.append(callback)
+        if not existing:
+            return
+        for dist in self:
+            callback(dist)
+
+    def _added_new(self, dist):
+        for callback in self.callbacks:
+            callback(dist)
+
+    def __getstate__(self):
+        return (
+            self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
+            self.callbacks[:]
+        )
+
+    def __setstate__(self, e_k_b_c):
+        entries, keys, by_key, callbacks = e_k_b_c
+        self.entries = entries[:]
+        self.entry_keys = keys.copy()
+        self.by_key = by_key.copy()
+        self.callbacks = callbacks[:]
+
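+
+# A minimal usage sketch for WorkingSet, assuming the 'setuptools'
+# distribution is importable from sys.path.
+def _demo_working_set():
+    ws = WorkingSet()  # snapshot of sys.path
+    dists = ws.require('setuptools')  # resolve and activate
+    assert any(dist.key == 'setuptools' for dist in dists)
+    # subscribe() invokes the callback for already-active distributions too
+    seen = []
+    ws.subscribe(seen.append)
+    assert len(seen) >= 1
+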
+
+class _ReqExtras(dict):
+    """
+    Map each requirement to the extras that demanded it.
+    """
+
+    def markers_pass(self, req, extras=None):
+        """
+        Evaluate markers for req against each extra that
+        demanded it.
+
+        Return False if the req has a marker and fails
+        evaluation. Otherwise, return True.
+        """
+        extra_evals = (
+            req.marker.evaluate({'extra': extra})
+            for extra in self.get(req, ()) + (extras or (None,))
+        )
+        return not req.marker or any(extra_evals)
+
+
+class Environment:
+    """Searchable snapshot of distributions on a search path"""
+
+    def __init__(
+            self, search_path=None, platform=get_supported_platform(),
+            python=PY_MAJOR):
+        """Snapshot distributions available on a search path
+
+        Any distributions found on `search_path` are added to the environment.
+        `search_path` should be a sequence of ``sys.path`` items.  If not
+        supplied, ``sys.path`` is used.
+
+        `platform` is an optional string specifying the name of the platform
+        that platform-specific distributions must be compatible with.  If
+        unspecified, it defaults to the current platform.  `python` is an
+        optional string naming the desired version of Python (e.g. ``'3.6'``);
+        it defaults to the current version.
+
+        You may explicitly set `platform` (and/or `python`) to ``None`` if you
+        wish to map *all* distributions, not just those compatible with the
+        running platform or Python version.
+        """
+        self._distmap = {}
+        self.platform = platform
+        self.python = python
+        self.scan(search_path)
+
+    def can_add(self, dist):
+        """Is distribution `dist` acceptable for this environment?
+
+        The distribution must match the platform and python version
+        requirements specified when this environment was created, or False
+        is returned.
+        """
+        py_compat = (
+            self.python is None
+            or dist.py_version is None
+            or dist.py_version == self.python
+        )
+        return py_compat and compatible_platforms(dist.platform, self.platform)
+
+    def remove(self, dist):
+        """Remove `dist` from the environment"""
+        self._distmap[dist.key].remove(dist)
+
+    def scan(self, search_path=None):
+        """Scan `search_path` for distributions usable in this environment
+
+        Any distributions found are added to the environment.
+        `search_path` should be a sequence of ``sys.path`` items.  If not
+        supplied, ``sys.path`` is used.  Only distributions conforming to
+        the platform/python version defined at initialization are added.
+        """
+        if search_path is None:
+            search_path = sys.path
+
+        for item in search_path:
+            for dist in find_distributions(item):
+                self.add(dist)
+
+    def __getitem__(self, project_name):
+        """Return a newest-to-oldest list of distributions for `project_name`
+
+        Uses case-insensitive `project_name` comparison, assuming all the
+        project's distributions use their project's name converted to all
+        lowercase as their key.
+
+        """
+        distribution_key = project_name.lower()
+        return self._distmap.get(distribution_key, [])
+
+    def add(self, dist):
+        """Add `dist` if we ``can_add()`` it and it has not already been added
+        """
+        if self.can_add(dist) and dist.has_version():
+            dists = self._distmap.setdefault(dist.key, [])
+            if dist not in dists:
+                dists.append(dist)
+                dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
+
+    def best_match(
+            self, req, working_set, installer=None, replace_conflicting=False):
+        """Find distribution best matching `req` and usable on `working_set`
+
+        This calls the ``find(req)`` method of the `working_set` to see if a
+        suitable distribution is already active.  (This may raise
+        ``VersionConflict`` if an unsuitable version of the project is already
+        active in the specified `working_set`.)  If a suitable distribution
+        isn't active, this method returns the newest distribution in the
+        environment that meets the ``Requirement`` in `req`.  If no suitable
+        distribution is found, and `installer` is supplied, then the result of
+        calling the environment's ``obtain(req, installer)`` method will be
+        returned.
+        """
+        try:
+            dist = working_set.find(req)
+        except VersionConflict:
+            if not replace_conflicting:
+                raise
+            dist = None
+        if dist is not None:
+            return dist
+        for dist in self[req.key]:
+            if dist in req:
+                return dist
+        # try to download/install
+        return self.obtain(req, installer)
+
+    def obtain(self, requirement, installer=None):
+        """Obtain a distribution matching `requirement` (e.g. via download)
+
+        In the base ``Environment`` class, this routine simply returns
+        ``installer(requirement)``, unless `installer` is None, in which case
+        None is returned instead.  This method is a hook that allows subclasses
+        to attempt other ways of obtaining a distribution before falling back
+        to the `installer` argument."""
+        if installer is not None:
+            return installer(requirement)
+
+    def __iter__(self):
+        """Yield the unique project names of the available distributions"""
+        for key in self._distmap.keys():
+            if self[key]:
+                yield key
+
+    def __iadd__(self, other):
+        """In-place addition of a distribution or environment"""
+        if isinstance(other, Distribution):
+            self.add(other)
+        elif isinstance(other, Environment):
+            for project in other:
+                for dist in other[project]:
+                    self.add(dist)
+        else:
+            raise TypeError("Can't add %r to environment" % (other,))
+        return self
+
+    def __add__(self, other):
+        """Add an environment or distribution to an environment"""
+        new = self.__class__([], platform=None, python=None)
+        for env in self, other:
+            new += env
+        return new
+
+
+# XXX backward compatibility
+AvailableDistributions = Environment
+
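+
+# A minimal usage sketch for Environment; 'plugins' is a hypothetical
+# directory, and 'setuptools' stands in for any resolvable project.
+def _demo_environment(plugin_dir='plugins'):
+    env = Environment([plugin_dir])  # snapshot one directory
+    env += Environment(sys.path)     # merge in everything on sys.path
+    for req in parse_requirements(['setuptools']):
+        # newest compatible distribution, or None if nothing matches
+        return env.best_match(req, WorkingSet([]))
+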
+
+class ExtractionError(RuntimeError):
+    """An error occurred extracting a resource
+
+    The following attributes are available from instances of this exception:
+
+    manager
+        The resource manager that raised this exception
+
+    cache_path
+        The base directory for resource extraction
+
+    original_error
+        The exception instance that caused extraction to fail
+    """
+
+
+class ResourceManager:
+    """Manage resource extraction and packages"""
+    extraction_path = None
+
+    def __init__(self):
+        self.cached_files = {}
+
+    def resource_exists(self, package_or_requirement, resource_name):
+        """Does the named resource exist?"""
+        return get_provider(package_or_requirement).has_resource(resource_name)
+
+    def resource_isdir(self, package_or_requirement, resource_name):
+        """Is the named resource an existing directory?"""
+        return get_provider(package_or_requirement).resource_isdir(
+            resource_name
+        )
+
+    def resource_filename(self, package_or_requirement, resource_name):
+        """Return a true filesystem path for specified resource"""
+        return get_provider(package_or_requirement).get_resource_filename(
+            self, resource_name
+        )
+
+    def resource_stream(self, package_or_requirement, resource_name):
+        """Return a readable file-like object for specified resource"""
+        return get_provider(package_or_requirement).get_resource_stream(
+            self, resource_name
+        )
+
+    def resource_string(self, package_or_requirement, resource_name):
+        """Return specified resource as a string"""
+        return get_provider(package_or_requirement).get_resource_string(
+            self, resource_name
+        )
+
+    def resource_listdir(self, package_or_requirement, resource_name):
+        """List the contents of the named resource directory"""
+        return get_provider(package_or_requirement).resource_listdir(
+            resource_name
+        )
+
+    def extraction_error(self):
+        """Give an error message for problems extracting file(s)"""
+
+        old_exc = sys.exc_info()[1]
+        cache_path = self.extraction_path or get_default_cache()
+
+        tmpl = textwrap.dedent("""
+            Can't extract file(s) to egg cache
+
+            The following error occurred while trying to extract file(s)
+            to the Python egg cache:
+
+              {old_exc}
+
+            The Python egg cache directory is currently set to:
+
+              {cache_path}
+
+            Perhaps your account does not have write access to this directory?
+            You can change the cache directory by setting the PYTHON_EGG_CACHE
+            environment variable to point to an accessible directory.
+            """).lstrip()
+        err = ExtractionError(tmpl.format(**locals()))
+        err.manager = self
+        err.cache_path = cache_path
+        err.original_error = old_exc
+        raise err
+
+    def get_cache_path(self, archive_name, names=()):
+        """Return absolute location in cache for `archive_name` and `names`
+
+        The parent directory of the resulting path will be created if it does
+        not already exist.  `archive_name` should be the base filename of the
+        enclosing egg (which may not be the name of the enclosing zipfile!),
+        including its ".egg" extension.  `names`, if provided, should be a
+        sequence of path name parts "under" the egg's extraction location.
+
+        This method should only be called by resource providers that need to
+        obtain an extraction location, and only for names they intend to
+        extract, as it tracks the generated names for possible cleanup later.
+        """
+        extract_path = self.extraction_path or get_default_cache()
+        target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
+        try:
+            _bypass_ensure_directory(target_path)
+        except Exception:
+            self.extraction_error()
+
+        self._warn_unsafe_extraction_path(extract_path)
+
+        self.cached_files[target_path] = 1
+        return target_path
+
+    @staticmethod
+    def _warn_unsafe_extraction_path(path):
+        """
+        If the default extraction path is overridden and set to an insecure
+        location, such as /tmp, it opens up an opportunity for an attacker to
+        replace an extracted file with an unauthorized payload. Warn the user
+        if a known insecure location is used.
+
+        See Distribute #375 for more details.
+        """
+        if os.name == 'nt' and not path.startswith(os.environ['windir']):
+            # On Windows, permissions are generally restrictive by default
+            #  and temp directories are not writable by other users, so
+            #  bypass the warning.
+            return
+        mode = os.stat(path).st_mode
+        if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
+            msg = (
+                "%s is writable by group/others and vulnerable to attack "
+                "when used with get_resource_filename. Consider a more "
+                "secure location (set with .set_extraction_path or the "
+                "PYTHON_EGG_CACHE environment variable)." % path
+            )
+            warnings.warn(msg, UserWarning)
+
+    def postprocess(self, tempname, filename):
+        """Perform any platform-specific postprocessing of `tempname`
+
+        This is where Mac header rewrites should be done; other platforms don't
+        have anything special they should do.
+
+        Resource providers should call this method ONLY after successfully
+        extracting a compressed resource.  They must NOT call it on resources
+        that are already in the filesystem.
+
+        `tempname` is the current (temporary) name of the file, and `filename`
+        is the name it will be renamed to by the caller after this routine
+        returns.
+        """
+
+        if os.name == 'posix':
+            # Make the resource executable
+            mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
+            os.chmod(tempname, mode)
+
+    def set_extraction_path(self, path):
+        """Set the base path where resources will be extracted to, if needed.
+
+        If you do not call this routine before any extractions take place, the
+        path defaults to the return value of ``get_default_cache()``.  (Which
+        is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
+        platform-specific fallbacks.  See that routine's documentation for more
+        details.)
+
+        Resources are extracted to subdirectories of this path based upon
+        information given by the ``IResourceProvider``.  You may set this to a
+        temporary directory, but then you must call ``cleanup_resources()`` to
+        delete the extracted files when done.  There is no guarantee that
+        ``cleanup_resources()`` will be able to remove all extracted files.
+
+        (Note: you may not change the extraction path for a given resource
+        manager once resources have been extracted, unless you first call
+        ``cleanup_resources()``.)
+        """
+        if self.cached_files:
+            raise ValueError(
+                "Can't change extraction path, files already extracted"
+            )
+
+        self.extraction_path = path
+
+    def cleanup_resources(self, force=False):
+        """
+        Delete all extracted resource files and directories, returning a list
+        of the file and directory names that could not be successfully removed.
+        This function does not have any concurrency protection, so it should
+        generally only be called when the extraction path is a temporary
+        directory exclusive to a single process.  This method is not
+        automatically called; you must call it explicitly or register it as an
+        ``atexit`` function if you wish to ensure cleanup of a temporary
+        directory used for extractions.
+        """
+        # XXX
+
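+
+# A minimal sketch of the extraction-path discipline described above:
+# point extractions at a private temporary directory and register
+# cleanup_resources() to run at interpreter exit.
+def _demo_extraction_setup():
+    import atexit
+    manager = ResourceManager()
+    cache_dir = tempfile.mkdtemp(prefix='demo-egg-cache-')
+    manager.set_extraction_path(cache_dir)  # before any extraction occurs
+    atexit.register(manager.cleanup_resources)
+    return manager
+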
+
+def get_default_cache():
+    """
+    Return the ``PYTHON_EGG_CACHE`` environment variable
+    or a platform-relevant user cache dir for an app
+    named "Python-Eggs".
+    """
+    return (
+        os.environ.get('PYTHON_EGG_CACHE')
+        or appdirs.user_cache_dir(appname='Python-Eggs')
+    )
+
+
+def safe_name(name):
+    """Convert an arbitrary string to a standard distribution name
+
+    Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+    """
+    return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+    """
+    Convert an arbitrary string to a standard version string
+    """
+    try:
+        # normalize the version
+        return str(packaging.version.Version(version))
+    except packaging.version.InvalidVersion:
+        version = version.replace(' ', '.')
+        return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def safe_extra(extra):
+    """Convert an arbitrary string to a standard 'extra' name
+
+    Any runs of non-alphanumeric characters are replaced with a single '_',
+    and the result is always lowercased.
+    """
+    return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
+
+
+def to_filename(name):
+    """Convert a project or version name to its filename-escaped form
+
+    Any '-' characters are currently replaced with '_'.
+    """
+    return name.replace('-', '_')
+
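+
+# A few concrete inputs and outputs for the normalization helpers above,
+# worked out from the regular expressions they wrap.
+def _demo_name_normalization():
+    assert safe_name('my_project') == 'my-project'
+    assert safe_version('1.0 beta') == '1.0.beta'  # the fallback path
+    assert safe_extra('Feature One') == 'feature_one'
+    assert to_filename('my-project') == 'my_project'
+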
+
+def invalid_marker(text):
+    """
+    Validate text as a PEP 508 environment marker; return an exception
+    if invalid or False otherwise.
+    """
+    try:
+        evaluate_marker(text)
+    except SyntaxError as e:
+        e.filename = None
+        e.lineno = None
+        return e
+    return False
+
+
+def evaluate_marker(text, extra=None):
+    """
+    Evaluate a PEP 508 environment marker.
+    Return a boolean indicating the marker result in this environment.
+    Raise SyntaxError if marker is invalid.
+
+    This implementation uses the 'pyparsing' module.
+    """
+    try:
+        marker = packaging.markers.Marker(text)
+        return marker.evaluate()
+    except packaging.markers.InvalidMarker as e:
+        raise SyntaxError(e)
+
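+
+# A minimal sketch of the two marker helpers: the first marker is valid
+# (its value depends on the running interpreter), the others show the
+# False-or-exception contract of invalid_marker().
+def _demo_markers():
+    assert evaluate_marker('python_version >= "2.7"') in (True, False)
+    assert invalid_marker('os_name == "posix"') is False
+    assert isinstance(invalid_marker('this is not a marker'), SyntaxError)
+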
+
+class NullProvider:
+    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
+
+    egg_name = None
+    egg_info = None
+    loader = None
+
+    def __init__(self, module):
+        self.loader = getattr(module, '__loader__', None)
+        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
+
+    def get_resource_filename(self, manager, resource_name):
+        return self._fn(self.module_path, resource_name)
+
+    def get_resource_stream(self, manager, resource_name):
+        return io.BytesIO(self.get_resource_string(manager, resource_name))
+
+    def get_resource_string(self, manager, resource_name):
+        return self._get(self._fn(self.module_path, resource_name))
+
+    def has_resource(self, resource_name):
+        return self._has(self._fn(self.module_path, resource_name))
+
+    def _get_metadata_path(self, name):
+        return self._fn(self.egg_info, name)
+
+    def has_metadata(self, name):
+        if not self.egg_info:
+            return self.egg_info
+
+        path = self._get_metadata_path(name)
+        return self._has(path)
+
+    def get_metadata(self, name):
+        if not self.egg_info:
+            return ""
+        value = self._get(self._fn(self.egg_info, name))
+        return value.decode('utf-8') if six.PY3 else value
+
+    def get_metadata_lines(self, name):
+        return yield_lines(self.get_metadata(name))
+
+    def resource_isdir(self, resource_name):
+        return self._isdir(self._fn(self.module_path, resource_name))
+
+    def metadata_isdir(self, name):
+        return self.egg_info and self._isdir(self._fn(self.egg_info, name))
+
+    def resource_listdir(self, resource_name):
+        return self._listdir(self._fn(self.module_path, resource_name))
+
+    def metadata_listdir(self, name):
+        if self.egg_info:
+            return self._listdir(self._fn(self.egg_info, name))
+        return []
+
+    def run_script(self, script_name, namespace):
+        script = 'scripts/' + script_name
+        if not self.has_metadata(script):
+            raise ResolutionError(
+                "Script {script!r} not found in metadata at {self.egg_info!r}"
+                .format(**locals()),
+            )
+        script_text = self.get_metadata(script).replace('\r\n', '\n')
+        script_text = script_text.replace('\r', '\n')
+        script_filename = self._fn(self.egg_info, script)
+        namespace['__file__'] = script_filename
+        if os.path.exists(script_filename):
+            source = open(script_filename).read()
+            code = compile(source, script_filename, 'exec')
+            exec(code, namespace, namespace)
+        else:
+            from linecache import cache
+            cache[script_filename] = (
+                len(script_text), 0, script_text.split('\n'), script_filename
+            )
+            script_code = compile(script_text, script_filename, 'exec')
+            exec(script_code, namespace, namespace)
+
+    def _has(self, path):
+        raise NotImplementedError(
+            "Can't perform this operation for unregistered loader type"
+        )
+
+    def _isdir(self, path):
+        raise NotImplementedError(
+            "Can't perform this operation for unregistered loader type"
+        )
+
+    def _listdir(self, path):
+        raise NotImplementedError(
+            "Can't perform this operation for unregistered loader type"
+        )
+
+    def _fn(self, base, resource_name):
+        self._validate_resource_path(resource_name)
+        if resource_name:
+            return os.path.join(base, *resource_name.split('/'))
+        return base
+
+    @staticmethod
+    def _validate_resource_path(path):
+        """
+        Validate the resource paths according to the docs.
+        https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access
+
+        >>> warned = getfixture('recwarn')
+        >>> warnings.simplefilter('always')
+        >>> vrp = NullProvider._validate_resource_path
+        >>> vrp('foo/bar.txt')
+        >>> bool(warned)
+        False
+        >>> vrp('../foo/bar.txt')
+        >>> bool(warned)
+        True
+        >>> warned.clear()
+        >>> vrp('/foo/bar.txt')
+        >>> bool(warned)
+        True
+        >>> vrp('foo/../../bar.txt')
+        >>> bool(warned)
+        True
+        >>> warned.clear()
+        >>> vrp('foo/f../bar.txt')
+        >>> bool(warned)
+        False
+
+        Windows path separators are straight-up disallowed.
+        >>> vrp(r'\\foo/bar.txt')
+        Traceback (most recent call last):
+        ...
+        ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+        >>> vrp(r'C:\\foo/bar.txt')
+        Traceback (most recent call last):
+        ...
+        ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+        Blank values are allowed
+
+        >>> vrp('')
+        >>> bool(warned)
+        False
+
+        Non-string values are not.
+
+        >>> vrp(None)
+        Traceback (most recent call last):
+        ...
+        AttributeError: ...
+        """
+        invalid = (
+            os.path.pardir in path.split(posixpath.sep) or
+            posixpath.isabs(path) or
+            ntpath.isabs(path)
+        )
+        if not invalid:
+            return
+
+        msg = "Use of .. or absolute path in a resource path is not allowed."
+
+        # Aggressively disallow Windows absolute paths
+        if ntpath.isabs(path) and not posixpath.isabs(path):
+            raise ValueError(msg)
+
+        # for compatibility, warn; in future
+        # raise ValueError(msg)
+        warnings.warn(
+            msg[:-1] + " and will raise exceptions in a future release.",
+            DeprecationWarning,
+            stacklevel=4,
+        )
+
+    def _get(self, path):
+        if hasattr(self.loader, 'get_data'):
+            return self.loader.get_data(path)
+        raise NotImplementedError(
+            "Can't perform this operation for loaders without 'get_data()'"
+        )
+
+
+register_loader_type(object, NullProvider)
+
+
+class EggProvider(NullProvider):
+    """Provider based on a virtual filesystem"""
+
+    def __init__(self, module):
+        NullProvider.__init__(self, module)
+        self._setup_prefix()
+
+    def _setup_prefix(self):
+        # we assume here that our metadata may be nested inside a "basket"
+        # of multiple eggs; that's why we use module_path instead of .archive
+        path = self.module_path
+        old = None
+        while path != old:
+            if _is_egg_path(path):
+                self.egg_name = os.path.basename(path)
+                self.egg_info = os.path.join(path, 'EGG-INFO')
+                self.egg_root = path
+                break
+            old = path
+            path, base = os.path.split(path)
+
+
+class DefaultProvider(EggProvider):
+    """Provides access to package resources in the filesystem"""
+
+    def _has(self, path):
+        return os.path.exists(path)
+
+    def _isdir(self, path):
+        return os.path.isdir(path)
+
+    def _listdir(self, path):
+        return os.listdir(path)
+
+    def get_resource_stream(self, manager, resource_name):
+        return open(self._fn(self.module_path, resource_name), 'rb')
+
+    def _get(self, path):
+        with open(path, 'rb') as stream:
+            return stream.read()
+
+    @classmethod
+    def _register(cls):
+        loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
+        for name in loader_names:
+            loader_cls = getattr(importlib_machinery, name, type(None))
+            register_loader_type(loader_cls, cls)
+
+
+DefaultProvider._register()
+
+
+class EmptyProvider(NullProvider):
+    """Provider that returns nothing for all requests"""
+
+    module_path = None
+
+    _isdir = _has = lambda self, path: False
+
+    def _get(self, path):
+        return ''
+
+    def _listdir(self, path):
+        return []
+
+    def __init__(self):
+        pass
+
+
+empty_provider = EmptyProvider()
+
+
+class ZipManifests(dict):
+    """
+    zip manifest builder
+    """
+
+    @classmethod
+    def build(cls, path):
+        """
+        Build a dictionary similar to the zipimport directory
+        caches, except instead of tuples, store ZipInfo objects.
+
+        Use a platform-specific path separator (os.sep) for the path keys
+        for compatibility with pypy on Windows.
+        """
+        with zipfile.ZipFile(path) as zfile:
+            items = (
+                (
+                    name.replace('/', os.sep),
+                    zfile.getinfo(name),
+                )
+                for name in zfile.namelist()
+            )
+            return dict(items)
+
+    load = build
+
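+
+# A minimal sketch of build(): keys use os.sep and values are ZipInfo
+# objects; the archive here is created on the fly just for illustration.
+def _demo_zip_manifest():
+    path = os.path.join(tempfile.mkdtemp(), 'demo.zip')
+    with zipfile.ZipFile(path, 'w') as zf:
+        zf.writestr('pkg/data.txt', 'hello')
+    manifest = ZipManifests.build(path)
+    assert manifest['pkg' + os.sep + 'data.txt'].file_size == 5
+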
+
+class MemoizedZipManifests(ZipManifests):
+    """
+    Memoized zipfile manifests.
+    """
+    manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
+
+    def load(self, path):
+        """
+        Load a manifest at path or return a suitable manifest already loaded.
+        """
+        path = os.path.normpath(path)
+        mtime = os.stat(path).st_mtime
+
+        if path not in self or self[path].mtime != mtime:
+            manifest = self.build(path)
+            self[path] = self.manifest_mod(manifest, mtime)
+
+        return self[path].manifest
+
+
+class ZipProvider(EggProvider):
+    """Resource support for zips and eggs"""
+
+    eagers = None
+    _zip_manifests = MemoizedZipManifests()
+
+    def __init__(self, module):
+        EggProvider.__init__(self, module)
+        self.zip_pre = self.loader.archive + os.sep
+
+    def _zipinfo_name(self, fspath):
+        # Convert a virtual filename (full path to file) into a zipfile subpath
+        # usable with the zipimport directory cache for our target archive
+        fspath = fspath.rstrip(os.sep)
+        if fspath == self.loader.archive:
+            return ''
+        if fspath.startswith(self.zip_pre):
+            return fspath[len(self.zip_pre):]
+        raise AssertionError(
+            "%s is not a subpath of %s" % (fspath, self.zip_pre)
+        )
+
+    def _parts(self, zip_path):
+        # Convert a zipfile subpath into an egg-relative path part list.
+        # pseudo-fs path
+        fspath = self.zip_pre + zip_path
+        if fspath.startswith(self.egg_root + os.sep):
+            return fspath[len(self.egg_root) + 1:].split(os.sep)
+        raise AssertionError(
+            "%s is not a subpath of %s" % (fspath, self.egg_root)
+        )
+
+    @property
+    def zipinfo(self):
+        return self._zip_manifests.load(self.loader.archive)
+
+    def get_resource_filename(self, manager, resource_name):
+        if not self.egg_name:
+            raise NotImplementedError(
+                "resource_filename() only supported for .egg, not .zip"
+            )
+        # no need to lock for extraction, since we use temp names
+        zip_path = self._resource_to_zip(resource_name)
+        eagers = self._get_eager_resources()
+        if '/'.join(self._parts(zip_path)) in eagers:
+            for name in eagers:
+                self._extract_resource(manager, self._eager_to_zip(name))
+        return self._extract_resource(manager, zip_path)
+
+    @staticmethod
+    def _get_date_and_size(zip_stat):
+        size = zip_stat.file_size
+        # ymdhms+wday, yday, dst
+        date_time = zip_stat.date_time + (0, 0, -1)
+        # 1980 offset already done
+        timestamp = time.mktime(date_time)
+        return timestamp, size
+
+    def _extract_resource(self, manager, zip_path):
+
+        if zip_path in self._index():
+            for name in self._index()[zip_path]:
+                last = self._extract_resource(
+                    manager, os.path.join(zip_path, name)
+                )
+            # return the extracted directory name
+            return os.path.dirname(last)
+
+        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+
+        if not WRITE_SUPPORT:
+            raise IOError('"os.rename" and "os.unlink" are not supported '
+                          'on this platform')
+        try:
+
+            real_path = manager.get_cache_path(
+                self.egg_name, self._parts(zip_path)
+            )
+
+            if self._is_current(real_path, zip_path):
+                return real_path
+
+            outf, tmpnam = _mkstemp(
+                ".$extract",
+                dir=os.path.dirname(real_path),
+            )
+            os.write(outf, self.loader.get_data(zip_path))
+            os.close(outf)
+            utime(tmpnam, (timestamp, timestamp))
+            manager.postprocess(tmpnam, real_path)
+
+            try:
+                rename(tmpnam, real_path)
+
+            except os.error:
+                if os.path.isfile(real_path):
+                    if self._is_current(real_path, zip_path):
+                        # the file became current since it was checked above,
+                        #  so proceed.
+                        return real_path
+                    # Windows, del old file and retry
+                    elif os.name == 'nt':
+                        unlink(real_path)
+                        rename(tmpnam, real_path)
+                        return real_path
+                raise
+
+        except os.error:
+            # report a user-friendly error
+            manager.extraction_error()
+
+        return real_path
+
+    def _is_current(self, file_path, zip_path):
+        """
+        Return True if the file_path is current for this zip_path
+        """
+        timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+        if not os.path.isfile(file_path):
+            return False
+        stat = os.stat(file_path)
+        if stat.st_size != size or stat.st_mtime != timestamp:
+            return False
+        # check that the contents match
+        zip_contents = self.loader.get_data(zip_path)
+        with open(file_path, 'rb') as f:
+            file_contents = f.read()
+        return zip_contents == file_contents
+
+    def _get_eager_resources(self):
+        if self.eagers is None:
+            eagers = []
+            for name in ('native_libs.txt', 'eager_resources.txt'):
+                if self.has_metadata(name):
+                    eagers.extend(self.get_metadata_lines(name))
+            self.eagers = eagers
+        return self.eagers
+
+    def _index(self):
+        try:
+            return self._dirindex
+        except AttributeError:
+            ind = {}
+            for path in self.zipinfo:
+                parts = path.split(os.sep)
+                while parts:
+                    parent = os.sep.join(parts[:-1])
+                    if parent in ind:
+                        ind[parent].append(parts[-1])
+                        break
+                    else:
+                        ind[parent] = [parts.pop()]
+            self._dirindex = ind
+            return ind
+
+    def _has(self, fspath):
+        zip_path = self._zipinfo_name(fspath)
+        return zip_path in self.zipinfo or zip_path in self._index()
+
+    def _isdir(self, fspath):
+        return self._zipinfo_name(fspath) in self._index()
+
+    def _listdir(self, fspath):
+        return list(self._index().get(self._zipinfo_name(fspath), ()))
+
+    def _eager_to_zip(self, resource_name):
+        return self._zipinfo_name(self._fn(self.egg_root, resource_name))
+
+    def _resource_to_zip(self, resource_name):
+        return self._zipinfo_name(self._fn(self.module_path, resource_name))
+
+
+register_loader_type(zipimport.zipimporter, ZipProvider)
+
+
+class FileMetadata(EmptyProvider):
+    """Metadata handler for standalone PKG-INFO files
+
+    Usage::
+
+        metadata = FileMetadata("/path/to/PKG-INFO")
+
+    This provider rejects all data and metadata requests except for PKG-INFO,
+    which is treated as existing, and will be the contents of the file at
+    the provided location.
+    """
+
+    def __init__(self, path):
+        self.path = path
+
+    def _get_metadata_path(self, name):
+        return self.path
+
+    def has_metadata(self, name):
+        return name == 'PKG-INFO' and os.path.isfile(self.path)
+
+    def get_metadata(self, name):
+        if name != 'PKG-INFO':
+            raise KeyError("No metadata except PKG-INFO is available")
+
+        with io.open(self.path, encoding='utf-8', errors="replace") as f:
+            metadata = f.read()
+        self._warn_on_replacement(metadata)
+        return metadata
+
+    def _warn_on_replacement(self, metadata):
+        # Python 2.7 compat for: replacement_char = '�'
+        replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
+        if replacement_char in metadata:
+            tmpl = "{self.path} could not be properly decoded in UTF-8"
+            msg = tmpl.format(**locals())
+            warnings.warn(msg)
+
+    def get_metadata_lines(self, name):
+        return yield_lines(self.get_metadata(name))
+
+
+class PathMetadata(DefaultProvider):
+    """Metadata provider for egg directories
+
+    Usage::
+
+        # Development eggs:
+
+        egg_info = "/path/to/PackageName.egg-info"
+        base_dir = os.path.dirname(egg_info)
+        metadata = PathMetadata(base_dir, egg_info)
+        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
+        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
+
+        # Unpacked egg directories:
+
+        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
+        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
+        dist = Distribution.from_filename(egg_path, metadata=metadata)
+    """
+
+    def __init__(self, path, egg_info):
+        self.module_path = path
+        self.egg_info = egg_info
+
+
+class EggMetadata(ZipProvider):
+    """Metadata provider for .egg files"""
+
+    def __init__(self, importer):
+        """Create a metadata provider from a zipimporter"""
+
+        self.zip_pre = importer.archive + os.sep
+        self.loader = importer
+        if importer.prefix:
+            self.module_path = os.path.join(importer.archive, importer.prefix)
+        else:
+            self.module_path = importer.archive
+        self._setup_prefix()
+
+
+_declare_state('dict', _distribution_finders={})
+
+
+def register_finder(importer_type, distribution_finder):
+    """Register `distribution_finder` to find distributions in sys.path items
+
+    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+    handler), and `distribution_finder` is a callable that, passed a path
+    item and the importer instance, yields ``Distribution`` instances found on
+    that path item.  See ``pkg_resources.find_on_path`` for an example."""
+    _distribution_finders[importer_type] = distribution_finder
+
+
+def find_distributions(path_item, only=False):
+    """Yield distributions accessible via `path_item`"""
+    importer = get_importer(path_item)
+    finder = _find_adapter(_distribution_finders, importer)
+    return finder(importer, path_item, only)
+
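+# Illustrative use (hedged; the directory is a hypothetical placeholder):
+#
+#     for dist in find_distributions('/path/to/site-packages'):
+#         print(dist.project_name, dist.version)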
+
+def find_eggs_in_zip(importer, path_item, only=False):
+    """
+    Find eggs in zip files; possibly multiple nested eggs.
+    """
+    if importer.archive.endswith('.whl'):
+        # wheels are not supported with this finder
+        # they don't have PKG-INFO metadata, and won't ever contain eggs
+        return
+    metadata = EggMetadata(importer)
+    if metadata.has_metadata('PKG-INFO'):
+        yield Distribution.from_filename(path_item, metadata=metadata)
+    if only:
+        # don't yield nested distros
+        return
+    for subitem in metadata.resource_listdir(''):
+        if _is_egg_path(subitem):
+            subpath = os.path.join(path_item, subitem)
+            dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
+            for dist in dists:
+                yield dist
+        elif subitem.lower().endswith('.dist-info'):
+            subpath = os.path.join(path_item, subitem)
+            submeta = EggMetadata(zipimport.zipimporter(subpath))
+            submeta.egg_info = subpath
+            yield Distribution.from_location(path_item, subitem, submeta)
+
+
+register_finder(zipimport.zipimporter, find_eggs_in_zip)
+
+
+def find_nothing(importer, path_item, only=False):
+    return ()
+
+
+register_finder(object, find_nothing)
+
+
+def _by_version_descending(names):
+    """
+    Given a list of filenames, return them in descending order
+    by version number.
+
+    >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
+    >>> _by_version_descending(names)
+    ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
+    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
+    >>> _by_version_descending(names)
+    ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
+    >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
+    >>> _by_version_descending(names)
+    ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
+    """
+    def _by_version(name):
+        """
+        Parse each component of the filename
+        """
+        name, ext = os.path.splitext(name)
+        parts = itertools.chain(name.split('-'), [ext])
+        return [packaging.version.parse(part) for part in parts]
+
+    return sorted(names, key=_by_version, reverse=True)
+
+
+def find_on_path(importer, path_item, only=False):
+    """Yield distributions accessible on a sys.path directory"""
+    path_item = _normalize_cached(path_item)
+
+    if _is_unpacked_egg(path_item):
+        yield Distribution.from_filename(
+            path_item, metadata=PathMetadata(
+                path_item, os.path.join(path_item, 'EGG-INFO')
+            )
+        )
+        return
+
+    entries = safe_listdir(path_item)
+
+    # for performance, before sorting by version,
+    # screen entries for only those that will yield
+    # distributions
+    filtered = (
+        entry
+        for entry in entries
+        if dist_factory(path_item, entry, only)
+    )
+
+    # scan for .egg and .egg-info in directory
+    path_item_entries = _by_version_descending(filtered)
+    for entry in path_item_entries:
+        fullpath = os.path.join(path_item, entry)
+        factory = dist_factory(path_item, entry, only)
+        for dist in factory(fullpath):
+            yield dist
+
+
+def dist_factory(path_item, entry, only):
+    """
+    Return a dist_factory for a path_item and entry
+    """
+    lower = entry.lower()
+    is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info')))
+    return (
+        distributions_from_metadata
+        if is_meta else
+        find_distributions
+        if not only and _is_egg_path(entry) else
+        resolve_egg_link
+        if not only and lower.endswith('.egg-link') else
+        NoDists()
+    )
+
+
+class NoDists:
+    """
+    >>> bool(NoDists())
+    False
+
+    >>> list(NoDists()('anything'))
+    []
+    """
+    def __bool__(self):
+        return False
+    if six.PY2:
+        __nonzero__ = __bool__
+
+    def __call__(self, fullpath):
+        return iter(())
+
+
+def safe_listdir(path):
+    """
+    Attempt to list contents of path, but suppress some exceptions.
+    """
+    try:
+        return os.listdir(path)
+    except (PermissionError, NotADirectoryError):
+        pass
+    except OSError as e:
+        # Ignore the directory if it does not exist, is not a directory,
+        # or if permission is denied
+        ignorable = (
+            e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
+            # Python 2 on Windows needs to be handled this way :(
+            or getattr(e, "winerror", None) == 267
+        )
+        if not ignorable:
+            raise
+    return ()
+
+
+def distributions_from_metadata(path):
+    root = os.path.dirname(path)
+    if os.path.isdir(path):
+        if len(os.listdir(path)) == 0:
+            # empty metadata dir; skip
+            return
+        metadata = PathMetadata(root, path)
+    else:
+        metadata = FileMetadata(path)
+    entry = os.path.basename(path)
+    yield Distribution.from_location(
+        root, entry, metadata, precedence=DEVELOP_DIST,
+    )
+
+
+def non_empty_lines(path):
+    """
+    Yield non-empty lines from file at path
+    """
+    with open(path) as f:
+        for line in f:
+            line = line.strip()
+            if line:
+                yield line
+
+
+def resolve_egg_link(path):
+    """
+    Given a path to an .egg-link, resolve distributions
+    present in the referenced path.
+    """
+    referenced_paths = non_empty_lines(path)
+    resolved_paths = (
+        os.path.join(os.path.dirname(path), ref)
+        for ref in referenced_paths
+    )
+    dist_groups = map(find_distributions, resolved_paths)
+    return next(dist_groups, ())
+
+
+register_finder(pkgutil.ImpImporter, find_on_path)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+    register_finder(importlib_machinery.FileFinder, find_on_path)
+
+_declare_state('dict', _namespace_handlers={})
+_declare_state('dict', _namespace_packages={})
+
+
+def register_namespace_handler(importer_type, namespace_handler):
+    """Register `namespace_handler` to declare namespace packages
+
+    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+    handler), and `namespace_handler` is a callable like this::
+
+        def namespace_handler(importer, path_entry, moduleName, module):
+            # return a path_entry to use for child packages
+
+    Namespace handlers are only called if the importer object has already
+    agreed that it can handle the relevant path item, and they should only
+    return a subpath if the module __path__ does not already contain an
+    equivalent subpath.  For an example namespace handler, see
+    ``pkg_resources.file_ns_handler``.
+    """
+    _namespace_handlers[importer_type] = namespace_handler
+
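+# Hedged sketch of the handler contract described above; the importer type
+# and handler names are illustrative only (see file_ns_handler below for
+# the real filesystem/zipfile handler):
+#
+#     def my_ns_handler(importer, path_entry, moduleName, module):
+#         subpath = os.path.join(path_entry, moduleName.split('.')[-1])
+#         return subpath if subpath not in module.__path__ else None
+#
+#     register_namespace_handler(MyImporterType, my_ns_handler)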
+
+def _handle_ns(packageName, path_item):
+    """Ensure that named package includes a subpath of path_item (if needed)"""
+
+    importer = get_importer(path_item)
+    if importer is None:
+        return None
+
+    # capture warnings due to #1111
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        loader = importer.find_module(packageName)
+
+    if loader is None:
+        return None
+    module = sys.modules.get(packageName)
+    if module is None:
+        module = sys.modules[packageName] = types.ModuleType(packageName)
+        module.__path__ = []
+        _set_parent_ns(packageName)
+    elif not hasattr(module, '__path__'):
+        raise TypeError("Not a package:", packageName)
+    handler = _find_adapter(_namespace_handlers, importer)
+    subpath = handler(importer, path_item, packageName, module)
+    if subpath is not None:
+        path = module.__path__
+        path.append(subpath)
+        loader.load_module(packageName)
+        _rebuild_mod_path(path, packageName, module)
+    return subpath
+
+
+def _rebuild_mod_path(orig_path, package_name, module):
+    """
+    Rebuild module.__path__ ensuring that all entries are ordered
+    corresponding to their sys.path order
+    """
+    sys_path = [_normalize_cached(p) for p in sys.path]
+
+    def safe_sys_path_index(entry):
+        """
+        Workaround for #520 and #513.
+        """
+        try:
+            return sys_path.index(entry)
+        except ValueError:
+            return float('inf')
+
+    def position_in_sys_path(path):
+        """
+        Return the ordinal of the path based on its position in sys.path
+        """
+        path_parts = path.split(os.sep)
+        module_parts = package_name.count('.') + 1
+        parts = path_parts[:-module_parts]
+        return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
+
+    new_path = sorted(orig_path, key=position_in_sys_path)
+    new_path = [_normalize_cached(p) for p in new_path]
+
+    if isinstance(module.__path__, list):
+        module.__path__[:] = new_path
+    else:
+        module.__path__ = new_path
+
+
+def declare_namespace(packageName):
+    """Declare that package 'packageName' is a namespace package"""
+
+    _imp.acquire_lock()
+    try:
+        if packageName in _namespace_packages:
+            return
+
+        path = sys.path
+        parent, _, _ = packageName.rpartition('.')
+
+        if parent:
+            declare_namespace(parent)
+            if parent not in _namespace_packages:
+                __import__(parent)
+            try:
+                path = sys.modules[parent].__path__
+            except AttributeError:
+                raise TypeError("Not a package:", parent)
+
+        # Track what packages are namespaces, so when new path items are added,
+        # they can be updated
+        _namespace_packages.setdefault(parent or None, []).append(packageName)
+        _namespace_packages.setdefault(packageName, [])
+
+        for path_item in path:
+            # Ensure all the parent's path items are reflected in the child,
+            # if they apply
+            _handle_ns(packageName, path_item)
+
+    finally:
+        _imp.release_lock()
+
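+# Illustrative use: this is the form setuptools documents for a namespace
+# package's __init__.py:
+#
+#     __import__('pkg_resources').declare_namespace(__name__)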
+
+def fixup_namespace_packages(path_item, parent=None):
+    """Ensure that previously-declared namespace packages include path_item"""
+    _imp.acquire_lock()
+    try:
+        for package in _namespace_packages.get(parent, ()):
+            subpath = _handle_ns(package, path_item)
+            if subpath:
+                fixup_namespace_packages(subpath, package)
+    finally:
+        _imp.release_lock()
+
+
+def file_ns_handler(importer, path_item, packageName, module):
+    """Compute an ns-package subpath for a filesystem or zipfile importer"""
+
+    subpath = os.path.join(path_item, packageName.split('.')[-1])
+    normalized = _normalize_cached(subpath)
+    for item in module.__path__:
+        if _normalize_cached(item) == normalized:
+            break
+    else:
+        # Only return the path if it's not already there
+        return subpath
+
+
+register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
+register_namespace_handler(zipimport.zipimporter, file_ns_handler)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+    register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
+
+
+def null_ns_handler(importer, path_item, packageName, module):
+    return None
+
+
+register_namespace_handler(object, null_ns_handler)
+
+
+def normalize_path(filename):
+    """Normalize a file/dir name for comparison purposes"""
+    return os.path.normcase(os.path.realpath(os.path.normpath(
+        _cygwin_patch(filename))))
+
+
+def _cygwin_patch(filename):  # pragma: nocover
+    """
+    Contrary to POSIX 2008, on Cygwin, getcwd(3) contains
+    symlink components. Using os.path.abspath() works around
+    this limitation. A fix in os.getcwd() would probably be
+    better, on Cygwin even more so, except that this seems
+    to be by design...
+    """
+    return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
+
+
+def _normalize_cached(filename, _cache={}):
+    try:
+        return _cache[filename]
+    except KeyError:
+        _cache[filename] = result = normalize_path(filename)
+        return result
+
+
+def _is_egg_path(path):
+    """
+    Determine if given path appears to be an egg.
+    """
+    return path.lower().endswith('.egg')
+
+
+def _is_unpacked_egg(path):
+    """
+    Determine if given path appears to be an unpacked egg.
+    """
+    return (
+        _is_egg_path(path) and
+        os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
+    )
+
+
+def _set_parent_ns(packageName):
+    parts = packageName.split('.')
+    name = parts.pop()
+    if parts:
+        parent = '.'.join(parts)
+        setattr(sys.modules[parent], name, sys.modules[packageName])
+
+
+def yield_lines(strs):
+    """Yield non-empty/non-comment lines of a string or sequence"""
+    if isinstance(strs, six.string_types):
+        for s in strs.splitlines():
+            s = s.strip()
+            # skip blank lines/comments
+            if s and not s.startswith('#'):
+                yield s
+    else:
+        for ss in strs:
+            for s in yield_lines(ss):
+                yield s
+
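+# Illustrative behavior (blank lines and '#' comments are skipped; nested
+# sequences are flattened):
+#
+#     >>> list(yield_lines("a\n\n# comment\nb"))
+#     ['a', 'b']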
+
+MODULE = re.compile(r"\w+(\.\w+)*$").match
+EGG_NAME = re.compile(
+    r"""
+    (?P<name>[^-]+) (
+        -(?P<ver>[^-]+) (
+            -py(?P<pyver>[^-]+) (
+                -(?P<plat>.+)
+            )?
+        )?
+    )?
+    """,
+    re.VERBOSE | re.IGNORECASE,
+).match
+
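+# Illustrative match (the basename is made up):
+#
+#     >>> EGG_NAME('FooPkg-1.2-py2.4-win32').group('name', 'ver', 'pyver', 'plat')
+#     ('FooPkg', '1.2', '2.4', 'win32')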
+
+class EntryPoint:
+    """Object representing an advertised importable object"""
+
+    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
+        if not MODULE(module_name):
+            raise ValueError("Invalid module name", module_name)
+        self.name = name
+        self.module_name = module_name
+        self.attrs = tuple(attrs)
+        self.extras = tuple(extras)
+        self.dist = dist
+
+    def __str__(self):
+        s = "%s = %s" % (self.name, self.module_name)
+        if self.attrs:
+            s += ':' + '.'.join(self.attrs)
+        if self.extras:
+            s += ' [%s]' % ','.join(self.extras)
+        return s
+
+    def __repr__(self):
+        return "EntryPoint.parse(%r)" % str(self)
+
+    def load(self, require=True, *args, **kwargs):
+        """
+        Require packages for this EntryPoint, then resolve it.
+        """
+        if not require or args or kwargs:
+            warnings.warn(
+                "Parameters to load are deprecated.  Call .resolve and "
+                ".require separately.",
+                PkgResourcesDeprecationWarning,
+                stacklevel=2,
+            )
+        if require:
+            self.require(*args, **kwargs)
+        return self.resolve()
+
+    def resolve(self):
+        """
+        Resolve the entry point from its module and attrs.
+        """
+        module = __import__(self.module_name, fromlist=['__name__'], level=0)
+        try:
+            return functools.reduce(getattr, self.attrs, module)
+        except AttributeError as exc:
+            raise ImportError(str(exc))
+
+    def require(self, env=None, installer=None):
+        if self.extras and not self.dist:
+            raise UnknownExtra("Can't require() without a distribution", self)
+
+        # Get the requirements for this entry point with all its extras and
+        # then resolve them. We have to pass `extras` along when resolving so
+        # that the working set knows what extras we want. Otherwise, for
+        # dist-info distributions, the working set will assume that the
+        # requirements for that extra are purely optional and skip over them.
+        reqs = self.dist.requires(self.extras)
+        items = working_set.resolve(reqs, env, installer, extras=self.extras)
+        list(map(working_set.add, items))
+
+    pattern = re.compile(
+        r'\s*'
+        r'(?P<name>.+?)\s*'
+        r'=\s*'
+        r'(?P<module>[\w.]+)\s*'
+        r'(:\s*(?P<attr>[\w.]+))?\s*'
+        r'(?P<extras>\[.*\])?\s*$'
+    )
+
+    @classmethod
+    def parse(cls, src, dist=None):
+        """Parse a single entry point from string `src`
+
+        Entry point syntax follows the form::
+
+            name = some.module:some.attr [extra1, extra2]
+
+        The entry name and module name are required, but the ``:attrs`` and
+        ``[extras]`` parts are optional
+        """
+        m = cls.pattern.match(src)
+        if not m:
+            msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
+            raise ValueError(msg, src)
+        res = m.groupdict()
+        extras = cls._parse_extras(res['extras'])
+        attrs = res['attr'].split('.') if res['attr'] else ()
+        return cls(res['name'], res['module'], attrs, extras, dist)
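+
+    # Illustrative parse of the syntax documented above (names are made up;
+    # extras are normalized via safe_extra):
+    #
+    #     >>> ep = EntryPoint.parse("demo = some.module:SomeClass.method [extra1]")
+    #     >>> ep.name, ep.module_name, ep.attrs, ep.extras
+    #     ('demo', 'some.module', ('SomeClass', 'method'), ('extra1',))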
+
+    @classmethod
+    def _parse_extras(cls, extras_spec):
+        if not extras_spec:
+            return ()
+        req = Requirement.parse('x' + extras_spec)
+        if req.specs:
+            raise ValueError()
+        return req.extras
+
+    @classmethod
+    def parse_group(cls, group, lines, dist=None):
+        """Parse an entry point group"""
+        if not MODULE(group):
+            raise ValueError("Invalid group name", group)
+        this = {}
+        for line in yield_lines(lines):
+            ep = cls.parse(line, dist)
+            if ep.name in this:
+                raise ValueError("Duplicate entry point", group, ep.name)
+            this[ep.name] = ep
+        return this
+
+    @classmethod
+    def parse_map(cls, data, dist=None):
+        """Parse a map of entry point groups"""
+        if isinstance(data, dict):
+            data = data.items()
+        else:
+            data = split_sections(data)
+        maps = {}
+        for group, lines in data:
+            if group is None:
+                if not lines:
+                    continue
+                raise ValueError("Entry points must be listed in groups")
+            group = group.strip()
+            if group in maps:
+                raise ValueError("Duplicate group name", group)
+            maps[group] = cls.parse_group(group, lines, dist)
+        return maps
+
+
+def _remove_md5_fragment(location):
+    if not location:
+        return ''
+    parsed = urllib.parse.urlparse(location)
+    if parsed[-1].startswith('md5='):
+        return urllib.parse.urlunparse(parsed[:-1] + ('',))
+    return location
+
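+# Illustrative behavior (hedged; the URL is made up):
+#
+#     >>> _remove_md5_fragment('http://example.com/pkg.egg#md5=0123abcd')
+#     'http://example.com/pkg.egg'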
+
+def _version_from_file(lines):
+    """
+    Given an iterable of lines from a Metadata file, return
+    the value of the Version field, if present, or None otherwise.
+    """
+    def is_version_line(line):
+        return line.lower().startswith('version:')
+    version_lines = filter(is_version_line, lines)
+    line = next(iter(version_lines), '')
+    _, _, value = line.partition(':')
+    return safe_version(value.strip()) or None
+
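+# Illustrative behavior:
+#
+#     >>> _version_from_file(['Name: demo', 'Version: 1.0'])
+#     '1.0'
+#     >>> _version_from_file(['Name: demo']) is None
+#     True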
+
+class Distribution:
+    """Wrap an actual or potential sys.path entry w/metadata"""
+    PKG_INFO = 'PKG-INFO'
+
+    def __init__(
+            self, location=None, metadata=None, project_name=None,
+            version=None, py_version=PY_MAJOR, platform=None,
+            precedence=EGG_DIST):
+        self.project_name = safe_name(project_name or 'Unknown')
+        if version is not None:
+            self._version = safe_version(version)
+        self.py_version = py_version
+        self.platform = platform
+        self.location = location
+        self.precedence = precedence
+        self._provider = metadata or empty_provider
+
+    @classmethod
+    def from_location(cls, location, basename, metadata=None, **kw):
+        project_name, version, py_version, platform = [None] * 4
+        basename, ext = os.path.splitext(basename)
+        if ext.lower() in _distributionImpl:
+            cls = _distributionImpl[ext.lower()]
+
+            match = EGG_NAME(basename)
+            if match:
+                project_name, version, py_version, platform = match.group(
+                    'name', 'ver', 'pyver', 'plat'
+                )
+        return cls(
+            location, metadata, project_name=project_name, version=version,
+            py_version=py_version, platform=platform, **kw
+        )._reload_version()
+
+    def _reload_version(self):
+        return self
+
+    @property
+    def hashcmp(self):
+        return (
+            self.parsed_version,
+            self.precedence,
+            self.key,
+            _remove_md5_fragment(self.location),
+            self.py_version or '',
+            self.platform or '',
+        )
+
+    def __hash__(self):
+        return hash(self.hashcmp)
+
+    def __lt__(self, other):
+        return self.hashcmp < other.hashcmp
+
+    def __le__(self, other):
+        return self.hashcmp <= other.hashcmp
+
+    def __gt__(self, other):
+        return self.hashcmp > other.hashcmp
+
+    def __ge__(self, other):
+        return self.hashcmp >= other.hashcmp
+
+    def __eq__(self, other):
+        if not isinstance(other, self.__class__):
+            # It's not a Distribution, so they are not equal
+            return False
+        return self.hashcmp == other.hashcmp
+
+    def __ne__(self, other):
+        return not self == other
+
+    # These properties have to be lazy so that we don't have to load any
+    # metadata until/unless it's actually needed.  (i.e., some distributions
+    # may not know their name or version without loading PKG-INFO)
+
+    @property
+    def key(self):
+        try:
+            return self._key
+        except AttributeError:
+            self._key = key = self.project_name.lower()
+            return key
+
+    @property
+    def parsed_version(self):
+        if not hasattr(self, "_parsed_version"):
+            self._parsed_version = parse_version(self.version)
+
+        return self._parsed_version
+
+    def _warn_legacy_version(self):
+        LV = packaging.version.LegacyVersion
+        is_legacy = isinstance(self._parsed_version, LV)
+        if not is_legacy:
+            return
+
+        # While an empty version is technically a legacy version and
+        # is not a valid PEP 440 version, it is unlikely to have been
+        # supplied deliberately; more likely it comes from setuptools
+        # attempting to parse a filename and including it in the list.
+        # So we gate this warning on whether the version is non-empty.
+        if not self.version:
+            return
+
+        tmpl = textwrap.dedent("""
+            '{project_name} ({version})' is being parsed as a legacy,
+            non-PEP 440
+            version. You may find odd behavior and sort order.
+            In particular it will be sorted as less than 0.0. It
+            is recommended to migrate to PEP 440 compatible
+            versions.
+            """).strip().replace('\n', ' ')
+
+        warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
+
+    @property
+    def version(self):
+        try:
+            return self._version
+        except AttributeError:
+            version = self._get_version()
+            if version is None:
+                path = self._get_metadata_path_for_display(self.PKG_INFO)
+                msg = (
+                    "Missing 'Version:' header and/or {} file at path: {}"
+                ).format(self.PKG_INFO, path)
+                raise ValueError(msg, self)
+
+            return version
+
+    @property
+    def _dep_map(self):
+        """
+        A map of extra to its list of (direct) requirements
+        for this distribution, including the null extra.
+        """
+        try:
+            return self.__dep_map
+        except AttributeError:
+            self.__dep_map = self._filter_extras(self._build_dep_map())
+        return self.__dep_map
+
+    @staticmethod
+    def _filter_extras(dm):
+        """
+        Given a mapping of extras to dependencies, strip off
+        environment markers and filter out any dependencies
+        not matching the markers.
+        """
+        for extra in list(filter(None, dm)):
+            new_extra = extra
+            reqs = dm.pop(extra)
+            new_extra, _, marker = extra.partition(':')
+            fails_marker = marker and (
+                invalid_marker(marker)
+                or not evaluate_marker(marker)
+            )
+            if fails_marker:
+                reqs = []
+            new_extra = safe_extra(new_extra) or None
+
+            dm.setdefault(new_extra, []).extend(reqs)
+        return dm
+
+    def _build_dep_map(self):
+        dm = {}
+        for name in 'requires.txt', 'depends.txt':
+            for extra, reqs in split_sections(self._get_metadata(name)):
+                dm.setdefault(extra, []).extend(parse_requirements(reqs))
+        return dm
+
+    def requires(self, extras=()):
+        """List of Requirements needed for this distro if `extras` are used"""
+        dm = self._dep_map
+        deps = []
+        deps.extend(dm.get(None, ()))
+        for ext in extras:
+            try:
+                deps.extend(dm[safe_extra(ext)])
+            except KeyError:
+                raise UnknownExtra(
+                    "%s has no such extra feature %r" % (self, ext)
+                )
+        return deps
+
+    def _get_metadata_path_for_display(self, name):
+        """
+        Return the path to the given metadata file, if available.
+        """
+        try:
+            # We need to access _get_metadata_path() on the provider object
+            # directly rather than through this class's __getattr__()
+            # since _get_metadata_path() is marked private.
+            path = self._provider._get_metadata_path(name)
+
+        # Handle exceptions e.g. in case the distribution's metadata
+        # provider doesn't support _get_metadata_path().
+        except Exception:
+            return '[could not detect]'
+
+        return path
+
+    def _get_metadata(self, name):
+        if self.has_metadata(name):
+            for line in self.get_metadata_lines(name):
+                yield line
+
+    def _get_version(self):
+        lines = self._get_metadata(self.PKG_INFO)
+        version = _version_from_file(lines)
+
+        return version
+
+    def activate(self, path=None, replace=False):
+        """Ensure distribution is importable on `path` (default=sys.path)"""
+        if path is None:
+            path = sys.path
+        self.insert_on(path, replace=replace)
+        if path is sys.path:
+            fixup_namespace_packages(self.location)
+            for pkg in self._get_metadata('namespace_packages.txt'):
+                if pkg in sys.modules:
+                    declare_namespace(pkg)
+
+    def egg_name(self):
+        """Return what this distribution's standard .egg filename should be"""
+        filename = "%s-%s-py%s" % (
+            to_filename(self.project_name), to_filename(self.version),
+            self.py_version or PY_MAJOR
+        )
+
+        if self.platform:
+            filename += '-' + self.platform
+        return filename
+
+    def __repr__(self):
+        if self.location:
+            return "%s (%s)" % (self, self.location)
+        else:
+            return str(self)
+
+    def __str__(self):
+        try:
+            version = getattr(self, 'version', None)
+        except ValueError:
+            version = None
+        version = version or "[unknown version]"
+        return "%s %s" % (self.project_name, version)
+
+    def __getattr__(self, attr):
+        """Delegate all unrecognized public attributes to .metadata provider"""
+        if attr.startswith('_'):
+            raise AttributeError(attr)
+        return getattr(self._provider, attr)
+
+    def __dir__(self):
+        return list(
+            set(super(Distribution, self).__dir__())
+            | set(
+                attr for attr in self._provider.__dir__()
+                if not attr.startswith('_')
+            )
+        )
+
+    if not hasattr(object, '__dir__'):
+        # customizing __dir__ is not supported on python 2.7
+        del __dir__
+
+    @classmethod
+    def from_filename(cls, filename, metadata=None, **kw):
+        return cls.from_location(
+            _normalize_cached(filename), os.path.basename(filename), metadata,
+            **kw
+        )
+
+    def as_requirement(self):
+        """Return a ``Requirement`` that matches this distribution exactly"""
+        if isinstance(self.parsed_version, packaging.version.Version):
+            spec = "%s==%s" % (self.project_name, self.parsed_version)
+        else:
+            spec = "%s===%s" % (self.project_name, self.parsed_version)
+
+        return Requirement.parse(spec)
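+
+    # Illustrative (hedged; 'demo' is a made-up project): a PEP 440 version
+    # pins with '==', while a legacy version uses the exact-match '===':
+    #
+    #     >>> Distribution(project_name='demo', version='1.0').as_requirement()
+    #     Requirement.parse('demo==1.0')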
+
+    def load_entry_point(self, group, name):
+        """Return the `name` entry point of `group` or raise ImportError"""
+        ep = self.get_entry_info(group, name)
+        if ep is None:
+            raise ImportError("Entry point %r not found" % ((group, name),))
+        return ep.load()
+
+    def get_entry_map(self, group=None):
+        """Return the entry point map for `group`, or the full entry map"""
+        try:
+            ep_map = self._ep_map
+        except AttributeError:
+            ep_map = self._ep_map = EntryPoint.parse_map(
+                self._get_metadata('entry_points.txt'), self
+            )
+        if group is not None:
+            return ep_map.get(group, {})
+        return ep_map
+
+    def get_entry_info(self, group, name):
+        """Return the EntryPoint object for `group`+`name`, or ``None``"""
+        return self.get_entry_map(group).get(name)
+
+    def insert_on(self, path, loc=None, replace=False):
+        """Ensure self.location is on path
+
+        If replace=False (default):
+            - If location is already in path anywhere, do nothing.
+            - Else:
+              - If it's an egg and its parent directory is on path,
+                insert just ahead of the parent.
+              - Else: add to the end of path.
+        If replace=True:
+            - If location is already on path anywhere (not eggs)
+              or higher priority than its parent (eggs)
+              do nothing.
+            - Else:
+              - If it's an egg and its parent directory is on path,
+                insert just ahead of the parent,
+                removing any lower-priority entries.
+              - Else: add it to the front of path.
+        """
+
+        loc = loc or self.location
+        if not loc:
+            return
+
+        nloc = _normalize_cached(loc)
+        bdir = os.path.dirname(nloc)
+        npath = [(p and _normalize_cached(p) or p) for p in path]
+
+        for p, item in enumerate(npath):
+            if item == nloc:
+                if replace:
+                    break
+                else:
+                    # don't modify path (even removing duplicates) if
+                    # found and not replace
+                    return
+            elif item == bdir and self.precedence == EGG_DIST:
+                # if it's an .egg, give it precedence over its directory
+                # UNLESS it's already been added to sys.path and replace=False
+                if (not replace) and nloc in npath[p:]:
+                    return
+                if path is sys.path:
+                    self.check_version_conflict()
+                path.insert(p, loc)
+                npath.insert(p, nloc)
+                break
+        else:
+            if path is sys.path:
+                self.check_version_conflict()
+            if replace:
+                path.insert(0, loc)
+            else:
+                path.append(loc)
+            return
+
+        # p is the spot where we found or inserted loc; now remove duplicates
+        while True:
+            try:
+                np = npath.index(nloc, p + 1)
+            except ValueError:
+                break
+            else:
+                del npath[np], path[np]
+                # resume the scan just past the removed duplicate
+                p = np
+
+        return
+
+    def check_version_conflict(self):
+        if self.key == 'setuptools':
+            # ignore the inevitable setuptools self-conflicts  :(
+            return
+
+        nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
+        loc = normalize_path(self.location)
+        for modname in self._get_metadata('top_level.txt'):
+            if (modname not in sys.modules or modname in nsp
+                    or modname in _namespace_packages):
+                continue
+            if modname in ('pkg_resources', 'setuptools', 'site'):
+                continue
+            fn = getattr(sys.modules[modname], '__file__', None)
+            if fn and (normalize_path(fn).startswith(loc) or
+                       fn.startswith(self.location)):
+                continue
+            issue_warning(
+                "Module %s was already imported from %s, but %s is being added"
+                " to sys.path" % (modname, fn, self.location),
+            )
+
+    def has_version(self):
+        try:
+            self.version
+        except ValueError:
+            issue_warning("Unbuilt egg for " + repr(self))
+            return False
+        return True
+
+    def clone(self, **kw):
+        """Copy this distribution, substituting in any changed keyword args"""
+        names = 'project_name version py_version platform location precedence'
+        for attr in names.split():
+            kw.setdefault(attr, getattr(self, attr, None))
+        kw.setdefault('metadata', self._provider)
+        return self.__class__(**kw)
+
+    @property
+    def extras(self):
+        return [dep for dep in self._dep_map if dep]
+
+
+class EggInfoDistribution(Distribution):
+    def _reload_version(self):
+        """
+        Packages installed by distutils (e.g. numpy or scipy)
+        use an old safe_version, and so their version numbers
+        can get mangled when converted to filenames
+        (e.g., 1.11.0.dev0+2329eae becomes 1.11.0.dev0_2329eae).
+        These distributions will not be parsed properly
+        downstream by Distribution and safe_version, so
+        take an extra step and try to get the version number from
+        the metadata file itself instead of the filename.
+        """
+        md_version = self._get_version()
+        if md_version:
+            self._version = md_version
+        return self
+
+
+class DistInfoDistribution(Distribution):
+    """
+    Wrap an actual or potential sys.path entry
+    w/metadata, .dist-info style.
+    """
+    PKG_INFO = 'METADATA'
+    EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
+
+    @property
+    def _parsed_pkg_info(self):
+        """Parse and cache metadata"""
+        try:
+            return self._pkg_info
+        except AttributeError:
+            metadata = self.get_metadata(self.PKG_INFO)
+            self._pkg_info = email.parser.Parser().parsestr(metadata)
+            return self._pkg_info
+
+    @property
+    def _dep_map(self):
+        try:
+            return self.__dep_map
+        except AttributeError:
+            self.__dep_map = self._compute_dependencies()
+            return self.__dep_map
+
+    def _compute_dependencies(self):
+        """Recompute this distribution's dependencies."""
+        dm = self.__dep_map = {None: []}
+
+        reqs = []
+        # Including any condition expressions
+        for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
+            reqs.extend(parse_requirements(req))
+
+        def reqs_for_extra(extra):
+            for req in reqs:
+                if not req.marker or req.marker.evaluate({'extra': extra}):
+                    yield req
+
+        common = frozenset(reqs_for_extra(None))
+        dm[None].extend(common)
+
+        for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
+            s_extra = safe_extra(extra.strip())
+            dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
+
+        return dm
+
+
+_distributionImpl = {
+    '.egg': Distribution,
+    '.egg-info': EggInfoDistribution,
+    '.dist-info': DistInfoDistribution,
+}
+
+
+def issue_warning(*args, **kw):
+    level = 1
+    g = globals()
+    try:
+        # find the first stack frame that is *not* code in
+        # the pkg_resources module, to use for the warning
+        while sys._getframe(level).f_globals is g:
+            level += 1
+    except ValueError:
+        pass
+    warnings.warn(stacklevel=level + 1, *args, **kw)
+
+
+class RequirementParseError(ValueError):
+    def __str__(self):
+        return ' '.join(self.args)
+
+
+def parse_requirements(strs):
+    """Yield ``Requirement`` objects for each specification in `strs`
+
+    `strs` must be a string, or a (possibly-nested) iterable thereof.
+    """
+    # create a steppable iterator, so we can handle \-continuations
+    lines = iter(yield_lines(strs))
+
+    for line in lines:
+        # Drop comments -- a hash without a space may be in a URL.
+        if ' #' in line:
+            line = line[:line.find(' #')]
+        # If there is a line continuation, drop it, and append the next line.
+        if line.endswith('\\'):
+            line = line[:-2].strip()
+            try:
+                line += next(lines)
+            except StopIteration:
+                return
+        yield Requirement(line)
+
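+# Illustrative use (hedged; names are made up). Comments introduced by
+# ' #' are dropped and each remaining spec becomes a Requirement:
+#
+#     >>> [str(r) for r in parse_requirements("foo>=1.0  # comment\nbar")]
+#     ['foo>=1.0', 'bar']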
+
+class Requirement(packaging.requirements.Requirement):
+    def __init__(self, requirement_string):
+        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
+        try:
+            super(Requirement, self).__init__(requirement_string)
+        except packaging.requirements.InvalidRequirement as e:
+            raise RequirementParseError(str(e))
+        self.unsafe_name = self.name
+        project_name = safe_name(self.name)
+        self.project_name, self.key = project_name, project_name.lower()
+        self.specs = [
+            (spec.operator, spec.version) for spec in self.specifier]
+        self.extras = tuple(map(safe_extra, self.extras))
+        self.hashCmp = (
+            self.key,
+            self.specifier,
+            frozenset(self.extras),
+            str(self.marker) if self.marker else None,
+        )
+        self.__hash = hash(self.hashCmp)
+
+    def __eq__(self, other):
+        return (
+            isinstance(other, Requirement) and
+            self.hashCmp == other.hashCmp
+        )
+
+    def __ne__(self, other):
+        return not self == other
+
+    def __contains__(self, item):
+        if isinstance(item, Distribution):
+            if item.key != self.key:
+                return False
+
+            item = item.version
+
+        # Allow prereleases always in order to match the previous behavior of
+        # this method. In the future this should be smarter and follow PEP 440
+        # more accurately.
+        return self.specifier.contains(item, prereleases=True)
+
+    def __hash__(self):
+        return self.__hash
+
+    def __repr__(self):
+        return "Requirement.parse(%r)" % str(self)
+
+    @staticmethod
+    def parse(s):
+        req, = parse_requirements(s)
+        return req
+
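+# Illustrative containment check (hedged; 'demo' is made up). A
+# Distribution or a bare version string may be tested; prereleases always
+# match, per the note in __contains__:
+#
+#     >>> req = Requirement.parse('demo>=1.0')
+#     >>> '1.2' in req, '0.9' in req
+#     (True, False)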
+
+def _always_object(classes):
+    """
+    Ensure object appears in the mro even
+    for old-style classes.
+    """
+    if object not in classes:
+        return classes + (object,)
+    return classes
+
+
+def _find_adapter(registry, ob):
+    """Return an adapter factory for `ob` from `registry`"""
+    types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
+    for t in types:
+        if t in registry:
+            return registry[t]
+
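+# Illustrative lookup: the MRO walk is why registering against `object`,
+# as done for find_nothing and null_ns_handler above, acts as a catch-all:
+#
+#     >>> registry = {object: 'fallback', int: 'specific'}
+#     >>> _find_adapter(registry, 3), _find_adapter(registry, 'text')
+#     ('specific', 'fallback')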
+
+def ensure_directory(path):
+    """Ensure that the parent directory of `path` exists"""
+    dirname = os.path.dirname(path)
+    py31compat.makedirs(dirname, exist_ok=True)
+
+
+def _bypass_ensure_directory(path):
+    """Sandbox-bypassing version of ensure_directory()"""
+    if not WRITE_SUPPORT:
+        raise IOError('"os.mkdir" not supported on this platform.')
+    dirname, filename = split(path)
+    if dirname and filename and not isdir(dirname):
+        _bypass_ensure_directory(dirname)
+        try:
+            mkdir(dirname, 0o755)
+        except FileExistsError:
+            pass
+
+
+def split_sections(s):
+    """Split a string or iterable thereof into (section, content) pairs
+
+    Each ``section`` is a stripped version of the section header ("[section]")
+    and each ``content`` is a list of stripped lines excluding blank lines and
+    comment-only lines.  If there are any such lines before the first section
+    header, they're returned in a first ``section`` of ``None``.
+    """
+    section = None
+    content = []
+    for line in yield_lines(s):
+        if line.startswith("["):
+            if line.endswith("]"):
+                if section or content:
+                    yield section, content
+                section = line[1:-1].strip()
+                content = []
+            else:
+                raise ValueError("Invalid section heading", line)
+        else:
+            content.append(line)
+
+    # wrap up last segment
+    yield section, content
+
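+# Illustrative split:
+#
+#     >>> list(split_sections("a\n[sec]\nb\nc"))
+#     [(None, ['a']), ('sec', ['b', 'c'])]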
+
+def _mkstemp(*args, **kw):
+    old_open = os.open
+    try:
+        # temporarily bypass sandboxing
+        os.open = os_open
+        return tempfile.mkstemp(*args, **kw)
+    finally:
+        # and then put it back
+        os.open = old_open
+
+
+# Silence the PEP440Warning by default, so that end users don't get hit by it
+# randomly just because they use pkg_resources. We want to append the rule
+# because we want earlier uses of filterwarnings to take precedence over this
+# one.
+warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
+
+
+# from jaraco.functools 1.3
+def _call_aside(f, *args, **kwargs):
+    f(*args, **kwargs)
+    return f
+
+
+@_call_aside
+def _initialize(g=globals()):
+    "Set up global resource manager (deliberately not state-saved)"
+    manager = ResourceManager()
+    g['_manager'] = manager
+    g.update(
+        (name, getattr(manager, name))
+        for name in dir(manager)
+        if not name.startswith('_')
+    )
+
+
+@_call_aside
+def _initialize_master_working_set():
+    """
+    Prepare the master working set and make the ``require()``
+    API available.
+
+    This function has explicit effects on the global state
+    of pkg_resources. It is intended to be invoked once at
+    the initialization of this module.
+
+    Invocation by other packages is unsupported and done
+    at their own risk.
+    """
+    working_set = WorkingSet._build_master()
+    _declare_state('object', working_set=working_set)
+
+    require = working_set.require
+    iter_entry_points = working_set.iter_entry_points
+    add_activation_listener = working_set.subscribe
+    run_script = working_set.run_script
+    # backward compatibility
+    run_main = run_script
+    # Activate all distributions already on sys.path with replace=False and
+    # ensure that all distributions added to the working set in the future
+    # (e.g. by calling ``require()``) will get activated as well,
+    # with higher priority (replace=True).
+    tuple(
+        dist.activate(replace=False)
+        for dist in working_set
+    )
+    add_activation_listener(
+        lambda dist: dist.activate(replace=True),
+        existing=False,
+    )
+    working_set.entries = []
+    # match order
+    list(map(working_set.add_entry, sys.path))
+    globals().update(locals())
+
+
+class PkgResourcesDeprecationWarning(Warning):
+    """
+    Base class for warning about deprecations in ``pkg_resources``
+
+    This class is not derived from ``DeprecationWarning``, and as such is
+    visible by default.
+    """
diff --git a/lib/python3.7/site-packages/pkg_resources/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..23b868d3bbc94b4d8d9356a27c35ba2bf6cbc58c
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/__pycache__/py31compat.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/__pycache__/py31compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..456c3c73afeae9a572a76b66c36b30bad71197b3
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/__pycache__/py31compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/__init__.py b/lib/python3.7/site-packages/pkg_resources/_vendor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8c6f9a968098d411fc5c3cbfd3ab65fc374423fe
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/__pycache__/appdirs.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/__pycache__/appdirs.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6756a488578a9c89fa60fe13635243abeee4f192
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/__pycache__/appdirs.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/__pycache__/pyparsing.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/__pycache__/pyparsing.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a96f461a7872cebd598ff414feb9a533d4f88fee
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/__pycache__/pyparsing.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/__pycache__/six.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/__pycache__/six.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..409a66d7936ee7de3f64a8d88ed54675f38d5360
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/__pycache__/six.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/appdirs.py b/lib/python3.7/site-packages/pkg_resources/_vendor/appdirs.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae67001af8b661373edeee2eb327b9f63e630d62
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/_vendor/appdirs.py
@@ -0,0 +1,608 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2005-2010 ActiveState Software Inc.
+# Copyright (c) 2013 Eddy Petrișor
+
+"""Utilities for determining application-specific dirs.
+
+See <http://github.com/ActiveState/appdirs> for details and usage.
+"""
+# Dev Notes:
+# - MSDN on where to store app data files:
+#   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
+# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
+# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+
+__version_info__ = (1, 4, 3)
+__version__ = '.'.join(map(str, __version_info__))
+
+
+import sys
+import os
+
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    unicode = str
+
+if sys.platform.startswith('java'):
+    import platform
+    os_name = platform.java_ver()[3][0]
+    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
+        system = 'win32'
+    elif os_name.startswith('Mac'): # "Mac OS X", etc.
+        system = 'darwin'
+    else: # "Linux", "SunOS", "FreeBSD", etc.
+        # Setting this to "linux2" is not ideal, but only Windows or Mac
+        # are actually checked for and the rest of the module expects
+        # *sys.platform* style strings.
+        system = 'linux2'
+else:
+    system = sys.platform
+
+
+
+def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
+    r"""Return full path to the user-specific data dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "roaming" (boolean, default False) can be set True to use the Windows
+            roaming appdata directory. That means that for users on a Windows
+            network setup for roaming profiles, this user data will be
+            sync'd on login. See
+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+            for a discussion of issues.
+
+    Typical user data directories are:
+        Mac OS X:               ~/Library/Application Support/<AppName>
+        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
+        Win XP (not roaming):   C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
+        Win XP (roaming):       C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
+        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
+        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
+
+    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
+    That means, by default "~/.local/share/<AppName>".
+    """
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
+        path = os.path.normpath(_get_win_folder(const))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+    elif system == 'darwin':
+        path = os.path.expanduser('~/Library/Application Support/')
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
+    r"""Return full path to the user-shared data dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "multipath" is an optional parameter only applicable to *nix
+            which indicates that the entire list of data dirs should be
+            returned. By default, the first item from XDG_DATA_DIRS is
+            returned, or '/usr/local/share/<AppName>',
+            if XDG_DATA_DIRS is not set
+
+    Typical site data directories are:
+        Mac OS X:   /Library/Application Support/<AppName>
+        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
+        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
+        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.
+
+    For Unix, this uses the $XDG_DATA_DIRS[0] default.
+
+    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+    """
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+    elif system == 'darwin':
+        path = os.path.expanduser('/Library/Application Support')
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        # XDG default for $XDG_DATA_DIRS
+        # only first, if multipath is False
+        path = os.getenv('XDG_DATA_DIRS',
+                         os.pathsep.join(['/usr/local/share', '/usr/share']))
+        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+        if appname:
+            if version:
+                appname = os.path.join(appname, version)
+            pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+        if multipath:
+            path = os.pathsep.join(pathlist)
+        else:
+            path = pathlist[0]
+        return path
+
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
+    r"""Return full path to the user-specific config dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "roaming" (boolean, default False) can be set True to use the Windows
+            roaming appdata directory. That means that for users on a Windows
+            network setup for roaming profiles, this user data will be
+            sync'd on login. See
+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+            for a discussion of issues.
+
+    Typical user config directories are:
+        Mac OS X:               same as user_data_dir
+        Unix:                   ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
+        Win *:                  same as user_data_dir
+
+    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
+    That means, by default "~/.config/<AppName>".
+    """
+    if system in ["win32", "darwin"]:
+        path = user_data_dir(appname, appauthor, None, roaming)
+    else:
+        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
+    r"""Return full path to the user-shared data dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "multipath" is an optional parameter only applicable to *nix
+            which indicates that the entire list of config dirs should be
+            returned. By default, the first item from XDG_CONFIG_DIRS is
+            returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
+
+    Typical site config directories are:
+        Mac OS X:   same as site_data_dir
+        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
+                    $XDG_CONFIG_DIRS
+        Win *:      same as site_data_dir
+        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+
+    For Unix, this uses the $XDG_CONFIG_DIRS[0] default, if multipath=False.
+
+    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+    """
+    if system in ["win32", "darwin"]:
+        path = site_data_dir(appname, appauthor)
+        if appname and version:
+            path = os.path.join(path, version)
+    else:
+        # XDG default for $XDG_CONFIG_DIRS
+        # only first, if multipath is False
+        path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
+        pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+        if appname:
+            if version:
+                appname = os.path.join(appname, version)
+            pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+        if multipath:
+            path = os.pathsep.join(pathlist)
+        else:
+            path = pathlist[0]
+    return path
+
+
+def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
+    r"""Return full path to the user-specific cache dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "opinion" (boolean) can be False to disable the appending of
+            "Cache" to the base app data dir for Windows. See
+            discussion below.
+
+    Typical user cache directories are:
+        Mac OS X:   ~/Library/Caches/<AppName>
+        Unix:       ~/.cache/<AppName> (XDG default)
+        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
+        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
+
+    On Windows the only suggestion in the MSDN docs is that local settings go in
+    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
+    app data dir (the default returned by `user_data_dir` above). Apps typically
+    put cache data somewhere *under* the given dir here. Some examples:
+        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
+        ...\Acme\SuperApp\Cache\1.0
+    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
+    This can be disabled with the `opinion=False` option.
+    """
+    if system == "win32":
+        if appauthor is None:
+            appauthor = appname
+        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
+        if appname:
+            if appauthor is not False:
+                path = os.path.join(path, appauthor, appname)
+            else:
+                path = os.path.join(path, appname)
+            if opinion:
+                path = os.path.join(path, "Cache")
+    elif system == 'darwin':
+        path = os.path.expanduser('~/Library/Caches')
+        if appname:
+            path = os.path.join(path, appname)
+    else:
+        path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
+    r"""Return full path to the user-specific state dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "roaming" (boolean, default False) can be set True to use the Windows
+            roaming appdata directory. That means that for users on a Windows
+            network setup for roaming profiles, this user data will be
+            sync'd on login. See
+            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+            for a discussion of issues.
+
+    Typical user state directories are:
+        Mac OS X:  same as user_data_dir
+        Unix:      ~/.local/state/<AppName>   # or in $XDG_STATE_HOME, if defined
+        Win *:     same as user_data_dir
+
+    For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
+    to extend the XDG spec and support $XDG_STATE_HOME.
+
+    That means, by default "~/.local/state/<AppName>".
+    """
+    if system in ["win32", "darwin"]:
+        path = user_data_dir(appname, appauthor, None, roaming)
+    else:
+        path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
+        if appname:
+            path = os.path.join(path, appname)
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
+    r"""Return full path to the user-specific log dir for this application.
+
+        "appname" is the name of application.
+            If None, just the system directory is returned.
+        "appauthor" (only used on Windows) is the name of the
+            appauthor or distributing body for this application. Typically
+            it is the owning company name. This falls back to appname. You may
+            pass False to disable it.
+        "version" is an optional version path element to append to the
+            path. You might want to use this if you want multiple versions
+            of your app to be able to run independently. If used, this
+            would typically be "<major>.<minor>".
+            Only applied when appname is present.
+        "opinion" (boolean) can be False to disable the appending of
+            "Logs" to the base app data dir for Windows, and "log" to the
+            base cache dir for Unix. See discussion below.
+
+    Typical user log directories are:
+        Mac OS X:   ~/Library/Logs/<AppName>
+        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
+        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
+        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
+
+    On Windows the only suggestion in the MSDN docs is that local settings
+    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
+    examples of what some Windows apps use for a logs dir.)
+
+    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
+    value for Windows and appends "log" to the user cache dir for Unix.
+    This can be disabled with the `opinion=False` option.
+    """
+    if system == "darwin":
+        path = os.path.join(
+            os.path.expanduser('~/Library/Logs'),
+            appname)
+    elif system == "win32":
+        path = user_data_dir(appname, appauthor, version)
+        version = False
+        if opinion:
+            path = os.path.join(path, "Logs")
+    else:
+        path = user_cache_dir(appname, appauthor, version)
+        version = False
+        if opinion:
+            path = os.path.join(path, "log")
+    if appname and version:
+        path = os.path.join(path, version)
+    return path
+
+
+class AppDirs(object):
+    """Convenience wrapper for getting application dirs."""
+    def __init__(self, appname=None, appauthor=None, version=None,
+            roaming=False, multipath=False):
+        self.appname = appname
+        self.appauthor = appauthor
+        self.version = version
+        self.roaming = roaming
+        self.multipath = multipath
+
+    @property
+    def user_data_dir(self):
+        return user_data_dir(self.appname, self.appauthor,
+                             version=self.version, roaming=self.roaming)
+
+    @property
+    def site_data_dir(self):
+        return site_data_dir(self.appname, self.appauthor,
+                             version=self.version, multipath=self.multipath)
+
+    @property
+    def user_config_dir(self):
+        return user_config_dir(self.appname, self.appauthor,
+                               version=self.version, roaming=self.roaming)
+
+    @property
+    def site_config_dir(self):
+        return site_config_dir(self.appname, self.appauthor,
+                             version=self.version, multipath=self.multipath)
+
+    @property
+    def user_cache_dir(self):
+        return user_cache_dir(self.appname, self.appauthor,
+                              version=self.version)
+
+    @property
+    def user_state_dir(self):
+        return user_state_dir(self.appname, self.appauthor,
+                              version=self.version)
+
+    @property
+    def user_log_dir(self):
+        return user_log_dir(self.appname, self.appauthor,
+                            version=self.version)
+
+
+#---- internal support stuff
+
+def _get_win_folder_from_registry(csidl_name):
+    """This is a fallback technique at best. I'm not sure if using the
+    registry for this guarantees us the correct answer for all CSIDL_*
+    names.
+    """
+    if PY3:
+        import winreg as _winreg
+    else:
+        import _winreg
+
+    shell_folder_name = {
+        "CSIDL_APPDATA": "AppData",
+        "CSIDL_COMMON_APPDATA": "Common AppData",
+        "CSIDL_LOCAL_APPDATA": "Local AppData",
+    }[csidl_name]
+
+    key = _winreg.OpenKey(
+        _winreg.HKEY_CURRENT_USER,
+        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
+    )
+    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
+    return dir
+
+
+def _get_win_folder_with_pywin32(csidl_name):
+    from win32com.shell import shellcon, shell
+    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
+    # Try to make this a unicode path because SHGetFolderPath does
+    # not return unicode strings when there is unicode data in the
+    # path.
+    try:
+        dir = unicode(dir)
+
+        # Downgrade to short path name if have highbit chars. See
+        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+        has_high_char = False
+        for c in dir:
+            if ord(c) > 255:
+                has_high_char = True
+                break
+        if has_high_char:
+            try:
+                import win32api
+                dir = win32api.GetShortPathName(dir)
+            except ImportError:
+                pass
+    except UnicodeError:
+        pass
+    return dir
+
+
+def _get_win_folder_with_ctypes(csidl_name):
+    import ctypes
+
+    csidl_const = {
+        "CSIDL_APPDATA": 26,
+        "CSIDL_COMMON_APPDATA": 35,
+        "CSIDL_LOCAL_APPDATA": 28,
+    }[csidl_name]
+
+    buf = ctypes.create_unicode_buffer(1024)
+    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+    # Downgrade to short path name if have highbit chars. See
+    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+    has_high_char = False
+    for c in buf:
+        if ord(c) > 255:
+            has_high_char = True
+            break
+    if has_high_char:
+        buf2 = ctypes.create_unicode_buffer(1024)
+        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+            buf = buf2
+
+    return buf.value
+
+def _get_win_folder_with_jna(csidl_name):
+    import array
+    from com.sun import jna
+    from com.sun.jna.platform import win32
+
+    buf_size = win32.WinDef.MAX_PATH * 2
+    buf = array.zeros('c', buf_size)
+    shell = win32.Shell32.INSTANCE
+    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
+    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+    # Downgrade to short path name if have highbit chars. See
+    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+    has_high_char = False
+    for c in dir:
+        if ord(c) > 255:
+            has_high_char = True
+            break
+    if has_high_char:
+        buf = array.zeros('c', buf_size)
+        kernel = win32.Kernel32.INSTANCE
+        if kernel.GetShortPathName(dir, buf, buf_size):
+            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+    return dir
+
+if system == "win32":
+    try:
+        import win32com.shell
+        _get_win_folder = _get_win_folder_with_pywin32
+    except ImportError:
+        try:
+            from ctypes import windll
+            _get_win_folder = _get_win_folder_with_ctypes
+        except ImportError:
+            try:
+                import com.sun.jna
+                _get_win_folder = _get_win_folder_with_jna
+            except ImportError:
+                _get_win_folder = _get_win_folder_from_registry
+
+
+#---- self test code
+
+if __name__ == "__main__":
+    appname = "MyApp"
+    appauthor = "MyCompany"
+
+    props = ("user_data_dir",
+             "user_config_dir",
+             "user_cache_dir",
+             "user_state_dir",
+             "user_log_dir",
+             "site_data_dir",
+             "site_config_dir")
+
+    print("-- app dirs %s --" % __version__)
+
+    print("-- app dirs (with optional 'version')")
+    dirs = AppDirs(appname, appauthor, version="1.0")
+    for prop in props:
+        print("%s: %s" % (prop, getattr(dirs, prop)))
+
+    print("\n-- app dirs (without optional 'version')")
+    dirs = AppDirs(appname, appauthor)
+    for prop in props:
+        print("%s: %s" % (prop, getattr(dirs, prop)))
+
+    print("\n-- app dirs (without optional 'appauthor')")
+    dirs = AppDirs(appname)
+    for prop in props:
+        print("%s: %s" % (prop, getattr(dirs, prop)))
+
+    print("\n-- app dirs (with disabled 'appauthor')")
+    dirs = AppDirs(appname, appauthor=False)
+    for prop in props:
+        print("%s: %s" % (prop, getattr(dirs, prop)))
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__about__.py b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__about__.py
new file mode 100644
index 0000000000000000000000000000000000000000..95d330ef823aa2e12f7846bc63c0955b25df6029
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__about__.py
@@ -0,0 +1,21 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+__all__ = [
+    "__title__", "__summary__", "__uri__", "__version__", "__author__",
+    "__email__", "__license__", "__copyright__",
+]
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "16.8"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD or Apache License, Version 2.0"
+__copyright__ = "Copyright 2014-2016 %s" % __author__
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__init__.py b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ee6220203e5425f900fb5a43676c24ea377c2fa
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__init__.py
@@ -0,0 +1,14 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+from .__about__ import (
+    __author__, __copyright__, __email__, __license__, __summary__, __title__,
+    __uri__, __version__
+)
+
+__all__ = [
+    "__title__", "__summary__", "__uri__", "__version__", "__author__",
+    "__email__", "__license__", "__copyright__",
+]
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb8a69a71251f2104591d246de6180809489f6a6
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b02ca05cefb3e21562891566cc1386a9590218b
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0bce5738e5aef2880cca3ecdec907da48f4d288c
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7e621f4c5f898a75b6438104289d425bc36cd4c0
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/markers.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/markers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c1973ed7f5ebaddca67939534ae667401862161
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/markers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0644f1e5318998cc8c53c38fca80a10937c25e03
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c4855c474b50dc8c3d6f0aa42337b12d2ee5b7a9
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79b470c3e33bfee4a47f5b8c87897eafcff647d1
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31961dac5c0ef3b7936f86f991a25a4859fc4aec
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/_compat.py b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..210bb80b7e7b64cb79f7e7cdf3e42819fe3471fe
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/_compat.py
@@ -0,0 +1,30 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+# flake8: noqa
+
+if PY3:
+    string_types = str,
+else:
+    string_types = basestring,
+
+
+def with_metaclass(meta, *bases):
+    """
+    Create a base class with a metaclass.
+    """
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(meta):
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+    return type.__new__(metaclass, 'temporary_class', (), {})
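To illustrate what with_metaclass buys here: the temporary class replaces itself with a class built by the real metaclass at the first (and only) level of instantiation, so the same code works under Python 2 and 3. A minimal sketch with made-up names Meta and Base:

    class Meta(type):
        def __new__(mcs, name, bases, namespace):
            # The metaclass injects a default attribute, for demonstration.
            namespace.setdefault("tag", name.lower())
            return super(Meta, mcs).__new__(mcs, name, bases, namespace)

    class Base(with_metaclass(Meta, object)):
        pass

    assert type(Base) is Meta   # Meta was applied, not the dummy metaclass
    assert Base.tag == "base"   # and it ran exactly once, on Base itself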
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/_structures.py b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/_structures.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccc27861c3a4d9efaa3db753c77c4515a627bd98
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/_structures.py
@@ -0,0 +1,68 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+
+class Infinity(object):
+
+    def __repr__(self):
+        return "Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return False
+
+    def __le__(self, other):
+        return False
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return True
+
+    def __ge__(self, other):
+        return True
+
+    def __neg__(self):
+        return NegativeInfinity
+
+Infinity = Infinity()
+
+
+class NegativeInfinity(object):
+
+    def __repr__(self):
+        return "-Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return True
+
+    def __le__(self, other):
+        return True
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return False
+
+    def __ge__(self, other):
+        return False
+
+    def __neg__(self):
+        return Infinity
+
+NegativeInfinity = NegativeInfinity()
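These two singletons give packaging's version-comparison keys a value that sorts above (or below) everything else, so missing segments such as "no pre-release" can be padded into comparison tuples. A small sketch of that idea (the tuples are hypothetical, not the real comparison-key layout):

    # Any value sorts below Infinity and above NegativeInfinity.
    assert Infinity > (9, 9, 9)
    assert NegativeInfinity < (0,)

    # As tuple padding, "no pre-release" can be made to sort after an
    # actual pre-release segment:
    keys = [(1, 0, Infinity), (1, 0, ("a", 1))]
    assert sorted(keys)[-1] == (1, 0, Infinity)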
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/markers.py b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/markers.py
new file mode 100644
index 0000000000000000000000000000000000000000..892e578edd4b992cc2996c31d9deb13af73d62c0
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/markers.py
@@ -0,0 +1,301 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import operator
+import os
+import platform
+import sys
+
+from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
+from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
+from pkg_resources.extern.pyparsing import Literal as L  # noqa
+
+from ._compat import string_types
+from .specifiers import Specifier, InvalidSpecifier
+
+
+__all__ = [
+    "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
+    "Marker", "default_environment",
+]
+
+
+class InvalidMarker(ValueError):
+    """
+    An invalid marker was found; users should refer to PEP 508.
+    """
+
+
+class UndefinedComparison(ValueError):
+    """
+    An invalid operation was attempted on a value that doesn't support it.
+    """
+
+
+class UndefinedEnvironmentName(ValueError):
+    """
+    A name was referenced that does not exist in the evaluation
+    environment.
+    """
+
+
+class Node(object):
+
+    def __init__(self, value):
+        self.value = value
+
+    def __str__(self):
+        return str(self.value)
+
+    def __repr__(self):
+        return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
+
+    def serialize(self):
+        raise NotImplementedError
+
+
+class Variable(Node):
+
+    def serialize(self):
+        return str(self)
+
+
+class Value(Node):
+
+    def serialize(self):
+        return '"{0}"'.format(self)
+
+
+class Op(Node):
+
+    def serialize(self):
+        return str(self)
+
+
+VARIABLE = (
+    L("implementation_version") |
+    L("platform_python_implementation") |
+    L("implementation_name") |
+    L("python_full_version") |
+    L("platform_release") |
+    L("platform_version") |
+    L("platform_machine") |
+    L("platform_system") |
+    L("python_version") |
+    L("sys_platform") |
+    L("os_name") |
+    L("os.name") |  # PEP-345
+    L("sys.platform") |  # PEP-345
+    L("platform.version") |  # PEP-345
+    L("platform.machine") |  # PEP-345
+    L("platform.python_implementation") |  # PEP-345
+    L("python_implementation") |  # undocumented setuptools legacy
+    L("extra")
+)
+ALIASES = {
+    'os.name': 'os_name',
+    'sys.platform': 'sys_platform',
+    'platform.version': 'platform_version',
+    'platform.machine': 'platform_machine',
+    'platform.python_implementation': 'platform_python_implementation',
+    'python_implementation': 'platform_python_implementation'
+}
+VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
+
+VERSION_CMP = (
+    L("===") |
+    L("==") |
+    L(">=") |
+    L("<=") |
+    L("!=") |
+    L("~=") |
+    L(">") |
+    L("<")
+)
+
+MARKER_OP = VERSION_CMP | L("not in") | L("in")
+MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
+
+MARKER_VALUE = QuotedString("'") | QuotedString('"')
+MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
+
+BOOLOP = L("and") | L("or")
+
+MARKER_VAR = VARIABLE | MARKER_VALUE
+
+MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
+MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
+
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+
+MARKER_EXPR = Forward()
+MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
+MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
+
+MARKER = stringStart + MARKER_EXPR + stringEnd
+
+
+def _coerce_parse_result(results):
+    if isinstance(results, ParseResults):
+        return [_coerce_parse_result(i) for i in results]
+    else:
+        return results
+
+
+def _format_marker(marker, first=True):
+    assert isinstance(marker, (list, tuple, string_types))
+
+    # Sometimes we have a structure like [[...]] which is a single-item list
+    # where the single item is itself its own list. In that case we want to skip
+    # the rest of this function so that we don't get extraneous () on the
+    # outside.
+    if (isinstance(marker, list) and len(marker) == 1 and
+            isinstance(marker[0], (list, tuple))):
+        return _format_marker(marker[0])
+
+    if isinstance(marker, list):
+        inner = (_format_marker(m, first=False) for m in marker)
+        if first:
+            return " ".join(inner)
+        else:
+            return "(" + " ".join(inner) + ")"
+    elif isinstance(marker, tuple):
+        return " ".join([m.serialize() for m in marker])
+    else:
+        return marker
+
+
+_operators = {
+    "in": lambda lhs, rhs: lhs in rhs,
+    "not in": lambda lhs, rhs: lhs not in rhs,
+    "<": operator.lt,
+    "<=": operator.le,
+    "==": operator.eq,
+    "!=": operator.ne,
+    ">=": operator.ge,
+    ">": operator.gt,
+}
+
+
+def _eval_op(lhs, op, rhs):
+    try:
+        spec = Specifier("".join([op.serialize(), rhs]))
+    except InvalidSpecifier:
+        pass
+    else:
+        return spec.contains(lhs)
+
+    oper = _operators.get(op.serialize())
+    if oper is None:
+        raise UndefinedComparison(
+            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
+        )
+
+    return oper(lhs, rhs)
+
+
+_undefined = object()
+
+
+def _get_env(environment, name):
+    value = environment.get(name, _undefined)
+
+    if value is _undefined:
+        raise UndefinedEnvironmentName(
+            "{0!r} does not exist in evaluation environment.".format(name)
+        )
+
+    return value
+
+
+def _evaluate_markers(markers, environment):
+    groups = [[]]
+
+    for marker in markers:
+        assert isinstance(marker, (list, tuple, string_types))
+
+        if isinstance(marker, list):
+            groups[-1].append(_evaluate_markers(marker, environment))
+        elif isinstance(marker, tuple):
+            lhs, op, rhs = marker
+
+            if isinstance(lhs, Variable):
+                lhs_value = _get_env(environment, lhs.value)
+                rhs_value = rhs.value
+            else:
+                lhs_value = lhs.value
+                rhs_value = _get_env(environment, rhs.value)
+
+            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+        else:
+            assert marker in ["and", "or"]
+            if marker == "or":
+                groups.append([])
+
+    return any(all(item) for item in groups)
+
+
+def format_full_version(info):
+    version = '{0.major}.{0.minor}.{0.micro}'.format(info)
+    kind = info.releaselevel
+    if kind != 'final':
+        version += kind[0] + str(info.serial)
+    return version
+
+
+def default_environment():
+    if hasattr(sys, 'implementation'):
+        iver = format_full_version(sys.implementation.version)
+        implementation_name = sys.implementation.name
+    else:
+        iver = '0'
+        implementation_name = ''
+
+    return {
+        "implementation_name": implementation_name,
+        "implementation_version": iver,
+        "os_name": os.name,
+        "platform_machine": platform.machine(),
+        "platform_release": platform.release(),
+        "platform_system": platform.system(),
+        "platform_version": platform.version(),
+        "python_full_version": platform.python_version(),
+        "platform_python_implementation": platform.python_implementation(),
+        "python_version": platform.python_version()[:3],
+        "sys_platform": sys.platform,
+    }
+
+
+class Marker(object):
+
+    def __init__(self, marker):
+        try:
+            self._markers = _coerce_parse_result(MARKER.parseString(marker))
+        except ParseException as e:
+            err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
+                marker, marker[e.loc:e.loc + 8])
+            raise InvalidMarker(err_str)
+
+    def __str__(self):
+        return _format_marker(self._markers)
+
+    def __repr__(self):
+        return "<Marker({0!r})>".format(str(self))
+
+    def evaluate(self, environment=None):
+        """Evaluate a marker.
+
+        Return the boolean from evaluating the given marker against the
+        environment. environment is an optional argument to override all or
+        part of the determined environment.
+
+        The environment is determined from the current Python process.
+        """
+        current_environment = default_environment()
+        if environment is not None:
+            current_environment.update(environment)
+
+        return _evaluate_markers(self._markers, current_environment)
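A short sketch of the Marker API defined above (the marker strings are hypothetical PEP 508 examples; evaluate() merges any override into default_environment()):

    m = Marker('python_version >= "2.7" and os_name == "posix"')

    print(m.evaluate())                            # against the running interpreter
    print(m.evaluate({"python_version": "2.6"}))   # False: 2.6 fails >= "2.7"
    print(str(m))                                  # round-trips the expression

    try:
        Marker("this is not a marker")
    except InvalidMarker as exc:
        print(exc)                                 # reports the parse location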
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/requirements.py b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/requirements.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c8c4a3852fd37053fd552846aa7787805c30a48
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/requirements.py
@@ -0,0 +1,127 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import string
+import re
+
+from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
+from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
+from pkg_resources.extern.pyparsing import Literal as L  # noqa
+from pkg_resources.extern.six.moves.urllib import parse as urlparse
+
+from .markers import MARKER_EXPR, Marker
+from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+
+class InvalidRequirement(ValueError):
+    """
+    An invalid requirement was found; users should refer to PEP 508.
+    """
+
+
+ALPHANUM = Word(string.ascii_letters + string.digits)
+
+LBRACKET = L("[").suppress()
+RBRACKET = L("]").suppress()
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+COMMA = L(",").suppress()
+SEMICOLON = L(";").suppress()
+AT = L("@").suppress()
+
+PUNCTUATION = Word("-_.")
+IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
+IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
+
+NAME = IDENTIFIER("name")
+EXTRA = IDENTIFIER
+
+URI = Regex(r'[^ ]+')("url")
+URL = (AT + URI)
+
+EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
+EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
+
+VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
+VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
+
+VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
+VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
+                       joinString=",", adjacent=False)("_raw_spec")
+_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
+_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')
+
+VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
+VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
+
+MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
+MARKER_EXPR.setParseAction(
+    lambda s, l, t: Marker(s[t._original_start:t._original_end])
+)
+MARKER_SEPARATOR = SEMICOLON
+MARKER = MARKER_SEPARATOR + MARKER_EXPR
+
+VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
+URL_AND_MARKER = URL + Optional(MARKER)
+
+NAMED_REQUIREMENT = \
+    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
+
+REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+
+
+class Requirement(object):
+    """Parse a requirement.
+
+    Parse a given requirement string into its parts, such as name, specifier,
+    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+    string.
+    """
+
+    # TODO: Can we test whether something is contained within a requirement?
+    #       If so how do we do that? Do we need to test against the _name_ of
+    #       the thing as well as the version? What about the markers?
+    # TODO: Can we normalize the name and extra name?
+
+    def __init__(self, requirement_string):
+        try:
+            req = REQUIREMENT.parseString(requirement_string)
+        except ParseException as e:
+            raise InvalidRequirement(
+                "Invalid requirement, parse error at \"{0!r}\"".format(
+                    requirement_string[e.loc:e.loc + 8]))
+
+        self.name = req.name
+        if req.url:
+            parsed_url = urlparse.urlparse(req.url)
+            if not (parsed_url.scheme and parsed_url.netloc):
+                raise InvalidRequirement("Invalid URL given")
+            self.url = req.url
+        else:
+            self.url = None
+        self.extras = set(req.extras.asList() if req.extras else [])
+        self.specifier = SpecifierSet(req.specifier)
+        self.marker = req.marker if req.marker else None
+
+    def __str__(self):
+        parts = [self.name]
+
+        if self.extras:
+            parts.append("[{0}]".format(",".join(sorted(self.extras))))
+
+        if self.specifier:
+            parts.append(str(self.specifier))
+
+        if self.url:
+            parts.append("@ {0}".format(self.url))
+
+        if self.marker:
+            parts.append("; {0}".format(self.marker))
+
+        return "".join(parts)
+
+    def __repr__(self):
+        return "<Requirement({0!r})>".format(str(self))
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f5a76cfd63f47dcce29b3ea82f59d10f4e8d771
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py
@@ -0,0 +1,774 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import abc
+import functools
+import itertools
+import re
+
+from ._compat import string_types, with_metaclass
+from .version import Version, LegacyVersion, parse
+
+
+class InvalidSpecifier(ValueError):
+    """
+    An invalid specifier was found; users should refer to PEP 440.
+    """
+
+
+class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
+
+    @abc.abstractmethod
+    def __str__(self):
+        """
+        Returns the str representation of this Specifier-like object. This
+        should be representative of the Specifier itself.
+        """
+
+    @abc.abstractmethod
+    def __hash__(self):
+        """
+        Returns a hash value for this Specifier-like object.
+        """
+
+    @abc.abstractmethod
+    def __eq__(self, other):
+        """
+        Returns a boolean representing whether or not the two Specifier-like
+        objects are equal.
+        """
+
+    @abc.abstractmethod
+    def __ne__(self, other):
+        """
+        Returns a boolean representing whether or not the two Specifier-like
+        objects are not equal.
+        """
+
+    @abc.abstractproperty
+    def prereleases(self):
+        """
+        Returns whether or not pre-releases as a whole are allowed by this
+        specifier.
+        """
+
+    @prereleases.setter
+    def prereleases(self, value):
+        """
+        Sets whether or not pre-releases as a whole are allowed by this
+        specifier.
+        """
+
+    @abc.abstractmethod
+    def contains(self, item, prereleases=None):
+        """
+        Determines if the given item is contained within this specifier.
+        """
+
+    @abc.abstractmethod
+    def filter(self, iterable, prereleases=None):
+        """
+        Takes an iterable of items and filters it so that only items
+        contained within this specifier remain.
+        """
+
+
+class _IndividualSpecifier(BaseSpecifier):
+
+    _operators = {}
+
+    def __init__(self, spec="", prereleases=None):
+        match = self._regex.search(spec)
+        if not match:
+            raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
+
+        self._spec = (
+            match.group("operator").strip(),
+            match.group("version").strip(),
+        )
+
+        # Store whether or not this Specifier should accept prereleases
+        self._prereleases = prereleases
+
+    def __repr__(self):
+        pre = (
+            ", prereleases={0!r}".format(self.prereleases)
+            if self._prereleases is not None
+            else ""
+        )
+
+        return "<{0}({1!r}{2})>".format(
+            self.__class__.__name__,
+            str(self),
+            pre,
+        )
+
+    def __str__(self):
+        return "{0}{1}".format(*self._spec)
+
+    def __hash__(self):
+        return hash(self._spec)
+
+    def __eq__(self, other):
+        if isinstance(other, string_types):
+            try:
+                other = self.__class__(other)
+            except InvalidSpecifier:
+                return NotImplemented
+        elif not isinstance(other, self.__class__):
+            return NotImplemented
+
+        return self._spec == other._spec
+
+    def __ne__(self, other):
+        if isinstance(other, string_types):
+            try:
+                other = self.__class__(other)
+            except InvalidSpecifier:
+                return NotImplemented
+        elif not isinstance(other, self.__class__):
+            return NotImplemented
+
+        return self._spec != other._spec
+
+    def _get_operator(self, op):
+        return getattr(self, "_compare_{0}".format(self._operators[op]))
+
+    def _coerce_version(self, version):
+        if not isinstance(version, (LegacyVersion, Version)):
+            version = parse(version)
+        return version
+
+    @property
+    def operator(self):
+        return self._spec[0]
+
+    @property
+    def version(self):
+        return self._spec[1]
+
+    @property
+    def prereleases(self):
+        return self._prereleases
+
+    @prereleases.setter
+    def prereleases(self, value):
+        self._prereleases = value
+
+    def __contains__(self, item):
+        return self.contains(item)
+
+    def contains(self, item, prereleases=None):
+        # Determine if prereleases are to be allowed or not.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # Normalize item to a Version or LegacyVersion; this allows us to have
+        # a shortcut for ``"2.0" in Specifier(">=2")``.
+        item = self._coerce_version(item)
+
+        # Determine if we should be supporting prereleases in this specifier
+        # or not; if we do not support prereleases then we can short-circuit
+        # the logic if this version is a prerelease.
+        if item.is_prerelease and not prereleases:
+            return False
+
+        # Actually do the comparison to determine if this item is contained
+        # within this Specifier or not.
+        return self._get_operator(self.operator)(item, self.version)
+
+    def filter(self, iterable, prereleases=None):
+        yielded = False
+        found_prereleases = []
+
+        kw = {"prereleases": prereleases if prereleases is not None else True}
+
+        # Attempt to iterate over all the values in the iterable and if any of
+        # them match, yield them.
+        for version in iterable:
+            parsed_version = self._coerce_version(version)
+
+            if self.contains(parsed_version, **kw):
+                # If our version is a prerelease, and we were not set to allow
+                # prereleases, then we'll store it for later in case nothing
+                # else matches this specifier.
+                if (parsed_version.is_prerelease and not
+                        (prereleases or self.prereleases)):
+                    found_prereleases.append(version)
+                # Either this is not a prerelease, or we should have been
+                # accepting prereleases from the beginning.
+                else:
+                    yielded = True
+                    yield version
+
+        # Now that we've iterated over everything, determine if we've yielded
+        # any values, and if we have not and we have any prereleases stored up
+        # then we will go ahead and yield the prereleases.
+        if not yielded and found_prereleases:
+            for version in found_prereleases:
+                yield version
+
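+# A sketch of the prerelease behavior implemented by contains()/filter()
+# above, using the concrete Specifier subclass defined below
+# (hypothetical version strings):
+#
+#   Specifier(">=1.0").contains("2.0a1")                    # False by default
+#   Specifier(">=1.0", prereleases=True).contains("2.0a1")  # True
+#   list(Specifier(">=1.0").filter(["1.0", "2.0a1"]))       # ["1.0"]
+#   list(Specifier(">=1.0").filter(["2.0a1"]))              # ["2.0a1"] fallback
+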
+
+class LegacySpecifier(_IndividualSpecifier):
+
+    _regex_str = (
+        r"""
+        (?P<operator>(==|!=|<=|>=|<|>))
+        \s*
+        (?P<version>
+            [^,;\s)]* # Since this is a "legacy" specifier, and the version
+                      # string can be just about anything, we match everything
+                      # except for whitespace, a semi-colon for marker support,
+                      # a closing paren since versions can be enclosed in
+                      # them, and a comma since it's a version separator.
+        )
+        """
+    )
+
+    _regex = re.compile(
+        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    _operators = {
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+    }
+
+    def _coerce_version(self, version):
+        if not isinstance(version, LegacyVersion):
+            version = LegacyVersion(str(version))
+        return version
+
+    def _compare_equal(self, prospective, spec):
+        return prospective == self._coerce_version(spec)
+
+    def _compare_not_equal(self, prospective, spec):
+        return prospective != self._coerce_version(spec)
+
+    def _compare_less_than_equal(self, prospective, spec):
+        return prospective <= self._coerce_version(spec)
+
+    def _compare_greater_than_equal(self, prospective, spec):
+        return prospective >= self._coerce_version(spec)
+
+    def _compare_less_than(self, prospective, spec):
+        return prospective < self._coerce_version(spec)
+
+    def _compare_greater_than(self, prospective, spec):
+        return prospective > self._coerce_version(spec)
+
+
+def _require_version_compare(fn):
+    @functools.wraps(fn)
+    def wrapped(self, prospective, spec):
+        if not isinstance(prospective, Version):
+            return False
+        return fn(self, prospective, spec)
+    return wrapped
+
+
+class Specifier(_IndividualSpecifier):
+
+    _regex_str = (
+        r"""
+        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+        (?P<version>
+            (?:
+                # The identity operators allow for an escape hatch that will
+                # do an exact string match of the version you wish to install.
+                # This will not be parsed by PEP 440 and we cannot determine
+                # any semantic meaning from it. This operator is discouraged
+                # but included entirely as an escape hatch.
+                (?<====)  # Only match for the identity operator
+                \s*
+                [^\s]*    # We just match everything, except for whitespace
+                          # since we are only testing for strict identity.
+            )
+            |
+            (?:
+                # The (non)equality operators allow for wild card and local
+                # versions to be specified so we have to define these two
+                # operators separately to enable that.
+                (?<===|!=)            # Only match for equals and not equals
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+
+                # You cannot use a wild card and a dev or local version
+                # together so group them with a | and make them optional.
+                (?:
+                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
+                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+                    |
+                    \.\*  # Wild card syntax of .*
+                )?
+            )
+            |
+            (?:
+                # The compatible operator requires at least two digits in the
+                # release segment.
+                (?<=~=)               # Only match for the compatible operator
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+            |
+            (?:
+                # All other operators only allow a subset of what the
+                # (non)equality operators do. Specifically they do not allow
+                # local versions to be specified nor do they allow the prefix
+                # matching wild cards.
+                (?<!==|!=|~=)         # We have special cases for these
+                                      # operators so we want to make sure they
+                                      # don't match here.
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+        )
+        """
+    )
+
+    _regex = re.compile(
+        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    _operators = {
+        "~=": "compatible",
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+        "===": "arbitrary",
+    }
+
+    @_require_version_compare
+    def _compare_compatible(self, prospective, spec):
+        # Compatible releases have an equivalent combination of >= and ==.
+        # That is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+        # implement this in terms of the other specifiers instead of
+        # implementing it ourselves. The only thing we need to do is construct
+        # the other specifiers.
+
+        # We want everything but the last item in the version, but we want to
+        # ignore post and dev releases, and we want to treat the pre-release as
+        # its own separate segment.
+        prefix = ".".join(
+            list(
+                itertools.takewhile(
+                    lambda x: (not x.startswith("post") and not
+                               x.startswith("dev")),
+                    _version_split(spec),
+                )
+            )[:-1]
+        )
+
+        # Add the prefix notation to the end of our string
+        prefix += ".*"
+
+        return (self._get_operator(">=")(prospective, spec) and
+                self._get_operator("==")(prospective, prefix))
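+
+    # Sketched behaviour of the compatible operator implemented above:
+    #
+    #     >>> Specifier("~=2.2").contains("2.3")
+    #     True
+    #     >>> Specifier("~=2.2").contains("3.0")
+    #     False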
+
+    @_require_version_compare
+    def _compare_equal(self, prospective, spec):
+        # We need special logic to handle prefix matching
+        if spec.endswith(".*"):
+            # In the case of prefix matching we want to ignore local segment.
+            prospective = Version(prospective.public)
+            # Split the spec out by dots, and pretend that there is an implicit
+            # dot in between a release segment and a pre-release segment.
+            spec = _version_split(spec[:-2])  # Remove the trailing .*
+
+            # Split the prospective version out by dots, and pretend that there
+            # is an implicit dot in between a release segment and a pre-release
+            # segment.
+            prospective = _version_split(str(prospective))
+
+            # Shorten the prospective version to be the same length as the spec
+            # so that we can determine if the specifier is a prefix of the
+            # prospective version or not.
+            prospective = prospective[:len(spec)]
+
+            # Pad out our two sides with zeros so that they both equal the same
+            # length.
+            spec, prospective = _pad_version(spec, prospective)
+        else:
+            # Convert our spec string into a Version
+            spec = Version(spec)
+
+            # If the specifier does not have a local segment, then we want to
+            # act as if the prospective version also does not have a local
+            # segment.
+            if not spec.local:
+                prospective = Version(prospective.public)
+
+        return prospective == spec
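+
+    # The two branches above, sketched as doctests:
+    #
+    #     >>> Specifier("==3.1.*").contains("3.1.2")
+    #     True
+    #     >>> Specifier("==1.0").contains("1.0+anylocal")
+    #     True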
+
+    @_require_version_compare
+    def _compare_not_equal(self, prospective, spec):
+        return not self._compare_equal(prospective, spec)
+
+    @_require_version_compare
+    def _compare_less_than_equal(self, prospective, spec):
+        return prospective <= Version(spec)
+
+    @_require_version_compare
+    def _compare_greater_than_equal(self, prospective, spec):
+        return prospective >= Version(spec)
+
+    @_require_version_compare
+    def _compare_less_than(self, prospective, spec):
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec)
+
+        # Check to see if the prospective version is less than the spec
+        # version. If it's not we can short circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective < spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # is a pre-release version, we do not accept pre-release
+        # versions for the version mentioned in the specifier (e.g. <3.1 should
+        # not match 3.1.dev0, but should match 3.0.dev0).
+        if not spec.is_prerelease and prospective.is_prerelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that prospective version is both
+        # less than the spec version *and* it's not a pre-release of the same
+        # version in the spec.
+        return True
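+
+    # The special case above, sketched as doctests (pre-releases must be
+    # allowed explicitly for the comparison to be reached at all):
+    #
+    #     >>> Specifier("<3.1").contains("3.1.dev0", prereleases=True)
+    #     False
+    #     >>> Specifier("<3.1").contains("3.0.dev0", prereleases=True)
+    #     True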
+
+    @_require_version_compare
+    def _compare_greater_than(self, prospective, spec):
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec)
+
+        # Check to see if the prospective version is greater than the spec
+        # version. If it's not we can short circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective > spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # is a post-release version, we do not accept
+        # post-release versions for the version mentioned in the specifier
+        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+        if not spec.is_postrelease and prospective.is_postrelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # Ensure that we do not allow a local version of the version mentioned
+        # in the specifier, which is technically greater than, to match.
+        if prospective.local is not None:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten to here, it means that prospective version is both
+        # greater than the spec version *and* it's not a post-release or local
+        # version of the same version in the spec.
+        return True
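+
+    # The mirrored special cases above, sketched as doctests:
+    #
+    #     >>> Specifier(">3.1").contains("3.1.post0")
+    #     False
+    #     >>> Specifier(">3.1").contains("3.1+local")
+    #     False
+    #     >>> Specifier(">3.1").contains("3.2")
+    #     True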
+
+    def _compare_arbitrary(self, prospective, spec):
+        return str(prospective).lower() == str(spec).lower()
+
+    @property
+    def prereleases(self):
+        # If there is an explicit prereleases set for this, then we'll just
+        # blindly use that.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # Look at all of our specifiers and determine if they are inclusive
+        # operators, and if they are if they are including an explicit
+        # prerelease.
+        operator, version = self._spec
+        if operator in ["==", ">=", "<=", "~=", "==="]:
+            # The == specifier can include a trailing .*, if it does we
+            # want to remove it before parsing.
+            if operator == "==" and version.endswith(".*"):
+                version = version[:-2]
+
+            # Parse the version, and if it is a pre-release then this
+            # specifier allows pre-releases.
+            if parse(version).is_prerelease:
+                return True
+
+        return False
+
+    @prereleases.setter
+    def prereleases(self, value):
+        self._prereleases = value
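+
+    # How a lone specifier decides whether it opts in to pre-releases:
+    #
+    #     >>> Specifier(">=1.0a1").prereleases
+    #     True
+    #     >>> Specifier(">=1.0").prereleases
+    #     False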
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version):
+    result = []
+    for item in version.split("."):
+        match = _prefix_regex.search(item)
+        if match:
+            result.extend(match.groups())
+        else:
+            result.append(item)
+    return result
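+
+# A sketch of the split, including the implicit dot inserted between a release
+# segment and an attached pre-release:
+#
+#     >>> _version_split("1.2rc1")
+#     ['1', '2', 'rc1']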
+
+
+def _pad_version(left, right):
+    left_split, right_split = [], []
+
+    # Get the release segment of our versions
+    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+    # Get the rest of our versions
+    left_split.append(left[len(left_split[0]):])
+    right_split.append(right[len(right_split[0]):])
+
+    # Insert our padding
+    left_split.insert(
+        1,
+        ["0"] * max(0, len(right_split[0]) - len(left_split[0])),
+    )
+    right_split.insert(
+        1,
+        ["0"] * max(0, len(left_split[0]) - len(right_split[0])),
+    )
+
+    return (
+        list(itertools.chain(*left_split)),
+        list(itertools.chain(*right_split)),
+    )
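+
+# A sketch of the padding: the shorter release segment is zero-filled so both
+# sides can be compared component-wise:
+#
+#     >>> _pad_version(["1", "2"], ["1", "16", "0"])
+#     (['1', '2', '0'], ['1', '16', '0'])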
+
+
+class SpecifierSet(BaseSpecifier):
+
+    def __init__(self, specifiers="", prereleases=None):
+        # Split on , to break each individual specifier into its own item, and
+        # strip each item to remove leading/trailing whitespace.
+        specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+        # Parse each individual specifier, attempting first to make it a
+        # Specifier and falling back to a LegacySpecifier.
+        parsed = set()
+        for specifier in specifiers:
+            try:
+                parsed.add(Specifier(specifier))
+            except InvalidSpecifier:
+                parsed.add(LegacySpecifier(specifier))
+
+        # Turn our parsed specifiers into a frozen set and save them for later.
+        self._specs = frozenset(parsed)
+
+        # Store our prereleases value so we can use it later to determine if
+        # we accept prereleases or not.
+        self._prereleases = prereleases
+
+    def __repr__(self):
+        pre = (
+            ", prereleases={0!r}".format(self.prereleases)
+            if self._prereleases is not None
+            else ""
+        )
+
+        return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
+
+    def __str__(self):
+        return ",".join(sorted(str(s) for s in self._specs))
+
+    def __hash__(self):
+        return hash(self._specs)
+
+    def __and__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        specifier = SpecifierSet()
+        specifier._specs = frozenset(self._specs | other._specs)
+
+        if self._prereleases is None and other._prereleases is not None:
+            specifier._prereleases = other._prereleases
+        elif self._prereleases is not None and other._prereleases is None:
+            specifier._prereleases = self._prereleases
+        elif self._prereleases == other._prereleases:
+            specifier._prereleases = self._prereleases
+        else:
+            raise ValueError(
+                "Cannot combine SpecifierSets with True and False prerelease "
+                "overrides."
+            )
+
+        return specifier
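+
+    # Intersection is the union of the underlying specifiers:
+    #
+    #     >>> str(SpecifierSet(">=1.0") & "<2.0")
+    #     '<2.0,>=1.0'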
+
+    def __eq__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif isinstance(other, _IndividualSpecifier):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs == other._specs
+
+    def __ne__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif isinstance(other, _IndividualSpecifier):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs != other._specs
+
+    def __len__(self):
+        return len(self._specs)
+
+    def __iter__(self):
+        return iter(self._specs)
+
+    @property
+    def prereleases(self):
+        # If we have been given an explicit prerelease modifier, then we'll
+        # pass that through here.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # If we don't have any specifiers, and we don't have a forced value,
+        # then we'll just return None since we don't know if this should have
+        # pre-releases or not.
+        if not self._specs:
+            return None
+
+        # Otherwise we'll see if any of the given specifiers accept
+        # prereleases, if any of them do we'll return True, otherwise False.
+        return any(s.prereleases for s in self._specs)
+
+    @prereleases.setter
+    def prereleases(self, value):
+        self._prereleases = value
+
+    def __contains__(self, item):
+        return self.contains(item)
+
+    def contains(self, item, prereleases=None):
+        # Ensure that our item is a Version or LegacyVersion instance.
+        if not isinstance(item, (LegacyVersion, Version)):
+            item = parse(item)
+
+        # Determine if we're forcing a prerelease or not; if we're not forcing
+        # one for this particular call, then we'll use whatever the
+        # SpecifierSet thinks about whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # We can determine if we're going to allow pre-releases by looking to
+        # see if any of the underlying items supports them. If none of them do
+        # and this item is a pre-release then we do not allow it and we can
+        # short circuit that here.
+        # Note: This means that 1.0.dev1 would not be contained in something
+        #       like >=1.0.devabc on its own, however it would be in
+        #       >=1.0.devabc,>0.0.dev0
+        if not prereleases and item.is_prerelease:
+            return False
+
+        # We simply dispatch to the underlying specs here to make sure that the
+        # given version is contained within all of them.
+        # Note: This use of all() here means that an empty set of specifiers
+        #       will always return True, this is an explicit design decision.
+        return all(
+            s.contains(item, prereleases=prereleases)
+            for s in self._specs
+        )
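+
+    # Containment requires every specifier in the set to match; an empty set
+    # matches everything:
+    #
+    #     >>> "1.5" in SpecifierSet(">=1.0,<2.0")
+    #     True
+    #     >>> "1.0" in SpecifierSet("")
+    #     True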
+
+    def filter(self, iterable, prereleases=None):
+        # Determine if we're forcing a prerelease or not; if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks about whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # If we have any specifiers, then we want to wrap our iterable in the
+        # filter method for each one, this will act as a logical AND amongst
+        # each specifier.
+        if self._specs:
+            for spec in self._specs:
+                iterable = spec.filter(iterable, prereleases=bool(prereleases))
+            return iterable
+        # If we do not have any specifiers, then we need to have a rough filter
+        # which will filter out any pre-releases, unless there are no final
+        # releases, and which will filter out LegacyVersion in general.
+        else:
+            filtered = []
+            found_prereleases = []
+
+            for item in iterable:
+                # Ensure that we have some kind of Version class for this item.
+                if not isinstance(item, (LegacyVersion, Version)):
+                    parsed_version = parse(item)
+                else:
+                    parsed_version = item
+
+                # Filter out any item which is parsed as a LegacyVersion
+                if isinstance(parsed_version, LegacyVersion):
+                    continue
+
+                # Store any item which is a pre-release for later unless we've
+                # already found a final version or we are accepting prereleases
+                if parsed_version.is_prerelease and not prereleases:
+                    if not filtered:
+                        found_prereleases.append(item)
+                else:
+                    filtered.append(item)
+
+            # If we've found no items except for pre-releases, then we'll go
+            # ahead and use the pre-releases
+            if not filtered and found_prereleases and prereleases is None:
+                return found_prereleases
+
+            return filtered
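+
+    # Filtering applies each specifier in turn and, by default, drops
+    # pre-releases whenever final releases are available:
+    #
+    #     >>> list(SpecifierSet(">=1.0").filter(["0.9", "1.0", "1.5a1"]))
+    #     ['1.0']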
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/utils.py b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..942387cef5d75f299a769b1eb43b6c7679e7a3a0
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/utils.py
@@ -0,0 +1,14 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import re
+
+
+_canonicalize_regex = re.compile(r"[-_.]+")
+
+
+def canonicalize_name(name):
+    # This is taken from PEP 503.
+    return _canonicalize_regex.sub("-", name).lower()
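+
+# A doctest-style sketch of the normalization:
+#
+#     >>> canonicalize_name("Django_Rest--Framework")
+#     'django-rest-framework'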
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/version.py b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..83b5ee8c5efadf22ce2f16ff08c8a8d75f1eb5df
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/_vendor/packaging/version.py
@@ -0,0 +1,393 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import collections
+import itertools
+import re
+
+from ._structures import Infinity
+
+
+__all__ = [
+    "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
+]
+
+
+_Version = collections.namedtuple(
+    "_Version",
+    ["epoch", "release", "dev", "pre", "post", "local"],
+)
+
+
+def parse(version):
+    """
+    Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object depending on whether the given version is
+    a valid PEP 440 version or a legacy version.
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
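+
+# A sketch of the fallback behaviour:
+#
+#     >>> parse("1.0a1")
+#     <Version('1.0a1')>
+#     >>> parse("french toast")
+#     <LegacyVersion('french toast')>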
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found; users should refer to PEP 440.
+    """
+
+
+class _BaseVersion(object):
+
+    def __hash__(self):
+        return hash(self._key)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda s, o: s < o)
+
+    def __le__(self, other):
+        return self._compare(other, lambda s, o: s <= o)
+
+    def __eq__(self, other):
+        return self._compare(other, lambda s, o: s == o)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda s, o: s >= o)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda s, o: s > o)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda s, o: s != o)
+
+    def _compare(self, other, method):
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+
+    def __init__(self, version):
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+    def __str__(self):
+        return self._version
+
+    def __repr__(self):
+        return "<LegacyVersion({0})>".format(repr(str(self)))
+
+    @property
+    def public(self):
+        return self._version
+
+    @property
+    def base_version(self):
+        return self._version
+
+    @property
+    def local(self):
+        return None
+
+    @property
+    def is_prerelease(self):
+        return False
+
+    @property
+    def is_postrelease(self):
+        return False
+
+
+_legacy_version_component_re = re.compile(
+    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
+)
+
+_legacy_version_replacement_map = {
+    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version):
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an
+    # epoch greater than or equal to 0. This effectively sorts every
+    # LegacyVersion, which uses the de facto standard originally implemented
+    # by setuptools, before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version of setuptools prior
+    # to its adoption of the packaging library.
+    parts = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+    parts = tuple(parts)
+
+    return epoch, parts
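+
+# Because of the hardcoded -1 epoch, every LegacyVersion sorts before every
+# PEP 440 Version:
+#
+#     >>> LegacyVersion("2.0") < Version("0.0.1")
+#     True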
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for third-party code to reuse.
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return "<Version({0})>".format(repr(str(self)))
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(".post{0}".format(self._version.post[1]))
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(".dev{0}".format(self._version.dev[1]))
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                "+{0}".format(".".join(str(x) for x in self._version.local))
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
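+
+# Normalization examples, including the implicit post-release spelling:
+#
+#     >>> _parse_letter_version("alpha", None)
+#     ('a', 0)
+#     >>> _parse_letter_version(None, "1")
+#     ('post', 1)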
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all of the now
+    # leading zeros until we come to something non-zero, then re-reverse the
+    # remainder back into the correct order, and use that tuple as our sorting
+    # key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
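+
+# Two consequences of the key construction above: trailing zeros are ignored,
+# and a bare dev release sorts before any pre-release of the same version:
+#
+#     >>> Version("1.0") == Version("1.0.0")
+#     True
+#     >>> Version("1.0.dev0") < Version("1.0a1")
+#     True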
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/pyparsing.py b/lib/python3.7/site-packages/pkg_resources/_vendor/pyparsing.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf75e1e5fcbfe7eac41d2a9e446c5c980741087b
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/_vendor/pyparsing.py
@@ -0,0 +1,5742 @@
+# module pyparsing.py
+#
+# Copyright (c) 2003-2018  Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = \
+"""
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and executing simple grammars,
+vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
+don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
+provides a library of classes that you use to construct the grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form 
+C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements 
+(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
+L{Literal} expressions)::
+
+    from pyparsing import Word, alphas
+
+    # define grammar of a greeting
+    greet = Word(alphas) + "," + Word(alphas) + "!"
+
+    hello = "Hello, World!"
+    print (hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+    Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the self-explanatory
+class names, and the use of '+', '|' and '^' operators.
+
+The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
+object with named attributes.
+
+The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
+ - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
+ - quoted strings
+ - embedded comments
+
+
+Getting Started -
+-----------------
+Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+ - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
+ - construct character word-group expressions using the L{Word} class
+ - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
+ - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
+ - associate names with your parsed results using L{ParserElement.setResultsName}
+ - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
+ - find more useful common expressions in the L{pyparsing_common} namespace class
+"""
+
+__version__ = "2.2.1"
+__versionTime__ = "18 Sep 2018 00:49 UTC"
+__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
+
+import string
+from weakref import ref as wkref
+import copy
+import sys
+import warnings
+import re
+import sre_constants
+import collections
+import pprint
+import traceback
+import types
+from datetime import datetime
+
+try:
+    from _thread import RLock
+except ImportError:
+    from threading import RLock
+
+try:
+    # Python 3
+    from collections.abc import Iterable
+    from collections.abc import MutableMapping
+except ImportError:
+    # Python 2.7
+    from collections import Iterable
+    from collections import MutableMapping
+
+try:
+    from collections import OrderedDict as _OrderedDict
+except ImportError:
+    try:
+        from ordereddict import OrderedDict as _OrderedDict
+    except ImportError:
+        _OrderedDict = None
+
+#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
+
+__all__ = [
+'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
+'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
+'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
+'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
+'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
+'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 
+'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
+'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
+'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
+'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
+'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
+'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
+'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
+'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 
+'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
+'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
+'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
+'CloseMatch', 'tokenMap', 'pyparsing_common',
+]
+
+system_version = tuple(sys.version_info)[:3]
+PY_3 = system_version[0] == 3
+if PY_3:
+    _MAX_INT = sys.maxsize
+    basestring = str
+    unichr = chr
+    _ustr = str
+
+    # build list of single arg builtins, that can be used as parse actions
+    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
+
+else:
+    _MAX_INT = sys.maxint
+    range = xrange
+
+    def _ustr(obj):
+        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
+           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
+           then < returns the unicode object | encodes it with the default encoding | ... >.
+        """
+        if isinstance(obj,unicode):
+            return obj
+
+        try:
+            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
+            # it won't break any existing code.
+            return str(obj)
+
+        except UnicodeEncodeError:
+            # Else encode it
+            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
+            xmlcharref = Regex(r'&#\d+;')
+            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
+            return xmlcharref.transformString(ret)
+
+    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
+    singleArgBuiltins = []
+    import __builtin__
+    for fname in "sum len sorted reversed list tuple set any all min max".split():
+        try:
+            singleArgBuiltins.append(getattr(__builtin__,fname))
+        except AttributeError:
+            continue
+            
+_generatorType = type((y for y in range(1)))
+ 
+def _xml_escape(data):
+    """Escape &, <, >, ", ', etc. in a string of data."""
+
+    # ampersand must be replaced first
+    from_symbols = '&><"\''
+    to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
+    for from_,to_ in zip(from_symbols, to_symbols):
+        data = data.replace(from_, to_)
+    return data
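+
+# A sketch (the ampersand is replaced first so the other entities survive):
+#
+#     >>> _xml_escape('a < b & c')
+#     'a &lt; b &amp; c'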
+
+class _Constants(object):
+    pass
+
+alphas     = string.ascii_uppercase + string.ascii_lowercase
+nums       = "0123456789"
+hexnums    = nums + "ABCDEFabcdef"
+alphanums  = alphas + nums
+_bslash    = chr(92)
+printables = "".join(c for c in string.printable if c not in string.whitespace)
+
+class ParseBaseException(Exception):
+    """base exception class for all parsing runtime exceptions"""
+    # Performance tuning: we construct a *lot* of these, so keep this
+    # constructor as small and fast as possible
+    def __init__( self, pstr, loc=0, msg=None, elem=None ):
+        self.loc = loc
+        if msg is None:
+            self.msg = pstr
+            self.pstr = ""
+        else:
+            self.msg = msg
+            self.pstr = pstr
+        self.parserElement = elem
+        self.args = (pstr, loc, msg)
+
+    @classmethod
+    def _from_exception(cls, pe):
+        """
+        internal factory method to simplify creating one type of ParseException 
+        from another - avoids having __init__ signature conflicts among subclasses
+        """
+        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
+
+    def __getattr__( self, aname ):
+        """supported attributes by name are:
+            - lineno - returns the line number of the exception text
+            - col - returns the column number of the exception text
+            - line - returns the line containing the exception text
+        """
+        if( aname == "lineno" ):
+            return lineno( self.loc, self.pstr )
+        elif( aname in ("col", "column") ):
+            return col( self.loc, self.pstr )
+        elif( aname == "line" ):
+            return line( self.loc, self.pstr )
+        else:
+            raise AttributeError(aname)
+
+    def __str__( self ):
+        return "%s (at char %d), (line:%d, col:%d)" % \
+                ( self.msg, self.loc, self.lineno, self.column )
+    def __repr__( self ):
+        return _ustr(self)
+    def markInputline( self, markerString = ">!<" ):
+        """Extracts the exception line from the input string, and marks
+           the location of the exception with a special symbol.
+        """
+        line_str = self.line
+        line_column = self.column - 1
+        if markerString:
+            line_str = "".join((line_str[:line_column],
+                                markerString, line_str[line_column:]))
+        return line_str.strip()
+    def __dir__(self):
+        return "lineno col line".split() + dir(type(self))
+
+class ParseException(ParseBaseException):
+    """
+    Exception thrown when a parse expression doesn't match the input string;
+    supported attributes by name are:
+     - lineno - returns the line number of the exception text
+     - col - returns the column number of the exception text
+     - line - returns the line containing the exception text
+        
+    Example::
+        try:
+            Word(nums).setName("integer").parseString("ABC")
+        except ParseException as pe:
+            print(pe)
+            print("column: {}".format(pe.col))
+            
+    prints::
+        Expected integer (at char 0), (line:1, col:1)
+        column: 1
+    """
+    pass
+
+class ParseFatalException(ParseBaseException):
+    """user-throwable exception thrown when inconsistent parse content
+       is found; stops all parsing immediately"""
+    pass
+
+class ParseSyntaxException(ParseFatalException):
+    """just like L{ParseFatalException}, but thrown internally when an
+       L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop 
+       immediately because an unbacktrackable syntax error has been found"""
+    pass
+
+#~ class ReparseException(ParseBaseException):
+    #~ """Experimental class - parse actions can raise this exception to cause
+       #~ pyparsing to reparse the input string:
+        #~ - with a modified input string, and/or
+        #~ - with a modified start location
+       #~ Set the values of the ReparseException in the constructor, and raise the
+       #~ exception in a parse action to cause pyparsing to use the new string/location.
+       #~ Setting the values as None causes no change to be made.
+       #~ """
+    #~ def __init_( self, newstring, restartLoc ):
+        #~ self.newParseText = newstring
+        #~ self.reparseLoc = restartLoc
+
+class RecursiveGrammarException(Exception):
+    """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
+    def __init__( self, parseElementList ):
+        self.parseElementTrace = parseElementList
+
+    def __str__( self ):
+        return "RecursiveGrammarException: %s" % self.parseElementTrace
+
+class _ParseResultsWithOffset(object):
+    def __init__(self,p1,p2):
+        self.tup = (p1,p2)
+    def __getitem__(self,i):
+        return self.tup[i]
+    def __repr__(self):
+        return repr(self.tup[0])
+    def setOffset(self,i):
+        self.tup = (self.tup[0],i)
+
+class ParseResults(object):
+    """
+    Structured parse results, to provide multiple means of access to the parsed data:
+       - as a list (C{len(results)})
+       - by list index (C{results[0], results[1]}, etc.)
+       - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
+
+    Example::
+        integer = Word(nums)
+        date_str = (integer.setResultsName("year") + '/' 
+                        + integer.setResultsName("month") + '/' 
+                        + integer.setResultsName("day"))
+        # equivalent form:
+        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+        # parseString returns a ParseResults object
+        result = date_str.parseString("1999/12/31")
+
+        def test(s, fn=repr):
+            print("%s -> %s" % (s, fn(eval(s))))
+        test("list(result)")
+        test("result[0]")
+        test("result['month']")
+        test("result.day")
+        test("'month' in result")
+        test("'minutes' in result")
+        test("result.dump()", str)
+    prints::
+        list(result) -> ['1999', '/', '12', '/', '31']
+        result[0] -> '1999'
+        result['month'] -> '12'
+        result.day -> '31'
+        'month' in result -> True
+        'minutes' in result -> False
+        result.dump() -> ['1999', '/', '12', '/', '31']
+        - day: 31
+        - month: 12
+        - year: 1999
+    """
+    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
+        if isinstance(toklist, cls):
+            return toklist
+        retobj = object.__new__(cls)
+        retobj.__doinit = True
+        return retobj
+
+    # Performance tuning: we construct a *lot* of these, so keep this
+    # constructor as small and fast as possible
+    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
+        if self.__doinit:
+            self.__doinit = False
+            self.__name = None
+            self.__parent = None
+            self.__accumNames = {}
+            self.__asList = asList
+            self.__modal = modal
+            if toklist is None:
+                toklist = []
+            if isinstance(toklist, list):
+                self.__toklist = toklist[:]
+            elif isinstance(toklist, _generatorType):
+                self.__toklist = list(toklist)
+            else:
+                self.__toklist = [toklist]
+            self.__tokdict = dict()
+
+        if name is not None and name:
+            if not modal:
+                self.__accumNames[name] = 0
+            if isinstance(name,int):
+                name = _ustr(name) # will always return a str, but use _ustr for consistency
+            self.__name = name
+            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
+                if isinstance(toklist,basestring):
+                    toklist = [ toklist ]
+                if asList:
+                    if isinstance(toklist,ParseResults):
+                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
+                    else:
+                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
+                    self[name].__name = name
+                else:
+                    try:
+                        self[name] = toklist[0]
+                    except (KeyError,TypeError,IndexError):
+                        self[name] = toklist
+
+    def __getitem__( self, i ):
+        if isinstance( i, (int,slice) ):
+            return self.__toklist[i]
+        else:
+            if i not in self.__accumNames:
+                return self.__tokdict[i][-1][0]
+            else:
+                return ParseResults([ v[0] for v in self.__tokdict[i] ])
+
+    def __setitem__( self, k, v, isinstance=isinstance ):
+        if isinstance(v,_ParseResultsWithOffset):
+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
+            sub = v[0]
+        elif isinstance(k,(int,slice)):
+            self.__toklist[k] = v
+            sub = v
+        else:
+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
+            sub = v
+        if isinstance(sub,ParseResults):
+            sub.__parent = wkref(self)
+
+    def __delitem__( self, i ):
+        if isinstance(i,(int,slice)):
+            mylen = len( self.__toklist )
+            del self.__toklist[i]
+
+            # convert int to slice
+            if isinstance(i, int):
+                if i < 0:
+                    i += mylen
+                i = slice(i, i+1)
+            # get removed indices
+            removed = list(range(*i.indices(mylen)))
+            removed.reverse()
+            # fixup indices in token dictionary
+            for name,occurrences in self.__tokdict.items():
+                for j in removed:
+                    for k, (value, position) in enumerate(occurrences):
+                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
+        else:
+            del self.__tokdict[i]
+
+    def __contains__( self, k ):
+        return k in self.__tokdict
+
+    def __len__( self ): return len( self.__toklist )
+    def __bool__(self): return ( not not self.__toklist )
+    __nonzero__ = __bool__
+    def __iter__( self ): return iter( self.__toklist )
+    def __reversed__( self ): return iter( self.__toklist[::-1] )
+    def _iterkeys( self ):
+        if hasattr(self.__tokdict, "iterkeys"):
+            return self.__tokdict.iterkeys()
+        else:
+            return iter(self.__tokdict)
+
+    def _itervalues( self ):
+        return (self[k] for k in self._iterkeys())
+            
+    def _iteritems( self ):
+        return ((k, self[k]) for k in self._iterkeys())
+
+    if PY_3:
+        keys = _iterkeys       
+        """Returns an iterator of all named result keys (Python 3.x only)."""
+
+        values = _itervalues
+        """Returns an iterator of all named result values (Python 3.x only)."""
+
+        items = _iteritems
+        """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
+
+    else:
+        iterkeys = _iterkeys
+        """Returns an iterator of all named result keys (Python 2.x only)."""
+
+        itervalues = _itervalues
+        """Returns an iterator of all named result values (Python 2.x only)."""
+
+        iteritems = _iteritems
+        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
+
+        def keys( self ):
+            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.iterkeys())
+
+        def values( self ):
+            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.itervalues())
+                
+        def items( self ):
+            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.iteritems())
+
+    def haskeys( self ):
+        """Since keys() returns an iterator, this method is helpful in bypassing
+           code that looks for the existence of any defined results names."""
+        return bool(self.__tokdict)
+        
+    def pop( self, *args, **kwargs):
+        """
+        Removes and returns item at specified index (default=C{last}).
+        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
+        argument or an integer argument, it will use C{list} semantics
+        and pop tokens from the list of parsed tokens. If passed a 
+        non-integer argument (most likely a string), it will use C{dict}
+        semantics and pop the corresponding value from any defined 
+        results names. A second default return value argument is 
+        supported, just as in C{dict.pop()}.
+
+        Example::
+            def remove_first(tokens):
+                tokens.pop(0)
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
+
+            label = Word(alphas)
+            patt = label("LABEL") + OneOrMore(Word(nums))
+            print(patt.parseString("AAB 123 321").dump())
+
+            # Use pop() in a parse action to remove named result (note that corresponding value is not
+            # removed from list form of results)
+            def remove_LABEL(tokens):
+                tokens.pop("LABEL")
+                return tokens
+            patt.addParseAction(remove_LABEL)
+            print(patt.parseString("AAB 123 321").dump())
+        prints::
+            ['AAB', '123', '321']
+            - LABEL: AAB
+
+            ['AAB', '123', '321']
+        """
+        if not args:
+            args = [-1]
+        for k,v in kwargs.items():
+            if k == 'default':
+                args = (args[0], v)
+            else:
+                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
+        if (isinstance(args[0], int) or 
+                        len(args) == 1 or 
+                        args[0] in self):
+            index = args[0]
+            ret = self[index]
+            del self[index]
+            return ret
+        else:
+            defaultvalue = args[1]
+            return defaultvalue
+
+    def get(self, key, defaultValue=None):
+        """
+        Returns named result matching the given key, or if there is no
+        such name, then returns the given C{defaultValue} or C{None} if no
+        C{defaultValue} is specified.
+
+        Similar to C{dict.get()}.
+        
+        Example::
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
+
+            result = date_str.parseString("1999/12/31")
+            print(result.get("year")) # -> '1999'
+            print(result.get("hour", "not specified")) # -> 'not specified'
+            print(result.get("hour")) # -> None
+        """
+        if key in self:
+            return self[key]
+        else:
+            return defaultValue
+
+    def insert( self, index, insStr ):
+        """
+        Inserts new element at location index in the list of parsed tokens.
+        
+        Similar to C{list.insert()}.
+
+        Example::
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+            # use a parse action to insert the parse location in the front of the parsed results
+            def insert_locn(locn, tokens):
+                tokens.insert(0, locn)
+            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
+        """
+        self.__toklist.insert(index, insStr)
+        # fixup indices in token dictionary
+        for name,occurrences in self.__tokdict.items():
+            for k, (value, position) in enumerate(occurrences):
+                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
+
+    def append( self, item ):
+        """
+        Add single element to end of ParseResults list of elements.
+
+        Example::
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+            
+            # use a parse action to compute the sum of the parsed integers, and add it to the end
+            def append_sum(tokens):
+                tokens.append(sum(map(int, tokens)))
+            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
+        """
+        self.__toklist.append(item)
+
+    def extend( self, itemseq ):
+        """
+        Add sequence of elements to end of ParseResults list of elements.
+
+        Example::
+            patt = OneOrMore(Word(alphas))
+            
+            # use a parse action to append the reverse of the matched strings, to make a palindrome
+            def make_palindrome(tokens):
+                tokens.extend(reversed([t[::-1] for t in tokens]))
+                return ''.join(tokens)
+            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
+        """
+        if isinstance(itemseq, ParseResults):
+            self += itemseq
+        else:
+            self.__toklist.extend(itemseq)
+
+    def clear( self ):
+        """
+        Clear all elements and results names.
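+
+        A brief illustrative sketch (not part of the original docs)::
+            result = OneOrMore(Word(nums)).parseString("0 123 321")
+            print(result)  # -> ['0', '123', '321']
+            result.clear()
+            print(result)  # -> []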
+        """
+        del self.__toklist[:]
+        self.__tokdict.clear()
+
+    def __getattr__( self, name ):
+        try:
+            return self[name]
+        except KeyError:
+            return ""
+
+    def __add__( self, other ):
+        ret = self.copy()
+        ret += other
+        return ret
+
+    def __iadd__( self, other ):
+        if other.__tokdict:
+            offset = len(self.__toklist)
+            addoffset = lambda a: offset if a<0 else a+offset
+            otheritems = other.__tokdict.items()
+            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
+                                for (k,vlist) in otheritems for v in vlist]
+            for k,v in otherdictitems:
+                self[k] = v
+                if isinstance(v[0],ParseResults):
+                    v[0].__parent = wkref(self)
+            
+        self.__toklist += other.__toklist
+        self.__accumNames.update( other.__accumNames )
+        return self
+
+    def __radd__(self, other):
+        if isinstance(other,int) and other == 0:
+            # useful for merging many ParseResults using sum() builtin
+            return self.copy()
+        else:
+            # this may raise a TypeError - so be it
+            return other + self
+        
+    def __repr__( self ):
+        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
+
+    def __str__( self ):
+        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
+
+    def _asStringList( self, sep='' ):
+        out = []
+        for item in self.__toklist:
+            if out and sep:
+                out.append(sep)
+            if isinstance( item, ParseResults ):
+                out += item._asStringList()
+            else:
+                out.append( _ustr(item) )
+        return out
+
+    def asList( self ):
+        """
+        Returns the parse results as a nested list of matching tokens, all converted to strings.
+
+        Example::
+            patt = OneOrMore(Word(alphas))
+            result = patt.parseString("sldkj lsdkj sldkj")
+            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
+            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
+            
+            # Use asList() to create an actual list
+            result_list = result.asList()
+            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
+        """
+        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
+
+    def asDict( self ):
+        """
+        Returns the named parse results as a nested dictionary.
+
+        Example::
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+            
+            result = date_str.parseString('12/31/1999')
+            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
+            
+            result_dict = result.asDict()
+            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
+
+            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
+            import json
+            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
+            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
+        """
+        if PY_3:
+            item_fn = self.items
+        else:
+            item_fn = self.iteritems
+            
+        def toItem(obj):
+            if isinstance(obj, ParseResults):
+                if obj.haskeys():
+                    return obj.asDict()
+                else:
+                    return [toItem(v) for v in obj]
+            else:
+                return obj
+                
+        return dict((k,toItem(v)) for k,v in item_fn())
+
+    def copy( self ):
+        """
+        Returns a new copy of a C{ParseResults} object.
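+
+        A brief sketch (illustrative only)::
+            result = OneOrMore(Word(nums)).parseString("0 123")
+            dup = result.copy()
+            dup.append('321')
+            print(dup)     # -> ['0', '123', '321']
+            print(result)  # -> ['0', '123'] (the original is unchanged)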
+        """
+        ret = ParseResults( self.__toklist )
+        ret.__tokdict = self.__tokdict.copy()
+        ret.__parent = self.__parent
+        ret.__accumNames.update( self.__accumNames )
+        ret.__name = self.__name
+        return ret
+
+    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
+        """
+        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
+        """
+        nl = "\n"
+        out = []
+        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
+                                                            for v in vlist)
+        nextLevelIndent = indent + "  "
+
+        # collapse out indents if formatting is not desired
+        if not formatted:
+            indent = ""
+            nextLevelIndent = ""
+            nl = ""
+
+        selfTag = None
+        if doctag is not None:
+            selfTag = doctag
+        else:
+            if self.__name:
+                selfTag = self.__name
+
+        if not selfTag:
+            if namedItemsOnly:
+                return ""
+            else:
+                selfTag = "ITEM"
+
+        out += [ nl, indent, "<", selfTag, ">" ]
+
+        for i,res in enumerate(self.__toklist):
+            if isinstance(res,ParseResults):
+                if i in namedItems:
+                    out += [ res.asXML(namedItems[i],
+                                        namedItemsOnly and doctag is None,
+                                        nextLevelIndent,
+                                        formatted)]
+                else:
+                    out += [ res.asXML(None,
+                                        namedItemsOnly and doctag is None,
+                                        nextLevelIndent,
+                                        formatted)]
+            else:
+                # individual token, see if there is a name for it
+                resTag = None
+                if i in namedItems:
+                    resTag = namedItems[i]
+                if not resTag:
+                    if namedItemsOnly:
+                        continue
+                    else:
+                        resTag = "ITEM"
+                xmlBodyText = _xml_escape(_ustr(res))
+                out += [ nl, nextLevelIndent, "<", resTag, ">",
+                                                xmlBodyText,
+                                                "</", resTag, ">" ]
+
+        out += [ nl, indent, "</", selfTag, ">" ]
+        return "".join(out)
+
+    def __lookup(self,sub):
+        for k,vlist in self.__tokdict.items():
+            for v,loc in vlist:
+                if sub is v:
+                    return k
+        return None
+
+    def getName(self):
+        r"""
+        Returns the results name for this token expression. Useful when several 
+        different expressions might match at a particular location.
+
+        Example::
+            integer = Word(nums)
+            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
+            house_number_expr = Suppress('#') + Word(nums, alphanums)
+            user_data = (Group(house_number_expr)("house_number") 
+                        | Group(ssn_expr)("ssn")
+                        | Group(integer)("age"))
+            user_info = OneOrMore(user_data)
+            
+            result = user_info.parseString("22 111-22-3333 #221B")
+            for item in result:
+                print(item.getName(), ':', item[0])
+        prints::
+            age : 22
+            ssn : 111-22-3333
+            house_number : 221B
+        """
+        if self.__name:
+            return self.__name
+        elif self.__parent:
+            par = self.__parent()
+            if par:
+                return par.__lookup(self)
+            else:
+                return None
+        elif (len(self) == 1 and
+               len(self.__tokdict) == 1 and
+               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
+            return next(iter(self.__tokdict.keys()))
+        else:
+            return None
+
+    def dump(self, indent='', depth=0, full=True):
+        """
+        Diagnostic method for listing out the contents of a C{ParseResults}.
+        Accepts an optional C{indent} argument so that this string can be embedded
+        in a nested display of other data.
+
+        Example::
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+            
+            result = date_str.parseString('12/31/1999')
+            print(result.dump())
+        prints::
+            ['12', '/', '31', '/', '1999']
+            - day: 1999
+            - month: 31
+            - year: 12
+        """
+        out = []
+        NL = '\n'
+        out.append( indent+_ustr(self.asList()) )
+        if full:
+            if self.haskeys():
+                items = sorted((str(k), v) for k,v in self.items())
+                for k,v in items:
+                    if out:
+                        out.append(NL)
+                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
+                    if isinstance(v,ParseResults):
+                        if v:
+                            out.append( v.dump(indent,depth+1) )
+                        else:
+                            out.append(_ustr(v))
+                    else:
+                        out.append(repr(v))
+            elif any(isinstance(vv,ParseResults) for vv in self):
+                v = self
+                for i,vv in enumerate(v):
+                    if isinstance(vv,ParseResults):
+                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
+                    else:
+                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))
+            
+        return "".join(out)
+
+    def pprint(self, *args, **kwargs):
+        """
+        Pretty-printer for parsed results as a list, using the C{pprint} module.
+        Accepts additional positional or keyword args as defined for the 
+        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
+
+        Example::
+            ident = Word(alphas, alphanums)
+            num = Word(nums)
+            func = Forward()
+            term = ident | num | Group('(' + func + ')')
+            func <<= ident + Group(Optional(delimitedList(term)))
+            result = func.parseString("fna a,b,(fnb c,d,200),100")
+            result.pprint(width=40)
+        prints::
+            ['fna',
+             ['a',
+              'b',
+              ['(', 'fnb', ['c', 'd', '200'], ')'],
+              '100']]
+        """
+        pprint.pprint(self.asList(), *args, **kwargs)
+
+    # add support for pickle protocol
+    def __getstate__(self):
+        return ( self.__toklist,
+                 ( self.__tokdict.copy(),
+                   self.__parent is not None and self.__parent() or None,
+                   self.__accumNames,
+                   self.__name ) )
+
+    def __setstate__(self,state):
+        self.__toklist = state[0]
+        (self.__tokdict,
+         par,
+         inAccumNames,
+         self.__name) = state[1]
+        self.__accumNames = {}
+        self.__accumNames.update(inAccumNames)
+        if par is not None:
+            self.__parent = wkref(par)
+        else:
+            self.__parent = None
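+
+    # a quick sketch of pickle round-tripping (illustrative only):
+    #   import pickle
+    #   result = OneOrMore(Word(nums))("values").parseString("0 123")
+    #   restored = pickle.loads(pickle.dumps(result))
+    #   assert restored.asList() == result.asList()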
+
+    def __getnewargs__(self):
+        return self.__toklist, self.__name, self.__asList, self.__modal
+
+    def __dir__(self):
+        return (dir(type(self)) + list(self.keys()))
+
+MutableMapping.register(ParseResults)
+
+def col (loc,strg):
+    """Returns current column within a string, counting newlines as line separators.
+   The first column is number 1.
+
+   Note: the default parsing behavior is to expand tabs in the input string
+   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+   consistent view of the parsed string, the parse location, and line and column
+   positions within the parsed string.
+   """
+    s = strg
+    return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
+
+def lineno(loc,strg):
+    """Returns current line number within a string, counting newlines as line separators.
+   The first line is number 1.
+
+   Note: the default parsing behavior is to expand tabs in the input string
+   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+   consistent view of the parsed string, the parse location, and line and column
+   positions within the parsed string.
+   """
+    return strg.count("\n",0,loc) + 1
+
+def line( loc, strg ):
+    """Returns the line of text containing loc within a string, counting newlines as line separators.
+       """
+    lastCR = strg.rfind("\n", 0, loc)
+    nextCR = strg.find("\n", loc)
+    if nextCR >= 0:
+        return strg[lastCR+1:nextCR]
+    else:
+        return strg[lastCR+1:]
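+
+# an illustrative sketch of the three location helpers above:
+#   s = "abc\ndef"
+#   lineno(5, s)   # -> 2      (loc 5 falls on the second line)
+#   col(5, s)      # -> 2      ('e' is the second column of its line)
+#   line(5, s)     # -> 'def'  (the full text of the line containing loc 5)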
+
+def _defaultStartDebugAction( instring, loc, expr ):
+    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
+
+def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
+    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
+
+def _defaultExceptionDebugAction( instring, loc, expr, exc ):
+    print ("Exception raised:" + _ustr(exc))
+
+def nullDebugAction(*args):
+    """'Do-nothing' debug action, to suppress debugging output during parsing."""
+    pass
+
+# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
+#~ 'decorator to trim function calls to match the arity of the target'
+#~ def _trim_arity(func, maxargs=3):
+    #~ if func in singleArgBuiltins:
+        #~ return lambda s,l,t: func(t)
+    #~ limit = 0
+    #~ foundArity = False
+    #~ def wrapper(*args):
+        #~ nonlocal limit,foundArity
+        #~ while 1:
+            #~ try:
+                #~ ret = func(*args[limit:])
+                #~ foundArity = True
+                #~ return ret
+            #~ except TypeError:
+                #~ if limit == maxargs or foundArity:
+                    #~ raise
+                #~ limit += 1
+                #~ continue
+    #~ return wrapper
+
+# this version is Python 2.x-3.x cross-compatible
+'decorator to trim function calls to match the arity of the target'
+def _trim_arity(func, maxargs=2):
+    if func in singleArgBuiltins:
+        return lambda s,l,t: func(t)
+    limit = [0]
+    foundArity = [False]
+    
+    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
+    if system_version[:2] >= (3,5):
+        def extract_stack(limit=0):
+            # special handling for Python 3.5.0 - extra deep call stack by 1
+            offset = -3 if system_version == (3,5,0) else -2
+            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
+            return [frame_summary[:2]]
+        def extract_tb(tb, limit=0):
+            frames = traceback.extract_tb(tb, limit=limit)
+            frame_summary = frames[-1]
+            return [frame_summary[:2]]
+    else:
+        extract_stack = traceback.extract_stack
+        extract_tb = traceback.extract_tb
+    
+    # synthesize what would be returned by traceback.extract_stack at the call to 
+    # user's parse action 'func', so that we don't incur call penalty at parse time
+    
+    LINE_DIFF = 6
+    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND 
+    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
+    this_line = extract_stack(limit=2)[-1]
+    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
+
+    def wrapper(*args):
+        while 1:
+            try:
+                ret = func(*args[limit[0]:])
+                foundArity[0] = True
+                return ret
+            except TypeError:
+                # re-raise TypeErrors if they did not come from our arity testing
+                if foundArity[0]:
+                    raise
+                else:
+                    try:
+                        tb = sys.exc_info()[-1]
+                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
+                            raise
+                    finally:
+                        del tb
+
+                if limit[0] <= maxargs:
+                    limit[0] += 1
+                    continue
+                raise
+
+    # copy func name to wrapper for sensible debug output
+    func_name = "<parse action>"
+    try:
+        func_name = getattr(func, '__name__', 
+                            getattr(func, '__class__').__name__)
+    except Exception:
+        func_name = str(func)
+    wrapper.__name__ = func_name
+
+    return wrapper
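+
+# e.g. _trim_arity(lambda t: t[0]) and _trim_arity(lambda s,l,t: t[0]) both return
+# wrappers callable as wrapper(s, l, t); the arity probing above drops leading
+# arguments until the wrapped function accepts the call.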
+
+class ParserElement(object):
+    """Abstract base level parser element class."""
+    DEFAULT_WHITE_CHARS = " \n\t\r"
+    verbose_stacktrace = False
+
+    @staticmethod
+    def setDefaultWhitespaceChars( chars ):
+        r"""
+        Overrides the default whitespace chars
+
+        Example::
+            # default whitespace chars are space, <TAB> and newline
+            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
+            
+            # change to just treat newline as significant
+            ParserElement.setDefaultWhitespaceChars(" \t")
+            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
+        """
+        ParserElement.DEFAULT_WHITE_CHARS = chars
+
+    @staticmethod
+    def inlineLiteralsUsing(cls):
+        """
+        Set class to be used for inclusion of string literals into a parser.
+        
+        Example::
+            # default literal class used is Literal
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
+
+
+            # change to Suppress
+            ParserElement.inlineLiteralsUsing(Suppress)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
+        """
+        ParserElement._literalStringClass = cls
+
+    def __init__( self, savelist=False ):
+        self.parseAction = list()
+        self.failAction = None
+        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
+        self.strRepr = None
+        self.resultsName = None
+        self.saveAsList = savelist
+        self.skipWhitespace = True
+        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+        self.copyDefaultWhiteChars = True
+        self.mayReturnEmpty = False # used when checking for left-recursion
+        self.keepTabs = False
+        self.ignoreExprs = list()
+        self.debug = False
+        self.streamlined = False
+        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
+        self.errmsg = ""
+        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
+        self.debugActions = ( None, None, None ) #custom debug actions
+        self.re = None
+        self.callPreparse = True # used to avoid redundant calls to preParse
+        self.callDuringTry = False
+
+    def copy( self ):
+        """
+        Make a copy of this C{ParserElement}.  Useful for defining different parse actions
+        for the same parsing pattern, using copies of the original parse element.
+        
+        Example::
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
+            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+            
+            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
+        prints::
+            [5120, 100, 655360, 268435456]
+        Equivalent form of C{expr.copy()} is just C{expr()}::
+            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+        """
+        cpy = copy.copy( self )
+        cpy.parseAction = self.parseAction[:]
+        cpy.ignoreExprs = self.ignoreExprs[:]
+        if self.copyDefaultWhiteChars:
+            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+        return cpy
+
+    def setName( self, name ):
+        """
+        Define name for this expression, makes debugging and exception messages clearer.
+        
+        Example::
+            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
+            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
+        """
+        self.name = name
+        self.errmsg = "Expected " + self.name
+        if hasattr(self,"exception"):
+            self.exception.msg = self.errmsg
+        return self
+
+    def setResultsName( self, name, listAllMatches=False ):
+        """
+        Define name for referencing matching tokens as a nested attribute
+        of the returned parse results.
+        NOTE: this returns a *copy* of the original C{ParserElement} object;
+        this is so that the client can define a basic element, such as an
+        integer, and reference it in multiple places with different names.
+
+        You can also set results names using the abbreviated syntax,
+        C{expr("name")} in place of C{expr.setResultsName("name")} - 
+        see L{I{__call__}<__call__>}.
+
+        Example::
+            date_str = (integer.setResultsName("year") + '/' 
+                        + integer.setResultsName("month") + '/' 
+                        + integer.setResultsName("day"))
+
+            # equivalent form:
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+        """
+        newself = self.copy()
+        if name.endswith("*"):
+            name = name[:-1]
+            listAllMatches=True
+        newself.resultsName = name
+        newself.modalResults = not listAllMatches
+        return newself
+
+    def setBreak(self,breakFlag = True):
+        """Method to invoke the Python pdb debugger when this element is
+           about to be parsed. Set C{breakFlag} to True to enable, False to
+           disable.
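+
+           A sketch of intended use (drops into the pdb prompt; illustrative only)::
+               integer = Word(nums).setBreak()
+               integer.parseString("123")   # pdb starts just before matching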
+        """
+        if breakFlag:
+            _parseMethod = self._parse
+            def breaker(instring, loc, doActions=True, callPreParse=True):
+                import pdb
+                pdb.set_trace()
+                return _parseMethod( instring, loc, doActions, callPreParse )
+            breaker._originalParseMethod = _parseMethod
+            self._parse = breaker
+        else:
+            if hasattr(self._parse,"_originalParseMethod"):
+                self._parse = self._parse._originalParseMethod
+        return self
+
+    def setParseAction( self, *fns, **kwargs ):
+        """
+        Define one or more actions to perform when successfully matching parse element definition.
+        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
+        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
+         - s   = the original string being parsed (see note below)
+         - loc = the location of the matching substring
+         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
+        If the functions in fns modify the tokens, they can return them as the return
+        value from fn, and the modified list of tokens will replace the original.
+        Otherwise, fn does not need to return any value.
+
+        Optional keyword arguments:
+         - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
+
+        Note: the default parsing behavior is to expand tabs in the input string
+        before starting the parsing process.  See L{I{parseString}<parseString>} for more information
+        on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+        consistent view of the parsed string, the parse location, and line and column
+        positions within the parsed string.
+        
+        Example::
+            integer = Word(nums)
+            date_str = integer + '/' + integer + '/' + integer
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
+
+            # use parse action to convert to ints at parse time
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            date_str = integer + '/' + integer + '/' + integer
+
+            # note that integer fields are now ints, not strings
+            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
+        """
+        self.parseAction = list(map(_trim_arity, list(fns)))
+        self.callDuringTry = kwargs.get("callDuringTry", False)
+        return self
+
+    def addParseAction( self, *fns, **kwargs ):
+        """
+        Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
+        
+        See examples in L{I{copy}<copy>}.
+        """
+        self.parseAction += list(map(_trim_arity, list(fns)))
+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+        return self
+
+    def addCondition(self, *fns, **kwargs):
+        """Add a boolean predicate function to expression's list of parse actions. See 
+        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, 
+        functions passed to C{addCondition} need to return boolean success/fail of the condition.
+
+        Optional keyword arguments:
+         - message = define a custom message to be used in the raised exception
+         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
+         
+        Example::
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            year_int = integer.copy()
+            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
+            date_str = year_int + '/' + integer + '/' + integer
+
+            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
+        """
+        msg = kwargs.get("message", "failed user-defined condition")
+        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
+        for fn in fns:
+            def pa(s, l, t, fn=fn):
+                # bind fn as a default argument; a plain closure would leave
+                # every generated condition testing only the last fn in fns
+                if not bool(_trim_arity(fn)(s, l, t)):
+                    raise exc_type(s, l, msg)
+            self.parseAction.append(pa)
+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+        return self
+
+    def setFailAction( self, fn ):
+        """Define action to perform if parsing fails at this expression.
+           Fail action fn is a callable function that takes the arguments
+           C{fn(s,loc,expr,err)} where:
+            - s = string being parsed
+            - loc = location where expression match was attempted and failed
+            - expr = the parse expression that failed
+            - err = the exception thrown
+           The function returns no value.  It may throw C{L{ParseFatalException}}
+           if it is desired to stop parsing immediately.
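+
+           A short illustrative sketch (the C{log_failure} handler is hypothetical)::
+               def log_failure(s, loc, expr, err):
+                   print("failed to match " + str(expr) + " at loc " + str(loc))
+               integer = Word(nums).setFailAction(log_failure)
+               integer.parseString("abc")   # prints the message, then re-raises
+        """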
+        self.failAction = fn
+        return self
+
+    def _skipIgnorables( self, instring, loc ):
+        exprsFound = True
+        while exprsFound:
+            exprsFound = False
+            for e in self.ignoreExprs:
+                try:
+                    while 1:
+                        loc,dummy = e._parse( instring, loc )
+                        exprsFound = True
+                except ParseException:
+                    pass
+        return loc
+
+    def preParse( self, instring, loc ):
+        if self.ignoreExprs:
+            loc = self._skipIgnorables( instring, loc )
+
+        if self.skipWhitespace:
+            wt = self.whiteChars
+            instrlen = len(instring)
+            while loc < instrlen and instring[loc] in wt:
+                loc += 1
+
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        return loc, []
+
+    def postParse( self, instring, loc, tokenlist ):
+        return tokenlist
+
+    #~ @profile
+    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
+        debugging = ( self.debug ) #and doActions )
+
+        if debugging or self.failAction:
+            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
+            if (self.debugActions[0] ):
+                self.debugActions[0]( instring, loc, self )
+            if callPreParse and self.callPreparse:
+                preloc = self.preParse( instring, loc )
+            else:
+                preloc = loc
+            tokensStart = preloc
+            try:
+                try:
+                    loc,tokens = self.parseImpl( instring, preloc, doActions )
+                except IndexError:
+                    raise ParseException( instring, len(instring), self.errmsg, self )
+            except ParseBaseException as err:
+                #~ print ("Exception raised:", err)
+                if self.debugActions[2]:
+                    self.debugActions[2]( instring, tokensStart, self, err )
+                if self.failAction:
+                    self.failAction( instring, tokensStart, self, err )
+                raise
+        else:
+            if callPreParse and self.callPreparse:
+                preloc = self.preParse( instring, loc )
+            else:
+                preloc = loc
+            tokensStart = preloc
+            if self.mayIndexError or preloc >= len(instring):
+                try:
+                    loc,tokens = self.parseImpl( instring, preloc, doActions )
+                except IndexError:
+                    raise ParseException( instring, len(instring), self.errmsg, self )
+            else:
+                loc,tokens = self.parseImpl( instring, preloc, doActions )
+
+        tokens = self.postParse( instring, loc, tokens )
+
+        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
+        if self.parseAction and (doActions or self.callDuringTry):
+            if debugging:
+                try:
+                    for fn in self.parseAction:
+                        tokens = fn( instring, tokensStart, retTokens )
+                        if tokens is not None:
+                            retTokens = ParseResults( tokens,
+                                                      self.resultsName,
+                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+                                                      modal=self.modalResults )
+                except ParseBaseException as err:
+                    #~ print "Exception raised in user parse action:", err
+                    if (self.debugActions[2] ):
+                        self.debugActions[2]( instring, tokensStart, self, err )
+                    raise
+            else:
+                for fn in self.parseAction:
+                    tokens = fn( instring, tokensStart, retTokens )
+                    if tokens is not None:
+                        retTokens = ParseResults( tokens,
+                                                  self.resultsName,
+                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+                                                  modal=self.modalResults )
+        if debugging:
+            #~ print ("Matched",self,"->",retTokens.asList())
+            if (self.debugActions[1] ):
+                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
+
+        return loc, retTokens
+
+    def tryParse( self, instring, loc ):
+        try:
+            return self._parse( instring, loc, doActions=False )[0]
+        except ParseFatalException:
+            raise ParseException( instring, loc, self.errmsg, self)
+    
+    def canParseNext(self, instring, loc):
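+        """
+        Tests (a brief sketch of behavior) whether this expression would match
+        at C{loc}, swallowing the parse exception and returning a boolean::
+            Word(nums).canParseNext("abc 123", 0)  # -> False
+            Word(nums).canParseNext("abc 123", 4)  # -> True
+        """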
+        try:
+            self.tryParse(instring, loc)
+        except (ParseException, IndexError):
+            return False
+        else:
+            return True
+
+    class _UnboundedCache(object):
+        def __init__(self):
+            cache = {}
+            self.not_in_cache = not_in_cache = object()
+
+            def get(self, key):
+                return cache.get(key, not_in_cache)
+
+            def set(self, key, value):
+                cache[key] = value
+
+            def clear(self):
+                cache.clear()
+                
+            def cache_len(self):
+                return len(cache)
+
+            self.get = types.MethodType(get, self)
+            self.set = types.MethodType(set, self)
+            self.clear = types.MethodType(clear, self)
+            self.__len__ = types.MethodType(cache_len, self)
+
+    if _OrderedDict is not None:
+        class _FifoCache(object):
+            def __init__(self, size):
+                self.not_in_cache = not_in_cache = object()
+
+                cache = _OrderedDict()
+
+                def get(self, key):
+                    return cache.get(key, not_in_cache)
+
+                def set(self, key, value):
+                    cache[key] = value
+                    while len(cache) > size:
+                        try:
+                            cache.popitem(False)
+                        except KeyError:
+                            pass
+
+                def clear(self):
+                    cache.clear()
+
+                def cache_len(self):
+                    return len(cache)
+
+                self.get = types.MethodType(get, self)
+                self.set = types.MethodType(set, self)
+                self.clear = types.MethodType(clear, self)
+                self.__len__ = types.MethodType(cache_len, self)
+
+    else:
+        class _FifoCache(object):
+            def __init__(self, size):
+                self.not_in_cache = not_in_cache = object()
+
+                cache = {}
+                # key_fifo must be unbounded: a deque bounded with maxlen=size
+                # silently drops old keys, so the eviction loop in set() below
+                # would never run and the cache dict would grow without limit
+                key_fifo = collections.deque()
+
+                def get(self, key):
+                    return cache.get(key, not_in_cache)
+
+                def set(self, key, value):
+                    cache[key] = value
+                    while len(key_fifo) > size:
+                        cache.pop(key_fifo.popleft(), None)
+                    key_fifo.append(key)
+
+                def clear(self):
+                    cache.clear()
+                    key_fifo.clear()
+
+                def cache_len(self):
+                    return len(cache)
+
+                self.get = types.MethodType(get, self)
+                self.set = types.MethodType(set, self)
+                self.clear = types.MethodType(clear, self)
+                self.__len__ = types.MethodType(cache_len, self)
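+
+    # both cache flavors expose the same tiny get/set/clear interface; an
+    # illustrative use:
+    #   cache = ParserElement._FifoCache(2)
+    #   for key in "abcd": cache.set(key, key)
+    #   cache.get("a") is cache.not_in_cache   # -> True (oldest entries evicted)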
+
+    # argument cache for optimizing repeated calls when backtracking through recursive expressions
+    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
+    packrat_cache_lock = RLock()
+    packrat_cache_stats = [0, 0]
+
+    # this method gets repeatedly called during backtracking with the same arguments -
+    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
+        HIT, MISS = 0, 1
+        lookup = (self, instring, loc, callPreParse, doActions)
+        with ParserElement.packrat_cache_lock:
+            cache = ParserElement.packrat_cache
+            value = cache.get(lookup)
+            if value is cache.not_in_cache:
+                ParserElement.packrat_cache_stats[MISS] += 1
+                try:
+                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
+                except ParseBaseException as pe:
+                    # cache a copy of the exception, without the traceback
+                    cache.set(lookup, pe.__class__(*pe.args))
+                    raise
+                else:
+                    cache.set(lookup, (value[0], value[1].copy()))
+                    return value
+            else:
+                ParserElement.packrat_cache_stats[HIT] += 1
+                if isinstance(value, Exception):
+                    raise value
+                return (value[0], value[1].copy())
+
+    _parse = _parseNoCache
+
+    @staticmethod
+    def resetCache():
+        ParserElement.packrat_cache.clear()
+        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
+
+    _packratEnabled = False
+    @staticmethod
+    def enablePackrat(cache_size_limit=128):
+        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
+           Repeated parse attempts at the same string location (which happens
+           often in many complex grammars) can immediately return a cached value,
+           instead of re-executing parsing/validating code.  Both valid results
+           and parsing exceptions are memoized.
+           
+           Parameters:
+            - cache_size_limit - (default=C{128}) - if an integer value is provided
+              will limit the size of the packrat cache; if None is passed, then
+              the cache size will be unbounded; if 0 is passed, the cache will
+              be effectively disabled.
+            
+           This speedup may break existing programs that use parse actions that
+           have side-effects.  For this reason, packrat parsing is disabled when
+           you first import pyparsing.  To activate the packrat feature, your
+           program must call the class method C{ParserElement.enablePackrat()}.  If
+           your program uses C{psyco} to "compile as you go", you must call
+           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
+           Python will crash.  For best results, call C{enablePackrat()} immediately
+           after importing pyparsing.
+           
+           Example::
+               import pyparsing
+               pyparsing.ParserElement.enablePackrat()
+        """
+        if not ParserElement._packratEnabled:
+            ParserElement._packratEnabled = True
+            if cache_size_limit is None:
+                ParserElement.packrat_cache = ParserElement._UnboundedCache()
+            else:
+                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
+            ParserElement._parse = ParserElement._parseCache
+
+    def parseString( self, instring, parseAll=False ):
+        """
+        Execute the parse expression with the given string.
+        This is the main interface to the client code, once the complete
+        expression has been built.
+
+        If you want the grammar to require that the entire input string be
+        successfully parsed, then set C{parseAll} to True (equivalent to ending
+        the grammar with C{L{StringEnd()}}).
+
+        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
+        in order to report proper column numbers in parse actions.
+        If the input string contains tabs and
+        the grammar uses parse actions that use the C{loc} argument to index into the
+        string being parsed, you can ensure you have a consistent view of the input
+        string by:
+         - calling C{parseWithTabs} on your grammar before calling C{parseString}
+           (see L{I{parseWithTabs}<parseWithTabs>})
+         - define your parse action using the full C{(s,loc,toks)} signature, and
+           reference the input string using the parse action's C{s} argument
+         - explicitly expand the tabs in your input string before calling
+           C{parseString}
+        
+        Example::
+            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
+            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
+        """
+        ParserElement.resetCache()
+        if not self.streamlined:
+            self.streamline()
+            #~ self.saveAsList = True
+        for e in self.ignoreExprs:
+            e.streamline()
+        if not self.keepTabs:
+            instring = instring.expandtabs()
+        try:
+            loc, tokens = self._parse( instring, 0 )
+            if parseAll:
+                loc = self.preParse( instring, loc )
+                se = Empty() + StringEnd()
+                se._parse( instring, loc )
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+        else:
+            return tokens
+
+    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
+        """
+        Scan the input string for expression matches.  Each match will return the
+        matching tokens, start location, and end location.  May be called with optional
+        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
+        C{overlap} is specified, then overlapping matches will be reported.
+
+        Note that the start and end locations are reported relative to the string
+        being parsed.  See L{I{parseString}<parseString>} for more information on parsing
+        strings with embedded tabs.
+
+        Example::
+            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
+            print(source)
+            for tokens,start,end in Word(alphas).scanString(source):
+                print(' '*start + '^'*(end-start))
+                print(' '*start + tokens[0])
+        
+        prints::
+        
+            sldjf123lsdjjkf345sldkjf879lkjsfd987
+            ^^^^^
+            sldjf
+                    ^^^^^^^
+                    lsdjjkf
+                              ^^^^^^
+                              sldkjf
+                                       ^^^^^^
+                                       lkjsfd
+        """
+        if not self.streamlined:
+            self.streamline()
+        for e in self.ignoreExprs:
+            e.streamline()
+
+        if not self.keepTabs:
+            instring = _ustr(instring).expandtabs()
+        instrlen = len(instring)
+        loc = 0
+        preparseFn = self.preParse
+        parseFn = self._parse
+        ParserElement.resetCache()
+        matches = 0
+        try:
+            while loc <= instrlen and matches < maxMatches:
+                try:
+                    preloc = preparseFn( instring, loc )
+                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
+                except ParseException:
+                    loc = preloc+1
+                else:
+                    if nextLoc > loc:
+                        matches += 1
+                        yield tokens, preloc, nextLoc
+                        if overlap:
+                            nextloc = preparseFn( instring, loc )
+                            if nextloc > loc:
+                                loc = nextLoc
+                            else:
+                                loc += 1
+                        else:
+                            loc = nextLoc
+                    else:
+                        loc = preloc+1
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def transformString( self, instring ):
+        """
+        Extension to C{L{scanString}}, to modify matching text with modified tokens that may
+        be returned from a parse action.  To use C{transformString}, define a grammar and
+        attach a parse action to it that modifies the returned token list.
+        Invoking C{transformString()} on a target string will then scan for matches,
+        and replace the matched text patterns according to the logic in the parse
+        action.  C{transformString()} returns the resulting transformed string.
+        
+        Example::
+            wd = Word(alphas)
+            wd.setParseAction(lambda toks: toks[0].title())
+            
+            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
+        Prints::
+            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
+        """
+        out = []
+        lastE = 0
+        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
+        # keep string locs straight between transformString and scanString
+        self.keepTabs = True
+        try:
+            for t,s,e in self.scanString( instring ):
+                out.append( instring[lastE:s] )
+                if t:
+                    if isinstance(t,ParseResults):
+                        out += t.asList()
+                    elif isinstance(t,list):
+                        out += t
+                    else:
+                        out.append(t)
+                lastE = e
+            out.append(instring[lastE:])
+            out = [o for o in out if o]
+            return "".join(map(_ustr,_flatten(out)))
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def searchString( self, instring, maxMatches=_MAX_INT ):
+        """
+        Another extension to C{L{scanString}}, simplifying the access to the tokens found
+        to match the given parse expression.  May be called with optional
+        C{maxMatches} argument, to clip searching after 'n' matches are found.
+        
+        Example::
+            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
+            cap_word = Word(alphas.upper(), alphas.lower())
+            
+            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
+
+            # the sum() builtin can be used to merge results into a single ParseResults object
+            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
+        prints::
+            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
+            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
+        """
+        try:
+            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
+        """
+        Generator method to split a string using the given expression as a separator.
+        May be called with optional C{maxsplit} argument, to limit the number of splits;
+        and the optional C{includeSeparators} argument (default=C{False}), indicating
+        whether the separating matching text should be included in the split results.
+        
+        Example::        
+            punc = oneOf(list(".,;:/-!?"))
+            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
+        prints::
+            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
+        """
+        splits = 0
+        last = 0
+        for t,s,e in self.scanString(instring, maxMatches=maxsplit):
+            yield instring[last:s]
+            if includeSeparators:
+                yield t[0]
+            last = e
+        yield instring[last:]
+
+    def __add__(self, other ):
+        """
+        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
+        converts them to L{Literal}s by default.
+        
+        Example::
+            greet = Word(alphas) + "," + Word(alphas) + "!"
+            hello = "Hello, World!"
+            print (hello, "->", greet.parseString(hello))
+        Prints::
+            Hello, World! -> ['Hello', ',', 'World', '!']
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return And( [ self, other ] )
+
+    def __radd__(self, other ):
+        """
+        Implementation of + operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other + self
+
+    def __sub__(self, other):
+        """
+        Implementation of - operator, returns C{L{And}} with error stop
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return self + And._ErrorStop() + other
+
+    def __rsub__(self, other ):
+        """
+        Implementation of - operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other - self
+
+    def __mul__(self,other):
+        """
+        Implementation of * operator, allows use of C{expr * 3} in place of
+        C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer
+        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
+        may also include C{None} as in:
+         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
+              to C{expr*n + L{ZeroOrMore}(expr)}
+              (read as "at least n instances of C{expr}")
+         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
+              (read as "0 to n instances of C{expr}")
+         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
+         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
+
+        Note that C{expr*(None,n)} does not raise an exception if
+        more than n exprs exist in the input stream; that is,
+        C{expr*(None,n)} does not enforce a maximum number of expr
+        occurrences.  If this behavior is desired, then write
+        C{expr*(None,n) + ~expr}
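+
+        A few illustrative equivalences (sketch)::
+            Word(nums) * 3          # Word(nums) + Word(nums) + Word(nums)
+            Word(nums) * (2, 4)     # 2 to 4 integers
+            Word(nums) * (1, None)  # same as OneOrMore(Word(nums))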
+        """
+        if isinstance(other,int):
+            minElements, optElements = other,0
+        elif isinstance(other,tuple):
+            other = (other + (None, None))[:2]
+            if other[0] is None:
+                other = (0, other[1])
+            if isinstance(other[0],int) and other[1] is None:
+                if other[0] == 0:
+                    return ZeroOrMore(self)
+                if other[0] == 1:
+                    return OneOrMore(self)
+                else:
+                    return self*other[0] + ZeroOrMore(self)
+            elif isinstance(other[0],int) and isinstance(other[1],int):
+                minElements, optElements = other
+                optElements -= minElements
+            else:
+                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects" % (type(other[0]), type(other[1])))
+        else:
+            raise TypeError("cannot multiply 'ParserElement' and '%s' objects" % type(other))
+
+        if minElements < 0:
+            raise ValueError("cannot multiply ParserElement by negative value")
+        if optElements < 0:
+            raise ValueError("second tuple value must be greater or equal to first tuple value")
+        if minElements == optElements == 0:
+            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
+
+        if (optElements):
+            def makeOptionalList(n):
+                if n>1:
+                    return Optional(self + makeOptionalList(n-1))
+                else:
+                    return Optional(self)
+            if minElements:
+                if minElements == 1:
+                    ret = self + makeOptionalList(optElements)
+                else:
+                    ret = And([self]*minElements) + makeOptionalList(optElements)
+            else:
+                ret = makeOptionalList(optElements)
+        else:
+            if minElements == 1:
+                ret = self
+            else:
+                ret = And([self]*minElements)
+        return ret
+
+    def __rmul__(self, other):
+        return self.__mul__(other)
+
+    def __or__(self, other ):
+        """
+        Implementation of | operator - returns C{L{MatchFirst}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return MatchFirst( [ self, other ] )
+
+    def __ror__(self, other ):
+        """
+        Implementation of | operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other | self
+
+    def __xor__(self, other ):
+        """
+        Implementation of ^ operator - returns C{L{Or}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Or( [ self, other ] )
+
+    def __rxor__(self, other ):
+        """
+        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other ^ self
+
+    def __and__(self, other ):
+        """
+        Implementation of & operator - returns C{L{Each}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Each( [ self, other ] )
+
+    def __rand__(self, other ):
+        """
+        Implementation of & operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other & self
+
+    def __invert__( self ):
+        """
+        Implementation of ~ operator - returns C{L{NotAny}}
+        """
+        return NotAny( self )
+
+    def __call__(self, name=None):
+        """
+        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
+        
+        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
+        passed as C{True}.
+           
+        If C{name} is omitted, same as calling C{L{copy}}.
+
+        Example::
+            # these are equivalent
+            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
+            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             
+        """
+        if name is not None:
+            return self.setResultsName(name)
+        else:
+            return self.copy()
+
+    def suppress( self ):
+        """
+        Suppresses the output of this C{ParserElement}; useful to keep punctuation from
+        cluttering up returned output.
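+
+        Example (illustrative)::
+            source = "a, b, c,d"
+            wd = Word(alphas)
+            wd_list1 = wd + ZeroOrMore(',' + wd)
+            print(wd_list1.parseString(source))
+
+            # often, delimiting punctuation is only needed during parsing -
+            # Suppress keeps it out of the returned results
+            wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+            print(wd_list2.parseString(source))
+        prints::
+            ['a', ',', 'b', ',', 'c', ',', 'd']
+            ['a', 'b', 'c', 'd']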
+        """
+        return Suppress( self )
+
+    def leaveWhitespace( self ):
+        """
+        Disables the skipping of whitespace before matching the characters in the
+        C{ParserElement}'s defined pattern.  This is normally only used internally by
+        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
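+
+        Example (illustrative)::
+            # by default, whitespace before 'b' is skipped
+            (Literal('a') + Literal('b')).parseString("a b")                   # -> ['a', 'b']
+            # with leaveWhitespace, 'b' must immediately follow 'a'
+            (Literal('a') + Literal('b').leaveWhitespace()).parseString("a b") # -> Exception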
+        """
+        self.skipWhitespace = False
+        return self
+
+    def setWhitespaceChars( self, chars ):
+        """
+        Overrides the default set of whitespace characters that are skipped before
+        trying to match this expression.
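+
+        Example (illustrative)::
+            # skip over spaces and tabs, but treat newlines as significant
+            next_word = Word(alphas).setWhitespaceChars(" \\t")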
+        """
+        self.skipWhitespace = True
+        self.whiteChars = chars
+        self.copyDefaultWhiteChars = False
+        return self
+
+    def parseWithTabs( self ):
+        """
+        Overrides the default behavior of expanding C{<TAB>}s to spaces before parsing,
+        leaving tab characters in the input string intact.
+        Must be called before C{parseString} when the input grammar contains elements that
+        match C{<TAB>} characters.
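+
+        Example (illustrative)::
+            tab_sep = Word(alphas) + White("\\t").suppress() + Word(alphas)
+            tab_sep.parseWithTabs().parseString("abc\\tdef") # -> ['abc', 'def']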
+        """
+        self.keepTabs = True
+        return self
+
+    def ignore( self, other ):
+        """
+        Define expression to be ignored (e.g., comments) while doing pattern
+        matching; may be called repeatedly, to define multiple comment or other
+        ignorable patterns.
+        
+        Example::
+            patt = OneOrMore(Word(alphas))
+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
+            
+            patt.ignore(cStyleComment)
+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
+        """
+        if isinstance(other, basestring):
+            other = Suppress(other)
+
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                self.ignoreExprs.append(other)
+        else:
+            self.ignoreExprs.append( Suppress( other.copy() ) )
+        return self
+
+    def setDebugActions( self, startAction, successAction, exceptionAction ):
+        """
+        Enable display of debugging messages while doing pattern matching, using the
+        given callables for match start, match success, and match failure; pass C{None}
+        for any action to keep the corresponding default debug action.
+        """
+        self.debugActions = (startAction or _defaultStartDebugAction,
+                             successAction or _defaultSuccessDebugAction,
+                             exceptionAction or _defaultExceptionDebugAction)
+        self.debug = True
+        return self
+
+    def setDebug( self, flag=True ):
+        """
+        Enable display of debugging messages while doing pattern matching.
+        Set C{flag} to True to enable, False to disable.
+
+        Example::
+            wd = Word(alphas).setName("alphaword")
+            integer = Word(nums).setName("numword")
+            term = wd | integer
+            
+            # turn on debugging for wd
+            wd.setDebug()
+
+            OneOrMore(term).parseString("abc 123 xyz 890")
+        
+        prints::
+            Match alphaword at loc 0(1,1)
+            Matched alphaword -> ['abc']
+            Match alphaword at loc 3(1,4)
+            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+            Match alphaword at loc 7(1,8)
+            Matched alphaword -> ['xyz']
+            Match alphaword at loc 11(1,12)
+            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+            Match alphaword at loc 15(1,16)
+            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+        The output shown is that produced by the default debug actions - custom debug actions can be
+        specified using L{setDebugActions}. Prior to attempting
+        to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
+        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
+        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
+        which makes debugging and exception messages easier to understand - for instance, the default
+        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
+        """
+        if flag:
+            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
+        else:
+            self.debug = False
+        return self
+
+    def __str__( self ):
+        return self.name
+
+    def __repr__( self ):
+        return _ustr(self)
+
+    def streamline( self ):
+        self.streamlined = True
+        self.strRepr = None
+        return self
+
+    def checkRecursion( self, parseElementList ):
+        pass
+
+    def validate( self, validateTrace=[] ):
+        """
+        Check defined expressions for valid structure, check for infinite recursive definitions.
+        """
+        self.checkRecursion( [] )
+
+    def parseFile( self, file_or_filename, parseAll=False ):
+        """
+        Execute the parse expression on the given file or filename.
+        If a filename is specified (instead of a file object),
+        the entire file is opened, read, and closed before parsing.
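+
+        Example (an illustrative sketch; 'data.txt' is a hypothetical file)::
+            integers = OneOrMore(Word(nums))
+            result = integers.parseFile("data.txt")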
+        """
+        try:
+            file_contents = file_or_filename.read()
+        except AttributeError:
+            with open(file_or_filename, "r") as f:
+                file_contents = f.read()
+        try:
+            return self.parseString(file_contents, parseAll)
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def __eq__(self,other):
+        if isinstance(other, ParserElement):
+            return self is other or vars(self) == vars(other)
+        elif isinstance(other, basestring):
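+            # comparing against a string tests whether this expression matches it completely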
+            return self.matches(other)
+        else:
+            # defer to the other operand; the previous super() comparison compared
+            # the proxy object itself and so was never equal
+            return NotImplemented
+
+    def __ne__(self,other):
+        return not (self == other)
+
+    def __hash__(self):
+        return hash(id(self))
+
+    def __req__(self,other):
+        return self == other
+
+    def __rne__(self,other):
+        return not (self == other)
+
+    def matches(self, testString, parseAll=True):
+        """
+        Method for quick testing of a parser against a test string. Good for simple
+        inline microtests of sub-expressions while building up a larger parser.
+           
+        Parameters:
+         - testString - to test against this expression for a match
+         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
+            
+        Example::
+            expr = Word(nums)
+            assert expr.matches("100")
+        """
+        try:
+            self.parseString(_ustr(testString), parseAll=parseAll)
+            return True
+        except ParseBaseException:
+            return False
+                
+    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
+        """
+        Execute the parse expression on a series of test strings, showing each
+        test and either the parsed results or where the parse failed. A quick and
+        easy way to run a parse expression against a list of sample strings.
+           
+        Parameters:
+         - tests - a list of separate test strings, or a multiline string of test strings
+         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests           
+         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test 
+              string; pass None to disable comment filtering
+         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
+              if False, only dump nested list
+         - printResults - (default=C{True}) prints test output to stdout
+         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
+
+        Returns: a (success, results) tuple, where success indicates that all tests succeeded
+        (or failed if C{failureTests} is True), and the results contain a list of lines of each 
+        test's output
+        
+        Example::
+            number_expr = pyparsing_common.number.copy()
+
+            result = number_expr.runTests('''
+                # unsigned integer
+                100
+                # negative integer
+                -100
+                # float with scientific notation
+                6.02e23
+                # integer with scientific notation
+                1e-12
+                ''')
+            print("Success" if result[0] else "Failed!")
+
+            result = number_expr.runTests('''
+                # stray character
+                100Z
+                # missing leading digit before '.'
+                -.100
+                # too many '.'
+                3.14.159
+                ''', failureTests=True)
+            print("Success" if result[0] else "Failed!")
+        prints::
+            # unsigned integer
+            100
+            [100]
+
+            # negative integer
+            -100
+            [-100]
+
+            # float with scientific notation
+            6.02e23
+            [6.02e+23]
+
+            # integer with scientific notation
+            1e-12
+            [1e-12]
+
+            Success
+            
+            # stray character
+            100Z
+               ^
+            FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+            # missing leading digit before '.'
+            -.100
+            ^
+            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+            # too many '.'
+            3.14.159
+                ^
+            FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+            Success
+
+        Each test string must be on a single line. If you want to test a string that spans multiple
+        lines, create a test like this::
+
+            expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines")
+        
+        (Note that this is a raw string literal, you must include the leading 'r'.)
+        """
+        if isinstance(tests, basestring):
+            tests = list(map(str.strip, tests.rstrip().splitlines()))
+        if isinstance(comment, basestring):
+            comment = Literal(comment)
+        allResults = []
+        comments = []
+        success = True
+        for t in tests:
+            if (comment is not None and comment.matches(t, False)) or (comments and not t):
+                comments.append(t)
+                continue
+            if not t:
+                continue
+            out = ['\n'.join(comments), t]
+            comments = []
+            try:
+                t = t.replace(r'\n','\n')
+                result = self.parseString(t, parseAll=parseAll)
+                out.append(result.dump(full=fullDump))
+                success = success and not failureTests
+            except ParseBaseException as pe:
+                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
+                if '\n' in t:
+                    out.append(line(pe.loc, t))
+                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
+                else:
+                    out.append(' '*pe.loc + '^' + fatal)
+                out.append("FAIL: " + str(pe))
+                success = success and failureTests
+                result = pe
+            except Exception as exc:
+                out.append("FAIL-EXCEPTION: " + str(exc))
+                success = success and failureTests
+                result = exc
+
+            if printResults:
+                if fullDump:
+                    out.append('')
+                print('\n'.join(out))
+
+            allResults.append((t, result))
+        
+        return success, allResults
+
+        
+class Token(ParserElement):
+    """
+    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
+    """
+    def __init__( self ):
+        super(Token,self).__init__( savelist=False )
+
+
+class Empty(Token):
+    """
+    An empty token, will always match.
+    """
+    def __init__( self ):
+        super(Empty,self).__init__()
+        self.name = "Empty"
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+
+class NoMatch(Token):
+    """
+    A token that will never match.
+    """
+    def __init__( self ):
+        super(NoMatch,self).__init__()
+        self.name = "NoMatch"
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.errmsg = "Unmatchable token"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Literal(Token):
+    """
+    Token to exactly match a specified string.
+    
+    Example::
+        Literal('blah').parseString('blah')  # -> ['blah']
+        Literal('blah').parseString('blahfooblah')  # -> ['blah']
+        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
+    
+    For case-insensitive matching, use L{CaselessLiteral}.
+    
+    For keyword matching (force word break before and after the matched string),
+    use L{Keyword} or L{CaselessKeyword}.
+    """
+    def __init__( self, matchString ):
+        super(Literal,self).__init__()
+        self.match = matchString
+        self.matchLen = len(matchString)
+        try:
+            self.firstMatchChar = matchString[0]
+        except IndexError:
+            warnings.warn("null string passed to Literal; use Empty() instead",
+                            SyntaxWarning, stacklevel=2)
+            self.__class__ = Empty
+        self.name = '"%s"' % _ustr(self.match)
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = False
+        self.mayIndexError = False
+
+    # Performance tuning: this routine gets called a *lot*
+    # if this is a single character match string  and the first character matches,
+    # short-circuit as quickly as possible, and avoid calling startswith
+    #~ @profile
+    def parseImpl( self, instring, loc, doActions=True ):
+        if (instring[loc] == self.firstMatchChar and
+            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
+            return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+_L = Literal
+ParserElement._literalStringClass = Literal
+
+class Keyword(Token):
+    """
+    Token to exactly match a specified string as a keyword, that is, it must be
+    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
+     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
+     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
+    Accepts two optional constructor arguments in addition to the keyword string:
+     - C{identChars} is a string of characters that would be valid identifier characters,
+          defaulting to all alphanumerics + "_" and "$"
+     - C{caseless} allows case-insensitive matching, default is C{False}.
+       
+    Example::
+        Keyword("start").parseString("start")  # -> ['start']
+        Keyword("start").parseString("starting")  # -> Exception
+
+    For case-insensitive matching, use L{CaselessKeyword}.
+    """
+    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
+
+    def __init__( self, matchString, identChars=None, caseless=False ):
+        super(Keyword,self).__init__()
+        if identChars is None:
+            identChars = Keyword.DEFAULT_KEYWORD_CHARS
+        self.match = matchString
+        self.matchLen = len(matchString)
+        try:
+            self.firstMatchChar = matchString[0]
+        except IndexError:
+            warnings.warn("null string passed to Keyword; use Empty() instead",
+                            SyntaxWarning, stacklevel=2)
+        self.name = '"%s"' % self.match
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = False
+        self.mayIndexError = False
+        self.caseless = caseless
+        if caseless:
+            self.caselessmatch = matchString.upper()
+            identChars = identChars.upper()
+        self.identChars = set(identChars)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.caseless:
+            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
+                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
+                return loc+self.matchLen, self.match
+        else:
+            if (instring[loc] == self.firstMatchChar and
+                (self.matchLen==1 or instring.startswith(self.match,loc)) and
+                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
+                (loc == 0 or instring[loc-1] not in self.identChars) ):
+                return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+
+    def copy(self):
+        c = super(Keyword,self).copy()
+        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
+        return c
+
+    @staticmethod
+    def setDefaultKeywordChars( chars ):
+        """Overrides the default Keyword chars
+        """
+        Keyword.DEFAULT_KEYWORD_CHARS = chars
+
+class CaselessLiteral(Literal):
+    """
+    Token to match a specified string, ignoring case of letters.
+    Note: the matched results will always be in the case of the given
+    match string, NOT the case of the input text.
+
+    Example::
+        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
+        
+    (Contrast with example for L{CaselessKeyword}.)
+    """
+    def __init__( self, matchString ):
+        super(CaselessLiteral,self).__init__( matchString.upper() )
+        # Preserve the defining literal.
+        self.returnString = matchString
+        self.name = "'%s'" % self.returnString
+        self.errmsg = "Expected " + self.name
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[ loc:loc+self.matchLen ].upper() == self.match:
+            return loc+self.matchLen, self.returnString
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class CaselessKeyword(Keyword):
+    """
+    Caseless version of L{Keyword}.
+
+    Example::
+        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
+        
+    (Contrast with example for L{CaselessLiteral}.)
+    """
+    def __init__( self, matchString, identChars=None ):
+        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
+            return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class CloseMatch(Token):
+    """
+    A variation on L{Literal} which matches "close" matches, that is, 
+    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
+     - C{match_string} - string to be matched
+     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
+    
+    The results from a successful parse will contain the matched text from the input string and the following named results:
+     - C{mismatches} - a list of the positions within the match_string where mismatches were found
+     - C{original} - the original match_string used to compare against the input string
+    
+    If C{mismatches} is an empty list, then the match was an exact match.
+    
+    Example::
+        patt = CloseMatch("ATCATCGAATGGA")
+        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
+        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
+
+        # exact match
+        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
+
+        # close match allowing up to 2 mismatches
+        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
+        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
+    """
+    def __init__(self, match_string, maxMismatches=1):
+        super(CloseMatch,self).__init__()
+        self.name = match_string
+        self.match_string = match_string
+        self.maxMismatches = maxMismatches
+        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
+        self.mayIndexError = False
+        self.mayReturnEmpty = False
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        start = loc
+        instrlen = len(instring)
+        maxloc = start + len(self.match_string)
+
+        if maxloc <= instrlen:
+            match_string = self.match_string
+            match_stringloc = 0
+            mismatches = []
+            maxMismatches = self.maxMismatches
+
+            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
+                src,mat = s_m
+                if src != mat:
+                    mismatches.append(match_stringloc)
+                    if len(mismatches) > maxMismatches:
+                        break
+            else:
+                loc = start + match_stringloc + 1
+                results = ParseResults([instring[start:loc]])
+                results['original'] = self.match_string
+                results['mismatches'] = mismatches
+                return loc, results
+
+        raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Word(Token):
+    """
+    Token for matching words composed of allowed character sets.
+    Defined with a string containing all allowed initial characters,
+    an optional string containing allowed body characters (if omitted,
+    defaults to the initial character set), and an optional minimum,
+    maximum, and/or exact length.  The default value for C{min} is 1 (a
+    minimum value < 1 is not valid); the default values for C{max} and C{exact}
+    are 0, meaning no maximum or exact length restriction. An optional
+    C{excludeChars} parameter can list characters that might be found in 
+    the input C{bodyChars} string; useful to define a word of all printables
+    except for one or two characters, for instance.
+    
+    L{srange} is useful for defining custom character set strings for defining 
+    C{Word} expressions, using range notation from regular expression character sets.
+    
+    A common mistake is to use C{Word} to match a specific literal string, as in 
+    C{Word("Address")}. Remember that C{Word} uses the string argument to define
+    I{sets} of matchable characters. This expression would match "Add", "AAA",
+    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
+    To match an exact literal string, use L{Literal} or L{Keyword}.
+
+    pyparsing includes helper strings for building Words:
+     - L{alphas}
+     - L{nums}
+     - L{alphanums}
+     - L{hexnums}
+     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
+     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
+     - L{printables} (any non-whitespace character)
+
+    Example::
+        # a word composed of digits
+        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
+        
+        # a word with a leading capital, and zero or more lowercase
+        capital_word = Word(alphas.upper(), alphas.lower())
+
+        # hostnames are alphanumeric, with leading alpha, and '-'
+        hostname = Word(alphas, alphanums+'-')
+        
+        # roman numeral (not a strict parser, accepts invalid mix of characters)
+        roman = Word("IVXLCDM")
+        
+        # any string of non-whitespace characters, except for ','
+        csv_value = Word(printables, excludeChars=",")
+    """
+    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
+        super(Word,self).__init__()
+        if excludeChars:
+            initChars = ''.join(c for c in initChars if c not in excludeChars)
+            if bodyChars:
+                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
+        self.initCharsOrig = initChars
+        self.initChars = set(initChars)
+        if bodyChars :
+            self.bodyCharsOrig = bodyChars
+            self.bodyChars = set(bodyChars)
+        else:
+            self.bodyCharsOrig = initChars
+            self.bodyChars = set(initChars)
+
+        self.maxSpecified = max > 0
+
+        if min < 1:
+            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.asKeyword = asKeyword
+
+        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
+            if self.bodyCharsOrig == self.initCharsOrig:
+                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
+            elif len(self.initCharsOrig) == 1:
+                self.reString = "%s[%s]*" % \
+                                      (re.escape(self.initCharsOrig),
+                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
+            else:
+                self.reString = "[%s][%s]*" % \
+                                      (_escapeRegexRangeChars(self.initCharsOrig),
+                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
+            if self.asKeyword:
+                self.reString = r"\b"+self.reString+r"\b"
+            try:
+                self.re = re.compile( self.reString )
+            except Exception:
+                self.re = None
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.re:
+            result = self.re.match(instring,loc)
+            if not result:
+                raise ParseException(instring, loc, self.errmsg, self)
+
+            loc = result.end()
+            return loc, result.group()
+
+        if not(instring[ loc ] in self.initChars):
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        start = loc
+        loc += 1
+        instrlen = len(instring)
+        bodychars = self.bodyChars
+        maxloc = start + self.maxLen
+        maxloc = min( maxloc, instrlen )
+        while loc < maxloc and instring[loc] in bodychars:
+            loc += 1
+
+        throwException = False
+        if loc - start < self.minLen:
+            throwException = True
+        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
+            throwException = True
+        if self.asKeyword:
+            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
+                throwException = True
+
+        if throwException:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+    def __str__( self ):
+        try:
+            return super(Word,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+
+            def charsAsStr(s):
+                if len(s)>4:
+                    return s[:4]+"..."
+                else:
+                    return s
+
+            if ( self.initCharsOrig != self.bodyCharsOrig ):
+                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
+            else:
+                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
+
+        return self.strRepr
+
+
+class Regex(Token):
+    r"""
+    Token for matching strings that match a given regular expression.
+    Defined with a string specifying the regular expression in a form recognized by the built-in Python C{re} module.
+    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as 
+    named parse results.
+
+    Example::
+        realnum = Regex(r"[+-]?\d+\.\d*")
+        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
+        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
+    """
+    compiledREtype = type(re.compile("[A-Z]"))
+    def __init__( self, pattern, flags=0):
+        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
+        super(Regex,self).__init__()
+
+        if isinstance(pattern, basestring):
+            if not pattern:
+                warnings.warn("null string passed to Regex; use Empty() instead",
+                        SyntaxWarning, stacklevel=2)
+
+            self.pattern = pattern
+            self.flags = flags
+
+            try:
+                self.re = re.compile(self.pattern, self.flags)
+                self.reString = self.pattern
+            except sre_constants.error:
+                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
+                    SyntaxWarning, stacklevel=2)
+                raise
+
+        elif isinstance(pattern, Regex.compiledREtype):
+            self.re = pattern
+            self.pattern = \
+            self.reString = str(pattern)
+            self.flags = flags
+            
+        else:
+            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        result = self.re.match(instring,loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        d = result.groupdict()
+        ret = ParseResults(result.group())
+        if d:
+            for k in d:
+                ret[k] = d[k]
+        return loc,ret
+
+    def __str__( self ):
+        try:
+            return super(Regex,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "Re:(%s)" % repr(self.pattern)
+
+        return self.strRepr
+
+
+class QuotedString(Token):
+    r"""
+    Token for matching strings that are delimited by quoting characters.
+    
+    Defined with the following parameters:
+        - quoteChar - string of one or more characters defining the quote delimiting string
+        - escChar - character to escape quotes, typically backslash (default=C{None})
+        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
+        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
+        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
+        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
+        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
+
+    Example::
+        qs = QuotedString('"')
+        print(qs.searchString('lsjdf "This is the quote" sldjf'))
+        complex_qs = QuotedString('{{', endQuoteChar='}}')
+        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
+        sql_qs = QuotedString('"', escQuote='""')
+        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+    prints::
+        [['This is the quote']]
+        [['This is the "quote"']]
+        [['This is the quote with "embedded" quotes']]
+    """
+    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
+        super(QuotedString,self).__init__()
+
+        # remove whitespace from quote chars - won't work anyway
+        quoteChar = quoteChar.strip()
+        if not quoteChar:
+            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+            raise SyntaxError()
+
+        if endQuoteChar is None:
+            endQuoteChar = quoteChar
+        else:
+            endQuoteChar = endQuoteChar.strip()
+            if not endQuoteChar:
+                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+                raise SyntaxError()
+
+        self.quoteChar = quoteChar
+        self.quoteCharLen = len(quoteChar)
+        self.firstQuoteChar = quoteChar[0]
+        self.endQuoteChar = endQuoteChar
+        self.endQuoteCharLen = len(endQuoteChar)
+        self.escChar = escChar
+        self.escQuote = escQuote
+        self.unquoteResults = unquoteResults
+        self.convertWhitespaceEscapes = convertWhitespaceEscapes
+
+        if multiline:
+            self.flags = re.MULTILINE | re.DOTALL
+            self.pattern = r'%s(?:[^%s%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        else:
+            self.flags = 0
+            self.pattern = r'%s(?:[^%s\n\r%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        if len(self.endQuoteChar) > 1:
+            self.pattern += (
+                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
+                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
+                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
+                )
+        if escQuote:
+            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
+        if escChar:
+            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
+            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
+        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
+
+        try:
+            self.re = re.compile(self.pattern, self.flags)
+            self.reString = self.pattern
+        except sre_constants.error:
+            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
+                SyntaxWarning, stacklevel=2)
+            raise
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        result = self.re.match(instring,loc) if instring[loc] == self.firstQuoteChar else None
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = result.group()
+
+        if self.unquoteResults:
+
+            # strip off quotes
+            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
+
+            if isinstance(ret,basestring):
+                # replace escaped whitespace
+                if '\\' in ret and self.convertWhitespaceEscapes:
+                    ws_map = {
+                        r'\t' : '\t',
+                        r'\n' : '\n',
+                        r'\f' : '\f',
+                        r'\r' : '\r',
+                    }
+                    for wslit,wschar in ws_map.items():
+                        ret = ret.replace(wslit, wschar)
+
+                # replace escaped characters
+                if self.escChar:
+                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
+
+                # replace escaped quotes
+                if self.escQuote:
+                    ret = ret.replace(self.escQuote, self.endQuoteChar)
+
+        return loc, ret
+
+    def __str__( self ):
+        try:
+            return super(QuotedString,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
+
+        return self.strRepr
+
+
+class CharsNotIn(Token):
+    """
+    Token for matching words composed of characters I{not} in a given set (will
+    include whitespace in matched characters if not listed in the provided exclusion set - see example).
+    Defined with a string containing all disallowed characters, and an optional
+    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
+    minimum value < 1 is not valid); the default values for C{max} and C{exact}
+    are 0, meaning no maximum or exact length restriction.
+
+    Example::
+        # define a comma-separated-value as anything that is not a ','
+        csv_value = CharsNotIn(',')
+        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
+    prints::
+        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
+    """
+    def __init__( self, notChars, min=1, max=0, exact=0 ):
+        super(CharsNotIn,self).__init__()
+        self.skipWhitespace = False
+        self.notChars = notChars
+
+        if min < 1:
+            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = ( self.minLen == 0 )
+        self.mayIndexError = False
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[loc] in self.notChars:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        start = loc
+        loc += 1
+        notchars = self.notChars
+        maxlen = min( start+self.maxLen, len(instring) )
+        while loc < maxlen and \
+              (instring[loc] not in notchars):
+            loc += 1
+
+        if loc - start < self.minLen:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+    def __str__( self ):
+        try:
+            return super(CharsNotIn, self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            if len(self.notChars) > 4:
+                self.strRepr = "!W:(%s...)" % self.notChars[:4]
+            else:
+                self.strRepr = "!W:(%s)" % self.notChars
+
+        return self.strRepr
+
+class White(Token):
+    """
+    Special matching class for matching whitespace.  Normally, whitespace is ignored
+    by pyparsing grammars.  This class is included when some whitespace structures
+    are significant.  Define with a string containing the whitespace characters to be
+    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
+    as defined for the C{L{Word}} class.
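+
+    Example (illustrative)::
+        # require an explicit newline between the two words
+        expr = Word(alphas) + White("\\n").suppress() + Word(alphas)
+        expr.parseString("abc\\ndef") # -> ['abc', 'def']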
+    """
+    whiteStrs = {
+        " " : "<SPC>",
+        "\t": "<TAB>",
+        "\n": "<LF>",
+        "\r": "<CR>",
+        "\f": "<FF>",
+        }
+    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
+        super(White,self).__init__()
+        self.matchWhite = ws
+        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
+        #~ self.leaveWhitespace()
+        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
+        self.mayReturnEmpty = True
+        self.errmsg = "Expected " + self.name
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if not(instring[ loc ] in self.matchWhite):
+            raise ParseException(instring, loc, self.errmsg, self)
+        start = loc
+        loc += 1
+        maxloc = start + self.maxLen
+        maxloc = min( maxloc, len(instring) )
+        while loc < maxloc and instring[loc] in self.matchWhite:
+            loc += 1
+
+        if loc - start < self.minLen:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+
+class _PositionToken(Token):
+    def __init__( self ):
+        super(_PositionToken,self).__init__()
+        self.name=self.__class__.__name__
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+class GoToColumn(_PositionToken):
+    """
+    Token to advance to a specific column of input text; useful for tabular report scraping.
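+
+    Example (an illustrative sketch; the second field starts at column 5)::
+        expr = Word(alphas) + GoToColumn(5).suppress() + Word(alphas)
+        expr.parseString("ID  PAYLOAD") # -> ['ID', 'PAYLOAD']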
+    """
+    def __init__( self, colno ):
+        super(GoToColumn,self).__init__()
+        self.col = colno
+
+    def preParse( self, instring, loc ):
+        if col(loc,instring) != self.col:
+            instrlen = len(instring)
+            if self.ignoreExprs:
+                loc = self._skipIgnorables( instring, loc )
+            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
+                loc += 1
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        thiscol = col( loc, instring )
+        if thiscol > self.col:
+            raise ParseException( instring, loc, "Text not in expected column", self )
+        newloc = loc + self.col - thiscol
+        ret = instring[ loc: newloc ]
+        return newloc, ret
+
+
+class LineStart(_PositionToken):
+    """
+    Matches if current position is at the beginning of a line within the parse string
+    
+    Example::
+    
+        test = '''\
+        AAA this line
+        AAA and this line
+          AAA but not this one
+        B AAA and definitely not this one
+        '''
+
+        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
+            print(t)
+    
+    Prints::
+        ['AAA', ' this line']
+        ['AAA', ' and this line']    
+
+    """
+    def __init__( self ):
+        super(LineStart,self).__init__()
+        self.errmsg = "Expected start of line"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if col(loc, instring) == 1:
+            return loc, []
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class LineEnd(_PositionToken):
+    """
+    Matches if current position is at the end of a line within the parse string
+    """
+    def __init__( self ):
+        super(LineEnd,self).__init__()
+        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
+        self.errmsg = "Expected end of line"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc<len(instring):
+            if instring[loc] == "\n":
+                return loc+1, "\n"
+            else:
+                raise ParseException(instring, loc, self.errmsg, self)
+        elif loc == len(instring):
+            return loc+1, []
+        else:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+class StringStart(_PositionToken):
+    """
+    Matches if current position is at the beginning of the parse string
+    """
+    def __init__( self ):
+        super(StringStart,self).__init__()
+        self.errmsg = "Expected start of text"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc != 0:
+            # see if entire string up to here is just whitespace and ignoreables
+            if loc != self.preParse( instring, 0 ):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+class StringEnd(_PositionToken):
+    """
+    Matches if current position is at the end of the parse string
+    """
+    def __init__( self ):
+        super(StringEnd,self).__init__()
+        self.errmsg = "Expected end of text"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc < len(instring):
+            raise ParseException(instring, loc, self.errmsg, self)
+        elif loc == len(instring):
+            return loc+1, []
+        else:
+            # loc is already past the end of the string
+            return loc, []
+
+class WordStart(_PositionToken):
+    """
+    Matches if the current position is at the beginning of a Word, and
+    is not preceded by any character in a given set of C{wordChars}
+    (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
+    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
+    the string being parsed, or at the beginning of a line.
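+
+    Example (illustrative, pairing with L{WordEnd} to match only whole words)::
+        key = WordStart(alphanums) + Literal("id") + WordEnd(alphanums)
+        print(key.searchString("id ident hid")) # -> [['id']]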
+    """
+    def __init__(self, wordChars = printables):
+        super(WordStart,self).__init__()
+        self.wordChars = set(wordChars)
+        self.errmsg = "Not at the start of a word"
+
+    def parseImpl(self, instring, loc, doActions=True ):
+        if loc != 0:
+            if (instring[loc-1] in self.wordChars or
+                instring[loc] not in self.wordChars):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+class WordEnd(_PositionToken):
+    """
+    Matches if the current position is at the end of a Word, and
+    is not followed by any character in a given set of C{wordChars}
+    (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
+    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
+    the string being parsed, or at the end of a line.
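+
+    Example (illustrative)::
+        expr = Literal("id") + WordEnd(alphanums)
+        print(expr.searchString("id ident")) # -> [['id']]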
+    """
+    def __init__(self, wordChars = printables):
+        super(WordEnd,self).__init__()
+        self.wordChars = set(wordChars)
+        self.skipWhitespace = False
+        self.errmsg = "Not at the end of a word"
+
+    def parseImpl(self, instring, loc, doActions=True ):
+        instrlen = len(instring)
+        if instrlen>0 and loc<instrlen:
+            if (instring[loc] in self.wordChars or
+                instring[loc-1] not in self.wordChars):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+
+class ParseExpression(ParserElement):
+    """
+    Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(ParseExpression,self).__init__(savelist)
+        if isinstance( exprs, _generatorType ):
+            exprs = list(exprs)
+
+        if isinstance( exprs, basestring ):
+            self.exprs = [ ParserElement._literalStringClass( exprs ) ]
+        elif isinstance( exprs, Iterable ):
+            exprs = list(exprs)
+            # if sequence of strings provided, wrap with Literal
+            if all(isinstance(expr, basestring) for expr in exprs):
+                exprs = map(ParserElement._literalStringClass, exprs)
+            self.exprs = list(exprs)
+        else:
+            try:
+                self.exprs = list( exprs )
+            except TypeError:
+                self.exprs = [ exprs ]
+        self.callPreparse = False
+
+    def __getitem__( self, i ):
+        return self.exprs[i]
+
+    def append( self, other ):
+        self.exprs.append( other )
+        self.strRepr = None
+        return self
+
+    def leaveWhitespace( self ):
+        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
+           all contained expressions."""
+        self.skipWhitespace = False
+        self.exprs = [ e.copy() for e in self.exprs ]
+        for e in self.exprs:
+            e.leaveWhitespace()
+        return self
+
+    def ignore( self, other ):
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                super( ParseExpression, self).ignore( other )
+                for e in self.exprs:
+                    e.ignore( self.ignoreExprs[-1] )
+        else:
+            super( ParseExpression, self).ignore( other )
+            for e in self.exprs:
+                e.ignore( self.ignoreExprs[-1] )
+        return self
+
+    def __str__( self ):
+        try:
+            return super(ParseExpression,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
+        return self.strRepr
+
+    def streamline( self ):
+        super(ParseExpression,self).streamline()
+
+        for e in self.exprs:
+            e.streamline()
+
+        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
+        # but only if there are no parse actions or resultsNames on the nested And's
+        # (likewise for Or's and MatchFirst's)
+        if ( len(self.exprs) == 2 ):
+            other = self.exprs[0]
+            if ( isinstance( other, self.__class__ ) and
+                  not(other.parseAction) and
+                  other.resultsName is None and
+                  not other.debug ):
+                self.exprs = other.exprs[:] + [ self.exprs[1] ]
+                self.strRepr = None
+                self.mayReturnEmpty |= other.mayReturnEmpty
+                self.mayIndexError  |= other.mayIndexError
+
+            other = self.exprs[-1]
+            if ( isinstance( other, self.__class__ ) and
+                  not(other.parseAction) and
+                  other.resultsName is None and
+                  not other.debug ):
+                self.exprs = self.exprs[:-1] + other.exprs[:]
+                self.strRepr = None
+                self.mayReturnEmpty |= other.mayReturnEmpty
+                self.mayIndexError  |= other.mayIndexError
+
+        self.errmsg = "Expected " + _ustr(self)
+        
+        return self
+
+    def setResultsName( self, name, listAllMatches=False ):
+        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
+        return ret
+
+    def validate( self, validateTrace=[] ):
+        tmp = validateTrace[:]+[self]
+        for e in self.exprs:
+            e.validate(tmp)
+        self.checkRecursion( [] )
+        
+    def copy(self):
+        ret = super(ParseExpression,self).copy()
+        ret.exprs = [e.copy() for e in self.exprs]
+        return ret
+
+class And(ParseExpression):
+    """
+    Requires all given C{ParseExpression}s to be found in the given order.
+    Expressions may be separated by whitespace.
+    May be constructed using the C{'+'} operator.
+    May also be constructed using the C{'-'} operator, which will suppress backtracking.
+
+    Example::
+        integer = Word(nums)
+        name_expr = OneOrMore(Word(alphas))
+
+        expr = And([integer("id"),name_expr("name"),integer("age")])
+        # more easily written as:
+        expr = integer("id") + name_expr("name") + integer("age")
+    """
+
+    class _ErrorStop(Empty):
+        def __init__(self, *args, **kwargs):
+            super(And._ErrorStop,self).__init__(*args, **kwargs)
+            self.name = '-'
+            self.leaveWhitespace()
+
+    def __init__( self, exprs, savelist = True ):
+        super(And,self).__init__(exprs, savelist)
+        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+        self.setWhitespaceChars( self.exprs[0].whiteChars )
+        self.skipWhitespace = self.exprs[0].skipWhitespace
+        self.callPreparse = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        # pass False as last arg to _parse for first element, since we already
+        # pre-parsed the string as part of our And pre-parsing
+        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
+        errorStop = False
+        for e in self.exprs[1:]:
+            if isinstance(e, And._ErrorStop):
+                errorStop = True
+                continue
+            if errorStop:
+                try:
+                    loc, exprtokens = e._parse( instring, loc, doActions )
+                except ParseSyntaxException:
+                    raise
+                except ParseBaseException as pe:
+                    pe.__traceback__ = None
+                    raise ParseSyntaxException._from_exception(pe)
+                except IndexError:
+                    raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
+            else:
+                loc, exprtokens = e._parse( instring, loc, doActions )
+            if exprtokens or exprtokens.haskeys():
+                resultlist += exprtokens
+        return loc, resultlist
+
+    def __iadd__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #And( [ self, other ] )
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+            if not e.mayReturnEmpty:
+                break
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+
+class Or(ParseExpression):
+    """
+    Requires that at least one C{ParseExpression} is found.
+    If two expressions match, the expression that matches the longest string will be used.
+    May be constructed using the C{'^'} operator.
+
+    Example::
+        # construct Or using '^' operator
+        
+        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
+        print(number.searchString("123 3.1416 789"))
+    prints::
+        [['123'], ['3.1416'], ['789']]
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(Or,self).__init__(exprs, savelist)
+        if self.exprs:
+            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+        else:
+            self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        maxExcLoc = -1
+        maxException = None
+        matches = []
+        for e in self.exprs:
+            try:
+                loc2 = e.tryParse( instring, loc )
+            except ParseException as err:
+                err.__traceback__ = None
+                if err.loc > maxExcLoc:
+                    maxException = err
+                    maxExcLoc = err.loc
+            except IndexError:
+                if len(instring) > maxExcLoc:
+                    maxException = ParseException(instring,len(instring),e.errmsg,self)
+                    maxExcLoc = len(instring)
+            else:
+                # save match among all matches, to retry longest to shortest
+                matches.append((loc2, e))
+
+        if matches:
+            matches.sort(key=lambda x: -x[0])
+            for _,e in matches:
+                try:
+                    return e._parse( instring, loc, doActions )
+                except ParseException as err:
+                    err.__traceback__ = None
+                    if err.loc > maxExcLoc:
+                        maxException = err
+                        maxExcLoc = err.loc
+
+        if maxException is not None:
+            maxException.msg = self.errmsg
+            raise maxException
+        else:
+            raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+
+    def __ixor__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #Or( [ self, other ] )
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class MatchFirst(ParseExpression):
+    """
+    Requires that at least one C{ParseExpression} is found.
+    If two expressions match, the first one listed is the one that will match.
+    May be constructed using the C{'|'} operator.
+
+    Example::
+        # construct MatchFirst using '|' operator
+        
+        # watch the order of expressions to match
+        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
+        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]
+
+        # put more selective expression first
+        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
+        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(MatchFirst,self).__init__(exprs, savelist)
+        if self.exprs:
+            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+        else:
+            self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        maxExcLoc = -1
+        maxException = None
+        for e in self.exprs:
+            try:
+                ret = e._parse( instring, loc, doActions )
+                return ret
+            except ParseException as err:
+                if err.loc > maxExcLoc:
+                    maxException = err
+                    maxExcLoc = err.loc
+            except IndexError:
+                if len(instring) > maxExcLoc:
+                    maxException = ParseException(instring,len(instring),e.errmsg,self)
+                    maxExcLoc = len(instring)
+
+        # only got here if no expression matched, raise exception for match that made it the furthest
+        else:
+            if maxException is not None:
+                maxException.msg = self.errmsg
+                raise maxException
+            else:
+                raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+    def __ior__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #MatchFirst( [ self, other ] )
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class Each(ParseExpression):
+    """
+    Requires all given C{ParseExpression}s to be found, but in any order.
+    Expressions may be separated by whitespace.
+    May be constructed using the C{'&'} operator.
+
+    Example::
+        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
+        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
+        integer = Word(nums)
+        shape_attr = "shape:" + shape_type("shape")
+        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
+        color_attr = "color:" + color("color")
+        size_attr = "size:" + integer("size")
+
+        # use Each (using operator '&') to accept attributes in any order 
+        # (shape and posn are required, color and size are optional)
+        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
+
+        shape_spec.runTests('''
+            shape: SQUARE color: BLACK posn: 100, 120
+            shape: CIRCLE size: 50 color: BLUE posn: 50,80
+            color:GREEN size:20 shape:TRIANGLE posn:20,40
+            '''
+            )
+    prints::
+        shape: SQUARE color: BLACK posn: 100, 120
+        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
+        - color: BLACK
+        - posn: ['100', ',', '120']
+          - x: 100
+          - y: 120
+        - shape: SQUARE
+
+
+        shape: CIRCLE size: 50 color: BLUE posn: 50,80
+        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
+        - color: BLUE
+        - posn: ['50', ',', '80']
+          - x: 50
+          - y: 80
+        - shape: CIRCLE
+        - size: 50
+
+
+        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
+        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
+        - color: GREEN
+        - posn: ['20', ',', '40']
+          - x: 20
+          - y: 40
+        - shape: TRIANGLE
+        - size: 20
+    """
+    def __init__( self, exprs, savelist = True ):
+        super(Each,self).__init__(exprs, savelist)
+        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+        self.skipWhitespace = True
+        self.initExprGroups = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.initExprGroups:
+            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
+            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
+            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
+            self.optionals = opt1 + opt2
+            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
+            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
+            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
+            self.required += self.multirequired
+            self.initExprGroups = False
+        tmpLoc = loc
+        tmpReqd = self.required[:]
+        tmpOpt  = self.optionals[:]
+        matchOrder = []
+
+        keepMatching = True
+        while keepMatching:
+            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
+            failed = []
+            for e in tmpExprs:
+                try:
+                    tmpLoc = e.tryParse( instring, tmpLoc )
+                except ParseException:
+                    failed.append(e)
+                else:
+                    matchOrder.append(self.opt1map.get(id(e),e))
+                    if e in tmpReqd:
+                        tmpReqd.remove(e)
+                    elif e in tmpOpt:
+                        tmpOpt.remove(e)
+            if len(failed) == len(tmpExprs):
+                keepMatching = False
+
+        if tmpReqd:
+            missing = ", ".join(_ustr(e) for e in tmpReqd)
+            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
+
+        # add any unmatched Optionals, in case they have default values defined
+        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
+
+        resultlist = []
+        for e in matchOrder:
+            loc,results = e._parse(instring,loc,doActions)
+            resultlist.append(results)
+
+        finalResults = sum(resultlist, ParseResults([]))
+        return loc, finalResults
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class ParseElementEnhance(ParserElement):
+    """
+    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
+    """
+    def __init__( self, expr, savelist=False ):
+        super(ParseElementEnhance,self).__init__(savelist)
+        if isinstance( expr, basestring ):
+            if issubclass(ParserElement._literalStringClass, Token):
+                expr = ParserElement._literalStringClass(expr)
+            else:
+                expr = ParserElement._literalStringClass(Literal(expr))
+        self.expr = expr
+        self.strRepr = None
+        if expr is not None:
+            self.mayIndexError = expr.mayIndexError
+            self.mayReturnEmpty = expr.mayReturnEmpty
+            self.setWhitespaceChars( expr.whiteChars )
+            self.skipWhitespace = expr.skipWhitespace
+            self.saveAsList = expr.saveAsList
+            self.callPreparse = expr.callPreparse
+            self.ignoreExprs.extend(expr.ignoreExprs)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.expr is not None:
+            return self.expr._parse( instring, loc, doActions, callPreParse=False )
+        else:
+            raise ParseException("",loc,self.errmsg,self)
+
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        if self.expr is not None:
+            self.expr = self.expr.copy()
+            self.expr.leaveWhitespace()
+        return self
+
+    def ignore( self, other ):
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                super( ParseElementEnhance, self).ignore( other )
+                if self.expr is not None:
+                    self.expr.ignore( self.ignoreExprs[-1] )
+        else:
+            super( ParseElementEnhance, self).ignore( other )
+            if self.expr is not None:
+                self.expr.ignore( self.ignoreExprs[-1] )
+        return self
+
+    def streamline( self ):
+        super(ParseElementEnhance,self).streamline()
+        if self.expr is not None:
+            self.expr.streamline()
+        return self
+
+    def checkRecursion( self, parseElementList ):
+        if self in parseElementList:
+            raise RecursiveGrammarException( parseElementList+[self] )
+        subRecCheckList = parseElementList[:] + [ self ]
+        if self.expr is not None:
+            self.expr.checkRecursion( subRecCheckList )
+
+    def validate( self, validateTrace=[] ):
+        tmp = validateTrace[:]+[self]
+        if self.expr is not None:
+            self.expr.validate(tmp)
+        self.checkRecursion( [] )
+
+    def __str__( self ):
+        try:
+            return super(ParseElementEnhance,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None and self.expr is not None:
+            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
+        return self.strRepr
+
+
+class FollowedBy(ParseElementEnhance):
+    """
+    Lookahead matching of the given parse expression.  C{FollowedBy}
+    does I{not} advance the parsing position within the input string, it only
+    verifies that the specified parse expression matches at the current
+    position.  C{FollowedBy} always returns a null token list.
+
+    Example::
+        # use FollowedBy to match a label only if it is followed by a ':'
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        
+        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
+    prints::
+        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
+    """
+    def __init__( self, expr ):
+        super(FollowedBy,self).__init__(expr)
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        self.expr.tryParse( instring, loc )
+        return loc, []
+
+
+class NotAny(ParseElementEnhance):
+    """
+    Lookahead to disallow matching with the given parse expression.  C{NotAny}
+    does I{not} advance the parsing position within the input string, it only
+    verifies that the specified parse expression does I{not} match at the current
+    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
+    always returns a null token list.  May be constructed using the '~' operator.
+
+    Example::
+        
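+        # a minimal sketch: match a word only if it is not a reserved keyword
+        keyword = Keyword("if") | Keyword("else")
+        identifier = ~keyword + Word(alphas)
+        print(identifier.parseString("shape"))  # -> ['shape']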
+    """
+    def __init__( self, expr ):
+        super(NotAny,self).__init__(expr)
+        #~ self.leaveWhitespace()
+        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
+        self.mayReturnEmpty = True
+        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.expr.canParseNext(instring, loc):
+            raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "~{" + _ustr(self.expr) + "}"
+
+        return self.strRepr
+
+class _MultipleMatch(ParseElementEnhance):
+    def __init__( self, expr, stopOn=None):
+        super(_MultipleMatch, self).__init__(expr)
+        self.saveAsList = True
+        ender = stopOn
+        if isinstance(ender, basestring):
+            ender = ParserElement._literalStringClass(ender)
+        self.not_ender = ~ender if ender is not None else None
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        self_expr_parse = self.expr._parse
+        self_skip_ignorables = self._skipIgnorables
+        check_ender = self.not_ender is not None
+        if check_ender:
+            try_not_ender = self.not_ender.tryParse
+        
+        # must be at least one (but first see if we are the stopOn sentinel;
+        # if so, fail)
+        if check_ender:
+            try_not_ender(instring, loc)
+        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
+        try:
+            hasIgnoreExprs = (not not self.ignoreExprs)
+            while 1:
+                if check_ender:
+                    try_not_ender(instring, loc)
+                if hasIgnoreExprs:
+                    preloc = self_skip_ignorables( instring, loc )
+                else:
+                    preloc = loc
+                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
+                if tmptokens or tmptokens.haskeys():
+                    tokens += tmptokens
+        except (ParseException,IndexError):
+            pass
+
+        return loc, tokens
+        
+class OneOrMore(_MultipleMatch):
+    """
+    Repetition of one or more of the given expression.
+    
+    Parameters:
+     - expr - expression that must match one or more times
+     - stopOn - (default=C{None}) - expression for a terminating sentinel
+          (only required if the sentinel would ordinarily match the repetition 
+          expression)          
+
+    Example::
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: BLACK"
+        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
+
+        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
+        
+        # could also be written as
+        (attr_expr * (1,)).parseString(text).pprint()
+    """
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + _ustr(self.expr) + "}..."
+
+        return self.strRepr
+
+class ZeroOrMore(_MultipleMatch):
+    """
+    Optional repetition of zero or more of the given expression.
+    
+    Parameters:
+     - expr - expression that must match zero or more times
+     - stopOn - (default=C{None}) - expression for a terminating sentinel
+          (only required if the sentinel would ordinarily match the repetition 
+          expression)          
+
+    Example: similar to L{OneOrMore}
+    """
+    def __init__( self, expr, stopOn=None):
+        super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
+        self.mayReturnEmpty = True
+        
+    def parseImpl( self, instring, loc, doActions=True ):
+        try:
+            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
+        except (ParseException,IndexError):
+            return loc, []
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "[" + _ustr(self.expr) + "]..."
+
+        return self.strRepr
+
+class _NullToken(object):
+    def __bool__(self):
+        return False
+    __nonzero__ = __bool__
+    def __str__(self):
+        return ""
+
+_optionalNotMatched = _NullToken()
+class Optional(ParseElementEnhance):
+    """
+    Optional matching of the given expression.
+
+    Parameters:
+     - expr - expression that may match zero or one time
+     - default (optional) - value to be returned if the optional expression is not found.
+
+    Example::
+        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
+        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
+        zip.runTests('''
+            # traditional ZIP code
+            12345
+            
+            # ZIP+4 form
+            12101-0001
+            
+            # invalid ZIP
+            98765-
+            ''')
+    prints::
+        # traditional ZIP code
+        12345
+        ['12345']
+
+        # ZIP+4 form
+        12101-0001
+        ['12101-0001']
+
+        # invalid ZIP
+        98765-
+             ^
+        FAIL: Expected end of text (at char 5), (line:1, col:6)
+    """
+    def __init__( self, expr, default=_optionalNotMatched ):
+        super(Optional,self).__init__( expr, savelist=False )
+        self.saveAsList = self.expr.saveAsList
+        self.defaultValue = default
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        try:
+            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
+        except (ParseException,IndexError):
+            if self.defaultValue is not _optionalNotMatched:
+                if self.expr.resultsName:
+                    tokens = ParseResults([ self.defaultValue ])
+                    tokens[self.expr.resultsName] = self.defaultValue
+                else:
+                    tokens = [ self.defaultValue ]
+            else:
+                tokens = []
+        return loc, tokens
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "[" + _ustr(self.expr) + "]"
+
+        return self.strRepr
+
+class SkipTo(ParseElementEnhance):
+    """
+    Token for skipping over all undefined text until the matched expression is found.
+
+    Parameters:
+     - expr - target expression marking the end of the data to be skipped
+     - include - (default=C{False}) if True, the target expression is also parsed 
+          (the skipped text and target expression are returned as a 2-element list).
+     - ignore - (default=C{None}) used to define grammars (typically quoted strings and 
+          comments) that might contain false matches to the target expression
+     - failOn - (default=C{None}) define expressions that are not allowed to be 
+          included in the skipped text; if found before the target expression is found,
+          the SkipTo is not a match
+
+    Example::
+        report = '''
+            Outstanding Issues Report - 1 Jan 2000
+
+               # | Severity | Description                               |  Days Open
+            -----+----------+-------------------------------------------+-----------
+             101 | Critical | Intermittent system crash                 |          6
+              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
+              79 | Minor    | System slow when running too many reports |         47
+            '''
+        integer = Word(nums)
+        SEP = Suppress('|')
+        # use SkipTo to simply match everything up until the next SEP
+        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
+        # - parse action will call token.strip() for each matched token, i.e., the description body
+        string_data = SkipTo(SEP, ignore=quotedString)
+        string_data.setParseAction(tokenMap(str.strip))
+        ticket_expr = (integer("issue_num") + SEP 
+                      + string_data("sev") + SEP 
+                      + string_data("desc") + SEP 
+                      + integer("days_open"))
+        
+        for tkt in ticket_expr.searchString(report):
+            print(tkt.dump())
+    prints::
+        ['101', 'Critical', 'Intermittent system crash', '6']
+        - days_open: 6
+        - desc: Intermittent system crash
+        - issue_num: 101
+        - sev: Critical
+        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
+        - days_open: 14
+        - desc: Spelling error on Login ('log|n')
+        - issue_num: 94
+        - sev: Cosmetic
+        ['79', 'Minor', 'System slow when running too many reports', '47']
+        - days_open: 47
+        - desc: System slow when running too many reports
+        - issue_num: 79
+        - sev: Minor
+    """
+    def __init__( self, other, include=False, ignore=None, failOn=None ):
+        super( SkipTo, self ).__init__( other )
+        self.ignoreExpr = ignore
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.includeMatch = include
+        self.asList = False
+        if isinstance(failOn, basestring):
+            self.failOn = ParserElement._literalStringClass(failOn)
+        else:
+            self.failOn = failOn
+        self.errmsg = "No match found for "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        startloc = loc
+        instrlen = len(instring)
+        expr = self.expr
+        expr_parse = self.expr._parse
+        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
+        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
+        
+        tmploc = loc
+        while tmploc <= instrlen:
+            if self_failOn_canParseNext is not None:
+                # break if failOn expression matches
+                if self_failOn_canParseNext(instring, tmploc):
+                    break
+                    
+            if self_ignoreExpr_tryParse is not None:
+                # advance past ignore expressions
+                while 1:
+                    try:
+                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
+                    except ParseBaseException:
+                        break
+            
+            try:
+                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
+            except (ParseException, IndexError):
+                # no match, advance loc in string
+                tmploc += 1
+            else:
+                # matched skipto expr, done
+                break
+
+        else:
+            # ran off the end of the input string without matching skipto expr, fail
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        # build up return values
+        loc = tmploc
+        skiptext = instring[startloc:loc]
+        skipresult = ParseResults(skiptext)
+        
+        if self.includeMatch:
+            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
+            skipresult += mat
+
+        return loc, skipresult
+
+class Forward(ParseElementEnhance):
+    """
+    Forward declaration of an expression to be defined later -
+    used for recursive grammars, such as algebraic infix notation.
+    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
+
+    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
+    Specifically, '|' has a lower precedence than '<<', so that::
+        fwdExpr << a | b | c
+    will actually be evaluated as::
+        (fwdExpr << a) | b | c
+    thereby leaving b and c out as parseable alternatives.  It is recommended that you
+    explicitly group the values inserted into the C{Forward}::
+        fwdExpr << (a | b | c)
+    Converting to use the '<<=' operator instead will avoid this problem.
+
+    See L{ParseResults.pprint} for an example of a recursive parser created using
+    C{Forward}.
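+
+    Example::
+        # a minimal sketch: a recursive grammar for nested parentheses
+        nested_parens = Forward()
+        nested_parens <<= '(' + Optional(nested_parens) + ')'
+        print(nested_parens.parseString("(())"))  # -> ['(', '(', ')', ')']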
+    """
+    def __init__( self, other=None ):
+        super(Forward,self).__init__( other, savelist=False )
+
+    def __lshift__( self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass(other)
+        self.expr = other
+        self.strRepr = None
+        self.mayIndexError = self.expr.mayIndexError
+        self.mayReturnEmpty = self.expr.mayReturnEmpty
+        self.setWhitespaceChars( self.expr.whiteChars )
+        self.skipWhitespace = self.expr.skipWhitespace
+        self.saveAsList = self.expr.saveAsList
+        self.ignoreExprs.extend(self.expr.ignoreExprs)
+        return self
+        
+    def __ilshift__(self, other):
+        return self << other
+    
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        return self
+
+    def streamline( self ):
+        if not self.streamlined:
+            self.streamlined = True
+            if self.expr is not None:
+                self.expr.streamline()
+        return self
+
+    def validate( self, validateTrace=[] ):
+        if self not in validateTrace:
+            tmp = validateTrace[:]+[self]
+            if self.expr is not None:
+                self.expr.validate(tmp)
+        self.checkRecursion([])
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+        return self.__class__.__name__ + ": ..."
+
+        # stubbed out for now - creates awful memory and perf issues
+        self._revertClass = self.__class__
+        self.__class__ = _ForwardNoRecurse
+        try:
+            if self.expr is not None:
+                retString = _ustr(self.expr)
+            else:
+                retString = "None"
+        finally:
+            self.__class__ = self._revertClass
+        return self.__class__.__name__ + ": " + retString
+
+    def copy(self):
+        if self.expr is not None:
+            return super(Forward,self).copy()
+        else:
+            ret = Forward()
+            ret <<= self
+            return ret
+
+class _ForwardNoRecurse(Forward):
+    def __str__( self ):
+        return "..."
+
+class TokenConverter(ParseElementEnhance):
+    """
+    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
+    """
+    def __init__( self, expr, savelist=False ):
+        super(TokenConverter,self).__init__( expr )#, savelist )
+        self.saveAsList = False
+
+class Combine(TokenConverter):
+    """
+    Converter to concatenate all matching tokens to a single string.
+    By default, the matching patterns must also be contiguous in the input string;
+    this can be disabled by specifying C{'adjacent=False'} in the constructor.
+
+    Example::
+        real = Word(nums) + '.' + Word(nums)
+        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
+        # will also erroneously match the following
+        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
+
+        real = Combine(Word(nums) + '.' + Word(nums))
+        print(real.parseString('3.1416')) # -> ['3.1416']
+        # no match when there are internal spaces
+        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
+    """
+    def __init__( self, expr, joinString="", adjacent=True ):
+        super(Combine,self).__init__( expr )
+        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+        if adjacent:
+            self.leaveWhitespace()
+        self.adjacent = adjacent
+        self.skipWhitespace = True
+        self.joinString = joinString
+        self.callPreparse = True
+
+    def ignore( self, other ):
+        if self.adjacent:
+            ParserElement.ignore(self, other)
+        else:
+            super( Combine, self).ignore( other )
+        return self
+
+    def postParse( self, instring, loc, tokenlist ):
+        retToks = tokenlist.copy()
+        del retToks[:]
+        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
+
+        if self.resultsName and retToks.haskeys():
+            return [ retToks ]
+        else:
+            return retToks
+
+class Group(TokenConverter):
+    """
+    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
+
+    Example::
+        ident = Word(alphas)
+        num = Word(nums)
+        term = ident | num
+        func = ident + Optional(delimitedList(term))
+        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']
+
+        func = ident + Group(Optional(delimitedList(term)))
+        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
+    """
+    def __init__( self, expr ):
+        super(Group,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        return [ tokenlist ]
+
+class Dict(TokenConverter):
+    """
+    Converter to return a repetitive expression as a list, but also as a dictionary.
+    Each element can also be referenced using the first token in the expression as its key.
+    Useful for tabular report scraping when the first column can be used as an item key.
+
+    Example::
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        
+        # print attributes as plain groups
+        print(OneOrMore(attr_expr).parseString(text).dump())
+        
+        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
+        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+        print(result.dump())
+        
+        # access named fields as dict entries, or output as dict
+        print(result['shape'])        
+        print(result.asDict())
+    prints::
+        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+    See more examples at L{ParseResults} of accessing fields by results name.
+    """
+    def __init__( self, expr ):
+        super(Dict,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        for i,tok in enumerate(tokenlist):
+            if len(tok) == 0:
+                continue
+            ikey = tok[0]
+            if isinstance(ikey,int):
+                ikey = _ustr(tok[0]).strip()
+            if len(tok)==1:
+                tokenlist[ikey] = _ParseResultsWithOffset("",i)
+            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
+                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
+            else:
+                dictvalue = tok.copy() #ParseResults(i)
+                del dictvalue[0]
+                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
+                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
+                else:
+                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
+
+        if self.resultsName:
+            return [ tokenlist ]
+        else:
+            return tokenlist
+
+
+class Suppress(TokenConverter):
+    """
+    Converter for ignoring the results of a parsed expression.
+
+    Example::
+        source = "a, b, c,d"
+        wd = Word(alphas)
+        wd_list1 = wd + ZeroOrMore(',' + wd)
+        print(wd_list1.parseString(source))
+
+        # often, delimiters that are useful during parsing are just in the
+        # way afterward - use Suppress to keep them out of the parsed output
+        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+        print(wd_list2.parseString(source))
+    prints::
+        ['a', ',', 'b', ',', 'c', ',', 'd']
+        ['a', 'b', 'c', 'd']
+    (See also L{delimitedList}.)
+    """
+    def postParse( self, instring, loc, tokenlist ):
+        return []
+
+    def suppress( self ):
+        return self
+
+
+class OnlyOnce(object):
+    """
+    Wrapper for parse actions, to ensure they are only called once.
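+
+    Example::
+        # a minimal sketch: the wrapped action fires on the first match only;
+        # a second call raises ParseException, which fails that match
+        pa = OnlyOnce(lambda t: print(t))
+        wd = Word(alphas).setParseAction(pa)
+        print(OneOrMore(wd).parseString("ab cd"))  # prints ['ab'], returns ['ab']
+        pa.reset()  # re-arm the action so it may be called again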
+    """
+    def __init__(self, methodCall):
+        self.callable = _trim_arity(methodCall)
+        self.called = False
+    def __call__(self,s,l,t):
+        if not self.called:
+            results = self.callable(s,l,t)
+            self.called = True
+            return results
+        raise ParseException(s,l,"")
+    def reset(self):
+        self.called = False
+
+def traceParseAction(f):
+    """
+    Decorator for debugging parse actions. 
+    
+    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})"}.
+    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
+
+    Example::
+        wd = Word(alphas)
+
+        @traceParseAction
+        def remove_duplicate_chars(tokens):
+            return ''.join(sorted(set(''.join(tokens))))
+
+        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
+        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
+    prints::
+        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+        <<leaving remove_duplicate_chars (ret: 'dfjkls')
+        ['dfjkls']
+    """
+    f = _trim_arity(f)
+    def z(*paArgs):
+        thisFunc = f.__name__
+        s,l,t = paArgs[-3:]
+        if len(paArgs)>3:
+            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
+        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
+        try:
+            ret = f(*paArgs)
+        except Exception as exc:
+            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
+            raise
+        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
+        return ret
+    try:
+        z.__name__ = f.__name__
+    except AttributeError:
+        pass
+    return z
+
+#
+# global helpers
+#
+def delimitedList( expr, delim=",", combine=False ):
+    """
+    Helper to define a delimited list of expressions - the delimiter defaults to ','.
+    By default, the list elements and delimiters can have intervening whitespace and
+    comments, but this can be overridden by passing C{combine=True}.
+    If C{combine} is set to C{True}, the matching tokens are returned as a single token
+    string, with the delimiters included; otherwise, the matching tokens are returned
+    as a list of tokens, with the delimiters suppressed.
+
+    Example::
+        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
+        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
+    """
+    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
+    if combine:
+        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
+    else:
+        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
+
+def countedArray( expr, intExpr=None ):
+    """
+    Helper to define a counted list of expressions.
+    This helper defines a pattern of the form::
+        integer expr expr expr...
+    where the leading integer tells how many expr expressions follow.
+    The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
+    
+    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
+
+    Example::
+        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']
+
+        # in this parser, the leading integer value is given in binary,
+        # '10' indicating that 2 values are in the array
+        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
+        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
+    """
+    arrayExpr = Forward()
+    def countFieldParseAction(s,l,t):
+        n = t[0]
+        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
+        return []
+    if intExpr is None:
+        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
+    else:
+        intExpr = intExpr.copy()
+    intExpr.setName("arrayLen")
+    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
+    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
+
+def _flatten(L):
+    ret = []
+    for i in L:
+        if isinstance(i,list):
+            ret.extend(_flatten(i))
+        else:
+            ret.append(i)
+    return ret
+
+def matchPreviousLiteral(expr):
+    """
+    Helper to define an expression that is indirectly defined from
+    the tokens matched in a previous expression, that is, it looks
+    for a 'repeat' of a previous expression.  For example::
+        first = Word(nums)
+        second = matchPreviousLiteral(first)
+        matchExpr = first + ":" + second
+    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
+    previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
+    If this is not desired, use C{matchPreviousExpr}.
+    Do I{not} use with packrat parsing enabled.
+    """
+    rep = Forward()
+    def copyTokenToRepeater(s,l,t):
+        if t:
+            if len(t) == 1:
+                rep << t[0]
+            else:
+                # flatten t tokens
+                tflat = _flatten(t.asList())
+                rep << And(Literal(tt) for tt in tflat)
+        else:
+            rep << Empty()
+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+    rep.setName('(prev) ' + _ustr(expr))
+    return rep
+
+def matchPreviousExpr(expr):
+    """
+    Helper to define an expression that is indirectly defined from
+    the tokens matched in a previous expression, that is, it looks
+    for a 'repeat' of a previous expression.  For example::
+        first = Word(nums)
+        second = matchPreviousExpr(first)
+        matchExpr = first + ":" + second
+    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
+    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
+    the expressions are evaluated first, and then compared, so
+    C{"1"} is compared with C{"10"}.
+    Do I{not} use with packrat parsing enabled.
+    """
+    rep = Forward()
+    e2 = expr.copy()
+    rep <<= e2
+    def copyTokenToRepeater(s,l,t):
+        matchTokens = _flatten(t.asList())
+        def mustMatchTheseTokens(s,l,t):
+            theseTokens = _flatten(t.asList())
+            if  theseTokens != matchTokens:
+                raise ParseException("",0,"")
+        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+    rep.setName('(prev) ' + _ustr(expr))
+    return rep
+
+def _escapeRegexRangeChars(s):
+    #~  escape these chars: ^-]
+    for c in r"\^-]":
+        s = s.replace(c,_bslash+c)
+    s = s.replace("\n",r"\n")
+    s = s.replace("\t",r"\t")
+    return _ustr(s)
+
+def oneOf( strs, caseless=False, useRegex=True ):
+    """
+    Helper to quickly define a set of alternative Literals, and makes sure to do
+    longest-first testing when there is a conflict, regardless of the input order,
+    but returns a C{L{MatchFirst}} for best performance.
+
+    Parameters:
+     - strs - a string of space-delimited literals, or a collection of string literals
+     - caseless - (default=C{False}) - treat all literals as caseless
+     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
+          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
+          if creating a C{Regex} raises an exception)
+
+    Example::
+        comp_oper = oneOf("< = > <= >= !=")
+        var = Word(alphas)
+        number = Word(nums)
+        term = var | number
+        comparison_expr = term + comp_oper + term
+        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
+    prints::
+        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
+    """
+    if caseless:
+        isequal = ( lambda a,b: a.upper() == b.upper() )
+        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
+        parseElementClass = CaselessLiteral
+    else:
+        isequal = ( lambda a,b: a == b )
+        masks = ( lambda a,b: b.startswith(a) )
+        parseElementClass = Literal
+
+    symbols = []
+    if isinstance(strs,basestring):
+        symbols = strs.split()
+    elif isinstance(strs, Iterable):
+        symbols = list(strs)
+    else:
+        warnings.warn("Invalid argument to oneOf, expected string or iterable",
+                SyntaxWarning, stacklevel=2)
+    if not symbols:
+        return NoMatch()
+
+    i = 0
+    while i < len(symbols)-1:
+        cur = symbols[i]
+        for j,other in enumerate(symbols[i+1:]):
+            if ( isequal(other, cur) ):
+                del symbols[i+j+1]
+                break
+            elif ( masks(cur, other) ):
+                del symbols[i+j+1]
+                symbols.insert(i,other)
+                cur = other
+                break
+        else:
+            i += 1
+
+    if not caseless and useRegex:
+        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
+        try:
+            if len(symbols)==len("".join(symbols)):
+                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
+            else:
+                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
+        except Exception:
+            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
+                    SyntaxWarning, stacklevel=2)
+
+
+    # last resort, just use MatchFirst
+    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
+
+def dictOf( key, value ):
+    """
+    Helper to easily and clearly define a dictionary by specifying the respective patterns
+    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
+    in the proper order.  The key pattern can include delimiting markers or punctuation,
+    as long as they are suppressed, thereby leaving the significant key text.  The value
+    pattern can include named results, so that the C{Dict} results can include named token
+    fields.
+
+    Example::
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        print(OneOrMore(attr_expr).parseString(text).dump())
+        
+        attr_label = label
+        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
+
+        # similar to Dict, but simpler call format
+        result = dictOf(attr_label, attr_value).parseString(text)
+        print(result.dump())
+        print(result['shape'])
+        print(result.shape)  # object attribute access works too
+        print(result.asDict())
+    prints::
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        SQUARE
+        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+    """
+    return Dict( ZeroOrMore( Group ( key + value ) ) )
+
+def originalTextFor(expr, asString=True):
+    """
+    Helper to return the original, untokenized text for a given expression.  Useful to
+    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
+    revert separate tokens with intervening whitespace back to the original matching
+    input text. By default, returns a string containing the original parsed text.
+       
+    If the optional C{asString} argument is passed as C{False}, then the return value is a 
+    C{L{ParseResults}} containing any results names that were originally matched, and a 
+    single token containing the original matched text from the input string.  So if 
+    the expression passed to C{L{originalTextFor}} contains expressions with defined
+    results names, you must set C{asString} to C{False} if you want to preserve those
+    results name values.
+
+    Example::
+        src = "this is test <b> bold <i>text</i> </b> normal text "
+        for tag in ("b","i"):
+            opener,closer = makeHTMLTags(tag)
+            patt = originalTextFor(opener + SkipTo(closer) + closer)
+            print(patt.searchString(src)[0])
+    prints::
+        ['<b> bold <i>text</i> </b>']
+        ['<i>text</i>']
+    """
+    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
+    endlocMarker = locMarker.copy()
+    endlocMarker.callPreparse = False
+    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+    if asString:
+        extractText = lambda s,l,t: s[t._original_start:t._original_end]
+    else:
+        def extractText(s,l,t):
+            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
+    matchExpr.setParseAction(extractText)
+    matchExpr.ignoreExprs = expr.ignoreExprs
+    return matchExpr
+
+def ungroup(expr): 
+    """
+    Helper to undo pyparsing's default grouping of And expressions, even
+    if all but one are non-empty.
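+
+    Example::
+        # a minimal sketch: remove one level of Group nesting from the results
+        print(Group(Word(alphas)).parseString("abc"))           # -> [['abc']]
+        print(ungroup(Group(Word(alphas))).parseString("abc"))  # -> ['abc']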
+    """
+    return TokenConverter(expr).setParseAction(lambda t:t[0])
+
+def locatedExpr(expr):
+    """
+    Helper to decorate a returned token with its starting and ending locations in the input string.
+    This helper adds the following results names:
+     - locn_start = location where matched expression begins
+     - locn_end = location where matched expression ends
+     - value = the actual parsed results
+
+    Be careful if the input text contains C{<TAB>} characters, you may want to call
+    C{L{ParserElement.parseWithTabs}}
+
+    Example::
+        wd = Word(alphas)
+        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
+            print(match)
+    prints::
+        [[0, 'ljsdf', 5]]
+        [[8, 'lksdjjf', 15]]
+        [[18, 'lkkjj', 23]]
+    """
+    locator = Empty().setParseAction(lambda s,l,t: l)
+    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
+
+
+# convenience constants for positional expressions
+empty       = Empty().setName("empty")
+lineStart   = LineStart().setName("lineStart")
+lineEnd     = LineEnd().setName("lineEnd")
+stringStart = StringStart().setName("stringStart")
+stringEnd   = StringEnd().setName("stringEnd")
+
+_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
+_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
+_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
+
+def srange(s):
+    r"""
+    Helper to easily define string ranges for use in Word construction.  Borrows
+    syntax from regexp '[]' string range definitions::
+        srange("[0-9]")   -> "0123456789"
+        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
+        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+    The input string must be enclosed in []'s, and the returned string is the expanded
+    character set joined into a single string.
+    The values enclosed in the []'s may be:
+     - a single character
+     - an escaped character with a leading backslash (such as C{\-} or C{\]})
+     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
+         (C{\0x##} is also supported for backwards compatibility) 
+     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
+     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
+     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
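+
+    Example::
+        # use srange to build the character sets for a Word expression
+        identifier = Word(srange("[a-zA-Z_]"), srange("[a-zA-Z0-9_]"))
+        print(identifier.parseString("_my_var1"))  # -> ['_my_var1']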
+    """
+    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
+    try:
+        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
+    except Exception:
+        return ""
+
+def matchOnlyAtCol(n):
+    """
+    Helper method for defining parse actions that require matching at a specific
+    column in the input text.
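+
+    Example::
+        # a minimal sketch: only accept a number that starts in column 8
+        value = Word(nums).setParseAction(matchOnlyAtCol(8))
+        print(value.searchString("       123"))  # -> [['123']]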
+    """
+    def verifyCol(strg,locn,toks):
+        if col(locn,strg) != n:
+            raise ParseException(strg,locn,"matched token not at column %d" % n)
+    return verifyCol
+
+def replaceWith(replStr):
+    """
+    Helper method for common parse actions that simply return a literal value.  Especially
+    useful when used with C{L{transformString<ParserElement.transformString>}()}.
+
+    Example::
+        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
+        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
+        term = na | num
+        
+        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
+    """
+    return lambda s,l,t: [replStr]
+
+def removeQuotes(s,l,t):
+    """
+    Helper parse action for removing quotation marks from parsed quoted strings.
+
+    Example::
+        # by default, quotation marks are included in parsed results
+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+        # use removeQuotes to strip quotation marks from parsed results
+        quotedString.setParseAction(removeQuotes)
+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+    """
+    return t[0][1:-1]
+
+def tokenMap(func, *args):
+    """
+    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
+    args are passed, they are forwarded to the given function as additional arguments after
+    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
+    parsed data to an integer using base 16.
+
+    Example (compare the last example to the one in L{ParserElement.transformString})::
+        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
+        hex_ints.runTests('''
+            00 11 22 aa FF 0a 0d 1a
+            ''')
+        
+        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
+        OneOrMore(upperword).runTests('''
+            my kingdom for a horse
+            ''')
+
+        wd = Word(alphas).setParseAction(tokenMap(str.title))
+        OneOrMore(wd).setParseAction(' '.join).runTests('''
+            now is the winter of our discontent made glorious summer by this sun of york
+            ''')
+    prints::
+        00 11 22 aa FF 0a 0d 1a
+        [0, 17, 34, 170, 255, 10, 13, 26]
+
+        my kingdom for a horse
+        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
+
+        now is the winter of our discontent made glorious summer by this sun of york
+        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
+    """
+    def pa(s,l,t):
+        return [func(tokn, *args) for tokn in t]
+
+    try:
+        func_name = getattr(func, '__name__', 
+                            getattr(func, '__class__').__name__)
+    except Exception:
+        func_name = str(func)
+    pa.__name__ = func_name
+
+    return pa
+
+upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
+"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
+
+downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
+"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
+    
+def _makeTags(tagStr, xml):
+    """Internal helper to construct opening and closing tag expressions, given a tag name"""
+    if isinstance(tagStr,basestring):
+        resname = tagStr
+        tagStr = Keyword(tagStr, caseless=not xml)
+    else:
+        resname = tagStr.name
+
+    tagAttrName = Word(alphas,alphanums+"_-:")
+    if xml:
+        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
+        openTag = Suppress("<") + tagStr("tag") + \
+                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
+                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+    else:
+        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
+        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
+        openTag = Suppress("<") + tagStr("tag") + \
+                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
+                Optional( Suppress("=") + tagAttrValue ) ))) + \
+                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+    closeTag = Combine(_L("</") + tagStr + ">")
+
+    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
+    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
+    openTag.tag = resname
+    closeTag.tag = resname
+    return openTag, closeTag
+
+def makeHTMLTags(tagStr):
+    """
+    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
+    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
+
+    Example::
+        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
+        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
+        a,a_end = makeHTMLTags("A")
+        link_expr = a + SkipTo(a_end)("link_text") + a_end
+        
+        for link in link_expr.searchString(text):
+            # attributes in the <A> tag (like "href" shown here) are also accessible as named results
+            print(link.link_text, '->', link.href)
+    prints::
+        pyparsing -> http://pyparsing.wikispaces.com
+    """
+    return _makeTags( tagStr, False )
+
+def makeXMLTags(tagStr):
+    """
+    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
+    tags only in the exact case of the given tag name.
+
+    Example: similar to L{makeHTMLTags}
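+
+    A minimal illustrative sketch (hypothetical input)::
+        text = '<body>Hello</body>'
+        body,body_end = makeXMLTags("body")
+        body_expr = body + SkipTo(body_end)("content") + body_end
+        print(body_expr.parseString(text).content) # -> 'Hello'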
+    """
+    return _makeTags( tagStr, True )
+
+def withAttribute(*args,**attrDict):
+    """
+    Helper to create a validating parse action to be used with start tags created
+    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
+    with a required attribute value, to avoid false matches on common tags such as
+    C{<TD>} or C{<DIV>}.
+
+    Call C{withAttribute} with a series of attribute names and values. Specify the list
+    of filter attribute names and values as:
+     - keyword arguments, as in C{(align="right")}, or
+     - an explicit dict with C{**} operator, when an attribute name is also a Python
+          reserved word, as in C{**{"class":"Customer", "align":"right"}}, or
+     - a list of name-value tuples, as in C{(("ns1:class", "Customer"), ("ns2:align","right"))}
+    For attribute names with a namespace prefix, you must use the second or third form.
+    Attribute names are matched case-insensitively.
+       
+    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
+
+    To verify that the attribute exists, but without specifying a value, pass
+    C{withAttribute.ANY_VALUE} as the value.
+
+    Example::
+        html = '''
+            <div>
+            Some text
+            <div type="grid">1 4 0 1 0</div>
+            <div type="graph">1,3 2,3 1,1</div>
+            <div>this has no type</div>
+            </div>
+                
+        '''
+        div,div_end = makeHTMLTags("div")
+
+        # only match div tag having a type attribute with value "grid"
+        div_grid = div().setParseAction(withAttribute(type="grid"))
+        grid_expr = div_grid + SkipTo(div | div_end)("body")
+        for grid_header in grid_expr.searchString(html):
+            print(grid_header.body)
+        
+        # construct a match with any div tag having a type attribute, regardless of the value
+        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
+        div_expr = div_any_type + SkipTo(div | div_end)("body")
+        for div_header in div_expr.searchString(html):
+            print(div_header.body)
+    prints::
+        1 4 0 1 0
+
+        1 4 0 1 0
+        1,3 2,3 1,1
+    """
+    if args:
+        attrs = args[:]
+    else:
+        attrs = attrDict.items()
+    attrs = [(k,v) for k,v in attrs]
+    def pa(s,l,tokens):
+        for attrName,attrValue in attrs:
+            if attrName not in tokens:
+                raise ParseException(s,l,"no matching attribute " + attrName)
+            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
+                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
+                                            (attrName, tokens[attrName], attrValue))
+    return pa
+withAttribute.ANY_VALUE = object()
+
+def withClass(classname, namespace=''):
+    """
+    Simplified version of C{L{withAttribute}} when matching on a div class - made
+    difficult because C{class} is a reserved word in Python.
+
+    Example::
+        html = '''
+            <div>
+            Some text
+            <div class="grid">1 4 0 1 0</div>
+            <div class="graph">1,3 2,3 1,1</div>
+            <div>this &lt;div&gt; has no class</div>
+            </div>
+                
+        '''
+        div,div_end = makeHTMLTags("div")
+        div_grid = div().setParseAction(withClass("grid"))
+        
+        grid_expr = div_grid + SkipTo(div | div_end)("body")
+        for grid_header in grid_expr.searchString(html):
+            print(grid_header.body)
+        
+        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
+        div_expr = div_any_type + SkipTo(div | div_end)("body")
+        for div_header in div_expr.searchString(html):
+            print(div_header.body)
+    prints::
+        1 4 0 1 0
+
+        1 4 0 1 0
+        1,3 2,3 1,1
+    """
+    classattr = "%s:class" % namespace if namespace else "class"
+    return withAttribute(**{classattr : classname})        
+
+opAssoc = _Constants()
+opAssoc.LEFT = object()
+opAssoc.RIGHT = object()
+
+def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
+    """
+    Helper method for constructing grammars of expressions made up of
+    operators working in a precedence hierarchy.  Operators may be unary or
+    binary, left- or right-associative.  Parse actions can also be attached
+    to operator expressions. The generated parser will also recognize the use 
+    of parentheses to override operator precedences (see example below).
+    
+    Note: if you define a deep operator list, you may see performance issues
+    when using infixNotation. See L{ParserElement.enablePackrat} for a
+    mechanism to potentially improve your parser performance.
+
+    Parameters:
+     - baseExpr - expression representing the most basic element of the nested expression hierarchy
+     - opList - list of tuples, one for each operator precedence level in the
+      expression grammar; each tuple is of the form
+      (opExpr, numTerms, rightLeftAssoc, parseAction), where:
+       - opExpr is the pyparsing expression for the operator;
+          may also be a string, which will be converted to a Literal;
+          if numTerms is 3, opExpr is a tuple of two expressions, for the
+          two operators separating the 3 terms
+       - numTerms is the number of terms for this operator (must
+          be 1, 2, or 3)
+       - rightLeftAssoc indicates whether the operator is
+          right- or left-associative, using the pyparsing-defined
+          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
+       - parseAction is the parse action to be associated with
+          expressions matching this operator expression (the
+          parse action tuple member may be omitted); if the parse action
+          is passed a tuple or list of functions, this is equivalent to
+          calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
+     - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
+     - rpar - expression for matching right-parentheses (default=C{Suppress(')')})
+
+    Example::
+        # simple example of four-function arithmetic with ints and variable names
+        integer = pyparsing_common.signed_integer
+        varname = pyparsing_common.identifier 
+        
+        arith_expr = infixNotation(integer | varname,
+            [
+            ('-', 1, opAssoc.RIGHT),
+            (oneOf('* /'), 2, opAssoc.LEFT),
+            (oneOf('+ -'), 2, opAssoc.LEFT),
+            ])
+        
+        arith_expr.runTests('''
+            5+3*6
+            (5+3)*6
+            -2--11
+            ''', fullDump=False)
+    prints::
+        5+3*6
+        [[5, '+', [3, '*', 6]]]
+
+        (5+3)*6
+        [[[5, '+', 3], '*', 6]]
+
+        -2--11
+        [[['-', 2], '-', ['-', 11]]]
+    """
+    ret = Forward()
+    lastExpr = baseExpr | ( lpar + ret + rpar )
+    for i,operDef in enumerate(opList):
+        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
+        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
+        if arity == 3:
+            if opExpr is None or len(opExpr) != 2:
+                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
+            opExpr1, opExpr2 = opExpr
+        thisExpr = Forward().setName(termName)
+        if rightLeftAssoc == opAssoc.LEFT:
+            if arity == 1:
+                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
+            elif arity == 2:
+                if opExpr is not None:
+                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
+                else:
+                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
+            elif arity == 3:
+                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
+                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
+            else:
+                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+        elif rightLeftAssoc == opAssoc.RIGHT:
+            if arity == 1:
+                # try to avoid LR with this extra test
+                if not isinstance(opExpr, Optional):
+                    opExpr = Optional(opExpr)
+                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
+            elif arity == 2:
+                if opExpr is not None:
+                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
+                else:
+                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
+            elif arity == 3:
+                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
+                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
+            else:
+                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+        else:
+            raise ValueError("operator must indicate right or left associativity")
+        if pa:
+            if isinstance(pa, (tuple, list)):
+                matchExpr.setParseAction(*pa)
+            else:
+                matchExpr.setParseAction(pa)
+        thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
+        lastExpr = thisExpr
+    ret <<= lastExpr
+    return ret
+
+operatorPrecedence = infixNotation
+"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
+
+dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
+sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
+quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
+                       Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
+unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
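+# e.g. (an illustrative note): unicodeString.parseString('u"abc"') -> ['u"abc"']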
+
+def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
+    """
+    Helper method for defining nested lists enclosed in opening and closing
+    delimiters ("(" and ")" are the default).
+
+    Parameters:
+     - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
+     - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
+     - content - expression for items within the nested lists (default=C{None})
+     - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
+
+    If an expression is not provided for the content argument, the nested
+    expression will capture all whitespace-delimited content between delimiters
+    as a list of separate values.
+
+    Use the C{ignoreExpr} argument to define expressions that may contain
+    opening or closing characters that should not be treated as opening
+    or closing characters for nesting, such as quotedString or a comment
+    expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
+    The default is L{quotedString}, but if no expressions are to be ignored,
+    then pass C{None} for this argument.
+
+    Example::
+        data_type = oneOf("void int short long char float double")
+        decl_data_type = Combine(data_type + Optional(Word('*')))
+        ident = Word(alphas+'_', alphanums+'_')
+        number = pyparsing_common.number
+        arg = Group(decl_data_type + ident)
+        LPAR,RPAR = map(Suppress, "()")
+
+        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
+
+        c_function = (decl_data_type("type") 
+                      + ident("name")
+                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR 
+                      + code_body("body"))
+        c_function.ignore(cStyleComment)
+        
+        source_code = '''
+            int is_odd(int x) { 
+                return (x%2); 
+            }
+                
+            int dec_to_hex(char hchar) { 
+                if (hchar >= '0' && hchar <= '9') { 
+                    return (ord(hchar)-ord('0')); 
+                } else { 
+                    return (10+ord(hchar)-ord('A'));
+                } 
+            }
+        '''
+        for func in c_function.searchString(source_code):
+            print("%(name)s (%(type)s) args: %(args)s" % func)
+
+    prints::
+        is_odd (int) args: [['int', 'x']]
+        dec_to_hex (int) args: [['char', 'hchar']]
+    """
+    if opener == closer:
+        raise ValueError("opening and closing strings cannot be the same")
+    if content is None:
+        if isinstance(opener,basestring) and isinstance(closer,basestring):
+            if len(opener) == 1 and len(closer)==1:
+                if ignoreExpr is not None:
+                    content = (Combine(OneOrMore(~ignoreExpr +
+                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+                else:
+                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
+                                ).setParseAction(lambda t:t[0].strip()))
+            else:
+                if ignoreExpr is not None:
+                    content = (Combine(OneOrMore(~ignoreExpr + 
+                                    ~Literal(opener) + ~Literal(closer) +
+                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+                else:
+                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
+                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+        else:
+            raise ValueError("opening and closing arguments must be strings if no content expression is given")
+    ret = Forward()
+    if ignoreExpr is not None:
+        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
+    else:
+        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content )  + Suppress(closer) )
+    ret.setName('nested %s%s expression' % (opener,closer))
+    return ret
+
+def indentedBlock(blockStatementExpr, indentStack, indent=True):
+    """
+    Helper method for defining space-delimited indentation blocks, such as
+    those used to define block statements in Python source code.
+
+    Parameters:
+     - blockStatementExpr - expression defining syntax of statement that
+            is repeated within the indented block
+     - indentStack - list created by caller to manage indentation stack
+            (multiple C{indentedBlock} expressions within a single grammar
+            should share a common indentStack)
+     - indent - boolean indicating whether block must be indented beyond
+            the current level; set to False for block of left-most statements
+            (default=C{True})
+
+    A valid block must contain at least one C{blockStatement}.
+
+    Example::
+        data = '''
+        def A(z):
+          A1
+          B = 100
+          G = A2
+          A2
+          A3
+        B
+        def BB(a,b,c):
+          BB1
+          def BBA():
+            bba1
+            bba2
+            bba3
+        C
+        D
+        def spam(x,y):
+             def eggs(z):
+                 pass
+        '''
+
+
+        indentStack = [1]
+        stmt = Forward()
+
+        identifier = Word(alphas, alphanums)
+        funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
+        func_body = indentedBlock(stmt, indentStack)
+        funcDef = Group( funcDecl + func_body )
+
+        rvalue = Forward()
+        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
+        rvalue << (funcCall | identifier | Word(nums))
+        assignment = Group(identifier + "=" + rvalue)
+        stmt << ( funcDef | assignment | identifier )
+
+        module_body = OneOrMore(stmt)
+
+        parseTree = module_body.parseString(data)
+        parseTree.pprint()
+    prints::
+        [['def',
+          'A',
+          ['(', 'z', ')'],
+          ':',
+          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
+         'B',
+         ['def',
+          'BB',
+          ['(', 'a', 'b', 'c', ')'],
+          ':',
+          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
+         'C',
+         'D',
+         ['def',
+          'spam',
+          ['(', 'x', 'y', ')'],
+          ':',
+          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] 
+    """
+    def checkPeerIndent(s,l,t):
+        if l >= len(s): return
+        curCol = col(l,s)
+        if curCol != indentStack[-1]:
+            if curCol > indentStack[-1]:
+                raise ParseFatalException(s,l,"illegal nesting")
+            raise ParseException(s,l,"not a peer entry")
+
+    def checkSubIndent(s,l,t):
+        curCol = col(l,s)
+        if curCol > indentStack[-1]:
+            indentStack.append( curCol )
+        else:
+            raise ParseException(s,l,"not a subentry")
+
+    def checkUnindent(s,l,t):
+        if l >= len(s): return
+        curCol = col(l,s)
+        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
+            raise ParseException(s,l,"not an unindent")
+        indentStack.pop()
+
+    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
+    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
+    PEER   = Empty().setParseAction(checkPeerIndent).setName('')
+    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
+    if indent:
+        smExpr = Group( Optional(NL) +
+            #~ FollowedBy(blockStatementExpr) +
+            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
+    else:
+        smExpr = Group( Optional(NL) +
+            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
+    blockStatementExpr.ignore(_bslash + LineEnd())
+    return smExpr.setName('indented block')
+
+alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
+punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
+
+anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
+_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
+commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
+def replaceHTMLEntity(t):
+    """Helper parser action to replace common HTML entities with their special characters"""
+    return _htmlEntityMap.get(t.entity)
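+
+# An illustrative sketch (hypothetical input); attach the action to a copy so
+# the module-level expression stays untouched:
+#   entity = commonHTMLEntity.copy().setParseAction(replaceHTMLEntity)
+#   entity.transformString("if x &lt; 10 &amp;&amp; y &gt; 2")
+#   # -> 'if x < 10 && y > 2'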
+
+# it's easy to get these comment structures wrong - they're very common, so may as well make them available
+cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
+"Comment of the form C{/* ... */}"
+
+htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
+"Comment of the form C{<!-- ... -->}"
+
+restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
+dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
+"Comment of the form C{// ... (to end of line)}"
+
+cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
+"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
+
+javaStyleComment = cppStyleComment
+"Same as C{L{cppStyleComment}}"
+
+pythonStyleComment = Regex(r"#.*").setName("Python style comment")
+"Comment of the form C{# ... (to end of line)}"
+
+_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
+                                  Optional( Word(" \t") +
+                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
+commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
+"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
+   This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
+
+# some other useful expressions - using lower-case class name since we are really using this as a namespace
+class pyparsing_common:
+    """
+    Here are some common low-level expressions that may be useful in jump-starting parser development:
+     - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
+     - common L{programming identifiers<identifier>}
+     - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
+     - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
+     - L{UUID<uuid>}
+     - L{comma-separated list<comma_separated_list>}
+    Parse actions:
+     - C{L{convertToInteger}}
+     - C{L{convertToFloat}}
+     - C{L{convertToDate}}
+     - C{L{convertToDatetime}}
+     - C{L{stripHTMLTags}}
+     - C{L{upcaseTokens}}
+     - C{L{downcaseTokens}}
+
+    Example::
+        pyparsing_common.number.runTests('''
+            # any int or real number, returned as the appropriate type
+            100
+            -100
+            +100
+            3.14159
+            6.02e23
+            1e-12
+            ''')
+
+        pyparsing_common.fnumber.runTests('''
+            # any int or real number, returned as float
+            100
+            -100
+            +100
+            3.14159
+            6.02e23
+            1e-12
+            ''')
+
+        pyparsing_common.hex_integer.runTests('''
+            # hex numbers
+            100
+            FF
+            ''')
+
+        pyparsing_common.fraction.runTests('''
+            # fractions
+            1/2
+            -3/4
+            ''')
+
+        pyparsing_common.mixed_integer.runTests('''
+            # mixed fractions
+            1
+            1/2
+            -3/4
+            1-3/4
+            ''')
+
+        import uuid
+        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+        pyparsing_common.uuid.runTests('''
+            # uuid
+            12345678-1234-5678-1234-567812345678
+            ''')
+    prints::
+        # any int or real number, returned as the appropriate type
+        100
+        [100]
+
+        -100
+        [-100]
+
+        +100
+        [100]
+
+        3.14159
+        [3.14159]
+
+        6.02e23
+        [6.02e+23]
+
+        1e-12
+        [1e-12]
+
+        # any int or real number, returned as float
+        100
+        [100.0]
+
+        -100
+        [-100.0]
+
+        +100
+        [100.0]
+
+        3.14159
+        [3.14159]
+
+        6.02e23
+        [6.02e+23]
+
+        1e-12
+        [1e-12]
+
+        # hex numbers
+        100
+        [256]
+
+        FF
+        [255]
+
+        # fractions
+        1/2
+        [0.5]
+
+        -3/4
+        [-0.75]
+
+        # mixed fractions
+        1
+        [1]
+
+        1/2
+        [0.5]
+
+        -3/4
+        [-0.75]
+
+        1-3/4
+        [1.75]
+
+        # uuid
+        12345678-1234-5678-1234-567812345678
+        [UUID('12345678-1234-5678-1234-567812345678')]
+    """
+
+    convertToInteger = tokenMap(int)
+    """
+    Parse action for converting parsed integers to Python int
+    """
+
+    convertToFloat = tokenMap(float)
+    """
+    Parse action for converting parsed numbers to Python float
+    """
+
+    integer = Word(nums).setName("integer").setParseAction(convertToInteger)
+    """expression that parses an unsigned integer, returns an int"""
+
+    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
+    """expression that parses a hexadecimal integer, returns an int"""
+
+    signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
+    """expression that parses an integer with optional leading sign, returns an int"""
+
+    fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
+    """fractional expression of an integer divided by an integer, returns a float"""
+    fraction.addParseAction(lambda t: t[0]/t[-1])
+
+    mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
+    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
+    mixed_integer.addParseAction(sum)
+
+    real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
+    """expression that parses a floating point number and returns a float"""
+
+    sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
+    """expression that parses a floating point number with optional scientific notation and returns a float"""
+
+    # streamlining this expression makes the docs nicer-looking
+    number = (sci_real | real | signed_integer).streamline()
+    """any numeric expression, returns the corresponding Python type"""
+
+    fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
+    """any int or real number, returned as float"""
+    
+    identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
+    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
+    
+    ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
+    "IPv4 address (C{0.0.0.0 - 255.255.255.255})"
+
+    _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
+    _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
+    _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
+    _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
+    _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
+    ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
+    "IPv6 address (long, short, or mixed form)"
+    
+    mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
+    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
+
+    @staticmethod
+    def convertToDate(fmt="%Y-%m-%d"):
+        """
+        Helper to create a parse action for converting parsed date string to Python datetime.date
+
+        Params -
+         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
+
+        Example::
+            date_expr = pyparsing_common.iso8601_date.copy()
+            date_expr.setParseAction(pyparsing_common.convertToDate())
+            print(date_expr.parseString("1999-12-31"))
+        prints::
+            [datetime.date(1999, 12, 31)]
+        """
+        def cvt_fn(s,l,t):
+            try:
+                return datetime.strptime(t[0], fmt).date()
+            except ValueError as ve:
+                raise ParseException(s, l, str(ve))
+        return cvt_fn
+
+    @staticmethod
+    def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
+        """
+        Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
+
+        Params -
+         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
+
+        Example::
+            dt_expr = pyparsing_common.iso8601_datetime.copy()
+            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
+            print(dt_expr.parseString("1999-12-31T23:59:59.999"))
+        prints::
+            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
+        """
+        def cvt_fn(s,l,t):
+            try:
+                return datetime.strptime(t[0], fmt)
+            except ValueError as ve:
+                raise ParseException(s, l, str(ve))
+        return cvt_fn
+
+    iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
+    "ISO8601 date (C{yyyy-mm-dd})"
+
+    iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
+    "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
+
+    uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
+    "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
+
+    _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
+    @staticmethod
+    def stripHTMLTags(s, l, tokens):
+        """
+        Parse action to remove HTML tags from web page HTML source
+
+        Example::
+            # strip HTML links from normal text 
+            text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
+            td,td_end = makeHTMLTags("TD")
+            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
+            
+            print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
+        """
+        return pyparsing_common._html_stripper.transformString(tokens[0])
+
+    _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') 
+                                        + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
+    comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
+    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
+
+    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
+    """Parse action to convert tokens to upper case."""
+
+    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
+    """Parse action to convert tokens to lower case."""
+
+
+if __name__ == "__main__":
+
+    selectToken    = CaselessLiteral("select")
+    fromToken      = CaselessLiteral("from")
+
+    ident          = Word(alphas, alphanums + "_$")
+
+    columnName     = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+    columnNameList = Group(delimitedList(columnName)).setName("columns")
+    columnSpec     = ('*' | columnNameList)
+
+    tableName      = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+    tableNameList  = Group(delimitedList(tableName)).setName("tables")
+    
+    simpleSQL      = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
+
+    # demo runTests method, including embedded comments in test string
+    simpleSQL.runTests("""
+        # '*' as column list and dotted table name
+        select * from SYS.XYZZY
+
+        # caseless match on "SELECT", and casts back to "select"
+        SELECT * from XYZZY, ABC
+
+        # list of column names, and mixed case SELECT keyword
+        Select AA,BB,CC from Sys.dual
+
+        # multiple tables
+        Select A, B, C from Sys.dual, Table2
+
+        # invalid SELECT keyword - should fail
+        Xelect A, B, C from Sys.dual
+
+        # incomplete command - should fail
+        Select
+
+        # invalid column name - should fail
+        Select ^^^ frox Sys.dual
+
+        """)
+
+    pyparsing_common.number.runTests("""
+        100
+        -100
+        +100
+        3.14159
+        6.02e23
+        1e-12
+        """)
+
+    # any int or real number, returned as float
+    pyparsing_common.fnumber.runTests("""
+        100
+        -100
+        +100
+        3.14159
+        6.02e23
+        1e-12
+        """)
+
+    pyparsing_common.hex_integer.runTests("""
+        100
+        FF
+        """)
+
+    import uuid
+    pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+    pyparsing_common.uuid.runTests("""
+        12345678-1234-5678-1234-567812345678
+        """)
diff --git a/lib/python3.7/site-packages/pkg_resources/_vendor/six.py b/lib/python3.7/site-packages/pkg_resources/_vendor/six.py
new file mode 100644
index 0000000000000000000000000000000000000000..190c0239cd7d7af82a6e0cbc8d68053fa2e3dfaf
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/_vendor/six.py
@@ -0,0 +1,868 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    if sys.platform.startswith("java"):
+        # Jython always uses 32 bits.
+        MAXSIZE = int((1 << 31) - 1)
+    else:
+        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+        class X(object):
+
+            def __len__(self):
+                return 1 << 31
+        try:
+            len(X())
+        except OverflowError:
+            # 32-bit
+            MAXSIZE = int((1 << 31) - 1)
+        else:
+            # 64-bit
+            MAXSIZE = int((1 << 63) - 1)
+        del X
+
+
+def _add_doc(func, doc):
+    """Add documentation to a function."""
+    func.__doc__ = doc
+
+
+def _import_module(name):
+    """Import module, returning the module after the last dot."""
+    __import__(name)
+    return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, tp):
+        result = self._resolve()
+        setattr(obj, self.name, result)  # Invokes __set__.
+        try:
+            # This is a bit ugly, but it avoids running this again by
+            # removing this descriptor.
+            delattr(obj.__class__, self.name)
+        except AttributeError:
+            pass
+        return result
+
+
+class MovedModule(_LazyDescr):
+
+    def __init__(self, name, old, new=None):
+        super(MovedModule, self).__init__(name)
+        if PY3:
+            if new is None:
+                new = name
+            self.mod = new
+        else:
+            self.mod = old
+
+    def _resolve(self):
+        return _import_module(self.mod)
+
+    def __getattr__(self, attr):
+        _module = self._resolve()
+        value = getattr(_module, attr)
+        setattr(self, attr, value)
+        return value
+
+
+class _LazyModule(types.ModuleType):
+
+    def __init__(self, name):
+        super(_LazyModule, self).__init__(name)
+        self.__doc__ = self.__class__.__doc__
+
+    def __dir__(self):
+        attrs = ["__doc__", "__name__"]
+        attrs += [attr.name for attr in self._moved_attributes]
+        return attrs
+
+    # Subclasses should override this
+    _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+        super(MovedAttribute, self).__init__(name)
+        if PY3:
+            if new_mod is None:
+                new_mod = name
+            self.mod = new_mod
+            if new_attr is None:
+                if old_attr is None:
+                    new_attr = name
+                else:
+                    new_attr = old_attr
+            self.attr = new_attr
+        else:
+            self.mod = old_mod
+            if old_attr is None:
+                old_attr = name
+            self.attr = old_attr
+
+    def _resolve(self):
+        module = _import_module(self.mod)
+        return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+    """
+    A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python 3.
+    """
+
+    def __init__(self, six_module_name):
+        self.name = six_module_name
+        self.known_modules = {}
+
+    def _add_module(self, mod, *fullnames):
+        for fullname in fullnames:
+            self.known_modules[self.name + "." + fullname] = mod
+
+    def _get_module(self, fullname):
+        return self.known_modules[self.name + "." + fullname]
+
+    def find_module(self, fullname, path=None):
+        if fullname in self.known_modules:
+            return self
+        return None
+
+    def __get_module(self, fullname):
+        try:
+            return self.known_modules[fullname]
+        except KeyError:
+            raise ImportError("This loader does not know module " + fullname)
+
+    def load_module(self, fullname):
+        try:
+            # in case of a reload
+            return sys.modules[fullname]
+        except KeyError:
+            pass
+        mod = self.__get_module(fullname)
+        if isinstance(mod, MovedModule):
+            mod = mod._resolve()
+        else:
+            mod.__loader__ = self
+        sys.modules[fullname] = mod
+        return mod
+
+    def is_package(self, fullname):
+        """
+        Return True if the named module is a package.
+
+        We need this method to get correct spec objects with
+        Python 3.4 (see PEP451)
+        """
+        return hasattr(self.__get_module(fullname), "__path__")
+
+    def get_code(self, fullname):
+        """Return None
+
+        Required if is_package is implemented."""
+        self.__get_module(fullname)  # eventually raises ImportError
+        return None
+    get_source = get_code  # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+    """Lazy loading of moved objects"""
+    __path__ = []  # mark as package
+
+
+_moved_attributes = [
+    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("intern", "__builtin__", "sys"),
+    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+    MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+    MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("UserDict", "UserDict", "collections"),
+    MovedAttribute("UserList", "UserList", "collections"),
+    MovedAttribute("UserString", "UserString", "collections"),
+    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+    MovedModule("builtins", "__builtin__"),
+    MovedModule("configparser", "ConfigParser"),
+    MovedModule("copyreg", "copy_reg"),
+    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+    MovedModule("http_cookies", "Cookie", "http.cookies"),
+    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+    MovedModule("html_parser", "HTMLParser", "html.parser"),
+    MovedModule("http_client", "httplib", "http.client"),
+    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+    MovedModule("cPickle", "cPickle", "pickle"),
+    MovedModule("queue", "Queue"),
+    MovedModule("reprlib", "repr"),
+    MovedModule("socketserver", "SocketServer"),
+    MovedModule("_thread", "thread", "_thread"),
+    MovedModule("tkinter", "Tkinter"),
+    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+    MovedModule("tkinter_colorchooser", "tkColorChooser",
+                "tkinter.colorchooser"),
+    MovedModule("tkinter_commondialog", "tkCommonDialog",
+                "tkinter.commondialog"),
+    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+                "tkinter.simpledialog"),
+    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+    _moved_attributes += [
+        MovedModule("winreg", "_winreg"),
+    ]
+
+for attr in _moved_attributes:
+    setattr(_MovedItems, attr.name, attr)
+    if isinstance(attr, MovedModule):
+        _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("quote", "urllib", "urllib.parse"),
+    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("urlencode", "urllib", "urllib.parse"),
+    MovedAttribute("splitquery", "urllib", "urllib.parse"),
+    MovedAttribute("splittag", "urllib", "urllib.parse"),
+    MovedAttribute("splituser", "urllib", "urllib.parse"),
+    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+    setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+                      "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+    MovedAttribute("URLError", "urllib2", "urllib.error"),
+    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+    setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+                      "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+    MovedAttribute("urlopen", "urllib2", "urllib.request"),
+    MovedAttribute("install_opener", "urllib2", "urllib.request"),
+    MovedAttribute("build_opener", "urllib2", "urllib.request"),
+    MovedAttribute("pathname2url", "urllib", "urllib.request"),
+    MovedAttribute("url2pathname", "urllib", "urllib.request"),
+    MovedAttribute("getproxies", "urllib", "urllib.request"),
+    MovedAttribute("Request", "urllib2", "urllib.request"),
+    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+    MovedAttribute("URLopener", "urllib", "urllib.request"),
+    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+    setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+                      "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+    MovedAttribute("addbase", "urllib", "urllib.response"),
+    MovedAttribute("addclosehook", "urllib", "urllib.response"),
+    MovedAttribute("addinfo", "urllib", "urllib.response"),
+    MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+    setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+                      "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+                      "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+    __path__ = []  # mark as package
+    parse = _importer._get_module("moves.urllib_parse")
+    error = _importer._get_module("moves.urllib_error")
+    request = _importer._get_module("moves.urllib_request")
+    response = _importer._get_module("moves.urllib_response")
+    robotparser = _importer._get_module("moves.urllib_robotparser")
+
+    def __dir__(self):
+        return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+                      "moves.urllib")
+
+
+def add_move(move):
+    """Add an item to six.moves."""
+    setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+    """Remove item from six.moves."""
+    try:
+        delattr(_MovedItems, name)
+    except AttributeError:
+        try:
+            del moves.__dict__[name]
+        except KeyError:
+            raise AttributeError("no such move, %r" % (name,))
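+
+# A minimal illustrative sketch (hypothetical move name):
+#   add_move(MovedAttribute("user_dict", "UserDict", "collections", "UserDict"))
+#   # six.moves.user_dict now resolves to UserDict.UserDict on Python 2
+#   # and collections.UserDict on Python 3
+#   remove_move("user_dict")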
+
+
+if PY3:
+    _meth_func = "__func__"
+    _meth_self = "__self__"
+
+    _func_closure = "__closure__"
+    _func_code = "__code__"
+    _func_defaults = "__defaults__"
+    _func_globals = "__globals__"
+else:
+    _meth_func = "im_func"
+    _meth_self = "im_self"
+
+    _func_closure = "func_closure"
+    _func_code = "func_code"
+    _func_defaults = "func_defaults"
+    _func_globals = "func_globals"
+
+
+try:
+    advance_iterator = next
+except NameError:
+    def advance_iterator(it):
+        return it.next()
+next = advance_iterator
+
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+    def get_unbound_function(unbound):
+        return unbound
+
+    create_bound_method = types.MethodType
+
+    def create_unbound_method(func, cls):
+        return func
+
+    Iterator = object
+else:
+    def get_unbound_function(unbound):
+        return unbound.im_func
+
+    def create_bound_method(func, obj):
+        return types.MethodType(func, obj, obj.__class__)
+
+    def create_unbound_method(func, cls):
+        return types.MethodType(func, None, cls)
+
+    class Iterator(object):
+
+        def next(self):
+            return type(self).__next__(self)
+
+    callable = callable
+_add_doc(get_unbound_function,
+         """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+    def iterkeys(d, **kw):
+        return iter(d.keys(**kw))
+
+    def itervalues(d, **kw):
+        return iter(d.values(**kw))
+
+    def iteritems(d, **kw):
+        return iter(d.items(**kw))
+
+    def iterlists(d, **kw):
+        return iter(d.lists(**kw))
+
+    viewkeys = operator.methodcaller("keys")
+
+    viewvalues = operator.methodcaller("values")
+
+    viewitems = operator.methodcaller("items")
+else:
+    def iterkeys(d, **kw):
+        return d.iterkeys(**kw)
+
+    def itervalues(d, **kw):
+        return d.itervalues(**kw)
+
+    def iteritems(d, **kw):
+        return d.iteritems(**kw)
+
+    def iterlists(d, **kw):
+        return d.iterlists(**kw)
+
+    viewkeys = operator.methodcaller("viewkeys")
+
+    viewvalues = operator.methodcaller("viewvalues")
+
+    viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+         "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+         "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+    def b(s):
+        return s.encode("latin-1")
+
+    def u(s):
+        return s
+    unichr = chr
+    import struct
+    int2byte = struct.Struct(">B").pack
+    del struct
+    byte2int = operator.itemgetter(0)
+    indexbytes = operator.getitem
+    iterbytes = iter
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+    _assertCountEqual = "assertCountEqual"
+    if sys.version_info[1] <= 1:
+        _assertRaisesRegex = "assertRaisesRegexp"
+        _assertRegex = "assertRegexpMatches"
+    else:
+        _assertRaisesRegex = "assertRaisesRegex"
+        _assertRegex = "assertRegex"
+else:
+    def b(s):
+        return s
+    # Workaround for standalone backslash
+
+    def u(s):
+        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+    unichr = unichr
+    int2byte = chr
+
+    def byte2int(bs):
+        return ord(bs[0])
+
+    def indexbytes(buf, i):
+        return ord(buf[i])
+    iterbytes = functools.partial(itertools.imap, ord)
+    import StringIO
+    StringIO = BytesIO = StringIO.StringIO
+    _assertCountEqual = "assertItemsEqual"
+    _assertRaisesRegex = "assertRaisesRegexp"
+    _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+    return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+    return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+    exec_ = getattr(moves.builtins, "exec")
+
+    def reraise(tp, value, tb=None):
+        if value is None:
+            value = tp()
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+else:
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+
+    exec_("""def reraise(tp, value, tb=None):
+    raise tp, value, tb
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+    exec_("""def raise_from(value, from_value):
+    if from_value is None:
+        raise value
+    raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+    exec_("""def raise_from(value, from_value):
+    raise value from from_value
+""")
+else:
+    def raise_from(value, from_value):
+        raise value
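+
+# Illustrative note (not part of six): raise_from(ValueError('bad'), exc)
+# behaves like `raise ValueError('bad') from exc` on Python 3; on Python 2,
+# which has no exception chaining, the cause is simply dropped.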
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+    def print_(*args, **kwargs):
+        """The new-style print function for Python 2.4 and 2.5."""
+        fp = kwargs.pop("file", sys.stdout)
+        if fp is None:
+            return
+
+        def write(data):
+            if not isinstance(data, basestring):
+                data = str(data)
+            # If the file has an encoding, encode unicode with it.
+            if (isinstance(fp, file) and
+                    isinstance(data, unicode) and
+                    fp.encoding is not None):
+                errors = getattr(fp, "errors", None)
+                if errors is None:
+                    errors = "strict"
+                data = data.encode(fp.encoding, errors)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+if sys.version_info[:2] < (3, 3):
+    _print = print_
+
+    def print_(*args, **kwargs):
+        fp = kwargs.get("file", sys.stdout)
+        flush = kwargs.pop("flush", False)
+        _print(*args, **kwargs)
+        if flush and fp is not None:
+            fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+              updated=functools.WRAPPER_UPDATES):
+        def wrapper(f):
+            f = functools.wraps(wrapped, assigned, updated)(f)
+            f.__wrapped__ = wrapped
+            return f
+        return wrapper
+else:
+    wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(meta):
+
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+    return type.__new__(metaclass, 'temporary_class', (), {})
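+
+# Illustrative example (not part of six): the temporary metaclass above means
+# the class body is evaluated only once, under the real metaclass:
+#
+#     class Meta(type):
+#         pass
+#
+#     class Base(with_metaclass(Meta, object)):
+#         pass
+#
+#     assert type(Base) is Meta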
+
+
+def add_metaclass(metaclass):
+    """Class decorator for creating a class with a metaclass."""
+    def wrapper(cls):
+        orig_vars = cls.__dict__.copy()
+        slots = orig_vars.get('__slots__')
+        if slots is not None:
+            if isinstance(slots, str):
+                slots = [slots]
+            for slots_var in slots:
+                orig_vars.pop(slots_var)
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
+        return metaclass(cls.__name__, cls.__bases__, orig_vars)
+    return wrapper
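+
+# Illustrative example (not part of six): the decorator form is equivalent to
+# the with_metaclass() helper above:
+#
+#     @add_metaclass(Meta)
+#     class Base(object):
+#         pass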
+
+
+def python_2_unicode_compatible(klass):
+    """
+    A decorator that defines __unicode__ and __str__ methods under Python 2.
+    Under Python 3 it does nothing.
+
+    To support Python 2 and 3 with a single code base, define a __str__ method
+    returning text and apply this decorator to the class.
+    """
+    if PY2:
+        if '__str__' not in klass.__dict__:
+            raise ValueError("@python_2_unicode_compatible cannot be applied "
+                             "to %s because it doesn't define __str__()." %
+                             klass.__name__)
+        klass.__unicode__ = klass.__str__
+        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+    return klass
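+
+# Illustrative example (not part of six): define __str__ returning text and
+# let the decorator derive __unicode__/__str__ for Python 2:
+#
+#     @python_2_unicode_compatible
+#     class User(object):
+#         def __str__(self):
+#             return u'user'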
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = []  # required for PEP 302 and PEP 451
+__package__ = __name__  # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+    for i, importer in enumerate(sys.meta_path):
+        # Here's some real nastiness: Another "instance" of the six module might
+        # be floating around. Therefore, we can't use isinstance() to check for
+        # the six meta path importer, since the other six instance will have
+        # inserted an importer with different class.
+        if (type(importer).__name__ == "_SixMetaPathImporter" and
+                importer.name == __name__):
+            del sys.meta_path[i]
+            break
+    del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/lib/python3.7/site-packages/pkg_resources/extern/__init__.py b/lib/python3.7/site-packages/pkg_resources/extern/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1eb9e998f8e117c82c176bc83ab1d350c729cd7
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/extern/__init__.py
@@ -0,0 +1,73 @@
+import sys
+
+
+class VendorImporter:
+    """
+    A PEP 302 meta path importer for finding optionally-vendored
+    or otherwise naturally-installed packages from root_name.
+    """
+
+    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
+        self.root_name = root_name
+        self.vendored_names = set(vendored_names)
+        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
+
+    @property
+    def search_path(self):
+        """
+        Search first the vendor package then as a natural package.
+        """
+        yield self.vendor_pkg + '.'
+        yield ''
+
+    def find_module(self, fullname, path=None):
+        """
+        Return self when fullname starts with root_name and the
+        target module is one vendored through this importer.
+        """
+        root, base, target = fullname.partition(self.root_name + '.')
+        if root:
+            return
+        if not any(map(target.startswith, self.vendored_names)):
+            return
+        return self
+
+    def load_module(self, fullname):
+        """
+        Iterate over the search path to locate and load fullname.
+        """
+        root, base, target = fullname.partition(self.root_name + '.')
+        for prefix in self.search_path:
+            try:
+                extant = prefix + target
+                __import__(extant)
+                mod = sys.modules[extant]
+                sys.modules[fullname] = mod
+                # mysterious hack:
+                # Remove the reference to the extant package/module
+                # on later Python versions to cause relative imports
+                # in the vendor package to resolve the same modules
+                # as those going through this importer.
+                if prefix and sys.version_info > (3, 3):
+                    del sys.modules[extant]
+                return mod
+            except ImportError:
+                pass
+        else:
+            raise ImportError(
+                "The '{target}' package is required; "
+                "normally this is bundled with this package so if you get "
+                "this warning, consult the packager of your "
+                "distribution.".format(**locals())
+            )
+
+    def install(self):
+        """
+        Install this importer into sys.meta_path if not already present.
+        """
+        if self not in sys.meta_path:
+            sys.meta_path.append(self)
+
+
+names = 'packaging', 'pyparsing', 'six', 'appdirs'
+VendorImporter(__name__, names).install()
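+
+# Illustrative note (not part of the original module): once install() has
+# run, `from pkg_resources.extern import six` first resolves against the
+# bundled copy (pkg_resources._vendor.six) and only then falls back to a
+# top-level six on sys.path, per search_path above.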
diff --git a/lib/python3.7/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b172c8144e25f73f03270576e1c65c9f74979e07
Binary files /dev/null and b/lib/python3.7/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/pkg_resources/py31compat.py b/lib/python3.7/site-packages/pkg_resources/py31compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..a381c424f9eaacb4126d4b8a474052551e34ccfb
--- /dev/null
+++ b/lib/python3.7/site-packages/pkg_resources/py31compat.py
@@ -0,0 +1,23 @@
+import os
+import errno
+import sys
+
+from .extern import six
+
+
+def _makedirs_31(path, exist_ok=False):
+    try:
+        os.makedirs(path)
+    except OSError as exc:
+        if not exist_ok or exc.errno != errno.EEXIST:
+            raise
+
+
+# rely on compatibility behavior until mode considerations
+#  and exist_ok considerations are disentangled.
+# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663
+needs_makedirs = (
+    six.PY2 or
+    (3, 4) <= sys.version_info < (3, 4, 1)
+)
+makedirs = _makedirs_31 if needs_makedirs else os.makedirs
diff --git a/lib/python3.7/site-packages/playhouse/__init__.py b/lib/python3.7/site-packages/playhouse/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2c4182d56429cf3196e930c59ffb8edcdb280629
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/apsw_ext.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/apsw_ext.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e1b1719b8ff80021a8f876251c230cbd3b599847
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/apsw_ext.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/dataset.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/dataset.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f69ff469fe7d4309801554d5d6cf2c3402dc88f7
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/dataset.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/db_url.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/db_url.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..341a4bd6d509c943e2e52422f68d3b7cf8af6998
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/db_url.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/fields.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/fields.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..482fc21dc46746e4485f196d09816592f37896cf
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/fields.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/flask_utils.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/flask_utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0fec2c22633c369f3e70470b7936b5e09652e533
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/flask_utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/hybrid.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/hybrid.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6cb2f0ceb773686afdd6e2f1bcf034f4ab987909
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/hybrid.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/kv.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/kv.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45d51bbb4c3446274c87e163897af11e796c3d81
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/kv.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/migrate.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/migrate.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d2f82c17d51316c1c6c8e08f56b4193e7736ae7a
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/migrate.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/mysql_ext.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/mysql_ext.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d4d065d5492661217d01102ad3354524b786b539
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/mysql_ext.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/pool.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/pool.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a5061450ecab6db15bed27506f431f2c2d9a602
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/pool.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/postgres_ext.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/postgres_ext.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..63ff4b6e0012fa4b28af8e8825e0ed4a9262c79c
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/postgres_ext.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/reflection.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/reflection.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37e7b2913f49eba4ae392af1f1341e57be0d12fd
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/reflection.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/shortcuts.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/shortcuts.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d3a0cacea448e1fa6551eaad7595b4037e90d2c
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/shortcuts.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/signals.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/signals.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0634f8ad72624b13a25e76902c16bb5240957cd0
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/signals.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/sqlcipher_ext.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/sqlcipher_ext.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c9640d8824defd2b54af1c8be3b322e64eb587c9
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/sqlcipher_ext.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/sqlite_ext.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/sqlite_ext.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cab8cfb611ba0d576d2eb66a0fd26b9e3dfe7600
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/sqlite_ext.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/sqlite_udf.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/sqlite_udf.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ea0b31343c49260e2c12094bcc90e4d2e2ed434
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/sqlite_udf.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/sqliteq.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/sqliteq.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb83bfce51f3242985c1f163addfecbe8a37210d
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/sqliteq.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/__pycache__/test_utils.cpython-37.pyc b/lib/python3.7/site-packages/playhouse/__pycache__/test_utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ad66e6027b11ac0a0ae66907bc00a60bd82ba9bc
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/__pycache__/test_utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/playhouse/_sqlite_ext.cpython-37m-x86_64-linux-gnu.so b/lib/python3.7/site-packages/playhouse/_sqlite_ext.cpython-37m-x86_64-linux-gnu.so
new file mode 100755
index 0000000000000000000000000000000000000000..45367944b152d5255f82760b513c264fb4026626
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/_sqlite_ext.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/python3.7/site-packages/playhouse/_sqlite_udf.cpython-37m-x86_64-linux-gnu.so b/lib/python3.7/site-packages/playhouse/_sqlite_udf.cpython-37m-x86_64-linux-gnu.so
new file mode 100755
index 0000000000000000000000000000000000000000..d264ebacc375fddc4499571c4d3f0d0531e950d6
Binary files /dev/null and b/lib/python3.7/site-packages/playhouse/_sqlite_udf.cpython-37m-x86_64-linux-gnu.so differ
diff --git a/lib/python3.7/site-packages/playhouse/apsw_ext.py b/lib/python3.7/site-packages/playhouse/apsw_ext.py
new file mode 100644
index 0000000000000000000000000000000000000000..0aa35939b307b96f4ae3918cef4cc86bbf0f7b10
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/apsw_ext.py
@@ -0,0 +1,136 @@
+"""
+Peewee integration with APSW, "another python sqlite wrapper".
+
+Project page: https://rogerbinns.github.io/apsw/
+
+APSW is a really neat library that provides a thin wrapper on top of SQLite's
+C interface.
+
+Here are just a few reasons to use APSW, taken from the documentation:
+
+* APSW gives all functionality of SQLite, including virtual tables, virtual
+  file system, blob i/o, backups and file control.
+* Connections can be shared across threads without any additional locking.
+* Transactions are managed explicitly by your code.
+* APSW can handle nested transactions.
+* Unicode is handled correctly.
+* APSW is faster.
+"""
+import apsw
+from peewee import *
+from peewee import __exception_wrapper__
+from peewee import BooleanField as _BooleanField
+from peewee import DateField as _DateField
+from peewee import DateTimeField as _DateTimeField
+from peewee import DecimalField as _DecimalField
+from peewee import TimeField as _TimeField
+from peewee import logger
+
+from playhouse.sqlite_ext import SqliteExtDatabase
+
+
+class APSWDatabase(SqliteExtDatabase):
+    server_version = tuple(int(i) for i in apsw.sqlitelibversion().split('.'))
+
+    def __init__(self, database, **kwargs):
+        self._modules = {}
+        super(APSWDatabase, self).__init__(database, **kwargs)
+
+    def register_module(self, mod_name, mod_inst):
+        self._modules[mod_name] = mod_inst
+        if not self.is_closed():
+            self.connection().createmodule(mod_name, mod_inst)
+
+    def unregister_module(self, mod_name):
+        del self._modules[mod_name]
+
+    def _connect(self):
+        conn = apsw.Connection(self.database, **self.connect_params)
+        if self._timeout is not None:
+            conn.setbusytimeout(self._timeout * 1000)
+        try:
+            self._add_conn_hooks(conn)
+        except:
+            conn.close()
+            raise
+        return conn
+
+    def _add_conn_hooks(self, conn):
+        super(APSWDatabase, self)._add_conn_hooks(conn)
+        self._load_modules(conn)  # APSW-only.
+
+    def _load_modules(self, conn):
+        for mod_name, mod_inst in self._modules.items():
+            conn.createmodule(mod_name, mod_inst)
+        return conn
+
+    def _load_aggregates(self, conn):
+        for name, (klass, num_params) in self._aggregates.items():
+            # Bind klass as a default argument; a plain closure would see
+            # only the final loop value once the loop has finished.
+            def make_aggregate(klass=klass):
+                return (klass(), klass.step, klass.finalize)
+            conn.createaggregatefunction(name, make_aggregate)
+
+    def _load_collations(self, conn):
+        for name, fn in self._collations.items():
+            conn.createcollation(name, fn)
+
+    def _load_functions(self, conn):
+        for name, (fn, num_params) in self._functions.items():
+            conn.createscalarfunction(name, fn, num_params)
+
+    def _load_extensions(self, conn):
+        conn.enableloadextension(True)
+        for extension in self._extensions:
+            conn.loadextension(extension)
+
+    def load_extension(self, extension):
+        self._extensions.add(extension)
+        if not self.is_closed():
+            conn = self.connection()
+            conn.enableloadextension(True)
+            conn.loadextension(extension)
+
+    def last_insert_id(self, cursor, query_type=None):
+        return cursor.getconnection().last_insert_rowid()
+
+    def rows_affected(self, cursor):
+        return cursor.getconnection().changes()
+
+    def begin(self, lock_type='deferred'):
+        self.cursor().execute('begin %s;' % lock_type)
+
+    def commit(self):
+        self.cursor().execute('commit;')
+
+    def rollback(self):
+        self.cursor().execute('rollback;')
+
+    def execute_sql(self, sql, params=None, commit=True):
+        logger.debug((sql, params))
+        with __exception_wrapper__:
+            cursor = self.cursor()
+            cursor.execute(sql, params or ())
+        return cursor
+
+
+def nh(s, v):
+    # Null-handling helper shared by the date/time/decimal fields below:
+    # None passes through untouched, anything else is stored as str.
+    if v is not None:
+        return str(v)
+
+class BooleanField(_BooleanField):
+    def db_value(self, v):
+        v = super(BooleanField, self).db_value(v)
+        if v is not None:
+            return v and 1 or 0
+
+class DateField(_DateField):
+    db_value = nh
+
+class TimeField(_TimeField):
+    db_value = nh
+
+class DateTimeField(_DateTimeField):
+    db_value = nh
+
+class DecimalField(_DecimalField):
+    db_value = nh
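+
+# Illustrative example (not part of the original module; 'app.db' is a
+# placeholder path and the timeout is in seconds):
+#
+#     from peewee import Model, TextField
+#     from playhouse.apsw_ext import APSWDatabase
+#
+#     db = APSWDatabase('app.db', timeout=10)
+#
+#     class Event(Model):
+#         name = TextField()
+#
+#         class Meta:
+#             database = db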
diff --git a/lib/python3.7/site-packages/playhouse/dataset.py b/lib/python3.7/site-packages/playhouse/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..27f8189bb948458497858fbd7b376f2bc25028a0
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/dataset.py
@@ -0,0 +1,415 @@
+import csv
+import datetime
+from decimal import Decimal
+import json
+import operator
+try:
+    from urlparse import urlparse
+except ImportError:
+    from urllib.parse import urlparse
+import sys
+
+from peewee import *
+from playhouse.db_url import connect
+from playhouse.migrate import migrate
+from playhouse.migrate import SchemaMigrator
+from playhouse.reflection import Introspector
+
+if sys.version_info[0] == 3:
+    basestring = str
+    from functools import reduce
+    def open_file(f, mode):
+        return open(f, mode, encoding='utf8')
+else:
+    open_file = open
+
+
+class DataSet(object):
+    def __init__(self, url, bare_fields=False):
+        if isinstance(url, Database):
+            self._url = None
+            self._database = url
+            self._database_path = self._database.database
+        else:
+            self._url = url
+            parse_result = urlparse(url)
+            self._database_path = parse_result.path[1:]
+
+            # Connect to the database.
+            self._database = connect(url)
+
+        self._database.connect()
+
+        # Introspect the database and generate models.
+        self._introspector = Introspector.from_database(self._database)
+        self._models = self._introspector.generate_models(
+            skip_invalid=True,
+            literal_column_names=True,
+            bare_fields=bare_fields)
+        self._migrator = SchemaMigrator.from_database(self._database)
+
+        class BaseModel(Model):
+            class Meta:
+                database = self._database
+        self._base_model = BaseModel
+        self._export_formats = self.get_export_formats()
+        self._import_formats = self.get_import_formats()
+
+    def __repr__(self):
+        return '<DataSet: %s>' % self._database_path
+
+    def get_export_formats(self):
+        return {
+            'csv': CSVExporter,
+            'json': JSONExporter}
+
+    def get_import_formats(self):
+        return {
+            'csv': CSVImporter,
+            'json': JSONImporter}
+
+    def __getitem__(self, table):
+        if table not in self._models and table in self.tables:
+            self.update_cache(table)
+        return Table(self, table, self._models.get(table))
+
+    @property
+    def tables(self):
+        return self._database.get_tables()
+
+    def __contains__(self, table):
+        return table in self.tables
+
+    def connect(self):
+        self._database.connect()
+
+    def close(self):
+        self._database.close()
+
+    def update_cache(self, table=None):
+        if table:
+            dependencies = [table]
+            if table in self._models:
+                model_class = self._models[table]
+                dependencies.extend([
+                    related._meta.table_name for _, related, _ in
+                    model_class._meta.model_graph()])
+            else:
+                dependencies.extend(self.get_table_dependencies(table))
+        else:
+            dependencies = None  # Update all tables.
+            self._models = {}
+        updated = self._introspector.generate_models(
+            skip_invalid=True,
+            table_names=dependencies,
+            literal_column_names=True)
+        self._models.update(updated)
+
+    def get_table_dependencies(self, table):
+        stack = [table]
+        accum = []
+        seen = set(stack)
+        while stack:
+            table = stack.pop()
+            for fk_meta in self._database.get_foreign_keys(table):
+                dest = fk_meta.dest_table
+                if dest not in seen:
+                    # Mark visited tables so circular foreign keys terminate.
+                    seen.add(dest)
+                    stack.append(dest)
+                    accum.append(dest)
+        return accum
+
+    def __enter__(self):
+        self.connect()
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if not self._database.is_closed():
+            self.close()
+
+    def query(self, sql, params=None, commit=True):
+        return self._database.execute_sql(sql, params, commit)
+
+    def transaction(self):
+        if self._database.transaction_depth() == 0:
+            return self._database.transaction()
+        else:
+            return self._database.savepoint()
+
+    def _check_arguments(self, filename, file_obj, format, format_dict):
+        if filename and file_obj:
+            raise ValueError('file is over-specified. Please use either '
+                             'filename or file_obj, but not both.')
+        if not filename and not file_obj:
+            raise ValueError('A filename or file-like object must be '
+                             'specified.')
+        if format not in format_dict:
+            valid_formats = ', '.join(sorted(format_dict.keys()))
+            raise ValueError('Unsupported format "%s". Use one of %s.' % (
+                format, valid_formats))
+
+    def freeze(self, query, format='csv', filename=None, file_obj=None,
+               **kwargs):
+        self._check_arguments(filename, file_obj, format, self._export_formats)
+        if filename:
+            file_obj = open_file(filename, 'w')
+
+        exporter = self._export_formats[format](query)
+        exporter.export(file_obj, **kwargs)
+
+        if filename:
+            file_obj.close()
+
+    def thaw(self, table, format='csv', filename=None, file_obj=None,
+             strict=False, **kwargs):
+        self._check_arguments(filename, file_obj, format, self._import_formats)
+        if filename:
+            file_obj = open_file(filename, 'r')
+
+        importer = self._import_formats[format](self[table], strict)
+        count = importer.load(file_obj, **kwargs)
+
+        if filename:
+            file_obj.close()
+
+        return count
+
+
+class Table(object):
+    def __init__(self, dataset, name, model_class):
+        self.dataset = dataset
+        self.name = name
+        if model_class is None:
+            model_class = self._create_model()
+            model_class.create_table()
+            self.dataset._models[name] = model_class
+
+    @property
+    def model_class(self):
+        return self.dataset._models[self.name]
+
+    def __repr__(self):
+        return '<Table: %s>' % self.name
+
+    def __len__(self):
+        return self.find().count()
+
+    def __iter__(self):
+        return iter(self.find().iterator())
+
+    def _create_model(self):
+        class Meta:
+            table_name = self.name
+        return type(
+            str(self.name),
+            (self.dataset._base_model,),
+            {'Meta': Meta})
+
+    def create_index(self, columns, unique=False):
+        self.dataset._database.create_index(
+            self.model_class,
+            columns,
+            unique=unique)
+
+    def _guess_field_type(self, value):
+        if isinstance(value, basestring):
+            return TextField
+        if isinstance(value, (datetime.date, datetime.datetime)):
+            return DateTimeField
+        elif value is True or value is False:
+            return BooleanField
+        elif isinstance(value, int):
+            return IntegerField
+        elif isinstance(value, float):
+            return FloatField
+        elif isinstance(value, Decimal):
+            return DecimalField
+        return TextField
+
+    @property
+    def columns(self):
+        return [f.name for f in self.model_class._meta.sorted_fields]
+
+    def _migrate_new_columns(self, data):
+        new_keys = set(data) - set(self.model_class._meta.fields)
+        if new_keys:
+            operations = []
+            for key in new_keys:
+                field_class = self._guess_field_type(data[key])
+                field = field_class(null=True)
+                operations.append(
+                    self.dataset._migrator.add_column(self.name, key, field))
+                field.bind(self.model_class, key)
+
+            migrate(*operations)
+
+            self.dataset.update_cache(self.name)
+
+    def insert(self, **data):
+        self._migrate_new_columns(data)
+        return self.model_class.insert(**data).execute()
+
+    def _apply_where(self, query, filters, conjunction=None):
+        conjunction = conjunction or operator.and_
+        if filters:
+            expressions = [
+                (self.model_class._meta.fields[column] == value)
+                for column, value in filters.items()]
+            query = query.where(reduce(conjunction, expressions))
+        return query
+
+    def update(self, columns=None, conjunction=None, **data):
+        self._migrate_new_columns(data)
+        filters = {}
+        if columns:
+            for column in columns:
+                filters[column] = data.pop(column)
+
+        return self._apply_where(
+            self.model_class.update(**data),
+            filters,
+            conjunction).execute()
+
+    def _query(self, **query):
+        return self._apply_where(self.model_class.select(), query)
+
+    def find(self, **query):
+        return self._query(**query).dicts()
+
+    def find_one(self, **query):
+        try:
+            return self.find(**query).get()
+        except self.model_class.DoesNotExist:
+            return None
+
+    def all(self):
+        return self.find()
+
+    def delete(self, **query):
+        return self._apply_where(self.model_class.delete(), query).execute()
+
+    def freeze(self, *args, **kwargs):
+        return self.dataset.freeze(self.all(), *args, **kwargs)
+
+    def thaw(self, *args, **kwargs):
+        return self.dataset.thaw(self.name, *args, **kwargs)
+
+
+class Exporter(object):
+    def __init__(self, query):
+        self.query = query
+
+    def export(self, file_obj):
+        raise NotImplementedError
+
+
+class JSONExporter(Exporter):
+    def __init__(self, query, iso8601_datetimes=False):
+        super(JSONExporter, self).__init__(query)
+        self.iso8601_datetimes = iso8601_datetimes
+
+    def _make_default(self):
+        datetime_types = (datetime.datetime, datetime.date, datetime.time)
+
+        if self.iso8601_datetimes:
+            def default(o):
+                if isinstance(o, datetime_types):
+                    return o.isoformat()
+                elif isinstance(o, Decimal):
+                    return str(o)
+                raise TypeError('Unable to serialize %r as JSON' % o)
+        else:
+            def default(o):
+                if isinstance(o, datetime_types + (Decimal,)):
+                    return str(o)
+                raise TypeError('Unable to serialize %r as JSON' % o)
+        return default
+
+    def export(self, file_obj, **kwargs):
+        json.dump(
+            list(self.query),
+            file_obj,
+            default=self._make_default(),
+            **kwargs)
+
+
+class CSVExporter(Exporter):
+    def export(self, file_obj, header=True, **kwargs):
+        writer = csv.writer(file_obj, **kwargs)
+        tuples = self.query.tuples().execute()
+        tuples.initialize()
+        if header and getattr(tuples, 'columns', None):
+            writer.writerow([column for column in tuples.columns])
+        for row in tuples:
+            writer.writerow(row)
+
+
+class Importer(object):
+    def __init__(self, table, strict=False):
+        self.table = table
+        self.strict = strict
+
+        model = self.table.model_class
+        self.columns = model._meta.columns
+        self.columns.update(model._meta.fields)
+
+    def load(self, file_obj):
+        raise NotImplementedError
+
+
+class JSONImporter(Importer):
+    def load(self, file_obj, **kwargs):
+        data = json.load(file_obj, **kwargs)
+        count = 0
+
+        for row in data:
+            if self.strict:
+                obj = {}
+                for key in row:
+                    field = self.columns.get(key)
+                    if field is not None:
+                        obj[field.name] = field.python_value(row[key])
+            else:
+                obj = row
+
+            if obj:
+                self.table.insert(**obj)
+                count += 1
+
+        return count
+
+
+class CSVImporter(Importer):
+    def load(self, file_obj, header=True, **kwargs):
+        count = 0
+        reader = csv.reader(file_obj, **kwargs)
+        if header:
+            try:
+                header_keys = next(reader)
+            except StopIteration:
+                return count
+
+            if self.strict:
+                header_fields = []
+                for idx, key in enumerate(header_keys):
+                    if key in self.columns:
+                        header_fields.append((idx, self.columns[key]))
+            else:
+                header_fields = list(enumerate(header_keys))
+        else:
+            # No header row: map column positions onto the model's fields in
+            # declared order (field objects in strict mode, names otherwise).
+            fields = self.table.model_class._meta.sorted_fields
+            if self.strict:
+                header_fields = list(enumerate(fields))
+            else:
+                header_fields = list(enumerate([f.name for f in fields]))
+
+        if not header_fields:
+            return count
+
+        for row in reader:
+            obj = {}
+            for idx, field in header_fields:
+                if self.strict:
+                    obj[field.name] = field.python_value(row[idx])
+                else:
+                    obj[field] = row[idx]
+
+            self.table.insert(**obj)
+            count += 1
+
+        return count
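+
+# Illustrative example (not part of the original module):
+#
+#     db = DataSet('sqlite:///:memory:')
+#     users = db['users']                  # table is created on first access
+#     users.insert(name='Huey', age=3)     # new columns are migrated in
+#     users.find_one(name='Huey')          # -> {'id': 1, 'name': 'Huey', 'age': 3}
+#     db.freeze(users.all(), format='json', filename='users.json')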
diff --git a/lib/python3.7/site-packages/playhouse/db_url.py b/lib/python3.7/site-packages/playhouse/db_url.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcc4ab87a56d64db6fb89cfb86c6810ef660b046
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/db_url.py
@@ -0,0 +1,124 @@
+try:
+    from urlparse import parse_qsl, unquote, urlparse
+except ImportError:
+    from urllib.parse import parse_qsl, unquote, urlparse
+
+from peewee import *
+from playhouse.pool import PooledMySQLDatabase
+from playhouse.pool import PooledPostgresqlDatabase
+from playhouse.pool import PooledSqliteDatabase
+from playhouse.pool import PooledSqliteExtDatabase
+from playhouse.sqlite_ext import SqliteExtDatabase
+
+
+schemes = {
+    'mysql': MySQLDatabase,
+    'mysql+pool': PooledMySQLDatabase,
+    'postgres': PostgresqlDatabase,
+    'postgresql': PostgresqlDatabase,
+    'postgres+pool': PooledPostgresqlDatabase,
+    'postgresql+pool': PooledPostgresqlDatabase,
+    'sqlite': SqliteDatabase,
+    'sqliteext': SqliteExtDatabase,
+    'sqlite+pool': PooledSqliteDatabase,
+    'sqliteext+pool': PooledSqliteExtDatabase,
+}
+
+def register_database(db_class, *names):
+    global schemes
+    for name in names:
+        schemes[name] = db_class
+
+def parseresult_to_dict(parsed, unquote_password=False):
+
+    # urlparse on Python 2.6 leaves parsed.query empty for these schemes; the
+    # query string stays appended to the path, complete with its '?'.
+    path_parts = parsed.path[1:].split('?')
+    try:
+        query = path_parts[1]
+    except IndexError:
+        query = parsed.query
+
+    connect_kwargs = {'database': path_parts[0]}
+    if parsed.username:
+        connect_kwargs['user'] = parsed.username
+    if parsed.password:
+        connect_kwargs['password'] = parsed.password
+        if unquote_password:
+            connect_kwargs['password'] = unquote(connect_kwargs['password'])
+    if parsed.hostname:
+        connect_kwargs['host'] = parsed.hostname
+    if parsed.port:
+        connect_kwargs['port'] = parsed.port
+
+    # Adjust parameters for MySQL.
+    if parsed.scheme == 'mysql' and 'password' in connect_kwargs:
+        connect_kwargs['passwd'] = connect_kwargs.pop('password')
+    elif 'sqlite' in parsed.scheme and not connect_kwargs['database']:
+        connect_kwargs['database'] = ':memory:'
+
+    # Get additional connection args from the query string
+    qs_args = parse_qsl(query, keep_blank_values=True)
+    for key, value in qs_args:
+        if value.lower() == 'false':
+            value = False
+        elif value.lower() == 'true':
+            value = True
+        elif value.isdigit():
+            value = int(value)
+        elif '.' in value and all(p.isdigit() for p in value.split('.', 1)):
+            try:
+                value = float(value)
+            except ValueError:
+                pass
+        elif value.lower() in ('null', 'none'):
+            value = None
+
+        connect_kwargs[key] = value
+
+    return connect_kwargs
+
+def parse(url, unquote_password=False):
+    parsed = urlparse(url)
+    return parseresult_to_dict(parsed, unquote_password)
+
+def connect(url, unquote_password=False, **connect_params):
+    parsed = urlparse(url)
+    connect_kwargs = parseresult_to_dict(parsed, unquote_password)
+    connect_kwargs.update(connect_params)
+    database_class = schemes.get(parsed.scheme)
+
+    if database_class is None:
+        if parsed.scheme in schemes:
+            raise RuntimeError('Attempted to use "%s" but a required library '
+                               'could not be imported.' % parsed.scheme)
+        else:
+            raise RuntimeError('Unrecognized or unsupported scheme: "%s".' %
+                               parsed.scheme)
+
+    return database_class(**connect_kwargs)
+
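+# Illustrative examples (not part of the original module; hosts and
+# credentials are placeholders):
+#
+#     connect('sqlite:///:memory:')
+#     connect('postgresql://user:secret@localhost:5432/app?max_connections=20')
+#
+# Query-string values are coerced by parseresult_to_dict(), so
+# max_connections above arrives as the integer 20.
+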
+# Conditionally register additional databases.
+try:
+    from playhouse.pool import PooledPostgresqlExtDatabase
+except ImportError:
+    pass
+else:
+    register_database(
+        PooledPostgresqlExtDatabase,
+        'postgresext+pool',
+        'postgresqlext+pool')
+
+try:
+    from playhouse.apsw_ext import APSWDatabase
+except ImportError:
+    pass
+else:
+    register_database(APSWDatabase, 'apsw')
+
+try:
+    from playhouse.postgres_ext import PostgresqlExtDatabase
+except ImportError:
+    pass
+else:
+    register_database(PostgresqlExtDatabase, 'postgresext', 'postgresqlext')
diff --git a/lib/python3.7/site-packages/playhouse/fields.py b/lib/python3.7/site-packages/playhouse/fields.py
new file mode 100644
index 0000000000000000000000000000000000000000..fce1a3d6d1f5f20be2a07b1f9595b13f3b2e4720
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/fields.py
@@ -0,0 +1,64 @@
+try:
+    import bz2
+except ImportError:
+    bz2 = None
+try:
+    import zlib
+except ImportError:
+    zlib = None
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+import sys
+
+from peewee import BlobField
+from peewee import buffer_type
+
+
+PY2 = sys.version_info[0] == 2
+
+
+class CompressedField(BlobField):
+    ZLIB = 'zlib'
+    BZ2 = 'bz2'
+    algorithm_to_import = {
+        ZLIB: zlib,
+        BZ2: bz2,
+    }
+
+    def __init__(self, compression_level=6, algorithm=ZLIB, *args,
+                 **kwargs):
+        self.compression_level = compression_level
+        if algorithm not in self.algorithm_to_import:
+            raise ValueError('Unrecognized algorithm %s' % algorithm)
+        compress_module = self.algorithm_to_import[algorithm]
+        if compress_module is None:
+            raise ValueError('Missing library required for %s.' % algorithm)
+
+        self.algorithm = algorithm
+        self.compress = compress_module.compress
+        self.decompress = compress_module.decompress
+        super(CompressedField, self).__init__(*args, **kwargs)
+
+    def python_value(self, value):
+        if value is not None:
+            return self.decompress(value)
+
+    def db_value(self, value):
+        if value is not None:
+            return self._constructor(
+                self.compress(value, self.compression_level))
+
+
+class PickleField(BlobField):
+    def python_value(self, value):
+        if value is not None:
+            if isinstance(value, buffer_type):
+                value = bytes(value)
+            return pickle.loads(value)
+
+    def db_value(self, value):
+        if value is not None:
+            pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
+            return self._constructor(pickled)
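+
+# Illustrative example (not part of the original module; `db` is assumed to
+# be a configured peewee Database):
+#
+#     from peewee import CharField, Model
+#
+#     class Cache(Model):
+#         key = CharField(unique=True)
+#         data = PickleField()        # any picklable object
+#         blob = CompressedField()    # zlib-compressed bytes
+#
+#         class Meta:
+#             database = db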
diff --git a/lib/python3.7/site-packages/playhouse/flask_utils.py b/lib/python3.7/site-packages/playhouse/flask_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..76a2a62f426e9baf475d67a21ebc05e1b0ee7931
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/flask_utils.py
@@ -0,0 +1,185 @@
+import math
+import sys
+
+from flask import abort
+from flask import render_template
+from flask import request
+from peewee import Database
+from peewee import DoesNotExist
+from peewee import Model
+from peewee import Proxy
+from peewee import SelectQuery
+from playhouse.db_url import connect as db_url_connect
+
+
+class PaginatedQuery(object):
+    def __init__(self, query_or_model, paginate_by, page_var='page', page=None,
+                 check_bounds=False):
+        self.paginate_by = paginate_by
+        self.page_var = page_var
+        self.page = page or None
+        self.check_bounds = check_bounds
+
+        if isinstance(query_or_model, SelectQuery):
+            self.query = query_or_model
+            self.model = self.query.model
+        else:
+            self.model = query_or_model
+            self.query = self.model.select()
+
+    def get_page(self):
+        if self.page is not None:
+            return self.page
+
+        curr_page = request.args.get(self.page_var)
+        if curr_page and curr_page.isdigit():
+            return max(1, int(curr_page))
+        return 1
+
+    def get_page_count(self):
+        if not hasattr(self, '_page_count'):
+            self._page_count = int(math.ceil(
+                float(self.query.count()) / self.paginate_by))
+        return self._page_count
+
+    def get_object_list(self):
+        if self.check_bounds and self.get_page() > self.get_page_count():
+            abort(404)
+        return self.query.paginate(self.get_page(), self.paginate_by)
+
+
+def get_object_or_404(query_or_model, *query):
+    if not isinstance(query_or_model, SelectQuery):
+        query_or_model = query_or_model.select()
+    try:
+        return query_or_model.where(*query).get()
+    except DoesNotExist:
+        abort(404)
+
+def object_list(template_name, query, context_variable='object_list',
+                paginate_by=20, page_var='page', page=None, check_bounds=True,
+                **kwargs):
+    paginated_query = PaginatedQuery(
+        query,
+        paginate_by=paginate_by,
+        page_var=page_var,
+        page=page,
+        check_bounds=check_bounds)
+    kwargs[context_variable] = paginated_query.get_object_list()
+    return render_template(
+        template_name,
+        pagination=paginated_query,
+        page=paginated_query.get_page(),
+        **kwargs)
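+
+# Illustrative route (not part of the original module; `app` and `Note` are
+# assumptions):
+#
+#     @app.route('/notes/')
+#     def note_list():
+#         return object_list('note_list.html', Note.select(), paginate_by=10)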
+
+def get_current_url():
+    if not request.query_string:
+        return request.path
+    # query_string is bytes under Python 3; decode to avoid a b'...' literal.
+    query_string = request.query_string
+    if isinstance(query_string, bytes):
+        query_string = query_string.decode('utf-8')
+    return '%s?%s' % (request.path, query_string)
+
+def get_next_url(default='/'):
+    if request.args.get('next'):
+        return request.args['next']
+    elif request.form.get('next'):
+        return request.form['next']
+    return default
+
+class FlaskDB(object):
+    def __init__(self, app=None, database=None, model_class=Model):
+        self.database = None  # Reference to actual Peewee database instance.
+        self.base_model_class = model_class
+        self._app = app
+        self._db = database  # dict, url, Database, or None (default).
+        if app is not None:
+            self.init_app(app)
+
+    def init_app(self, app):
+        self._app = app
+
+        if self._db is None:
+            if 'DATABASE' in app.config:
+                initial_db = app.config['DATABASE']
+            elif 'DATABASE_URL' in app.config:
+                initial_db = app.config['DATABASE_URL']
+            else:
+                raise ValueError('Missing required configuration data for '
+                                 'database: DATABASE or DATABASE_URL.')
+        else:
+            initial_db = self._db
+
+        self._load_database(app, initial_db)
+        self._register_handlers(app)
+
+    def _load_database(self, app, config_value):
+        if isinstance(config_value, Database):
+            database = config_value
+        elif isinstance(config_value, dict):
+            database = self._load_from_config_dict(dict(config_value))
+        else:
+            # Assume a database connection URL.
+            database = db_url_connect(config_value)
+
+        if isinstance(self.database, Proxy):
+            self.database.initialize(database)
+        else:
+            self.database = database
+
+    def _load_from_config_dict(self, config_dict):
+        try:
+            name = config_dict.pop('name')
+            engine = config_dict.pop('engine')
+        except KeyError:
+            raise RuntimeError('DATABASE configuration must specify a '
+                               '`name` and `engine`.')
+
+        if '.' in engine:
+            path, class_name = engine.rsplit('.', 1)
+        else:
+            path, class_name = 'peewee', engine
+
+        try:
+            __import__(path)
+            module = sys.modules[path]
+            database_class = getattr(module, class_name)
+            assert issubclass(database_class, Database)
+        except ImportError:
+            raise RuntimeError('Unable to import %s' % engine)
+        except AttributeError:
+            raise RuntimeError('Database engine not found %s' % engine)
+        except AssertionError:
+            raise RuntimeError('Database engine not a subclass of '
+                               'peewee.Database: %s' % engine)
+
+        return database_class(name, **config_dict)
+
+    def _register_handlers(self, app):
+        app.before_request(self.connect_db)
+        app.teardown_request(self.close_db)
+
+    def get_model_class(self):
+        if self.database is None:
+            raise RuntimeError('Database must be initialized.')
+
+        class BaseModel(self.base_model_class):
+            class Meta:
+                database = self.database
+
+        return BaseModel
+
+    @property
+    def Model(self):
+        if self._app is None:
+            database = getattr(self, 'database', None)
+            if database is None:
+                self.database = Proxy()
+
+        if not hasattr(self, '_model_class'):
+            self._model_class = self.get_model_class()
+        return self._model_class
+
+    def connect_db(self):
+        self.database.connect()
+
+    def close_db(self, exc):
+        if not self.database.is_closed():
+            self.database.close()
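+
+# Illustrative wiring (not part of the original module; the Flask app and the
+# Note model are assumptions):
+#
+#     from flask import Flask
+#     from peewee import TextField
+#
+#     app = Flask(__name__)
+#     app.config['DATABASE'] = 'sqliteext:///notes.db'
+#     db_wrapper = FlaskDB(app)
+#
+#     class Note(db_wrapper.Model):
+#         content = TextField()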
diff --git a/lib/python3.7/site-packages/playhouse/hybrid.py b/lib/python3.7/site-packages/playhouse/hybrid.py
new file mode 100644
index 0000000000000000000000000000000000000000..53f2262882130e0dc05e4b3d2a37af4c5580e7e9
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/hybrid.py
@@ -0,0 +1,50 @@
+# Hybrid methods/attributes, based on similar functionality in SQLAlchemy:
+# http://docs.sqlalchemy.org/en/improve_toc/orm/extensions/hybrid.html
+class hybrid_method(object):
+    def __init__(self, func, expr=None):
+        self.func = func
+        self.expr = expr or func
+
+    def __get__(self, instance, instance_type):
+        if instance is None:
+            return self.expr.__get__(instance_type, instance_type.__class__)
+        return self.func.__get__(instance, instance_type)
+
+    def expression(self, expr):
+        self.expr = expr
+        return self
+
+
+class hybrid_property(object):
+    def __init__(self, fget, fset=None, fdel=None, expr=None):
+        self.fget = fget
+        self.fset = fset
+        self.fdel = fdel
+        self.expr = expr or fget
+
+    def __get__(self, instance, instance_type):
+        if instance is None:
+            return self.expr(instance_type)
+        return self.fget(instance)
+
+    def __set__(self, instance, value):
+        if self.fset is None:
+            raise AttributeError('Cannot set attribute.')
+        self.fset(instance, value)
+
+    def __delete__(self, instance):
+        if self.fdel is None:
+            raise AttributeError('Cannot delete attribute.')
+        self.fdel(instance)
+
+    def setter(self, fset):
+        self.fset = fset
+        return self
+
+    def deleter(self, fdel):
+        self.fdel = fdel
+        return self
+
+    def expression(self, expr):
+        self.expr = expr
+        return self
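+
+# Illustrative example (not part of the original module):
+#
+#     class Interval(Model):
+#         start = IntegerField()
+#         end = IntegerField()
+#
+#         @hybrid_property
+#         def length(self):
+#             return self.end - self.start
+#
+# On an instance, interval.length is plain arithmetic; on the class,
+# Interval.length expands to (Interval.end - Interval.start), so it can be
+# used in queries: Interval.select().where(Interval.length > 5)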
diff --git a/lib/python3.7/site-packages/playhouse/kv.py b/lib/python3.7/site-packages/playhouse/kv.py
new file mode 100644
index 0000000000000000000000000000000000000000..742b49cad83da0c055fc6e46c0a266f63a115d60
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/kv.py
@@ -0,0 +1,172 @@
+import operator
+
+from peewee import *
+from peewee import Expression
+from playhouse.fields import PickleField
+try:
+    from playhouse.sqlite_ext import CSqliteExtDatabase as SqliteExtDatabase
+except ImportError:
+    from playhouse.sqlite_ext import SqliteExtDatabase
+
+
+Sentinel = type('Sentinel', (object,), {})
+
+
+class KeyValue(object):
+    """
+    Persistent dictionary.
+
+    :param Field key_field: field to use for key. Defaults to CharField.
+    :param Field value_field: field to use for value. Defaults to PickleField.
+    :param bool ordered: data should be returned in key-sorted order.
+    :param Database database: database where key/value data is stored.
+    :param str table_name: table name for data.
+    """
+    def __init__(self, key_field=None, value_field=None, ordered=False,
+                 database=None, table_name='keyvalue'):
+        if key_field is None:
+            key_field = CharField(max_length=255, primary_key=True)
+        if not key_field.primary_key:
+            raise ValueError('key_field must have primary_key=True.')
+
+        if value_field is None:
+            value_field = PickleField()
+
+        self._key_field = key_field
+        self._value_field = value_field
+        self._ordered = ordered
+        self._database = database or SqliteExtDatabase(':memory:')
+        self._table_name = table_name
+        if isinstance(self._database, PostgresqlDatabase):
+            self.upsert = self._postgres_upsert
+            self.update = self._postgres_update
+        else:
+            self.upsert = self._upsert
+            self.update = self._update
+
+        self.model = self.create_model()
+        self.key = self.model.key
+        self.value = self.model.value
+
+        # Ensure table exists.
+        self.model.create_table()
+
+    def create_model(self):
+        class KeyValue(Model):
+            key = self._key_field
+            value = self._value_field
+            class Meta:
+                database = self._database
+                table_name = self._table_name
+        return KeyValue
+
+    def query(self, *select):
+        query = self.model.select(*select).tuples()
+        if self._ordered:
+            query = query.order_by(self.key)
+        return query
+
+    def convert_expression(self, expr):
+        if not isinstance(expr, Expression):
+            return (self.key == expr), True
+        return expr, False
+
+    def __contains__(self, key):
+        expr, _ = self.convert_expression(key)
+        return self.model.select().where(expr).exists()
+
+    def __len__(self):
+        return len(self.model)
+
+    def __getitem__(self, expr):
+        converted, is_single = self.convert_expression(expr)
+        query = self.query(self.value).where(converted)
+        item_getter = operator.itemgetter(0)
+        result = [item_getter(row) for row in query]
+        if len(result) == 0 and is_single:
+            raise KeyError(expr)
+        elif is_single:
+            return result[0]
+        return result
+
+    def _upsert(self, key, value):
+        (self.model
+         .insert(key=key, value=value)
+         .on_conflict('replace')
+         .execute())
+
+    def _postgres_upsert(self, key, value):
+        (self.model
+         .insert(key=key, value=value)
+         .on_conflict(conflict_target=[self.key],
+                      preserve=[self.value])
+         .execute())
+
+    def __setitem__(self, expr, value):
+        if isinstance(expr, Expression):
+            self.model.update(value=value).where(expr).execute()
+        else:
+            self.upsert(expr, value)
+
+    def __delitem__(self, expr):
+        converted, _ = self.convert_expression(expr)
+        self.model.delete().where(converted).execute()
+
+    def __iter__(self):
+        return iter(self.query().execute())
+
+    def keys(self):
+        return map(operator.itemgetter(0), self.query(self.key))
+
+    def values(self):
+        return map(operator.itemgetter(0), self.query(self.value))
+
+    def items(self):
+        return iter(self.query().execute())
+
+    def _update(self, __data=None, **mapping):
+        if __data is not None:
+            mapping.update(__data)
+        return (self.model
+                .insert_many(list(mapping.items()),
+                             fields=[self.key, self.value])
+                .on_conflict('replace')
+                .execute())
+
+    def _postgres_update(self, __data=None, **mapping):
+        if __data is not None:
+            mapping.update(__data)
+        return (self.model
+                .insert_many(list(mapping.items()),
+                             fields=[self.key, self.value])
+                .on_conflict(conflict_target=[self.key],
+                             preserve=[self.value])
+                .execute())
+
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def setdefault(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            self[key] = default
+            return default
+
+    def pop(self, key, default=Sentinel):
+        with self._database.atomic():
+            try:
+                result = self[key]
+            except KeyError:
+                if default is Sentinel:
+                    raise
+                return default
+            del self[key]
+
+        return result
+
+    def clear(self):
+        self.model.delete().execute()
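+
+
+# Usage sketch (illustrative): KeyValue behaves like a dict persisted to a
+# database table (an in-memory SQLite database by default).
+#
+#     kv = KeyValue()
+#     kv['k1'] = {'data': [1, 2, 3]}   # values are pickled by default
+#     assert kv['k1'] == {'data': [1, 2, 3]}
+#     kv.update(k2='v2', k3='v3')      # bulk upsert
+#     print(kv[kv.key > 'k2'])         # expressions match multiple rows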
diff --git a/lib/python3.7/site-packages/playhouse/migrate.py b/lib/python3.7/site-packages/playhouse/migrate.py
new file mode 100644
index 0000000000000000000000000000000000000000..0abde2123cf4d22af2680a3a6393d43088a09b21
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/migrate.py
@@ -0,0 +1,823 @@
+"""
+Lightweight schema migrations.
+
+NOTE: Currently tested with SQLite and Postgresql. MySQL may be missing some
+features.
+
+Example Usage
+-------------
+
+Instantiate a migrator:
+
+    # Postgres example:
+    my_db = PostgresqlDatabase(...)
+    migrator = PostgresqlMigrator(my_db)
+
+    # SQLite example:
+    my_db = SqliteDatabase('my_database.db')
+    migrator = SqliteMigrator(my_db)
+
+Then you will use the `migrate` function to run various `Operation`s which
+are generated by the migrator:
+
+    migrate(
+        migrator.add_column('some_table', 'column_name', CharField(default=''))
+    )
+
+Migrations are not run inside a transaction, so if you wish the migration to
+run in a transaction you will need to wrap the call to `migrate` in a
+transaction block, e.g.:
+
+    with my_db.transaction():
+        migrate(...)
+
+Supported Operations
+--------------------
+
+Add new field(s) to an existing model:
+
+    # Create your field instances. For non-null fields you must specify a
+    # default value.
+    pubdate_field = DateTimeField(null=True)
+    comment_field = TextField(default='')
+
+    # Run the migration, specifying the database table, field name and field.
+    migrate(
+        migrator.add_column('comment_tbl', 'pub_date', pubdate_field),
+        migrator.add_column('comment_tbl', 'comment', comment_field),
+    )
+
+Renaming a field:
+
+    # Specify the table, original name of the column, and its new name.
+    migrate(
+        migrator.rename_column('story', 'pub_date', 'publish_date'),
+        migrator.rename_column('story', 'mod_date', 'modified_date'),
+    )
+
+Dropping a field:
+
+    migrate(
+        migrator.drop_column('story', 'some_old_field'),
+    )
+
+Making a field nullable or not nullable:
+
+    # Note: when making a field NOT NULL, the column must not contain any
+    # NULL values.
+    migrate(
+        # Make `pub_date` allow NULL values.
+        migrator.drop_not_null('story', 'pub_date'),
+
+        # Prevent `modified_date` from containing NULL values.
+        migrator.add_not_null('story', 'modified_date'),
+    )
+
+Renaming a table:
+
+    migrate(
+        migrator.rename_table('story', 'stories_tbl'),
+    )
+
+Adding an index:
+
+    # Specify the table, column names, and whether the index should be
+    # UNIQUE or not.
+    migrate(
+        # Create an index on the `pub_date` column.
+        migrator.add_index('story', ('pub_date',), False),
+
+        # Create a multi-column index on the `pub_date` and `status` fields.
+        migrator.add_index('story', ('pub_date', 'status'), False),
+
+        # Create a unique index on the category and title fields.
+        migrator.add_index('story', ('category_id', 'title'), True),
+    )
+
+Dropping an index:
+
+    # Specify the index name.
+    migrate(migrator.drop_index('story', 'story_pub_date_status'))
+
+Adding or dropping table constraints:
+
+    # Add a CHECK() constraint to enforce the price cannot be negative.
+    migrate(migrator.add_constraint(
+        'products',
+        'price_check',
+        Check('price >= 0')))
+
+    # Remove the price check constraint.
+    migrate(migrator.drop_constraint('products', 'price_check'))
+
+    # Add a UNIQUE constraint on the first and last names.
+    migrate(migrator.add_unique('person', 'first_name', 'last_name'))
+"""
+from collections import namedtuple
+import functools
+import hashlib
+import re
+
+from peewee import *
+from peewee import CommaNodeList
+from peewee import EnclosedNodeList
+from peewee import Entity
+from peewee import Expression
+from peewee import Node
+from peewee import NodeList
+from peewee import OP
+from peewee import callable_
+from peewee import sort_models
+from peewee import _truncate_constraint_name
+
+
+class Operation(object):
+    """Encapsulate a single schema altering operation."""
+    def __init__(self, migrator, method, *args, **kwargs):
+        self.migrator = migrator
+        self.method = method
+        self.args = args
+        self.kwargs = kwargs
+
+    def execute(self, node):
+        self.migrator.database.execute(node)
+
+    def _handle_result(self, result):
+        if isinstance(result, (Node, Context)):
+            self.execute(result)
+        elif isinstance(result, Operation):
+            result.run()
+        elif isinstance(result, (list, tuple)):
+            for item in result:
+                self._handle_result(item)
+
+    def run(self):
+        kwargs = self.kwargs.copy()
+        kwargs['with_context'] = True
+        method = getattr(self.migrator, self.method)
+        self._handle_result(method(*self.args, **kwargs))
+
+
+def operation(fn):
+    @functools.wraps(fn)
+    def inner(self, *args, **kwargs):
+        with_context = kwargs.pop('with_context', False)
+        if with_context:
+            return fn(self, *args, **kwargs)
+        return Operation(self, fn.__name__, *args, **kwargs)
+    return inner
+
+
+def make_index_name(table_name, columns):
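+    # e.g. ('story', ('pub_date', 'status')) -> 'story_pub_date_status'; names
+    # over 64 characters are truncated and suffixed with a short md5 hash.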
+    index_name = '_'.join((table_name,) + tuple(columns))
+    if len(index_name) > 64:
+        index_hash = hashlib.md5(index_name.encode('utf-8')).hexdigest()
+        index_name = '%s_%s' % (index_name[:56], index_hash[:7])
+    return index_name
+
+
+class SchemaMigrator(object):
+    explicit_create_foreign_key = False
+    explicit_delete_foreign_key = False
+
+    def __init__(self, database):
+        self.database = database
+
+    def make_context(self):
+        return self.database.get_sql_context()
+
+    @classmethod
+    def from_database(cls, database):
+        if isinstance(database, PostgresqlDatabase):
+            return PostgresqlMigrator(database)
+        elif isinstance(database, MySQLDatabase):
+            return MySQLMigrator(database)
+        elif isinstance(database, SqliteDatabase):
+            return SqliteMigrator(database)
+        raise ValueError('Unsupported database: %s' % database)
+
+    @operation
+    def apply_default(self, table, column_name, field):
+        default = field.default
+        if callable_(default):
+            default = default()
+
+        return (self.make_context()
+                .literal('UPDATE ')
+                .sql(Entity(table))
+                .literal(' SET ')
+                .sql(Expression(
+                    Entity(column_name),
+                    OP.EQ,
+                    field.db_value(default),
+                    flat=True)))
+
+    def _alter_table(self, ctx, table):
+        return ctx.literal('ALTER TABLE ').sql(Entity(table))
+
+    def _alter_column(self, ctx, table, column):
+        return (self
+                ._alter_table(ctx, table)
+                .literal(' ALTER COLUMN ')
+                .sql(Entity(column)))
+
+    @operation
+    def alter_add_column(self, table, column_name, field):
+        # Make field null at first.
+        ctx = self.make_context()
+        field_null, field.null = field.null, True
+        field.name = field.column_name = column_name
+        (self
+         ._alter_table(ctx, table)
+         .literal(' ADD COLUMN ')
+         .sql(field.ddl(ctx)))
+
+        field.null = field_null
+        if isinstance(field, ForeignKeyField):
+            self.add_inline_fk_sql(ctx, field)
+        return ctx
+
+    @operation
+    def add_constraint(self, table, name, constraint):
+        return (self
+                ._alter_table(self.make_context(), table)
+                .literal(' ADD CONSTRAINT ')
+                .sql(Entity(name))
+                .literal(' ')
+                .sql(constraint))
+
+    @operation
+    def add_unique(self, table, *column_names):
+        constraint_name = 'uniq_%s' % '_'.join(column_names)
+        constraint = NodeList((
+            SQL('UNIQUE'),
+            EnclosedNodeList([Entity(column) for column in column_names])))
+        return self.add_constraint(table, constraint_name, constraint)
+
+    @operation
+    def drop_constraint(self, table, name):
+        return (self
+                ._alter_table(self.make_context(), table)
+                .literal(' DROP CONSTRAINT ')
+                .sql(Entity(name)))
+
+    def add_inline_fk_sql(self, ctx, field):
+        ctx = (ctx
+               .literal(' REFERENCES ')
+               .sql(Entity(field.rel_model._meta.table_name))
+               .literal(' ')
+               .sql(EnclosedNodeList((Entity(field.rel_field.column_name),))))
+        if field.on_delete is not None:
+            ctx = ctx.literal(' ON DELETE %s' % field.on_delete)
+        if field.on_update is not None:
+            ctx = ctx.literal(' ON UPDATE %s' % field.on_update)
+        return ctx
+
+    @operation
+    def add_foreign_key_constraint(self, table, column_name, rel, rel_column,
+                                   on_delete=None, on_update=None):
+        constraint = 'fk_%s_%s_refs_%s' % (table, column_name, rel)
+        ctx = (self
+               .make_context()
+               .literal('ALTER TABLE ')
+               .sql(Entity(table))
+               .literal(' ADD CONSTRAINT ')
+               .sql(Entity(_truncate_constraint_name(constraint)))
+               .literal(' FOREIGN KEY ')
+               .sql(EnclosedNodeList((Entity(column_name),)))
+               .literal(' REFERENCES ')
+               .sql(Entity(rel))
+               .literal(' (')
+               .sql(Entity(rel_column))
+               .literal(')'))
+        if on_delete is not None:
+            ctx = ctx.literal(' ON DELETE %s' % on_delete)
+        if on_update is not None:
+            ctx = ctx.literal(' ON UPDATE %s' % on_update)
+        return ctx
+
+    @operation
+    def add_column(self, table, column_name, field):
+        # Adding a column is complicated by the fact that if there are rows
+        # present and the field is non-null, then we need to first add the
+        # column as a nullable field, then set the value, then add a not null
+        # constraint.
+        if not field.null and field.default is None:
+            raise ValueError('%s is not null but has no default' % column_name)
+
+        is_foreign_key = isinstance(field, ForeignKeyField)
+        if is_foreign_key and not field.rel_field:
+            raise ValueError('Foreign keys must specify a `field`.')
+
+        operations = [self.alter_add_column(table, column_name, field)]
+
+        # In the event the field is *not* nullable, update with the default
+        # value and set not null.
+        if not field.null:
+            operations.extend([
+                self.apply_default(table, column_name, field),
+                self.add_not_null(table, column_name)])
+
+        if is_foreign_key and self.explicit_create_foreign_key:
+            operations.append(
+                self.add_foreign_key_constraint(
+                    table,
+                    column_name,
+                    field.rel_model._meta.table_name,
+                    field.rel_field.column_name,
+                    field.on_delete,
+                    field.on_update))
+
+        if field.index or field.unique:
+            using = getattr(field, 'index_type', None)
+            operations.append(self.add_index(table, (column_name,),
+                                             field.unique, using))
+
+        return operations
+
+    @operation
+    def drop_foreign_key_constraint(self, table, column_name):
+        raise NotImplementedError
+
+    @operation
+    def drop_column(self, table, column_name, cascade=True):
+        ctx = self.make_context()
+        (self._alter_table(ctx, table)
+         .literal(' DROP COLUMN ')
+         .sql(Entity(column_name)))
+
+        if cascade:
+            ctx.literal(' CASCADE')
+
+        fk_columns = [
+            foreign_key.column
+            for foreign_key in self.database.get_foreign_keys(table)]
+        if column_name in fk_columns and self.explicit_delete_foreign_key:
+            return [self.drop_foreign_key_constraint(table, column_name), ctx]
+
+        return ctx
+
+    @operation
+    def rename_column(self, table, old_name, new_name):
+        return (self
+                ._alter_table(self.make_context(), table)
+                .literal(' RENAME COLUMN ')
+                .sql(Entity(old_name))
+                .literal(' TO ')
+                .sql(Entity(new_name)))
+
+    @operation
+    def add_not_null(self, table, column):
+        return (self
+                ._alter_column(self.make_context(), table, column)
+                .literal(' SET NOT NULL'))
+
+    @operation
+    def drop_not_null(self, table, column):
+        return (self
+                ._alter_column(self.make_context(), table, column)
+                .literal(' DROP NOT NULL'))
+
+    @operation
+    def rename_table(self, old_name, new_name):
+        return (self
+                ._alter_table(self.make_context(), old_name)
+                .literal(' RENAME TO ')
+                .sql(Entity(new_name)))
+
+    @operation
+    def add_index(self, table, columns, unique=False, using=None):
+        ctx = self.make_context()
+        index_name = make_index_name(table, columns)
+        table_obj = Table(table)
+        cols = [getattr(table_obj.c, column) for column in columns]
+        index = Index(index_name, table_obj, cols, unique=unique, using=using)
+        return ctx.sql(index)
+
+    @operation
+    def drop_index(self, table, index_name):
+        return (self
+                .make_context()
+                .literal('DROP INDEX ')
+                .sql(Entity(index_name)))
+
+
+class PostgresqlMigrator(SchemaMigrator):
+    def _primary_key_columns(self, tbl):
+        query = """
+            SELECT pg_attribute.attname
+            FROM pg_index, pg_class, pg_attribute
+            WHERE
+                pg_class.oid = '%s'::regclass AND
+                indrelid = pg_class.oid AND
+                pg_attribute.attrelid = pg_class.oid AND
+                pg_attribute.attnum = any(pg_index.indkey) AND
+                indisprimary;
+        """
+        cursor = self.database.execute_sql(query % tbl)
+        return [row[0] for row in cursor.fetchall()]
+
+    @operation
+    def set_search_path(self, schema_name):
+        return (self
+                .make_context()
+                .literal('SET search_path TO %s' % schema_name))
+
+    @operation
+    def rename_table(self, old_name, new_name):
+        pk_names = self._primary_key_columns(old_name)
+        ParentClass = super(PostgresqlMigrator, self)
+
+        operations = [
+            ParentClass.rename_table(old_name, new_name, with_context=True)]
+
+        if len(pk_names) == 1:
+            # Check for existence of primary key sequence.
+            seq_name = '%s_%s_seq' % (old_name, pk_names[0])
+            query = """
+                SELECT 1
+                FROM information_schema.sequences
+                WHERE LOWER(sequence_name) = LOWER(%s)
+            """
+            cursor = self.database.execute_sql(query, (seq_name,))
+            if bool(cursor.fetchone()):
+                new_seq_name = '%s_%s_seq' % (new_name, pk_names[0])
+                operations.append(ParentClass.rename_table(
+                    seq_name, new_seq_name))
+
+        return operations
+
+
+class MySQLColumn(namedtuple('_Column', ('name', 'definition', 'null', 'pk',
+                                         'default', 'extra'))):
+    @property
+    def is_pk(self):
+        return self.pk == 'PRI'
+
+    @property
+    def is_unique(self):
+        return self.pk == 'UNI'
+
+    @property
+    def is_null(self):
+        return self.null == 'YES'
+
+    def sql(self, column_name=None, is_null=None):
+        if is_null is None:
+            is_null = self.is_null
+        if column_name is None:
+            column_name = self.name
+        parts = [
+            Entity(column_name),
+            SQL(self.definition)]
+        if self.is_unique:
+            parts.append(SQL('UNIQUE'))
+        if is_null:
+            parts.append(SQL('NULL'))
+        else:
+            parts.append(SQL('NOT NULL'))
+        if self.is_pk:
+            parts.append(SQL('PRIMARY KEY'))
+        if self.extra:
+            parts.append(SQL(self.extra))
+        return NodeList(parts)
+
+
+class MySQLMigrator(SchemaMigrator):
+    explicit_create_foreign_key = True
+    explicit_delete_foreign_key = True
+
+    @operation
+    def rename_table(self, old_name, new_name):
+        return (self
+                .make_context()
+                .literal('RENAME TABLE ')
+                .sql(Entity(old_name))
+                .literal(' TO ')
+                .sql(Entity(new_name)))
+
+    def _get_column_definition(self, table, column_name):
+        cursor = self.database.execute_sql('DESCRIBE `%s`;' % table)
+        rows = cursor.fetchall()
+        for row in rows:
+            column = MySQLColumn(*row)
+            if column.name == column_name:
+                return column
+        return False
+
+    def get_foreign_key_constraint(self, table, column_name):
+        cursor = self.database.execute_sql(
+            ('SELECT constraint_name '
+             'FROM information_schema.key_column_usage WHERE '
+             'table_schema = DATABASE() AND '
+             'table_name = %s AND '
+             'column_name = %s AND '
+             'referenced_table_name IS NOT NULL AND '
+             'referenced_column_name IS NOT NULL;'),
+            (table, column_name))
+        result = cursor.fetchone()
+        if not result:
+            raise AttributeError(
+                'Unable to find foreign key constraint for '
+                '"%s" on table "%s".' % (table, column_name))
+        return result[0]
+
+    @operation
+    def drop_foreign_key_constraint(self, table, column_name):
+        fk_constraint = self.get_foreign_key_constraint(table, column_name)
+        return (self
+                .make_context()
+                .literal('ALTER TABLE ')
+                .sql(Entity(table))
+                .literal(' DROP FOREIGN KEY ')
+                .sql(Entity(fk_constraint)))
+
+    def add_inline_fk_sql(self, ctx, field):
+        pass
+
+    @operation
+    def add_not_null(self, table, column):
+        column_def = self._get_column_definition(table, column)
+        add_not_null = (self
+                         .make_context()
+                         .literal('ALTER TABLE ')
+                         .sql(Entity(table))
+                         .literal(' MODIFY ')
+                         .sql(column_def.sql(is_null=False)))
+
+        fk_objects = dict(
+            (fk.column, fk)
+            for fk in self.database.get_foreign_keys(table))
+        if column not in fk_objects:
+            return add_not_null
+
+        fk_metadata = fk_objects[column]
+        return (self.drop_foreign_key_constraint(table, column),
+                add_not_null,
+                self.add_foreign_key_constraint(
+                    table,
+                    column,
+                    fk_metadata.dest_table,
+                    fk_metadata.dest_column))
+
+    @operation
+    def drop_not_null(self, table, column):
+        column = self._get_column_definition(table, column)
+        if column.is_pk:
+            raise ValueError('Primary keys cannot be null')
+        return (self
+                .make_context()
+                .literal('ALTER TABLE ')
+                .sql(Entity(table))
+                .literal(' MODIFY ')
+                .sql(column.sql(is_null=True)))
+
+    @operation
+    def rename_column(self, table, old_name, new_name):
+        fk_objects = dict(
+            (fk.column, fk)
+            for fk in self.database.get_foreign_keys(table))
+        is_foreign_key = old_name in fk_objects
+
+        column = self._get_column_definition(table, old_name)
+        rename_ctx = (self
+                      .make_context()
+                      .literal('ALTER TABLE ')
+                      .sql(Entity(table))
+                      .literal(' CHANGE ')
+                      .sql(Entity(old_name))
+                      .literal(' ')
+                      .sql(column.sql(column_name=new_name)))
+        if is_foreign_key:
+            fk_metadata = fk_objects[old_name]
+            return [
+                self.drop_foreign_key_constraint(table, old_name),
+                rename_ctx,
+                self.add_foreign_key_constraint(
+                    table,
+                    new_name,
+                    fk_metadata.dest_table,
+                    fk_metadata.dest_column),
+            ]
+        else:
+            return rename_ctx
+
+    @operation
+    def drop_index(self, table, index_name):
+        return (self
+                .make_context()
+                .literal('DROP INDEX ')
+                .sql(Entity(index_name))
+                .literal(' ON ')
+                .sql(Entity(table)))
+
+
+class SqliteMigrator(SchemaMigrator):
+    """
+    SQLite supports only a subset of ALTER TABLE queries; see the docs for
+    the full details: http://sqlite.org/lang_altertable.html
+    """
+    column_re = re.compile(r'(.+?)\((.+)\)')
+    column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')
+    column_name_re = re.compile(r'["`\']?([\w]+)')
+    fk_re = re.compile(r'FOREIGN KEY\s+\("?([\w]+)"?\)\s+', re.I)
+
+    def _get_column_names(self, table):
+        res = self.database.execute_sql('select * from "%s" limit 1' % table)
+        return [item[0] for item in res.description]
+
+    def _get_create_table(self, table):
+        res = self.database.execute_sql(
+            ('select name, sql from sqlite_master '
+             'where type=? and LOWER(name)=?'),
+            ['table', table.lower()])
+        return res.fetchone()
+
+    @operation
+    def _update_column(self, table, column_to_update, fn):
+        columns = set(column.name.lower()
+                      for column in self.database.get_columns(table))
+        if column_to_update.lower() not in columns:
+            raise ValueError('Column "%s" does not exist on "%s"' %
+                             (column_to_update, table))
+
+        # Get the SQL used to create the given table.
+        table, create_table = self._get_create_table(table)
+
+        # Get the indexes and SQL to re-create indexes.
+        indexes = self.database.get_indexes(table)
+
+        # Find any foreign keys we may need to remove.
+        self.database.get_foreign_keys(table)
+
+        # Make sure the create_table does not contain any newlines or tabs,
+        # allowing the regex to work correctly.
+        create_table = re.sub(r'\s+', ' ', create_table)
+
+        # Parse out the `CREATE TABLE` and column list portions of the query.
+        raw_create, raw_columns = self.column_re.search(create_table).groups()
+
+        # Clean up the individual column definitions.
+        split_columns = self.column_split_re.findall(raw_columns)
+        column_defs = [col.strip() for col in split_columns]
+
+        new_column_defs = []
+        new_column_names = []
+        original_column_names = []
+        constraint_terms = ('foreign ', 'primary ', 'constraint ')
+
+        for column_def in column_defs:
+            column_name, = self.column_name_re.match(column_def).groups()
+
+            if column_name == column_to_update:
+                new_column_def = fn(column_name, column_def)
+                if new_column_def:
+                    new_column_defs.append(new_column_def)
+                    original_column_names.append(column_name)
+                    column_name, = self.column_name_re.match(
+                        new_column_def).groups()
+                    new_column_names.append(column_name)
+            else:
+                new_column_defs.append(column_def)
+
+                # Avoid treating constraints as columns.
+                if not column_def.lower().startswith(constraint_terms):
+                    new_column_names.append(column_name)
+                    original_column_names.append(column_name)
+
+        # Create a mapping of original columns to new columns.
+        original_to_new = dict(zip(original_column_names, new_column_names))
+        new_column = original_to_new.get(column_to_update)
+
+        fk_filter_fn = lambda column_def: column_def
+        if not new_column:
+            # Remove any foreign keys associated with this column.
+            fk_filter_fn = lambda column_def: None
+        elif new_column != column_to_update:
+            # Update any foreign keys for this column.
+            fk_filter_fn = lambda column_def: self.fk_re.sub(
+                'FOREIGN KEY ("%s") ' % new_column,
+                column_def)
+
+        cleaned_columns = []
+        for column_def in new_column_defs:
+            match = self.fk_re.match(column_def)
+            if match is not None and match.groups()[0] == column_to_update:
+                column_def = fk_filter_fn(column_def)
+            if column_def:
+                cleaned_columns.append(column_def)
+
+        # Update the name of the new CREATE TABLE query.
+        temp_table = table + '__tmp__'
+        rgx = re.compile('("?)%s("?)' % table, re.I)
+        create = rgx.sub(
+            '\\1%s\\2' % temp_table,
+            raw_create)
+
+        # Create the new table.
+        columns = ', '.join(cleaned_columns)
+        queries = [
+            NodeList([SQL('DROP TABLE IF EXISTS'), Entity(temp_table)]),
+            SQL('%s (%s)' % (create.strip(), columns))]
+
+        # Populate new table.
+        populate_table = NodeList((
+            SQL('INSERT INTO'),
+            Entity(temp_table),
+            EnclosedNodeList([Entity(col) for col in new_column_names]),
+            SQL('SELECT'),
+            CommaNodeList([Entity(col) for col in original_column_names]),
+            SQL('FROM'),
+            Entity(table)))
+        drop_original = NodeList([SQL('DROP TABLE'), Entity(table)])
+
+        # Drop existing table and rename temp table.
+        queries += [
+            populate_table,
+            drop_original,
+            self.rename_table(temp_table, table)]
+
+        # Re-create user-defined indexes. User-defined indexes will have a
+        # non-empty SQL attribute.
+        for index in filter(lambda idx: idx.sql, indexes):
+            if column_to_update not in index.columns:
+                queries.append(SQL(index.sql))
+            elif new_column:
+                sql = self._fix_index(index.sql, column_to_update, new_column)
+                if sql is not None:
+                    queries.append(SQL(sql))
+
+        return queries
+
+    def _fix_index(self, sql, column_to_update, new_column):
+        # Split on the name of the column to update. If it splits into two
+        # pieces, then there's no ambiguity and we can simply replace the
+        # old with the new.
+        parts = sql.split(column_to_update)
+        if len(parts) == 2:
+            return sql.replace(column_to_update, new_column)
+
+        # Find the list of columns in the index expression.
+        lhs, rhs = sql.rsplit('(', 1)
+
+        # Apply the same "split in two" logic to the column list portion of
+        # the query.
+        if len(rhs.split(column_to_update)) == 2:
+            return '%s(%s' % (lhs, rhs.replace(column_to_update, new_column))
+
+        # Strip off the trailing parentheses and go through each column.
+        parts = rhs.rsplit(')', 1)[0].split(',')
+        columns = [part.strip('"`[]\' ') for part in parts]
+
+        # `columns` looks something like: ['status', 'timestamp" DESC']
+        # https://www.sqlite.org/lang_keywords.html
+        # Strip out any junk after the column name.
+        clean = []
+        for column in columns:
+            if re.match(r'%s(?:[\'"`\]]?\s|$)' % column_to_update, column):
+                column = new_column + column[len(column_to_update):]
+            clean.append(column)
+
+        return '%s(%s)' % (lhs, ', '.join('"%s"' % c for c in clean))
+
+    @operation
+    def drop_column(self, table, column_name, cascade=True):
+        return self._update_column(table, column_name, lambda a, b: None)
+
+    @operation
+    def rename_column(self, table, old_name, new_name):
+        def _rename(column_name, column_def):
+            return column_def.replace(column_name, new_name)
+        return self._update_column(table, old_name, _rename)
+
+    @operation
+    def add_not_null(self, table, column):
+        def _add_not_null(column_name, column_def):
+            return column_def + ' NOT NULL'
+        return self._update_column(table, column, _add_not_null)
+
+    @operation
+    def drop_not_null(self, table, column):
+        def _drop_not_null(column_name, column_def):
+            return column_def.replace('NOT NULL', '')
+        return self._update_column(table, column, _drop_not_null)
+
+    @operation
+    def add_constraint(self, table, name, constraint):
+        raise NotImplementedError
+
+    @operation
+    def drop_constraint(self, table, name):
+        raise NotImplementedError
+
+    @operation
+    def add_foreign_key_constraint(self, table, column_name, field,
+                                   on_delete=None, on_update=None):
+        raise NotImplementedError
+
+
+def migrate(*operations, **kwargs):
+    for operation in operations:
+        operation.run()
diff --git a/lib/python3.7/site-packages/playhouse/mysql_ext.py b/lib/python3.7/site-packages/playhouse/mysql_ext.py
new file mode 100644
index 0000000000000000000000000000000000000000..8eb2a43fa7bba260caf63eec15545848f46c52ee
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/mysql_ext.py
@@ -0,0 +1,34 @@
+import json
+
+try:
+    import mysql.connector as mysql_connector
+except ImportError:
+    mysql_connector = None
+
+from peewee import ImproperlyConfigured
+from peewee import MySQLDatabase
+from peewee import TextField
+
+
+class MySQLConnectorDatabase(MySQLDatabase):
+    def _connect(self):
+        if mysql_connector is None:
+            raise ImproperlyConfigured('MySQL connector not installed!')
+        return mysql_connector.connect(db=self.database, **self.connect_params)
+
+    def cursor(self, commit=None):
+        if self.is_closed():
+            self.connect()
+        return self._state.conn.cursor(buffered=True)
+
+
+class JSONField(TextField):
+    field_type = 'JSON'
+
+    def db_value(self, value):
+        if value is not None:
+            return json.dumps(value)
+
+    def python_value(self, value):
+        if value is not None:
+            return json.loads(value)
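+
+
+# Usage sketch (illustrative):
+#
+#     db = MySQLConnectorDatabase('my_app', user='root')
+#
+#     class Event(Model):
+#         payload = JSONField()        # serialized via json.dumps / json.loads
+#         class Meta:
+#             database = db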
diff --git a/lib/python3.7/site-packages/playhouse/pool.py b/lib/python3.7/site-packages/playhouse/pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ade1da94b1618f7306bfb28be3c8b10b9bd59b6
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/pool.py
@@ -0,0 +1,317 @@
+"""
+Lightweight connection pooling for peewee.
+
+In a multi-threaded application, up to `max_connections` will be opened. Each
+thread (or, if using gevent, greenlet) will have its own connection.
+
+In a single-threaded application, only one connection will be created. It will
+be continually recycled until either it exceeds the stale timeout or is closed
+explicitly (using `.manual_close()`).
+
+By default, all your application needs to do is ensure that connections are
+closed when you are finished with them, and they will be returned to the pool.
+For web applications, this typically means that at the beginning of a request,
+you will open a connection, and when you return a response, you will close the
+connection.
+
+Simple Postgres pool example code:
+
+    # Use the special postgresql extensions.
+    from playhouse.pool import PooledPostgresqlExtDatabase
+
+    db = PooledPostgresqlExtDatabase(
+        'my_app',
+        max_connections=32,
+        stale_timeout=300,  # 5 minutes.
+        user='postgres')
+
+    class BaseModel(Model):
+        class Meta:
+            database = db
+
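+In a web application you would typically wire connect/close into the request
+lifecycle, e.g. (a sketch, assuming Flask-style hooks):
+
+    @app.before_request
+    def _db_connect():
+        db.connect()
+
+    @app.teardown_request
+    def _db_close(exc):
+        if not db.is_closed():
+            db.close()
+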
+That's it!
+"""
+import heapq
+import logging
+import time
+from collections import namedtuple
+from itertools import chain
+
+try:
+    from psycopg2.extensions import TRANSACTION_STATUS_IDLE
+    from psycopg2.extensions import TRANSACTION_STATUS_INERROR
+    from psycopg2.extensions import TRANSACTION_STATUS_UNKNOWN
+except ImportError:
+    TRANSACTION_STATUS_IDLE = \
+            TRANSACTION_STATUS_INERROR = \
+            TRANSACTION_STATUS_UNKNOWN = None
+
+from peewee import MySQLDatabase
+from peewee import PostgresqlDatabase
+from peewee import SqliteDatabase
+
+logger = logging.getLogger('peewee.pool')
+
+
+def make_int(val):
+    if val is not None and not isinstance(val, (int, float)):
+        return int(val)
+    return val
+
+
+class MaxConnectionsExceeded(ValueError): pass
+
+
+PoolConnection = namedtuple('PoolConnection', ('timestamp', 'connection',
+                                               'checked_out'))
+
+
+class PooledDatabase(object):
+    def __init__(self, database, max_connections=20, stale_timeout=None,
+                 timeout=None, **kwargs):
+        self._max_connections = make_int(max_connections)
+        self._stale_timeout = make_int(stale_timeout)
+        self._wait_timeout = make_int(timeout)
+        if self._wait_timeout == 0:
+            self._wait_timeout = float('inf')
+
+        # Available / idle connections stored in a heap, sorted oldest first.
+        self._connections = []
+
+        # Mapping of connection id to PoolConnection. Ordinarily we would want
+        # to use something like a WeakKeyDictionary, but Python typically won't
+        # allow us to create weak references to connection objects.
+        self._in_use = {}
+
+        # Use the memory address of the connection as the key in the event the
+        # connection object is not hashable. Connections will not get
+        # garbage-collected, however, because a reference to them will persist
+        # in "_in_use" as long as the conn has not been closed.
+        self.conn_key = id
+
+        super(PooledDatabase, self).__init__(database, **kwargs)
+
+    def init(self, database, max_connections=None, stale_timeout=None,
+             timeout=None, **connect_kwargs):
+        super(PooledDatabase, self).init(database, **connect_kwargs)
+        if max_connections is not None:
+            self._max_connections = make_int(max_connections)
+        if stale_timeout is not None:
+            self._stale_timeout = make_int(stale_timeout)
+        if timeout is not None:
+            self._wait_timeout = make_int(timeout)
+            if self._wait_timeout == 0:
+                self._wait_timeout = float('inf')
+
+    def connect(self, reuse_if_open=False):
+        if not self._wait_timeout:
+            return super(PooledDatabase, self).connect(reuse_if_open)
+
+        expires = time.time() + self._wait_timeout
+        while expires > time.time():
+            try:
+                ret = super(PooledDatabase, self).connect(reuse_if_open)
+            except MaxConnectionsExceeded:
+                time.sleep(0.1)
+            else:
+                return ret
+        raise MaxConnectionsExceeded('Max connections exceeded, timed out '
+                                     'attempting to connect.')
+
+    def _connect(self):
+        while True:
+            try:
+                # Remove the oldest connection from the heap.
+                ts, conn = heapq.heappop(self._connections)
+                key = self.conn_key(conn)
+            except IndexError:
+                ts = conn = None
+                logger.debug('No connection available in pool.')
+                break
+            else:
+                if self._is_closed(conn):
+                    # This connection was closed, but since it was not stale
+                    # it got added back to the queue of available conns. We
+                    # then closed it and marked it as explicitly closed, so
+                    # it's safe to throw it away now.
+                    # (Because Database.close() calls Database._close()).
+                    logger.debug('Connection %s was closed.', key)
+                    ts = conn = None
+                elif self._stale_timeout and self._is_stale(ts):
+                    # If we are attempting to check out a stale connection,
+                    # then close it. We don't need to mark it in the "closed"
+                    # set, because it is not in the list of available conns
+                    # anymore.
+                    logger.debug('Connection %s was stale, closing.', key)
+                    self._close(conn, True)
+                    ts = conn = None
+                else:
+                    break
+
+        if conn is None:
+            if self._max_connections and (
+                    len(self._in_use) >= self._max_connections):
+                raise MaxConnectionsExceeded('Exceeded maximum connections.')
+            conn = super(PooledDatabase, self)._connect()
+            ts = time.time()
+            key = self.conn_key(conn)
+            logger.debug('Created new connection %s.', key)
+
+        self._in_use[key] = PoolConnection(ts, conn, time.time())
+        return conn
+
+    def _is_stale(self, timestamp):
+        # Called on check-out and check-in to ensure the connection has
+        # not outlived the stale timeout.
+        return (time.time() - timestamp) > self._stale_timeout
+
+    def _is_closed(self, conn):
+        return False
+
+    def _can_reuse(self, conn):
+        # Called on check-in to make sure the connection can be re-used.
+        return True
+
+    def _close(self, conn, close_conn=False):
+        key = self.conn_key(conn)
+        if close_conn:
+            super(PooledDatabase, self)._close(conn)
+        elif key in self._in_use:
+            pool_conn = self._in_use.pop(key)
+            if self._stale_timeout and self._is_stale(pool_conn.timestamp):
+                logger.debug('Closing stale connection %s.', key)
+                super(PooledDatabase, self)._close(conn)
+            elif self._can_reuse(conn):
+                logger.debug('Returning %s to pool.', key)
+                heapq.heappush(self._connections, (pool_conn.timestamp, conn))
+            else:
+                logger.debug('Closed %s.', key)
+
+    def manual_close(self):
+        """
+        Close the underlying connection without returning it to the pool.
+        """
+        if self.is_closed():
+            return False
+
+        # Obtain reference to the connection in-use by the calling thread.
+        conn = self.connection()
+
+        # A connection will only be re-added to the available list if it is
+        # marked as "in use" at the time it is closed. We will explicitly
+        # remove it from the "in use" list, call "close()" for the
+        # side-effects, and then explicitly close the connection.
+        self._in_use.pop(self.conn_key(conn), None)
+        self.close()
+        self._close(conn, close_conn=True)
+
+    def close_idle(self):
+        # Close any open connections that are not currently in-use.
+        with self._lock:
+            for _, conn in self._connections:
+                self._close(conn, close_conn=True)
+            self._connections = []
+
+    def close_stale(self, age=600):
+        # Close any connections that are in-use but were checked out quite some
+        # time ago and can be considered stale.
+        with self._lock:
+            in_use = {}
+            cutoff = time.time() - age
+            n = 0
+            for key, pool_conn in self._in_use.items():
+                if pool_conn.checked_out < cutoff:
+                    self._close(pool_conn.connection, close_conn=True)
+                    n += 1
+                else:
+                    in_use[key] = pool_conn
+            self._in_use = in_use
+        return n
+
+    def close_all(self):
+        # Close all connections -- available and in-use. Warning: may break any
+        # active connections used by other threads.
+        self.close()
+        with self._lock:
+            for _, conn in self._connections:
+                self._close(conn, close_conn=True)
+            for pool_conn in self._in_use.values():
+                self._close(pool_conn.connection, close_conn=True)
+            self._connections = []
+            self._in_use = {}
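+
+    # Maintenance sketch (illustrative, not part of the original module):
+    #     db.close_idle()          # close pooled connections not in use
+    #     db.close_stale(age=60)   # close conns checked out over 60s ago
+    #     db.close_all()           # close everything (unsafe across threads)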
+
+
+class PooledMySQLDatabase(PooledDatabase, MySQLDatabase):
+    def _is_closed(self, conn):
+        try:
+            conn.ping(False)
+        except Exception:
+            return True
+        else:
+            return False
+
+
+class _PooledPostgresqlDatabase(PooledDatabase):
+    def _is_closed(self, conn):
+        if conn.closed:
+            return True
+
+        txn_status = conn.get_transaction_status()
+        if txn_status == TRANSACTION_STATUS_UNKNOWN:
+            return True
+        elif txn_status != TRANSACTION_STATUS_IDLE:
+            conn.rollback()
+        return False
+
+    def _can_reuse(self, conn):
+        txn_status = conn.get_transaction_status()
+        # Do not return connection in an error state, as subsequent queries
+        # will all fail. If the status is unknown then we lost the connection
+        # to the server and the connection should not be re-used.
+        if txn_status == TRANSACTION_STATUS_UNKNOWN:
+            return False
+        elif txn_status == TRANSACTION_STATUS_INERROR:
+            conn.reset()
+        elif txn_status != TRANSACTION_STATUS_IDLE:
+            conn.rollback()
+        return True
+
+class PooledPostgresqlDatabase(_PooledPostgresqlDatabase, PostgresqlDatabase):
+    pass
+
+try:
+    from playhouse.postgres_ext import PostgresqlExtDatabase
+
+    class PooledPostgresqlExtDatabase(_PooledPostgresqlDatabase, PostgresqlExtDatabase):
+        pass
+except ImportError:
+    PooledPostgresqlExtDatabase = None
+
+
+class _PooledSqliteDatabase(PooledDatabase):
+    def _is_closed(self, conn):
+        try:
+            conn.total_changes
+        except Exception:
+            return True
+        else:
+            return False
+
+class PooledSqliteDatabase(_PooledSqliteDatabase, SqliteDatabase):
+    pass
+
+try:
+    from playhouse.sqlite_ext import SqliteExtDatabase
+
+    class PooledSqliteExtDatabase(_PooledSqliteDatabase, SqliteExtDatabase):
+        pass
+except ImportError:
+    PooledSqliteExtDatabase = None
+
+try:
+    from playhouse.sqlite_ext import CSqliteExtDatabase
+
+    class PooledCSqliteExtDatabase(_PooledSqliteDatabase, CSqliteExtDatabase):
+        pass
+except ImportError:
+    PooledCSqliteExtDatabase = None
diff --git a/lib/python3.7/site-packages/playhouse/postgres_ext.py b/lib/python3.7/site-packages/playhouse/postgres_ext.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a2893eb5f2007fc75f2437ebabc6a31447bc3d4
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/postgres_ext.py
@@ -0,0 +1,468 @@
+"""
+Collection of postgres-specific extensions, currently including:
+
+* Support for hstore, a key/value type storage
+* Array, JSON/JSONB, tsvector, interval and timestamptz field types
+"""
+import logging
+import uuid
+
+from peewee import *
+from peewee import ColumnBase
+from peewee import Expression
+from peewee import Node
+from peewee import NodeList
+from peewee import SENTINEL
+from peewee import __exception_wrapper__
+
+try:
+    from psycopg2cffi import compat
+    compat.register()
+except ImportError:
+    pass
+
+from psycopg2.extras import register_hstore
+try:
+    from psycopg2.extras import Json
+except ImportError:
+    Json = None
+
+
+logger = logging.getLogger('peewee')
+
+
+HCONTAINS_DICT = '@>'
+HCONTAINS_KEYS = '?&'
+HCONTAINS_KEY = '?'
+HCONTAINS_ANY_KEY = '?|'
+HKEY = '->'
+HUPDATE = '||'
+ACONTAINS = '@>'
+ACONTAINS_ANY = '&&'
+TS_MATCH = '@@'
+JSONB_CONTAINS = '@>'
+JSONB_CONTAINED_BY = '<@'
+JSONB_CONTAINS_KEY = '?'
+JSONB_CONTAINS_ANY_KEY = '?|'
+JSONB_CONTAINS_ALL_KEYS = '?&'
+JSONB_EXISTS = '?'
+JSONB_REMOVE = '-'
+
+
+class _LookupNode(ColumnBase):
+    def __init__(self, node, parts):
+        self.node = node
+        self.parts = parts
+        super(_LookupNode, self).__init__()
+
+    def clone(self):
+        return type(self)(self.node, list(self.parts))
+
+
+class _JsonLookupBase(_LookupNode):
+    def __init__(self, node, parts, as_json=False):
+        super(_JsonLookupBase, self).__init__(node, parts)
+        self._as_json = as_json
+
+    def clone(self):
+        return type(self)(self.node, list(self.parts), self._as_json)
+
+    @Node.copy
+    def as_json(self, as_json=True):
+        self._as_json = as_json
+
+    def concat(self, rhs):
+        return Expression(self.as_json(True), OP.CONCAT, Json(rhs))
+
+    def contains(self, other):
+        clone = self.as_json(True)
+        if isinstance(other, (list, dict)):
+            return Expression(clone, JSONB_CONTAINS, Json(other))
+        return Expression(clone, JSONB_EXISTS, other)
+
+    def contains_any(self, *keys):
+        return Expression(
+            self.as_json(True),
+            JSONB_CONTAINS_ANY_KEY,
+            Value(list(keys), unpack=False))
+
+    def contains_all(self, *keys):
+        return Expression(
+            self.as_json(True),
+            JSONB_CONTAINS_ALL_KEYS,
+            Value(list(keys), unpack=False))
+
+    def has_key(self, key):
+        return Expression(self.as_json(True), JSONB_CONTAINS_KEY, key)
+
+
+class JsonLookup(_JsonLookupBase):
+    def __getitem__(self, value):
+        return JsonLookup(self.node, self.parts + [value], self._as_json)
+
+    def __sql__(self, ctx):
+        ctx.sql(self.node)
+        for part in self.parts[:-1]:
+            ctx.literal('->').sql(part)
+        if self.parts:
+            (ctx
+             .literal('->' if self._as_json else '->>')
+             .sql(self.parts[-1]))
+
+        return ctx
+
+
+class JsonPath(_JsonLookupBase):
+    def __sql__(self, ctx):
+        return (ctx
+                .sql(self.node)
+                .literal('#>' if self._as_json else '#>>')
+                .sql(Value('{%s}' % ','.join(map(str, self.parts)))))
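+
+
+# Rendering sketch (roughly): Model.data['k1']['k2'] emits data->'k1'->>'k2',
+# while Model.data.path('k1', 'k2') emits data#>>'{k1,k2}'.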
+
+
+class ObjectSlice(_LookupNode):
+    @classmethod
+    def create(cls, node, value):
+        if isinstance(value, slice):
+            parts = [value.start or 0, value.stop or 0]
+        elif isinstance(value, int):
+            parts = [value]
+        else:
+            parts = map(int, value.split(':'))
+        return cls(node, parts)
+
+    def __sql__(self, ctx):
+        return (ctx
+                .sql(self.node)
+                .literal('[%s]' % ':'.join(str(p + 1) for p in self.parts)))
+
+    def __getitem__(self, value):
+        return ObjectSlice.create(self, value)
+
+
+class IndexedFieldMixin(object):
+    default_index_type = 'GIN'
+
+    def __init__(self, *args, **kwargs):
+        kwargs.setdefault('index', True)  # By default, use an index.
+        super(IndexedFieldMixin, self).__init__(*args, **kwargs)
+
+
+class ArrayField(IndexedFieldMixin, Field):
+    passthrough = True
+
+    def __init__(self, field_class=IntegerField, field_kwargs=None,
+                 dimensions=1, convert_values=False, *args, **kwargs):
+        self.__field = field_class(**(field_kwargs or {}))
+        self.dimensions = dimensions
+        self.convert_values = convert_values
+        self.field_type = self.__field.field_type
+        super(ArrayField, self).__init__(*args, **kwargs)
+
+    def bind(self, model, name, set_attribute=True):
+        ret = super(ArrayField, self).bind(model, name, set_attribute)
+        self.__field.bind(model, '__array_%s' % name, False)
+        return ret
+
+    def ddl_datatype(self, ctx):
+        data_type = self.__field.ddl_datatype(ctx)
+        return NodeList((data_type, SQL('[]' * self.dimensions)), glue='')
+
+    def db_value(self, value):
+        if value is None or isinstance(value, Node):
+            return value
+        elif self.convert_values:
+            return self._process(self.__field.db_value, value, self.dimensions)
+        else:
+            return value if isinstance(value, list) else list(value)
+
+    def python_value(self, value):
+        if self.convert_values and value is not None:
+            conv = self.__field.python_value
+            if isinstance(value, list):
+                return self._process(conv, value, self.dimensions)
+            else:
+                return conv(value)
+        else:
+            return value
+
+    def _process(self, conv, value, dimensions):
+        dimensions -= 1
+        if dimensions == 0:
+            return [conv(v) for v in value]
+        else:
+            return [self._process(conv, v, dimensions) for v in value]
+
+    def __getitem__(self, value):
+        return ObjectSlice.create(self, value)
+
+    def _e(op):
+        def inner(self, rhs):
+            return Expression(self, op, ArrayValue(self, rhs))
+        return inner
+    __eq__ = _e(OP.EQ)
+    __ne__ = _e(OP.NE)
+    __gt__ = _e(OP.GT)
+    __ge__ = _e(OP.GTE)
+    __lt__ = _e(OP.LT)
+    __le__ = _e(OP.LTE)
+    __hash__ = Field.__hash__
+
+    def contains(self, *items):
+        return Expression(self, ACONTAINS, ArrayValue(self, items))
+
+    def contains_any(self, *items):
+        return Expression(self, ACONTAINS_ANY, ArrayValue(self, items))
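+
+# Usage sketch (illustrative): declare e.g. tags = ArrayField(CharField) on a
+# model, then query with Model.tags.contains('python') or
+# Model.tags.contains_any('python', 'postgres').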
+
+
+class ArrayValue(Node):
+    def __init__(self, field, value):
+        self.field = field
+        self.value = value
+
+    def __sql__(self, ctx):
+        return (ctx
+                .sql(Value(self.value, unpack=False))
+                .literal('::')
+                .sql(self.field.ddl_datatype(ctx)))
+
+
+class DateTimeTZField(DateTimeField):
+    field_type = 'TIMESTAMPTZ'
+
+
+class HStoreField(IndexedFieldMixin, Field):
+    field_type = 'HSTORE'
+    __hash__ = Field.__hash__
+
+    def __getitem__(self, key):
+        return Expression(self, HKEY, Value(key))
+
+    def keys(self):
+        return fn.akeys(self)
+
+    def values(self):
+        return fn.avals(self)
+
+    def items(self):
+        return fn.hstore_to_matrix(self)
+
+    def slice(self, *args):
+        return fn.slice(self, Value(list(args), unpack=False))
+
+    def exists(self, key):
+        return fn.exist(self, key)
+
+    def defined(self, key):
+        return fn.defined(self, key)
+
+    def update(self, **data):
+        return Expression(self, HUPDATE, data)
+
+    def delete(self, *keys):
+        return fn.delete(self, Value(list(keys), unpack=False))
+
+    def contains(self, value):
+        if isinstance(value, dict):
+            rhs = Value(value, unpack=False)
+            return Expression(self, HCONTAINS_DICT, rhs)
+        elif isinstance(value, (list, tuple)):
+            rhs = Value(value, unpack=False)
+            return Expression(self, HCONTAINS_KEYS, rhs)
+        return Expression(self, HCONTAINS_KEY, value)
+
+    def contains_any(self, *keys):
+        return Expression(self, HCONTAINS_ANY_KEY, Value(list(keys),
+                                                         unpack=False))
+
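+
+# Illustrative sketch, not part of the upstream module: key/value queries on
+# an HStoreField. `db` is assumed to be a PostgresqlExtDatabase created with
+# register_hstore=True; `House` is a hypothetical model.
+def _hstore_example(db):
+    class House(Model):
+        features = HStoreField()
+
+        class Meta:
+            database = db
+
+    # Houses whose features include the 'garage' key:
+    has_garage = House.select().where(House.features.exists('garage'))
+    # Select a single value by key:
+    colors = House.select(House.features['color'].alias('color'))
+    return has_garage, colors
+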
+
+class JSONField(Field):
+    field_type = 'JSON'
+
+    def __init__(self, dumps=None, *args, **kwargs):
+        if Json is None:
+            raise Exception('Your version of psycopg2 does not support JSON.')
+        self.dumps = dumps
+        super(JSONField, self).__init__(*args, **kwargs)
+
+    def db_value(self, value):
+        if value is None:
+            return value
+        if not isinstance(value, Json):
+            return Json(value, dumps=self.dumps)
+        return value
+
+    def __getitem__(self, value):
+        return JsonLookup(self, [value])
+
+    def path(self, *keys):
+        return JsonPath(self, keys)
+
+    def concat(self, value):
+        return super(JSONField, self).concat(Json(value))
+
+
+def cast_jsonb(node):
+    return NodeList((node, SQL('::jsonb')), glue='')
+
+
+class BinaryJSONField(IndexedFieldMixin, JSONField):
+    field_type = 'JSONB'
+    __hash__ = Field.__hash__
+
+    def contains(self, other):
+        if isinstance(other, (list, dict)):
+            return Expression(self, JSONB_CONTAINS, Json(other))
+        return Expression(cast_jsonb(self), JSONB_EXISTS, other)
+
+    def contained_by(self, other):
+        return Expression(cast_jsonb(self), JSONB_CONTAINED_BY, Json(other))
+
+    def contains_any(self, *items):
+        return Expression(
+            cast_jsonb(self),
+            JSONB_CONTAINS_ANY_KEY,
+            Value(list(items), unpack=False))
+
+    def contains_all(self, *items):
+        return Expression(
+            cast_jsonb(self),
+            JSONB_CONTAINS_ALL_KEYS,
+            Value(list(items), unpack=False))
+
+    def has_key(self, key):
+        return Expression(cast_jsonb(self), JSONB_CONTAINS_KEY, key)
+
+    def remove(self, *items):
+        return Expression(
+            cast_jsonb(self),
+            JSONB_REMOVE,
+            Value(list(items), unpack=False))
+
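+
+# Illustrative sketch, not part of the upstream module: jsonb containment and
+# key-existence queries with BinaryJSONField (`APIEvent` is hypothetical).
+def _jsonb_example(db):
+    class APIEvent(Model):
+        payload = BinaryJSONField()
+
+        class Meta:
+            database = db
+
+    # Events whose payload contains the given partial document:
+    clicks = APIEvent.select().where(
+        APIEvent.payload.contains({'type': 'click'}))
+    # Events whose payload has a top-level 'user' key:
+    with_user = APIEvent.select().where(APIEvent.payload.has_key('user'))
+    return clicks, with_user
+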
+
+class TSVectorField(IndexedFieldMixin, TextField):
+    field_type = 'TSVECTOR'
+    __hash__ = Field.__hash__
+
+    def match(self, query, language=None, plain=False):
+        params = (language, query) if language is not None else (query,)
+        func = fn.plainto_tsquery if plain else fn.to_tsquery
+        return Expression(self, TS_MATCH, func(*params))
+
+
+def Match(field, query, language=None):
+    params = (language, query) if language is not None else (query,)
+    field_params = (language, field) if language is not None else (field,)
+    return Expression(
+        fn.to_tsvector(*field_params),
+        TS_MATCH,
+        fn.to_tsquery(*params))
+
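+
+# Illustrative sketch, not part of the upstream module: full-text search with
+# an ad-hoc Match() over a plain text column versus a stored TSVectorField
+# (`Blog` is a hypothetical model).
+def _full_text_example(db):
+    class Blog(Model):
+        content = TextField()
+        search_content = TSVectorField()
+
+        class Meta:
+            database = db
+
+    # to_tsvector() is computed per row for the ad-hoc variant:
+    ad_hoc = Blog.select().where(Match(Blog.content, 'postgres'))
+    # The stored tsvector is matched directly:
+    stored = Blog.select().where(
+        Blog.search_content.match('peewee', plain=True))
+    return ad_hoc, stored
+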
+
+class IntervalField(Field):
+    field_type = 'INTERVAL'
+
+
+class FetchManyCursor(object):
+    __slots__ = ('cursor', 'array_size', 'exhausted', 'iterable')
+
+    def __init__(self, cursor, array_size=None):
+        self.cursor = cursor
+        self.array_size = array_size or cursor.itersize
+        self.exhausted = False
+        self.iterable = self.row_gen()
+
+    @property
+    def description(self):
+        return self.cursor.description
+
+    def close(self):
+        self.cursor.close()
+
+    def row_gen(self):
+        while True:
+            rows = self.cursor.fetchmany(self.array_size)
+            if not rows:
+                return
+            for row in rows:
+                yield row
+
+    def fetchone(self):
+        if self.exhausted:
+            return
+        try:
+            return next(self.iterable)
+        except StopIteration:
+            self.exhausted = True
+
+
+class ServerSideQuery(Node):
+    def __init__(self, query, array_size=None):
+        self.query = query
+        self.array_size = array_size
+        self._cursor_wrapper = None
+
+    def __sql__(self, ctx):
+        return self.query.__sql__(ctx)
+
+    def __iter__(self):
+        if self._cursor_wrapper is None:
+            self._execute(self.query._database)
+        return iter(self._cursor_wrapper.iterator())
+
+    def _execute(self, database):
+        if self._cursor_wrapper is None:
+            cursor = database.execute(self.query, named_cursor=True,
+                                      array_size=self.array_size)
+            self._cursor_wrapper = self.query._get_cursor_wrapper(cursor)
+        return self._cursor_wrapper
+
+
+def ServerSide(query, database=None, array_size=None):
+    if database is None:
+        database = query._database
+    with database.transaction():
+        server_side_query = ServerSideQuery(query, array_size=array_size)
+        for row in server_side_query:
+            yield row
+
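+
+# Illustrative sketch, not part of the upstream module: streaming a large
+# result set through a named cursor, `array_size` rows at a time, instead of
+# materializing everything up front. `Registration` and `handle_row` are
+# hypothetical stand-ins.
+def _server_side_example(Registration, handle_row):
+    query = Registration.select().order_by(Registration.id)
+    for registration in ServerSide(query, array_size=500):
+        handle_row(registration)
+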
+
+class _empty_object(object):
+    __slots__ = ()
+    def __nonzero__(self):
+        return False
+    __bool__ = __nonzero__
+
+__named_cursor__ = _empty_object()
+
+
+class PostgresqlExtDatabase(PostgresqlDatabase):
+    def __init__(self, *args, **kwargs):
+        self._register_hstore = kwargs.pop('register_hstore', False)
+        self._server_side_cursors = kwargs.pop('server_side_cursors', False)
+        super(PostgresqlExtDatabase, self).__init__(*args, **kwargs)
+
+    def _connect(self):
+        conn = super(PostgresqlExtDatabase, self)._connect()
+        if self._register_hstore:
+            register_hstore(conn, globally=True)
+        return conn
+
+    def cursor(self, commit=None):
+        if self.is_closed():
+            self.connect()
+        if commit is __named_cursor__:
+            return self._state.conn.cursor(name=str(uuid.uuid1()))
+        return self._state.conn.cursor()
+
+    def execute(self, query, commit=SENTINEL, named_cursor=False,
+                array_size=None, **context_options):
+        ctx = self.get_sql_context(**context_options)
+        sql, params = ctx.sql(query).query()
+        named_cursor = named_cursor or (self._server_side_cursors and
+                                        sql[:6].lower() == 'select')
+        if named_cursor:
+            commit = __named_cursor__
+        cursor = self.execute_sql(sql, params, commit=commit)
+        if named_cursor:
+            cursor = FetchManyCursor(cursor, array_size)
+        return cursor
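+
+
+# Illustrative sketch, not part of the upstream module: the two extra keyword
+# arguments accepted by PostgresqlExtDatabase beyond PostgresqlDatabase.
+def _ext_database_example():
+    return PostgresqlExtDatabase(
+        'my_app', user='postgres',
+        register_hstore=True,       # adapt hstore values on every connection
+        server_side_cursors=True)   # run all SELECT queries via named cursors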
diff --git a/lib/python3.7/site-packages/playhouse/reflection.py b/lib/python3.7/site-packages/playhouse/reflection.py
new file mode 100644
index 0000000000000000000000000000000000000000..14ab1ba4b808f55c751e363bb04bfdc6eab71211
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/reflection.py
@@ -0,0 +1,799 @@
+try:
+    from collections import OrderedDict
+except ImportError:
+    OrderedDict = dict
+from collections import namedtuple
+from inspect import isclass
+import re
+import warnings
+
+from peewee import *
+from peewee import _StringField
+from peewee import _query_val_transform
+from peewee import CommaNodeList
+from peewee import SCOPE_VALUES
+from peewee import make_snake_case
+from peewee import text_type
+try:
+    from pymysql.constants import FIELD_TYPE
+except ImportError:
+    try:
+        from MySQLdb.constants import FIELD_TYPE
+    except ImportError:
+        FIELD_TYPE = None
+try:
+    from playhouse import postgres_ext
+except ImportError:
+    postgres_ext = None
+
+RESERVED_WORDS = set([
+    'and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif',
+    'else', 'except', 'exec', 'finally', 'for', 'from', 'global', 'if',
+    'import', 'in', 'is', 'lambda', 'not', 'or', 'pass', 'print', 'raise',
+    'return', 'try', 'while', 'with', 'yield',
+])
+
+
+class UnknownField(object):
+    pass
+
+
+class Column(object):
+    """
+    Store metadata about a database column.
+    """
+    primary_key_types = (IntegerField, AutoField)
+
+    def __init__(self, name, field_class, raw_column_type, nullable,
+                 primary_key=False, column_name=None, index=False,
+                 unique=False, default=None, extra_parameters=None):
+        self.name = name
+        self.field_class = field_class
+        self.raw_column_type = raw_column_type
+        self.nullable = nullable
+        self.primary_key = primary_key
+        self.column_name = column_name
+        self.index = index
+        self.unique = unique
+        self.default = default
+        self.extra_parameters = extra_parameters
+
+        # Foreign key metadata.
+        self.rel_model = None
+        self.related_name = None
+        self.to_field = None
+
+    def __repr__(self):
+        attrs = [
+            'field_class',
+            'raw_column_type',
+            'nullable',
+            'primary_key',
+            'column_name']
+        keyword_args = ', '.join(
+            '%s=%s' % (attr, getattr(self, attr))
+            for attr in attrs)
+        return 'Column(%s, %s)' % (self.name, keyword_args)
+
+    def get_field_parameters(self):
+        params = {}
+        if self.extra_parameters is not None:
+            params.update(self.extra_parameters)
+
+        # Set up default attributes.
+        if self.nullable:
+            params['null'] = True
+        if self.field_class is ForeignKeyField or self.name != self.column_name:
+            params['column_name'] = "'%s'" % self.column_name
+        if self.primary_key and not issubclass(self.field_class, AutoField):
+            params['primary_key'] = True
+        if self.default is not None:
+            params['constraints'] = '[SQL("DEFAULT %s")]' % self.default
+
+        # Handle ForeignKeyField-specific attributes.
+        if self.is_foreign_key():
+            params['model'] = self.rel_model
+            if self.to_field:
+                params['field'] = "'%s'" % self.to_field
+            if self.related_name:
+                params['backref'] = "'%s'" % self.related_name
+
+        # Handle indexes on column.
+        if not self.is_primary_key():
+            if self.unique:
+                params['unique'] = 'True'
+            elif self.index and not self.is_foreign_key():
+                params['index'] = 'True'
+
+        return params
+
+    def is_primary_key(self):
+        return self.field_class is AutoField or self.primary_key
+
+    def is_foreign_key(self):
+        return self.field_class is ForeignKeyField
+
+    def is_self_referential_fk(self):
+        return (self.field_class is ForeignKeyField and
+                self.rel_model == "'self'")
+
+    def set_foreign_key(self, foreign_key, model_names, dest=None,
+                        related_name=None):
+        self.foreign_key = foreign_key
+        self.field_class = ForeignKeyField
+        if foreign_key.dest_table == foreign_key.table:
+            self.rel_model = "'self'"
+        else:
+            self.rel_model = model_names[foreign_key.dest_table]
+        self.to_field = dest and dest.name or None
+        self.related_name = related_name or None
+
+    def get_field(self):
+        # Generate the field definition for this column.
+        field_params = {}
+        for key, value in self.get_field_parameters().items():
+            if isclass(value) and issubclass(value, Field):
+                value = value.__name__
+            field_params[key] = value
+
+        param_str = ', '.join('%s=%s' % (k, v)
+                              for k, v in sorted(field_params.items()))
+        field = '%s = %s(%s)' % (
+            self.name,
+            self.field_class.__name__,
+            param_str)
+
+        if self.field_class is UnknownField:
+            field = '%s  # %s' % (field, self.raw_column_type)
+
+        return field
+
+
+class Metadata(object):
+    column_map = {}
+    extension_import = ''
+
+    def __init__(self, database):
+        self.database = database
+        self.requires_extension = False
+
+    def execute(self, sql, *params):
+        return self.database.execute_sql(sql, params)
+
+    def get_columns(self, table, schema=None):
+        metadata = OrderedDict(
+            (metadata.name, metadata)
+            for metadata in self.database.get_columns(table, schema))
+
+        # Look up the actual column type for each column.
+        column_types, extra_params = self.get_column_types(table, schema)
+
+        # Look up the primary keys.
+        pk_names = self.get_primary_keys(table, schema)
+        if len(pk_names) == 1:
+            pk = pk_names[0]
+            if column_types[pk] is IntegerField:
+                column_types[pk] = AutoField
+            elif column_types[pk] is BigIntegerField:
+                column_types[pk] = BigAutoField
+
+        columns = OrderedDict()
+        for name, column_data in metadata.items():
+            field_class = column_types[name]
+            default = self._clean_default(field_class, column_data.default)
+
+            columns[name] = Column(
+                name,
+                field_class=field_class,
+                raw_column_type=column_data.data_type,
+                nullable=column_data.null,
+                primary_key=column_data.primary_key,
+                column_name=name,
+                default=default,
+                extra_parameters=extra_params.get(name))
+
+        return columns
+
+    def get_column_types(self, table, schema=None):
+        raise NotImplementedError
+
+    def _clean_default(self, field_class, default):
+        if default is None or field_class in (AutoField, BigAutoField) or \
+           default.lower() == 'null':
+            return
+        if issubclass(field_class, _StringField) and \
+           isinstance(default, text_type) and not default.startswith("'"):
+            default = "'%s'" % default
+        return default or "''"
+
+    def get_foreign_keys(self, table, schema=None):
+        return self.database.get_foreign_keys(table, schema)
+
+    def get_primary_keys(self, table, schema=None):
+        return self.database.get_primary_keys(table, schema)
+
+    def get_indexes(self, table, schema=None):
+        return self.database.get_indexes(table, schema)
+
+
+class PostgresqlMetadata(Metadata):
+    column_map = {
+        16: BooleanField,
+        17: BlobField,
+        20: BigIntegerField,
+        21: IntegerField,
+        23: IntegerField,
+        25: TextField,
+        700: FloatField,
+        701: FloatField,
+        1042: CharField, # blank-padded CHAR
+        1043: CharField,
+        1082: DateField,
+        1114: DateTimeField,
+        1184: DateTimeField,
+        1083: TimeField,
+        1266: TimeField,
+        1700: DecimalField,
+        2950: TextField, # UUID
+    }
+    array_types = {
+        1000: BooleanField,
+        1001: BlobField,
+        1005: SmallIntegerField,
+        1007: IntegerField,
+        1009: TextField,
+        1014: CharField,
+        1015: CharField,
+        1016: BigIntegerField,
+        1115: DateTimeField,
+        1182: DateField,
+        1183: TimeField,
+    }
+    extension_import = 'from playhouse.postgres_ext import *'
+
+    def __init__(self, database):
+        super(PostgresqlMetadata, self).__init__(database)
+
+        if postgres_ext is not None:
+            # Attempt to add types like HStore and JSON.
+            cursor = self.execute('select oid, typname, format_type(oid, NULL)'
+                                  ' from pg_type;')
+            results = cursor.fetchall()
+
+            for oid, typname, formatted_type in results:
+                if typname == 'json':
+                    self.column_map[oid] = postgres_ext.JSONField
+                elif typname == 'jsonb':
+                    self.column_map[oid] = postgres_ext.BinaryJSONField
+                elif typname == 'hstore':
+                    self.column_map[oid] = postgres_ext.HStoreField
+                elif typname == 'tsvector':
+                    self.column_map[oid] = postgres_ext.TSVectorField
+
+            for oid in self.array_types:
+                self.column_map[oid] = postgres_ext.ArrayField
+
+    def get_column_types(self, table, schema):
+        column_types = {}
+        extra_params = {}
+        extension_types = set((
+            postgres_ext.ArrayField,
+            postgres_ext.BinaryJSONField,
+            postgres_ext.JSONField,
+            postgres_ext.TSVectorField,
+            postgres_ext.HStoreField)) if postgres_ext is not None else set()
+
+        # Look up the actual column type for each column.
+        identifier = '"%s"."%s"' % (schema, table)
+        cursor = self.execute('SELECT * FROM %s LIMIT 1' % identifier)
+
+        # Store column metadata in dictionary keyed by column name.
+        for column_description in cursor.description:
+            name = column_description.name
+            oid = column_description.type_code
+            column_types[name] = self.column_map.get(oid, UnknownField)
+            if column_types[name] in extension_types:
+                self.requires_extension = True
+            if oid in self.array_types:
+                extra_params[name] = {'field_class': self.array_types[oid]}
+
+        return column_types, extra_params
+
+    def get_columns(self, table, schema=None):
+        schema = schema or 'public'
+        return super(PostgresqlMetadata, self).get_columns(table, schema)
+
+    def get_foreign_keys(self, table, schema=None):
+        schema = schema or 'public'
+        return super(PostgresqlMetadata, self).get_foreign_keys(table, schema)
+
+    def get_primary_keys(self, table, schema=None):
+        schema = schema or 'public'
+        return super(PostgresqlMetadata, self).get_primary_keys(table, schema)
+
+    def get_indexes(self, table, schema=None):
+        schema = schema or 'public'
+        return super(PostgresqlMetadata, self).get_indexes(table, schema)
+
+
+class MySQLMetadata(Metadata):
+    if FIELD_TYPE is None:
+        column_map = {}
+    else:
+        column_map = {
+            FIELD_TYPE.BLOB: TextField,
+            FIELD_TYPE.CHAR: CharField,
+            FIELD_TYPE.DATE: DateField,
+            FIELD_TYPE.DATETIME: DateTimeField,
+            FIELD_TYPE.DECIMAL: DecimalField,
+            FIELD_TYPE.DOUBLE: FloatField,
+            FIELD_TYPE.FLOAT: FloatField,
+            FIELD_TYPE.INT24: IntegerField,
+            FIELD_TYPE.LONG_BLOB: TextField,
+            FIELD_TYPE.LONG: IntegerField,
+            FIELD_TYPE.LONGLONG: BigIntegerField,
+            FIELD_TYPE.MEDIUM_BLOB: TextField,
+            FIELD_TYPE.NEWDECIMAL: DecimalField,
+            FIELD_TYPE.SHORT: IntegerField,
+            FIELD_TYPE.STRING: CharField,
+            FIELD_TYPE.TIMESTAMP: DateTimeField,
+            FIELD_TYPE.TIME: TimeField,
+            FIELD_TYPE.TINY_BLOB: TextField,
+            FIELD_TYPE.TINY: IntegerField,
+            FIELD_TYPE.VAR_STRING: CharField,
+        }
+
+    def __init__(self, database, **kwargs):
+        if 'password' in kwargs:
+            kwargs['passwd'] = kwargs.pop('password')
+        super(MySQLMetadata, self).__init__(database, **kwargs)
+
+    def get_column_types(self, table, schema=None):
+        column_types = {}
+
+        # Look up the actual column type for each column.
+        cursor = self.execute('SELECT * FROM `%s` LIMIT 1' % table)
+
+        # Store column metadata in dictionary keyed by column name.
+        for column_description in cursor.description:
+            name, type_code = column_description[:2]
+            column_types[name] = self.column_map.get(type_code, UnknownField)
+
+        return column_types, {}
+
+
+class SqliteMetadata(Metadata):
+    column_map = {
+        'bigint': BigIntegerField,
+        'blob': BlobField,
+        'bool': BooleanField,
+        'boolean': BooleanField,
+        'char': CharField,
+        'date': DateField,
+        'datetime': DateTimeField,
+        'decimal': DecimalField,
+        'float': FloatField,
+        'integer': IntegerField,
+        'integer unsigned': IntegerField,
+        'int': IntegerField,
+        'long': BigIntegerField,
+        'numeric': DecimalField,
+        'real': FloatField,
+        'smallinteger': IntegerField,
+        'smallint': IntegerField,
+        'smallint unsigned': IntegerField,
+        'text': TextField,
+        'time': TimeField,
+        'varchar': CharField,
+    }
+
+    begin = r'(?:["\[\(]+)?'
+    end = r'(?:["\]\)]+)?'
+    re_foreign_key = (
+        r'(?:FOREIGN KEY\s*)?'
+        r'{begin}(.+?){end}\s+(?:.+\s+)?'
+        r'references\s+{begin}(.+?){end}'
+        r'\s*\(["|\[]?(.+?)["|\]]?\)').format(begin=begin, end=end)
+    re_varchar = r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$'
+
+    def _map_col(self, column_type):
+        raw_column_type = column_type.lower()
+        if raw_column_type in self.column_map:
+            field_class = self.column_map[raw_column_type]
+        elif re.search(self.re_varchar, raw_column_type):
+            field_class = CharField
+        else:
+            column_type = re.sub(r'\(.+\)', '', raw_column_type)
+            if column_type == '':
+                field_class = BareField
+            else:
+                field_class = self.column_map.get(column_type, UnknownField)
+        return field_class
+
+    def get_column_types(self, table, schema=None):
+        column_types = {}
+        columns = self.database.get_columns(table)
+
+        for column in columns:
+            column_types[column.name] = self._map_col(column.data_type)
+
+        return column_types, {}
+
+
+_DatabaseMetadata = namedtuple('_DatabaseMetadata', (
+    'columns',
+    'primary_keys',
+    'foreign_keys',
+    'model_names',
+    'indexes'))
+
+
+class DatabaseMetadata(_DatabaseMetadata):
+    def multi_column_indexes(self, table):
+        accum = []
+        for index in self.indexes[table]:
+            if len(index.columns) > 1:
+                field_names = [self.columns[table][column].name
+                               for column in index.columns
+                               if column in self.columns[table]]
+                accum.append((field_names, index.unique))
+        return accum
+
+    def column_indexes(self, table):
+        accum = {}
+        for index in self.indexes[table]:
+            if len(index.columns) == 1:
+                accum[index.columns[0]] = index.unique
+        return accum
+
+
+class Introspector(object):
+    pk_classes = [AutoField, IntegerField]
+
+    def __init__(self, metadata, schema=None):
+        self.metadata = metadata
+        self.schema = schema
+
+    def __repr__(self):
+        return '<Introspector: %s>' % self.metadata.database
+
+    @classmethod
+    def from_database(cls, database, schema=None):
+        if isinstance(database, PostgresqlDatabase):
+            metadata = PostgresqlMetadata(database)
+        elif isinstance(database, MySQLDatabase):
+            metadata = MySQLMetadata(database)
+        elif isinstance(database, SqliteDatabase):
+            metadata = SqliteMetadata(database)
+        else:
+            raise ValueError('Introspection not supported for %r' % database)
+        return cls(metadata, schema=schema)
+
+    def get_database_class(self):
+        return type(self.metadata.database)
+
+    def get_database_name(self):
+        return self.metadata.database.database
+
+    def get_database_kwargs(self):
+        return self.metadata.database.connect_params
+
+    def get_additional_imports(self):
+        if self.metadata.requires_extension:
+            return '\n' + self.metadata.extension_import
+        return ''
+
+    def make_model_name(self, table, snake_case=True):
+        if snake_case:
+            table = make_snake_case(table)
+        model = re.sub(r'[^\w]+', '', table)
+        model_name = ''.join(sub.title() for sub in model.split('_'))
+        if not model_name[0].isalpha():
+            model_name = 'T' + model_name
+        return model_name
+
+    def make_column_name(self, column, is_foreign_key=False, snake_case=True):
+        column = column.strip()
+        if snake_case:
+            column = make_snake_case(column)
+        column = column.lower()
+        if is_foreign_key:
+            # Strip "_id" from foreign keys, unless the foreign-key happens to
+            # be named "_id", in which case the name is retained.
+            column = re.sub('_id$', '', column) or column
+
+        # Remove characters that are invalid for Python identifiers.
+        column = re.sub(r'[^\w]+', '_', column)
+        if column in RESERVED_WORDS:
+            column += '_'
+        if len(column) and column[0].isdigit():
+            column = '_' + column
+        return column
+
+    def introspect(self, table_names=None, literal_column_names=False,
+                   include_views=False, snake_case=True):
+        # Retrieve all the tables in the database.
+        tables = self.metadata.database.get_tables(schema=self.schema)
+        if include_views:
+            views = self.metadata.database.get_views(schema=self.schema)
+            tables.extend([view.name for view in views])
+
+        if table_names is not None:
+            tables = [table for table in tables if table in table_names]
+        table_set = set(tables)
+
+        # Store a mapping of table name -> dictionary of columns.
+        columns = {}
+
+        # Store a mapping of table name -> set of primary key columns.
+        primary_keys = {}
+
+        # Store a mapping of table -> foreign keys.
+        foreign_keys = {}
+
+        # Store a mapping of table name -> model name.
+        model_names = {}
+
+        # Store a mapping of table name -> indexes.
+        indexes = {}
+
+        # Gather the columns for each table.
+        for table in tables:
+            table_indexes = self.metadata.get_indexes(table, self.schema)
+            table_columns = self.metadata.get_columns(table, self.schema)
+            try:
+                foreign_keys[table] = self.metadata.get_foreign_keys(
+                    table, self.schema)
+            except ValueError as exc:
+                # `err` is referenced here upstream but never defined in this
+                # module; report the problem as a warning instead.
+                warnings.warn(' '.join(str(arg) for arg in exc.args))
+                foreign_keys[table] = []
+            else:
+                # If there is a possibility we could exclude a dependent table,
+                # ensure that we introspect it so FKs will work.
+                if table_names is not None:
+                    for foreign_key in foreign_keys[table]:
+                        if foreign_key.dest_table not in table_set:
+                            tables.append(foreign_key.dest_table)
+                            table_set.add(foreign_key.dest_table)
+
+            model_names[table] = self.make_model_name(table, snake_case)
+
+            # Collect sets of all the column names as well as all the
+            # foreign-key column names.
+            lower_col_names = set(column_name.lower()
+                                  for column_name in table_columns)
+            fks = set(fk_col.column for fk_col in foreign_keys[table])
+
+            for col_name, column in table_columns.items():
+                if literal_column_names:
+                    new_name = re.sub(r'[^\w]+', '_', col_name)
+                else:
+                    new_name = self.make_column_name(col_name, col_name in fks,
+                                                     snake_case)
+
+                # If we have two columns, "parent" and "parent_id", make sure
+                # the renamed "parent_id" does not collide with "parent".
+                lower_name = col_name.lower()
+                if lower_name.endswith('_id') and new_name in lower_col_names:
+                    new_name = col_name.lower()
+
+                column.name = new_name
+
+            for index in table_indexes:
+                if len(index.columns) == 1:
+                    column = index.columns[0]
+                    if column in table_columns:
+                        table_columns[column].unique = index.unique
+                        table_columns[column].index = True
+
+            primary_keys[table] = self.metadata.get_primary_keys(
+                table, self.schema)
+            columns[table] = table_columns
+            indexes[table] = table_indexes
+
+        # Gather all instances where we might have a `related_name` conflict,
+        # either due to multiple FKs on a table pointing to the same table,
+        # or a related_name that would conflict with an existing field.
+        related_names = {}
+        sort_fn = lambda foreign_key: foreign_key.column
+        for table in tables:
+            models_referenced = set()
+            for foreign_key in sorted(foreign_keys[table], key=sort_fn):
+                try:
+                    column = columns[table][foreign_key.column]
+                except KeyError:
+                    continue
+
+                dest_table = foreign_key.dest_table
+                if dest_table in models_referenced:
+                    related_names[column] = '%s_%s_set' % (
+                        dest_table,
+                        column.name)
+                else:
+                    models_referenced.add(dest_table)
+
+        # On the second pass convert all foreign keys.
+        for table in tables:
+            for foreign_key in foreign_keys[table]:
+                src = columns[foreign_key.table][foreign_key.column]
+                try:
+                    dest = columns[foreign_key.dest_table][
+                        foreign_key.dest_column]
+                except KeyError:
+                    dest = None
+
+                src.set_foreign_key(
+                    foreign_key=foreign_key,
+                    model_names=model_names,
+                    dest=dest,
+                    related_name=related_names.get(src))
+
+        return DatabaseMetadata(
+            columns,
+            primary_keys,
+            foreign_keys,
+            model_names,
+            indexes)
+
+    def generate_models(self, skip_invalid=False, table_names=None,
+                        literal_column_names=False, bare_fields=False,
+                        include_views=False):
+        database = self.introspect(table_names, literal_column_names,
+                                   include_views)
+        models = {}
+
+        class BaseModel(Model):
+            class Meta:
+                database = self.metadata.database
+                schema = self.schema
+
+        def _create_model(table, models):
+            for foreign_key in database.foreign_keys[table]:
+                dest = foreign_key.dest_table
+
+                if dest not in models and dest != table:
+                    _create_model(dest, models)
+
+            primary_keys = []
+            columns = database.columns[table]
+            for column_name, column in columns.items():
+                if column.primary_key:
+                    primary_keys.append(column.name)
+
+            multi_column_indexes = database.multi_column_indexes(table)
+            column_indexes = database.column_indexes(table)
+
+            class Meta:
+                indexes = multi_column_indexes
+                table_name = table
+
+            # Fix models with multi-column primary keys.
+            composite_key = False
+            if len(primary_keys) == 0:
+                primary_keys = columns.keys()
+            if len(primary_keys) > 1:
+                Meta.primary_key = CompositeKey(*[
+                    field.name for col, field in columns.items()
+                    if col in primary_keys])
+                composite_key = True
+
+            attrs = {'Meta': Meta}
+            for column_name, column in columns.items():
+                FieldClass = column.field_class
+                if FieldClass is not ForeignKeyField and bare_fields:
+                    FieldClass = BareField
+                elif FieldClass is UnknownField:
+                    FieldClass = BareField
+
+                params = {
+                    'column_name': column_name,
+                    'null': column.nullable}
+                if column.primary_key and composite_key:
+                    if FieldClass is AutoField:
+                        FieldClass = IntegerField
+                    params['primary_key'] = False
+                elif column.primary_key and FieldClass is not AutoField:
+                    params['primary_key'] = True
+                if column.is_foreign_key():
+                    if column.is_self_referential_fk():
+                        params['model'] = 'self'
+                    else:
+                        dest_table = column.foreign_key.dest_table
+                        params['model'] = models[dest_table]
+                    if column.to_field:
+                        params['field'] = column.to_field
+
+                    # Generate a unique related name.
+                    params['backref'] = '%s_%s_rel' % (table, column_name)
+
+                if column.default is not None:
+                    constraint = SQL('DEFAULT %s' % column.default)
+                    params['constraints'] = [constraint]
+
+                if column_name in column_indexes and not \
+                   column.is_primary_key():
+                    if column_indexes[column_name]:
+                        params['unique'] = True
+                    elif not column.is_foreign_key():
+                        params['index'] = True
+
+                attrs[column.name] = FieldClass(**params)
+
+            try:
+                models[table] = type(str(table), (BaseModel,), attrs)
+            except ValueError:
+                if not skip_invalid:
+                    raise
+
+        # Actually generate Model classes.
+        for table, model in sorted(database.model_names.items()):
+            if table not in models:
+                _create_model(table, models)
+
+        return models
+
+
+def introspect(database, schema=None):
+    introspector = Introspector.from_database(database, schema=schema)
+    return introspector.introspect()
+
+
+def generate_models(database, schema=None, **options):
+    introspector = Introspector.from_database(database, schema=schema)
+    return introspector.generate_models(**options)
+
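+
+# Illustrative sketch, not part of the upstream module: introspecting an
+# existing database into Model classes. Assumes the database has a table
+# named "user".
+def _reflection_example(db):
+    models = generate_models(db)
+    user_model = models['user']
+    print_model(user_model)      # field names, types and indexes
+    print_table_sql(user_model)  # the equivalent CREATE TABLE statement
+    return user_model
+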
+
+def print_model(model, indexes=True, inline_indexes=False):
+    print(model._meta.name)
+    for field in model._meta.sorted_fields:
+        parts = ['  %s %s' % (field.name, field.field_type)]
+        if field.primary_key:
+            parts.append(' PK')
+        elif inline_indexes:
+            if field.unique:
+                parts.append(' UNIQUE')
+            elif field.index:
+                parts.append(' INDEX')
+        if isinstance(field, ForeignKeyField):
+            parts.append(' FK: %s.%s' % (field.rel_model.__name__,
+                                         field.rel_field.name))
+        print(''.join(parts))
+
+    if indexes:
+        index_list = model._meta.fields_to_index()
+        if not index_list:
+            return
+
+        print('\nindex(es)')
+        for index in index_list:
+            parts = ['  ']
+            ctx = model._meta.database.get_sql_context()
+            with ctx.scope_values(param='%s', quote='""'):
+                ctx.sql(CommaNodeList(index._expressions))
+                if index._where:
+                    ctx.literal(' WHERE ')
+                    ctx.sql(index._where)
+                sql, params = ctx.query()
+
+            clean = sql % tuple(map(_query_val_transform, params))
+            parts.append(clean.replace('"', ''))
+
+            if index._unique:
+                parts.append(' UNIQUE')
+            print(''.join(parts))
+
+
+def get_table_sql(model):
+    sql, params = model._schema._create_table().query()
+    if model._meta.database.param != '%s':
+        sql = sql.replace(model._meta.database.param, '%s')
+
+    # Format and indent the table declaration, simplest possible approach.
+    match_obj = re.match(r'^(.+?\()(.+)(\).*)', sql)
+    create, columns, extra = match_obj.groups()
+    indented = ',\n'.join('  %s' % column for column in columns.split(', '))
+
+    clean = '\n'.join((create, indented, extra)).strip()
+    return clean % tuple(map(_query_val_transform, params))
+
+def print_table_sql(model):
+    print(get_table_sql(model))
diff --git a/lib/python3.7/site-packages/playhouse/shortcuts.py b/lib/python3.7/site-packages/playhouse/shortcuts.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1851b181a7cd318afd417899e9e4607c33504bd
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/shortcuts.py
@@ -0,0 +1,224 @@
+from peewee import *
+from peewee import Alias
+from peewee import SENTINEL
+from peewee import callable_
+
+
+_clone_set = lambda s: set(s) if s else set()
+
+
+def model_to_dict(model, recurse=True, backrefs=False, only=None,
+                  exclude=None, seen=None, extra_attrs=None,
+                  fields_from_query=None, max_depth=None, manytomany=False):
+    """
+    Convert a model instance (and any related objects) to a dictionary.
+
+    :param bool recurse: Whether foreign-keys should be recursed.
+    :param bool backrefs: Whether lists of related objects should be recursed.
+    :param only: A list (or set) of field instances indicating which fields
+        should be included.
+    :param exclude: A list (or set) of field instances that should be
+        excluded from the dictionary.
+    :param list extra_attrs: Names of model instance attributes or methods
+        that should be included.
+    :param SelectQuery fields_from_query: Query that was the source of the
+        model instance. Only fields explicitly selected by the query will be
+        serialized.
+    :param int max_depth: Maximum depth to recurse; a value <= 0 means no
+        maximum.
+    :param bool manytomany: Process many-to-many fields.
+    """
+    max_depth = -1 if max_depth is None else max_depth
+    if max_depth == 0:
+        recurse = False
+
+    only = _clone_set(only)
+    extra_attrs = _clone_set(extra_attrs)
+    should_skip = lambda n: (n in exclude) or (only and (n not in only))
+
+    if fields_from_query is not None:
+        for item in fields_from_query._returning:
+            if isinstance(item, Field):
+                only.add(item)
+            elif isinstance(item, Alias):
+                extra_attrs.add(item._alias)
+
+    data = {}
+    exclude = _clone_set(exclude)
+    seen = _clone_set(seen)
+    exclude |= seen
+    model_class = type(model)
+
+    if manytomany:
+        for name, m2m in model._meta.manytomany.items():
+            if should_skip(name):
+                continue
+
+            exclude.update((m2m, m2m.rel_model._meta.manytomany[m2m.backref]))
+            for fkf in m2m.through_model._meta.refs:
+                exclude.add(fkf)
+
+            accum = []
+            for rel_obj in getattr(model, name):
+                accum.append(model_to_dict(
+                    rel_obj,
+                    recurse=recurse,
+                    backrefs=backrefs,
+                    only=only,
+                    exclude=exclude,
+                    max_depth=max_depth - 1))
+            data[name] = accum
+
+    for field in model._meta.sorted_fields:
+        if should_skip(field):
+            continue
+
+        field_data = model.__data__.get(field.name)
+        if isinstance(field, ForeignKeyField) and recurse:
+            if field_data:
+                seen.add(field)
+                rel_obj = getattr(model, field.name)
+                field_data = model_to_dict(
+                    rel_obj,
+                    recurse=recurse,
+                    backrefs=backrefs,
+                    only=only,
+                    exclude=exclude,
+                    seen=seen,
+                    max_depth=max_depth - 1)
+            else:
+                field_data = None
+
+        data[field.name] = field_data
+
+    if extra_attrs:
+        for attr_name in extra_attrs:
+            attr = getattr(model, attr_name)
+            if callable_(attr):
+                data[attr_name] = attr()
+            else:
+                data[attr_name] = attr
+
+    if backrefs and recurse:
+        for foreign_key, rel_model in model._meta.backrefs.items():
+            if foreign_key.backref == '+': continue
+            descriptor = getattr(model_class, foreign_key.backref)
+            if descriptor in exclude or foreign_key in exclude:
+                continue
+            if only and (descriptor not in only) and (foreign_key not in only):
+                continue
+
+            accum = []
+            exclude.add(foreign_key)
+            related_query = getattr(model, foreign_key.backref)
+
+            for rel_obj in related_query:
+                accum.append(model_to_dict(
+                    rel_obj,
+                    recurse=recurse,
+                    backrefs=backrefs,
+                    only=only,
+                    exclude=exclude,
+                    max_depth=max_depth - 1))
+
+            data[foreign_key.backref] = accum
+
+    return data
+
+
+def update_model_from_dict(instance, data, ignore_unknown=False):
+    meta = instance._meta
+    backrefs = {fk.backref: fk for fk in meta.backrefs}
+
+    for key, value in data.items():
+        if key in meta.combined:
+            field = meta.combined[key]
+            is_backref = False
+        elif key in backrefs:
+            field = backrefs[key]
+            is_backref = True
+        elif ignore_unknown:
+            setattr(instance, key, value)
+            continue
+        else:
+            raise AttributeError('Unrecognized attribute "%s" for model '
+                                 'class %s.' % (key, type(instance)))
+
+        is_foreign_key = isinstance(field, ForeignKeyField)
+
+        if not is_backref and is_foreign_key and isinstance(value, dict):
+            try:
+                rel_instance = instance.__rel__[field.name]
+            except KeyError:
+                rel_instance = field.rel_model()
+            setattr(
+                instance,
+                field.name,
+                update_model_from_dict(rel_instance, value, ignore_unknown))
+        elif is_backref and isinstance(value, (list, tuple)):
+            instances = [
+                dict_to_model(field.model, row_data, ignore_unknown)
+                for row_data in value]
+            for rel_instance in instances:
+                setattr(rel_instance, field.name, instance)
+            setattr(instance, field.backref, instances)
+        else:
+            setattr(instance, field.name, value)
+
+    return instance
+
+
+def dict_to_model(model_class, data, ignore_unknown=False):
+    return update_model_from_dict(model_class(), data, ignore_unknown)
+
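+
+# Illustrative round-trip sketch, not part of the upstream module: `User` is
+# a hypothetical model with a `username` field.
+def _shortcuts_example(User):
+    user = User.create(username='charlie')
+    data = model_to_dict(user)         # e.g. {'id': 1, 'username': 'charlie'}
+    clone = dict_to_model(User, data)  # unsaved instance carrying the same pk
+    return data, clone
+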
+
+class ReconnectMixin(object):
+    """
+    Mixin class that attempts to automatically reconnect to the database under
+    certain error conditions.
+
+    For example, MySQL servers will typically close connections that are idle
+    for 28800 seconds ("wait_timeout" setting). If your application makes use
+    of long-lived connections, you may find your connections are closed after
+    a period of no activity. This mixin will attempt to reconnect automatically
+    when these errors occur.
+
+    This mixin class probably should not be used with Postgres (unless you
+    REALLY know what you are doing) and definitely has no business being used
+    with Sqlite. If you wish to use with Postgres, you will need to adapt the
+    `reconnect_errors` attribute to something appropriate for Postgres.
+    """
+    reconnect_errors = (
+        # Error class, error message fragment (or empty string for all).
+        (OperationalError, '2006'),  # MySQL server has gone away.
+        (OperationalError, '2013'),  # Lost connection to MySQL server.
+        (OperationalError, '2014'),  # Commands out of sync.
+    )
+
+    def __init__(self, *args, **kwargs):
+        super(ReconnectMixin, self).__init__(*args, **kwargs)
+
+        # Normalize the reconnect errors to a more efficient data-structure.
+        self._reconnect_errors = {}
+        for exc_class, err_fragment in self.reconnect_errors:
+            self._reconnect_errors.setdefault(exc_class, [])
+            self._reconnect_errors[exc_class].append(err_fragment.lower())
+
+    def execute_sql(self, sql, params=None, commit=SENTINEL):
+        try:
+            return super(ReconnectMixin, self).execute_sql(sql, params, commit)
+        except Exception as exc:
+            exc_class = type(exc)
+            if exc_class not in self._reconnect_errors:
+                raise exc
+
+            exc_repr = str(exc).lower()
+            for err_fragment in self._reconnect_errors[exc_class]:
+                if err_fragment in exc_repr:
+                    break
+            else:
+                raise exc
+
+            if not self.is_closed():
+                self.close()
+                self.connect()
+
+            return super(ReconnectMixin, self).execute_sql(sql, params, commit)
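+
+
+# Illustrative sketch, not part of the upstream module: the mixin must come
+# before the concrete database class in the MRO so that its execute_sql()
+# wrapper runs first.
+class _ReconnectMySQLDatabase(ReconnectMixin, MySQLDatabase):
+    pass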
diff --git a/lib/python3.7/site-packages/playhouse/signals.py b/lib/python3.7/site-packages/playhouse/signals.py
new file mode 100644
index 0000000000000000000000000000000000000000..f070bdfdb83548608547d2498acc5846dfc3fe1a
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/signals.py
@@ -0,0 +1,79 @@
+"""
+Provide django-style hooks for model events.
+"""
+from peewee import Model as _Model
+
+
+class Signal(object):
+    def __init__(self):
+        self._flush()
+
+    def _flush(self):
+        self._receivers = set()
+        self._receiver_list = []
+
+    def connect(self, receiver, name=None, sender=None):
+        name = name or receiver.__name__
+        key = (name, sender)
+        if key not in self._receivers:
+            self._receivers.add(key)
+            self._receiver_list.append((name, receiver, sender))
+        else:
+            raise ValueError('receiver named %s (for sender=%s) already '
+                             'connected' % (name, sender or 'any'))
+
+    def disconnect(self, receiver=None, name=None, sender=None):
+        if receiver:
+            name = name or receiver.__name__
+        if not name:
+            raise ValueError('a receiver or a name must be provided')
+
+        key = (name, sender)
+        if key not in self._receivers:
+            raise ValueError('receiver named %s for sender=%s not found.' %
+                             (name, sender or 'any'))
+
+        self._receivers.remove(key)
+        # Keep every receiver except the exact (name, sender) pair being
+        # disconnected; the original `and` test also dropped unrelated
+        # receivers sharing only the name or only the sender.
+        self._receiver_list = [(n, r, s) for n, r, s in self._receiver_list
+                               if (n, s) != (name, sender)]
+
+    def __call__(self, name=None, sender=None):
+        def decorator(fn):
+            self.connect(fn, name, sender)
+            return fn
+        return decorator
+
+    def send(self, instance, *args, **kwargs):
+        sender = type(instance)
+        responses = []
+        for n, r, s in self._receiver_list:
+            if s is None or isinstance(instance, s):
+                responses.append((r, r(sender, instance, *args, **kwargs)))
+        return responses
+
+
+pre_save = Signal()
+post_save = Signal()
+pre_delete = Signal()
+post_delete = Signal()
+pre_init = Signal()
+
+
+class Model(_Model):
+    def __init__(self, *args, **kwargs):
+        super(Model, self).__init__(*args, **kwargs)
+        pre_init.send(self)
+
+    def save(self, *args, **kwargs):
+        pk_value = self._pk
+        created = kwargs.get('force_insert', False) or not bool(pk_value)
+        pre_save.send(self, created=created)
+        ret = super(Model, self).save(*args, **kwargs)
+        post_save.send(self, created=created)
+        return ret
+
+    def delete_instance(self, *args, **kwargs):
+        pre_delete.send(self)
+        ret = super(Model, self).delete_instance(*args, **kwargs)
+        post_delete.send(self)
+        return ret
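+
+
+# Illustrative sketch, not part of the upstream module: receivers subscribe by
+# decorating a handler, optionally restricted to a single sender class.
+# `MyModel` (a subclass of the signal-aware Model above) and
+# `put_data_in_cache` are hypothetical stand-ins.
+def _signals_example(MyModel, put_data_in_cache):
+    @post_save(sender=MyModel)
+    def on_save_handler(model_class, instance, created):
+        put_data_in_cache(instance)
+    return on_save_handler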
diff --git a/lib/python3.7/site-packages/playhouse/sqlcipher_ext.py b/lib/python3.7/site-packages/playhouse/sqlcipher_ext.py
new file mode 100644
index 0000000000000000000000000000000000000000..544adc1b49f707ed1221586e51ae6a6d3dbe4534
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/sqlcipher_ext.py
@@ -0,0 +1,101 @@
+"""
+Peewee integration with pysqlcipher.
+
+Project page: https://github.com/leapcode/pysqlcipher/
+
+**WARNING!!! EXPERIMENTAL!!!**
+
+* Although this extension's code is short, it has not been properly
+  peer-reviewed yet and may have introduced vulnerabilities.
+
+Also note that this code relies on pysqlcipher and sqlcipher, and the code
+there might have vulnerabilities as well; but since these are widely used
+crypto modules, we can expect any zero-days there to be short-lived.
+
+Example usage:
+
+     from playhouse.sqlcipher_ext import SqlCipherDatabase
+     db = SqlCipherDatabase('/path/to/my.db', passphrase="don'tuseme4real")
+
+* `passphrase`: should be "long enough".
+  Note that *length beats vocabulary* (the advantage is exponential), so even
+  a lowercase-only passphrase like easytorememberyethardforotherstoguess
+  packs more entropy than 8 random printable characters and *can* be
+  memorized.
+
+When opening an existing database, passphrase should be the one used when the
+database was created. If the passphrase is incorrect, an exception will only be
+raised **when you access the database**.
+
+If you need to ask for an interactive passphrase, here's example code you can
+put after the `db = ...` line:
+
+    try:  # Just access the database so that it checks the encryption.
+        db.get_tables()
+    # We're looking for a DatabaseError with a specific error message.
+    except peewee.DatabaseError as e:
+        # Check whether the message *means* "the passphrase is wrong".
+        if e.args[0] == 'file is encrypted or is not a database':
+            raise Exception('Prompt the user for the passphrase again.')
+        else:
+            # A different DatabaseError. Raise it.
+            raise e
+
+See a more elaborate example with this code at
+https://gist.github.com/thedod/11048875
+"""
+import datetime
+import decimal
+import sys
+
+from peewee import *
+from playhouse.sqlite_ext import SqliteExtDatabase
+if sys.version_info[0] != 3:
+    from pysqlcipher import dbapi2 as sqlcipher
+else:
+    from pysqlcipher3 import dbapi2 as sqlcipher
+
+sqlcipher.register_adapter(decimal.Decimal, str)
+sqlcipher.register_adapter(datetime.date, str)
+sqlcipher.register_adapter(datetime.time, str)
+
+
+class _SqlCipherDatabase(object):
+    def _connect(self):
+        params = dict(self.connect_params)
+        passphrase = params.pop('passphrase', '').replace("'", "''")
+
+        conn = sqlcipher.connect(self.database, **params)
+        conn.isolation_level = None
+        try:
+            if passphrase:
+                conn.execute("PRAGMA key='%s'" % passphrase)
+            self._add_conn_hooks(conn)
+        except:
+            conn.close()
+            raise
+        return conn
+
+    def set_passphrase(self, passphrase):
+        if not self.is_closed():
+            raise ImproperlyConfigured('Cannot set passphrase when database '
+                                       'is open. To change passphrase of an '
+                                       'open database use the rekey() method.')
+
+        self.connect_params['passphrase'] = passphrase
+
+    def rekey(self, passphrase):
+        if self.is_closed():
+            self.connect()
+
+        self.execute_sql("PRAGMA rekey='%s'" % passphrase.replace("'", "''"))
+        self.connect_params['passphrase'] = passphrase
+        return True
+
+
+class SqlCipherDatabase(_SqlCipherDatabase, SqliteDatabase):
+    pass
+
+
+class SqlCipherExtDatabase(_SqlCipherDatabase, SqliteExtDatabase):
+    pass
diff --git a/lib/python3.7/site-packages/playhouse/sqlite_ext.py b/lib/python3.7/site-packages/playhouse/sqlite_ext.py
new file mode 100644
index 0000000000000000000000000000000000000000..c97cbd252f84e8c8aa648865eae3d9b07e82d69d
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/sqlite_ext.py
@@ -0,0 +1,1261 @@
+import json
+import math
+import re
+import struct
+import sys
+
+from peewee import *
+from peewee import ColumnBase
+from peewee import EnclosedNodeList
+from peewee import Entity
+from peewee import Expression
+from peewee import Node
+from peewee import NodeList
+from peewee import OP
+from peewee import VirtualField
+from peewee import merge_dict
+from peewee import sqlite3
+try:
+    from playhouse._sqlite_ext import (
+        backup,
+        backup_to_file,
+        Blob,
+        ConnectionHelper,
+        register_bloomfilter,
+        register_hash_functions,
+        register_rank_functions,
+        sqlite_get_db_status,
+        sqlite_get_status,
+        TableFunction,
+        ZeroBlob,
+    )
+    CYTHON_SQLITE_EXTENSIONS = True
+except ImportError:
+    CYTHON_SQLITE_EXTENSIONS = False
+
+
+if sys.version_info[0] == 3:
+    basestring = str
+
+
+FTS3_MATCHINFO = 'pcx'
+FTS4_MATCHINFO = 'pcnalx'
+if sqlite3 is not None:
+    FTS_VERSION = 4 if sqlite3.sqlite_version_info[:3] >= (3, 7, 4) else 3
+else:
+    FTS_VERSION = 3
+
+FTS5_MIN_SQLITE_VERSION = (3, 9, 0)
+
+
+class RowIDField(AutoField):
+    auto_increment = True
+    column_name = name = required_name = 'rowid'
+
+    def bind(self, model, name, *args):
+        if name != self.required_name:
+            raise ValueError('%s must be named "%s".' %
+                             (type(self), self.required_name))
+        super(RowIDField, self).bind(model, name, *args)
+
+
+class DocIDField(RowIDField):
+    column_name = name = required_name = 'docid'
+
+
+class AutoIncrementField(AutoField):
+    def ddl(self, ctx):
+        node_list = super(AutoIncrementField, self).ddl(ctx)
+        return NodeList((node_list, SQL('AUTOINCREMENT')))
+
+
+class JSONPath(ColumnBase):
+    def __init__(self, field, path=None):
+        super(JSONPath, self).__init__()
+        self._field = field
+        self._path = path or ()
+
+    @property
+    def path(self):
+        return Value('$%s' % ''.join(self._path))
+
+    def __getitem__(self, idx):
+        if isinstance(idx, int):
+            item = '[%s]' % idx
+        else:
+            item = '.%s' % idx
+        return JSONPath(self._field, self._path + (item,))
+
+    def set(self, value, as_json=None):
+        if as_json or isinstance(value, (list, dict)):
+            value = fn.json(self._field._json_dumps(value))
+        return fn.json_set(self._field, self.path, value)
+
+    def update(self, value):
+        return self.set(fn.json_patch(self, self._field._json_dumps(value)))
+
+    def remove(self):
+        return fn.json_remove(self._field, self.path)
+
+    def json_type(self):
+        return fn.json_type(self._field, self.path)
+
+    def length(self):
+        return fn.json_array_length(self._field, self.path)
+
+    def children(self):
+        return fn.json_each(self._field, self.path)
+
+    def tree(self):
+        return fn.json_tree(self._field, self.path)
+
+    def __sql__(self, ctx):
+        return ctx.sql(fn.json_extract(self._field, self.path)
+                       if self._path else self._field)
+
+
+class JSONField(TextField):
+    field_type = 'JSON'
+
+    def __init__(self, json_dumps=None, json_loads=None, **kwargs):
+        self._json_dumps = json_dumps or json.dumps
+        self._json_loads = json_loads or json.loads
+        super(JSONField, self).__init__(**kwargs)
+
+    def python_value(self, value):
+        if value is not None:
+            try:
+                return self._json_loads(value)
+            except (TypeError, ValueError):
+                return value
+
+    def db_value(self, value):
+        if value is not None:
+            if not isinstance(value, Node):
+                value = fn.json(self._json_dumps(value))
+            return value
+
+    # Factory producing the rich-comparison methods below; list and dict
+    # operands are serialized to JSON before comparison.
+    def _e(op):
+        def inner(self, rhs):
+            if isinstance(rhs, (list, dict)):
+                rhs = Value(rhs, converter=self.db_value, unpack=False)
+            return Expression(self, op, rhs)
+        return inner
+    __eq__ = _e(OP.EQ)
+    __ne__ = _e(OP.NE)
+    __gt__ = _e(OP.GT)
+    __ge__ = _e(OP.GTE)
+    __lt__ = _e(OP.LT)
+    __le__ = _e(OP.LTE)
+    __hash__ = Field.__hash__
+
+    def __getitem__(self, item):
+        return JSONPath(self)[item]
+
+    def set(self, value, as_json=None):
+        return JSONPath(self).set(value, as_json)
+
+    def update(self, data):
+        return JSONPath(self).update(data)
+
+    def remove(self):
+        return JSONPath(self).remove()
+
+    def json_type(self):
+        return fn.json_type(self)
+
+    def length(self):
+        return fn.json_array_length(self)
+
+    def children(self):
+        """
+        Rows produced by `json_each` and `json_tree` have the schema:
+
+        key,
+        value,
+        type TEXT (object, array, string, etc.),
+        atom (value for primitive/scalar types, NULL for arrays and objects),
+        id INTEGER (unique identifier for the element),
+        parent INTEGER (unique identifier of the parent element, or NULL),
+        fullkey TEXT (full path describing the element),
+        path TEXT (path to the container of the current element),
+        json JSON hidden (1st input parameter to the function),
+        root TEXT hidden (2nd input parameter; the path at which to start)
+        """
+        return fn.json_each(self)
+
+    def tree(self):
+        return fn.json_tree(self)
+
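+# Usage sketch (hedged; ``KV`` is a hypothetical model bound to a
+# SqliteExtDatabase, shown only to illustrate the JSONPath helpers above):
+#
+#     class KV(Model):
+#         data = JSONField()
+#
+#     KV.create(data={'k1': {'x': 1}})
+#     KV.select(KV.data['k1']['x'])                    # json_extract(...)
+#     KV.update(data=KV.data['k1'].set({'x': 2}))      # json_set(...)
+#     KV.update(data=KV.data['k1']['x'].remove())      # json_remove(...)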
+
+class SearchField(Field):
+    def __init__(self, unindexed=False, column_name=None, **k):
+        if k:
+            raise ValueError('SearchField does not accept these keyword '
+                             'arguments: %s.' % sorted(k))
+        super(SearchField, self).__init__(unindexed=unindexed,
+                                          column_name=column_name, null=True)
+
+    def match(self, term):
+        return match(self, term)
+
+
+class VirtualTableSchemaManager(SchemaManager):
+    def _create_virtual_table(self, safe=True, **options):
+        options = self.model.clean_options(
+            merge_dict(self.model._meta.options, options))
+
+        # Structure:
+        # CREATE VIRTUAL TABLE <model>
+        # USING <extension_module>
+        # ([prefix_arguments, ...] fields, ... [arguments, ...], [options...])
+        ctx = self._create_context()
+        ctx.literal('CREATE VIRTUAL TABLE ')
+        if safe:
+            ctx.literal('IF NOT EXISTS ')
+        (ctx
+         .sql(self.model)
+         .literal(' USING '))
+
+        ext_module = self.model._meta.extension_module
+        if isinstance(ext_module, Node):
+            return ctx.sql(ext_module)
+
+        ctx.sql(SQL(ext_module)).literal(' ')
+        arguments = []
+        meta = self.model._meta
+
+        if meta.prefix_arguments:
+            arguments.extend([SQL(a) for a in meta.prefix_arguments])
+
+        # Constraints, data-types, foreign and primary keys are all omitted.
+        for field in meta.sorted_fields:
+            if isinstance(field, RowIDField) or field._hidden:
+                continue
+            field_def = [Entity(field.column_name)]
+            if field.unindexed:
+                field_def.append(SQL('UNINDEXED'))
+            arguments.append(NodeList(field_def))
+
+        if meta.arguments:
+            arguments.extend([SQL(a) for a in meta.arguments])
+
+        if options:
+            arguments.extend(self._create_table_option_sql(options))
+        return ctx.sql(EnclosedNodeList(arguments))
+
+    def _create_table(self, safe=True, **options):
+        if issubclass(self.model, VirtualModel):
+            return self._create_virtual_table(safe, **options)
+
+        return super(VirtualTableSchemaManager, self)._create_table(
+            safe, **options)
+
+
+class VirtualModel(Model):
+    class Meta:
+        arguments = None
+        extension_module = None
+        prefix_arguments = None
+        primary_key = False
+        schema_manager_class = VirtualTableSchemaManager
+
+    @classmethod
+    def clean_options(cls, options):
+        return options
+
+
+class BaseFTSModel(VirtualModel):
+    @classmethod
+    def clean_options(cls, options):
+        content = options.get('content')
+        prefix = options.get('prefix')
+        tokenize = options.get('tokenize')
+
+        if isinstance(content, basestring) and content == '':
+            # Special-case content-less full-text search tables.
+            options['content'] = "''"
+        elif isinstance(content, Field):
+            # Special-case to ensure fields are fully-qualified.
+            options['content'] = Entity(content.model._meta.table_name,
+                                        content.column_name)
+
+        if prefix:
+            if isinstance(prefix, (list, tuple)):
+                prefix = ','.join([str(i) for i in prefix])
+            options['prefix'] = "'%s'" % prefix.strip("' ")
+
+        if tokenize and cls._meta.extension_module.lower() == 'fts5':
+            # FTS5 requires the tokenizer spec to be a quoted string; FTS3
+            # and FTS4 do not.
+            options['tokenize'] = '"%s"' % tokenize
+
+        return options
+
+
+class FTSModel(BaseFTSModel):
+    """
+    VirtualModel class for creating tables that use either the FTS3 or FTS4
+    search extensions. Peewee automatically determines which version of the
+    FTS extension is supported and will use FTS4 if possible.
+    """
+    # FTS3/4 uses "docid" in the same way a normal table uses "rowid".
+    docid = DocIDField()
+
+    class Meta:
+        extension_module = 'FTS%s' % FTS_VERSION
+
+    @classmethod
+    def _fts_cmd(cls, cmd):
+        tbl = cls._meta.table_name
+        res = cls._meta.database.execute_sql(
+            "INSERT INTO %s(%s) VALUES('%s');" % (tbl, tbl, cmd))
+        return res.fetchone()
+
+    @classmethod
+    def optimize(cls):
+        return cls._fts_cmd('optimize')
+
+    @classmethod
+    def rebuild(cls):
+        return cls._fts_cmd('rebuild')
+
+    @classmethod
+    def integrity_check(cls):
+        return cls._fts_cmd('integrity-check')
+
+    @classmethod
+    def merge(cls, blocks=200, segments=8):
+        return cls._fts_cmd('merge=%s,%s' % (blocks, segments))
+
+    @classmethod
+    def automerge(cls, state=True):
+        return cls._fts_cmd('automerge=%s' % ('1' if state else '0'))
+
+    @classmethod
+    def match(cls, term):
+        """
+        Generate a `MATCH` expression appropriate for searching this table.
+        """
+        return match(cls._meta.entity, term)
+
+    @classmethod
+    def rank(cls, *weights):
+        matchinfo = fn.matchinfo(cls._meta.entity, FTS3_MATCHINFO)
+        return fn.fts_rank(matchinfo, *weights)
+
+    @classmethod
+    def bm25(cls, *weights):
+        match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
+        return fn.fts_bm25(match_info, *weights)
+
+    @classmethod
+    def bm25f(cls, *weights):
+        match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
+        return fn.fts_bm25f(match_info, *weights)
+
+    @classmethod
+    def lucene(cls, *weights):
+        match_info = fn.matchinfo(cls._meta.entity, FTS4_MATCHINFO)
+        return fn.fts_lucene(match_info, *weights)
+
+    @classmethod
+    def _search(cls, term, weights, with_score, score_alias, score_fn,
+                explicit_ordering):
+        if not weights:
+            rank = score_fn()
+        elif isinstance(weights, dict):
+            weight_args = []
+            for field in cls._meta.sorted_fields:
+                # Attempt to get the specified weight of the field by looking
+                # it up by field instance, then by field name.
+                field_weight = weights.get(field, weights.get(field.name, 1.0))
+                weight_args.append(field_weight)
+            rank = score_fn(*weight_args)
+        else:
+            rank = score_fn(*weights)
+
+        selection = ()
+        order_by = rank
+        if with_score:
+            selection = (cls, rank.alias(score_alias))
+        if with_score and not explicit_ordering:
+            order_by = SQL(score_alias)
+
+        return (cls
+                .select(*selection)
+                .where(cls.match(term))
+                .order_by(order_by))
+
+    @classmethod
+    def search(cls, term, weights=None, with_score=False, score_alias='score',
+               explicit_ordering=False):
+        """Full-text search using selected `term`."""
+        return cls._search(
+            term,
+            weights,
+            with_score,
+            score_alias,
+            cls.rank,
+            explicit_ordering)
+
+    @classmethod
+    def search_bm25(cls, term, weights=None, with_score=False,
+                    score_alias='score', explicit_ordering=False):
+        """Full-text search for selected `term` using BM25 algorithm."""
+        return cls._search(
+            term,
+            weights,
+            with_score,
+            score_alias,
+            cls.bm25,
+            explicit_ordering)
+
+    @classmethod
+    def search_bm25f(cls, term, weights=None, with_score=False,
+                     score_alias='score', explicit_ordering=False):
+        """Full-text search for selected `term` using BM25 algorithm."""
+        return cls._search(
+            term,
+            weights,
+            with_score,
+            score_alias,
+            cls.bm25f,
+            explicit_ordering)
+
+    @classmethod
+    def search_lucene(cls, term, weights=None, with_score=False,
+                      score_alias='score', explicit_ordering=False):
+        """Full-text search for selected `term` using BM25 algorithm."""
+        return cls._search(
+            term,
+            weights,
+            with_score,
+            score_alias,
+            cls.lucene,
+            explicit_ordering)
+
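+# Usage sketch (hedged; ``NoteIndex`` and ``db`` are hypothetical):
+#
+#     class NoteIndex(FTSModel):
+#         content = SearchField()
+#
+#         class Meta:
+#             database = db
+#
+#     NoteIndex.insert({NoteIndex.content: 'a python orm'}).execute()
+#     query = NoteIndex.search_bm25('python', with_score=True)
+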
+
+_alphabet = 'abcdefghijklmnopqrstuvwxyz'
+_alphanum = (set('\t ,"(){}*:_+0123456789') |
+             set(_alphabet) |
+             set(_alphabet.upper()) |
+             set((chr(26),)))
+_invalid_ascii = set(chr(p) for p in range(128) if chr(p) not in _alphanum)
+_quote_re = re.compile(r'(?:[^\s"]|"(?:\\.|[^"])*")+')
+
+
+class FTS5Model(BaseFTSModel):
+    """
+    Requires SQLite >= 3.9.0.
+
+    Table options:
+
+    content: table name of external content, or empty string for "contentless"
+    content_rowid: column name of external content primary key
+    prefix: integer(s). Ex: '2' or '2 3 4'
+    tokenize: porter, unicode61, ascii. Ex: 'porter unicode61'
+
+    The unicode tokenizer supports the following parameters:
+
+    * remove_diacritics (1 or 0, default is 1)
+    * tokenchars (string of characters, e.g. '-_')
+    * separators (string of characters)
+
+    Parameters are passed as alternating parameter name and value, so:
+
+    {'tokenize': "unicode61 remove_diacritics 0 tokenchars '-_'"}
+
+    Content-less tables:
+
+    If you don't need the full-text content in its original form, you can
+    specify a content-less table. Searches and auxiliary functions will work
+    as usual, but the only value that can be returned when SELECT-ing is the
+    rowid. Content-less tables also do not support UPDATE or DELETE.
+
+    External content tables:
+
+    You can set up triggers to keep the index in sync with the content table:
+
+    -- Create a table. And an external content fts5 table to index it.
+    CREATE TABLE tbl(a INTEGER PRIMARY KEY, b);
+    CREATE VIRTUAL TABLE ft USING fts5(b, content='tbl', content_rowid='a');
+
+    -- Triggers to keep the FTS index up to date.
+    CREATE TRIGGER tbl_ai AFTER INSERT ON tbl BEGIN
+      INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
+    END;
+    CREATE TRIGGER tbl_ad AFTER DELETE ON tbl BEGIN
+      INSERT INTO ft(ft, rowid, b) VALUES('delete', old.a, old.b);
+    END;
+    CREATE TRIGGER tbl_au AFTER UPDATE ON tbl BEGIN
+      INSERT INTO ft(ft, rowid, b) VALUES('delete', old.a, old.b);
+      INSERT INTO ft(rowid, b) VALUES (new.a, new.b);
+    END;
+
+    Built-in auxiliary functions:
+
+    * bm25(tbl[, weight_0, ... weight_n])
+    * highlight(tbl, col_idx, prefix, suffix)
+    * snippet(tbl, col_idx, prefix, suffix, ellipsis, max_tokens)
+    """
+    # FTS5 does not support declared primary keys, but we can use the
+    # implicit rowid.
+    rowid = RowIDField()
+
+    class Meta:
+        extension_module = 'fts5'
+
+    _error_messages = {
+        'field_type': ('Besides the implicit `rowid` column, all columns must '
+                       'be instances of SearchField'),
+        'index': 'Secondary indexes are not supported for FTS5 models',
+        'pk': 'FTS5 models must use the default `rowid` primary key',
+    }
+
+    @classmethod
+    def validate_model(cls):
+        # Perform FTS5-specific validation and options post-processing.
+        if cls._meta.primary_key.name != 'rowid':
+            raise ImproperlyConfigured(cls._error_messages['pk'])
+        for field in cls._meta.fields.values():
+            if not isinstance(field, (SearchField, RowIDField)):
+                raise ImproperlyConfigured(cls._error_messages['field_type'])
+        if cls._meta.indexes:
+            raise ImproperlyConfigured(cls._error_messages['index'])
+
+    @classmethod
+    def fts5_installed(cls):
+        if sqlite3.sqlite_version_info[:3] < FTS5_MIN_SQLITE_VERSION:
+            return False
+
+        # Test in-memory DB to determine if the FTS5 extension is installed.
+        tmp_db = sqlite3.connect(':memory:')
+        try:
+            tmp_db.execute('CREATE VIRTUAL TABLE fts5test USING fts5 (data);')
+        except Exception:
+            try:
+                tmp_db.enable_load_extension(True)
+                tmp_db.load_extension('fts5')
+            except Exception:
+                return False
+            else:
+                cls._meta.database.load_extension('fts5')
+        finally:
+            tmp_db.close()
+
+        return True
+
+    @staticmethod
+    def validate_query(query):
+        """
+        Simple helper function to indicate whether a search query is a
+        valid FTS5 query. Note: this simply looks at the characters being
+        used, and is not guaranteed to catch all problematic queries.
+        """
+        tokens = _quote_re.findall(query)
+        for token in tokens:
+            if token.startswith('"') and token.endswith('"'):
+                continue
+            if set(token) & _invalid_ascii:
+                return False
+        return True
+
+    @staticmethod
+    def clean_query(query, replace=chr(26)):
+        """
+        Clean a query of invalid tokens.
+        """
+        accum = []
+        any_invalid = False
+        tokens = _quote_re.findall(query)
+        for token in tokens:
+            if token.startswith('"') and token.endswith('"'):
+                accum.append(token)
+                continue
+            token_set = set(token)
+            invalid_for_token = token_set & _invalid_ascii
+            if invalid_for_token:
+                any_invalid = True
+                for c in invalid_for_token:
+                    token = token.replace(c, replace)
+            accum.append(token)
+
+        if any_invalid:
+            return ' '.join(accum)
+        return query
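+
+    # Example (hedged): clean_query('one* @two') keeps 'one*' (the '*' is an
+    # allowed character) and rewrites '@two' by replacing the invalid '@'
+    # with chr(26); double-quoted phrases always pass through untouched.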
+
+    @classmethod
+    def match(cls, term):
+        """
+        Generate a `MATCH` expression appropriate for searching this table.
+        """
+        return match(cls._meta.entity, term)
+
+    @classmethod
+    def rank(cls, *args):
+        return cls.bm25(*args) if args else SQL('rank')
+
+    @classmethod
+    def bm25(cls, *weights):
+        return fn.bm25(cls._meta.entity, *weights)
+
+    @classmethod
+    def search(cls, term, weights=None, with_score=False, score_alias='score',
+               explicit_ordering=False):
+        """Full-text search using selected `term`."""
+        return cls.search_bm25(
+            FTS5Model.clean_query(term),
+            weights,
+            with_score,
+            score_alias,
+            explicit_ordering)
+
+    @classmethod
+    def search_bm25(cls, term, weights=None, with_score=False,
+                    score_alias='score', explicit_ordering=False):
+        """Full-text search using selected `term`."""
+        if not weights:
+            rank = SQL('rank')
+        elif isinstance(weights, dict):
+            weight_args = []
+            for field in cls._meta.sorted_fields:
+                if isinstance(field, SearchField) and not field.unindexed:
+                    weight_args.append(
+                        weights.get(field, weights.get(field.name, 1.0)))
+            rank = fn.bm25(cls._meta.entity, *weight_args)
+        else:
+            rank = fn.bm25(cls._meta.entity, *weights)
+
+        selection = ()
+        order_by = rank
+        if with_score:
+            selection = (cls, rank.alias(score_alias))
+        if with_score and not explicit_ordering:
+            order_by = SQL(score_alias)
+
+        return (cls
+                .select(*selection)
+                .where(cls.match(FTS5Model.clean_query(term)))
+                .order_by(order_by))
+
+    @classmethod
+    def _fts_cmd_sql(cls, cmd, **extra_params):
+        tbl = cls._meta.entity
+        columns = [tbl]
+        values = [cmd]
+        for key, value in extra_params.items():
+            columns.append(Entity(key))
+            values.append(value)
+
+        return NodeList((
+            SQL('INSERT INTO'),
+            cls._meta.entity,
+            EnclosedNodeList(columns),
+            SQL('VALUES'),
+            EnclosedNodeList(values)))
+
+    @classmethod
+    def _fts_cmd(cls, cmd, **extra_params):
+        query = cls._fts_cmd_sql(cmd, **extra_params)
+        return cls._meta.database.execute(query)
+
+    @classmethod
+    def automerge(cls, level):
+        if not (0 <= level <= 16):
+            raise ValueError('level must be between 0 and 16')
+        return cls._fts_cmd('automerge', rank=level)
+
+    @classmethod
+    def merge(cls, npages):
+        return cls._fts_cmd('merge', rank=npages)
+
+    @classmethod
+    def set_pgsz(cls, pgsz):
+        return cls._fts_cmd('pgsz', rank=pgsz)
+
+    @classmethod
+    def set_rank(cls, rank_expression):
+        return cls._fts_cmd('rank', rank=rank_expression)
+
+    @classmethod
+    def delete_all(cls):
+        return cls._fts_cmd('delete-all')
+
+    @classmethod
+    def VocabModel(cls, table_type='row', table=None):
+        if table_type not in ('row', 'col', 'instance'):
+            raise ValueError('table_type must be either "row", "col" or '
+                             '"instance".')
+
+        attr = '_vocab_model_%s' % table_type
+
+        if not hasattr(cls, attr):
+            class Meta:
+                database = cls._meta.database
+                table_name = table or cls._meta.table_name + '_v'
+                extension_module = fn.fts5vocab(
+                    cls._meta.entity,
+                    SQL(table_type))
+
+            attrs = {
+                'term': VirtualField(TextField),
+                'doc': IntegerField(),
+                'cnt': IntegerField(),
+                'rowid': RowIDField(),
+                'Meta': Meta,
+            }
+            if table_type == 'col':
+                attrs['col'] = VirtualField(TextField)
+            elif table_type == 'instance':
+                attrs['offset'] = VirtualField(IntegerField)
+
+            class_name = '%sVocab' % cls.__name__
+            setattr(cls, attr, type(class_name, (VirtualModel,), attrs))
+
+        return getattr(cls, attr)
+
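+# Usage sketch (hedged; ``DocIndex`` and ``db`` are hypothetical):
+#
+#     class DocIndex(FTS5Model):
+#         title = SearchField()
+#         body = SearchField()
+#
+#         class Meta:
+#             database = db
+#             options = {'tokenize': 'porter'}
+#
+#     query = DocIndex.search('sqlite AND fts5', with_score=True)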
+
+def ClosureTable(model_class, foreign_key=None, referencing_class=None,
+                 referencing_key=None):
+    """Model factory for the transitive closure extension."""
+    if referencing_class is None:
+        referencing_class = model_class
+
+    if foreign_key is None:
+        for field_obj in model_class._meta.refs:
+            if field_obj.rel_model is model_class:
+                foreign_key = field_obj
+                break
+        else:
+            raise ValueError('Unable to find self-referential foreign key.')
+
+    source_key = model_class._meta.primary_key
+    if referencing_key is None:
+        referencing_key = source_key
+
+    class BaseClosureTable(VirtualModel):
+        depth = VirtualField(IntegerField)
+        id = VirtualField(IntegerField)
+        idcolumn = VirtualField(TextField)
+        parentcolumn = VirtualField(TextField)
+        root = VirtualField(IntegerField)
+        tablename = VirtualField(TextField)
+
+        class Meta:
+            extension_module = 'transitive_closure'
+
+        @classmethod
+        def descendants(cls, node, depth=None, include_node=False):
+            query = (model_class
+                     .select(model_class, cls.depth.alias('depth'))
+                     .join(cls, on=(source_key == cls.id))
+                     .where(cls.root == node)
+                     .objects())
+            if depth is not None:
+                query = query.where(cls.depth == depth)
+            elif not include_node:
+                query = query.where(cls.depth > 0)
+            return query
+
+        @classmethod
+        def ancestors(cls, node, depth=None, include_node=False):
+            query = (model_class
+                     .select(model_class, cls.depth.alias('depth'))
+                     .join(cls, on=(source_key == cls.root))
+                     .where(cls.id == node)
+                     .objects())
+            if depth:
+                query = query.where(cls.depth == depth)
+            elif not include_node:
+                query = query.where(cls.depth > 0)
+            return query
+
+        @classmethod
+        def siblings(cls, node, include_node=False):
+            if referencing_class is model_class:
+                # self-join
+                fk_value = node.__data__.get(foreign_key.name)
+                query = model_class.select().where(foreign_key == fk_value)
+            else:
+                # Siblings as given in referencing_class.
+                siblings = (referencing_class
+                            .select(referencing_key)
+                            .join(cls, on=(foreign_key == cls.root))
+                            .where((cls.id == node) & (cls.depth == 1)))
+
+                # Select the corresponding model instances.
+                query = (model_class
+                         .select()
+                         .where(source_key << siblings)
+                         .objects())
+
+            if not include_node:
+                query = query.where(source_key != node)
+
+            return query
+
+    class Meta:
+        database = referencing_class._meta.database
+        options = {
+            'tablename': referencing_class._meta.table_name,
+            'idcolumn': referencing_key.column_name,
+            'parentcolumn': foreign_key.column_name}
+        primary_key = False
+
+    name = '%sClosure' % model_class.__name__
+    return type(name, (BaseClosureTable,), {'Meta': Meta})
+
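+# Usage sketch (hedged; ``Category`` and ``db`` are hypothetical, and the
+# transitive_closure extension must be compiled and loadable):
+#
+#     class Category(Model):
+#         name = TextField()
+#         parent = ForeignKeyField('self', null=True, backref='children')
+#
+#     CategoryClosure = ClosureTable(Category)
+#     db.load_extension('closure')  # Path to the loadable extension.
+#     db.create_tables([Category, CategoryClosure])
+#     descendants = CategoryClosure.descendants(node, include_node=False)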
+
+class LSMTable(VirtualModel):
+    class Meta:
+        extension_module = 'lsm1'
+        filename = None
+
+    @classmethod
+    def clean_options(cls, options):
+        filename = cls._meta.filename
+        if not filename:
+            raise ValueError('LSM1 extension requires that you specify a '
+                             'filename for the LSM database.')
+        if len(filename) >= 2 and filename[0] != '"':
+            filename = '"%s"' % filename
+        if not cls._meta.primary_key:
+            raise ValueError('LSM1 models must specify a primary-key field.')
+
+        key = cls._meta.primary_key
+        if isinstance(key, AutoField):
+            raise ValueError('LSM1 models must explicitly declare a primary '
+                             'key field.')
+        if not isinstance(key, (TextField, BlobField, IntegerField)):
+            raise ValueError('LSM1 key must be a TextField, BlobField, or '
+                             'IntegerField.')
+        key._hidden = True
+        if isinstance(key, IntegerField):
+            data_type = 'UINT'
+        elif isinstance(key, BlobField):
+            data_type = 'BLOB'
+        else:
+            data_type = 'TEXT'
+        cls._meta.prefix_arguments = [filename, '"%s"' % key.name, data_type]
+
+        # Does the key map to a scalar value, or a tuple of values?
+        if len(cls._meta.sorted_fields) == 2:
+            cls._meta._value_field = cls._meta.sorted_fields[1]
+        else:
+            cls._meta._value_field = None
+
+        return options
+
+    @classmethod
+    def load_extension(cls, path='lsm.so'):
+        cls._meta.database.load_extension(path)
+
+    @staticmethod
+    def slice_to_expr(key, idx):
+        if idx.start is not None and idx.stop is not None:
+            return key.between(idx.start, idx.stop)
+        elif idx.start is not None:
+            return key >= idx.start
+        elif idx.stop is not None:
+            return key <= idx.stop
+
+    @staticmethod
+    def _apply_lookup_to_query(query, key, lookup):
+        if isinstance(lookup, slice):
+            expr = LSMTable.slice_to_expr(key, lookup)
+            if expr is not None:
+                query = query.where(expr)
+            return query, False
+        elif isinstance(lookup, Expression):
+            return query.where(lookup), False
+        else:
+            return query.where(key == lookup), True
+
+    @classmethod
+    def get_by_id(cls, pk):
+        query, is_single = cls._apply_lookup_to_query(
+            cls.select().namedtuples(),
+            cls._meta.primary_key,
+            pk)
+
+        if is_single:
+            try:
+                row = query.get()
+            except cls.DoesNotExist:
+                raise KeyError(pk)
+            return row[1] if cls._meta._value_field is not None else row
+        else:
+            return query
+
+    @classmethod
+    def set_by_id(cls, key, value):
+        if cls._meta._value_field is not None:
+            data = {cls._meta._value_field: value}
+        elif isinstance(value, tuple):
+            data = {}
+            for field, fval in zip(cls._meta.sorted_fields[1:], value):
+                data[field] = fval
+        elif isinstance(value, dict):
+            data = value
+        elif isinstance(value, cls):
+            data = value.__dict__
+        else:
+            raise ValueError('Unsupported value type: %r.' % type(value))
+        data[cls._meta.primary_key] = key
+        cls.replace(data).execute()
+
+    @classmethod
+    def delete_by_id(cls, pk):
+        query, is_single = cls._apply_lookup_to_query(
+            cls.delete(),
+            cls._meta.primary_key,
+            pk)
+        return query.execute()
+
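+# Usage sketch (hedged; ``KeyData`` and ``db`` are hypothetical, and the
+# lsm1 extension must be available as a loadable module):
+#
+#     class KeyData(LSMTable):
+#         key = TextField(primary_key=True)
+#         value = TextField()
+#
+#         class Meta:
+#             database = db
+#             filename = 'data.lsm'
+#
+#     KeyData.load_extension('lsm.so')
+#     KeyData.create_table()
+#     KeyData.set_by_id('k1', 'v1')
+#     KeyData.get_by_id('k1')           # -> 'v1'
+#     KeyData.delete_by_id('k1')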
+
+OP.MATCH = 'MATCH'
+
+def _sqlite_regexp(regex, value):
+    return re.search(regex, value) is not None
+
+
+class SqliteExtDatabase(SqliteDatabase):
+    def __init__(self, database, c_extensions=None, rank_functions=True,
+                 hash_functions=False, regexp_function=False,
+                 bloomfilter=False, json_contains=False, *args, **kwargs):
+        super(SqliteExtDatabase, self).__init__(database, *args, **kwargs)
+        self._row_factory = None
+
+        if c_extensions and not CYTHON_SQLITE_EXTENSIONS:
+            raise ImproperlyConfigured('SqliteExtDatabase initialized with '
+                                       'C extensions, but shared library was '
+                                       'not found!')
+        prefer_c = CYTHON_SQLITE_EXTENSIONS and (c_extensions is not False)
+        if rank_functions:
+            if prefer_c:
+                register_rank_functions(self)
+            else:
+                self.register_function(bm25, 'fts_bm25')
+                self.register_function(rank, 'fts_rank')
+                self.register_function(bm25, 'fts_bm25f')  # Fall back to bm25.
+                self.register_function(bm25, 'fts_lucene')
+        if hash_functions:
+            if not prefer_c:
+                raise ValueError('C extension required to register hash '
+                                 'functions.')
+            register_hash_functions(self)
+        if regexp_function:
+            self.register_function(_sqlite_regexp, 'regexp', 2)
+        if bloomfilter:
+            if not prefer_c:
+                raise ValueError('C extension required to use bloomfilter.')
+            register_bloomfilter(self)
+        if json_contains:
+            self.register_function(_json_contains, 'json_contains')
+
+        self._c_extensions = prefer_c
+
+    def _add_conn_hooks(self, conn):
+        super(SqliteExtDatabase, self)._add_conn_hooks(conn)
+        if self._row_factory:
+            conn.row_factory = self._row_factory
+
+    def row_factory(self, fn):
+        self._row_factory = fn
+        return fn  # Allow decorator-style registration.
+
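+# Construction sketch (hedged): enable the pure-Python rank functions plus a
+# REGEXP implementation; 'app.db' is an example path.
+#
+#     db = SqliteExtDatabase('app.db', rank_functions=True,
+#                            regexp_function=True)
+#
+#     @db.row_factory
+#     def dict_factory(cursor, row):
+#         return {d[0]: v for d, v in zip(cursor.description, row)}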
+
+if CYTHON_SQLITE_EXTENSIONS:
+    SQLITE_STATUS_MEMORY_USED = 0
+    SQLITE_STATUS_PAGECACHE_USED = 1
+    SQLITE_STATUS_PAGECACHE_OVERFLOW = 2
+    SQLITE_STATUS_SCRATCH_USED = 3
+    SQLITE_STATUS_SCRATCH_OVERFLOW = 4
+    SQLITE_STATUS_MALLOC_SIZE = 5
+    SQLITE_STATUS_PARSER_STACK = 6
+    SQLITE_STATUS_PAGECACHE_SIZE = 7
+    SQLITE_STATUS_SCRATCH_SIZE = 8
+    SQLITE_STATUS_MALLOC_COUNT = 9
+    SQLITE_DBSTATUS_LOOKASIDE_USED = 0
+    SQLITE_DBSTATUS_CACHE_USED = 1
+    SQLITE_DBSTATUS_SCHEMA_USED = 2
+    SQLITE_DBSTATUS_STMT_USED = 3
+    SQLITE_DBSTATUS_LOOKASIDE_HIT = 4
+    SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE = 5
+    SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL = 6
+    SQLITE_DBSTATUS_CACHE_HIT = 7
+    SQLITE_DBSTATUS_CACHE_MISS = 8
+    SQLITE_DBSTATUS_CACHE_WRITE = 9
+    SQLITE_DBSTATUS_DEFERRED_FKS = 10
+    #SQLITE_DBSTATUS_CACHE_USED_SHARED = 11
+
+    def __status__(flag, return_highwater=False):
+        """
+        Expose a sqlite3_status() call for a particular flag as a property of
+        the Database object.
+        """
+        def getter(self):
+            result = sqlite_get_status(flag)
+            return result[1] if return_highwater else result
+        return property(getter)
+
+    def __dbstatus__(flag, return_highwater=False, return_current=False):
+        """
+        Expose a sqlite3_dbstatus() call for a particular flag as a property of
+        the Database instance. Unlike sqlite3_status(), the dbstatus properties
+        pertain to the current connection.
+        """
+        def getter(self):
+            if self._state.conn is None:
+                raise ImproperlyConfigured('database connection not opened.')
+            result = sqlite_get_db_status(self._state.conn, flag)
+            if return_current:
+                return result[0]
+            return result[1] if return_highwater else result
+        return property(getter)
+
+    class CSqliteExtDatabase(SqliteExtDatabase):
+        def __init__(self, *args, **kwargs):
+            self._conn_helper = None
+            self._commit_hook = self._rollback_hook = self._update_hook = None
+            self._replace_busy_handler = False
+            super(CSqliteExtDatabase, self).__init__(*args, **kwargs)
+
+        def init(self, database, replace_busy_handler=False, **kwargs):
+            super(CSqliteExtDatabase, self).init(database, **kwargs)
+            self._replace_busy_handler = replace_busy_handler
+
+        def _close(self, conn):
+            if self._commit_hook:
+                self._conn_helper.set_commit_hook(None)
+            if self._rollback_hook:
+                self._conn_helper.set_rollback_hook(None)
+            if self._update_hook:
+                self._conn_helper.set_update_hook(None)
+            return super(CSqliteExtDatabase, self)._close(conn)
+
+        def _add_conn_hooks(self, conn):
+            super(CSqliteExtDatabase, self)._add_conn_hooks(conn)
+            self._conn_helper = ConnectionHelper(conn)
+            if self._commit_hook is not None:
+                self._conn_helper.set_commit_hook(self._commit_hook)
+            if self._rollback_hook is not None:
+                self._conn_helper.set_rollback_hook(self._rollback_hook)
+            if self._update_hook is not None:
+                self._conn_helper.set_update_hook(self._update_hook)
+            if self._replace_busy_handler:
+                timeout = self._timeout or 5
+                self._conn_helper.set_busy_handler(timeout * 1000)
+
+        def on_commit(self, fn):
+            self._commit_hook = fn
+            if not self.is_closed():
+                self._conn_helper.set_commit_hook(fn)
+            return fn
+
+        def on_rollback(self, fn):
+            self._rollback_hook = fn
+            if not self.is_closed():
+                self._conn_helper.set_rollback_hook(fn)
+            return fn
+
+        def on_update(self, fn):
+            self._update_hook = fn
+            if not self.is_closed():
+                self._conn_helper.set_update_hook(fn)
+            return fn
+
+        def changes(self):
+            return self._conn_helper.changes()
+
+        @property
+        def last_insert_rowid(self):
+            return self._conn_helper.last_insert_rowid()
+
+        @property
+        def autocommit(self):
+            return self._conn_helper.autocommit()
+
+        def backup(self, destination, pages=None, name=None, progress=None):
+            return backup(self.connection(), destination.connection(),
+                          pages=pages, name=name, progress=progress)
+
+        def backup_to_file(self, filename, pages=None, name=None,
+                           progress=None):
+            return backup_to_file(self.connection(), filename, pages=pages,
+                                  name=name, progress=progress)
+
+        def blob_open(self, table, column, rowid, read_only=False):
+            return Blob(self, table, column, rowid, read_only)
+
+        # Status properties.
+        memory_used = __status__(SQLITE_STATUS_MEMORY_USED)
+        malloc_size = __status__(SQLITE_STATUS_MALLOC_SIZE, True)
+        malloc_count = __status__(SQLITE_STATUS_MALLOC_COUNT)
+        pagecache_used = __status__(SQLITE_STATUS_PAGECACHE_USED)
+        pagecache_overflow = __status__(SQLITE_STATUS_PAGECACHE_OVERFLOW)
+        pagecache_size = __status__(SQLITE_STATUS_PAGECACHE_SIZE, True)
+        scratch_used = __status__(SQLITE_STATUS_SCRATCH_USED)
+        scratch_overflow = __status__(SQLITE_STATUS_SCRATCH_OVERFLOW)
+        scratch_size = __status__(SQLITE_STATUS_SCRATCH_SIZE, True)
+
+        # Connection status properties.
+        lookaside_used = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_USED)
+        lookaside_hit = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_HIT, True)
+        lookaside_miss = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE,
+                                      True)
+        lookaside_miss_full = __dbstatus__(SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL,
+                                           True)
+        cache_used = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED, False, True)
+        #cache_used_shared = __dbstatus__(SQLITE_DBSTATUS_CACHE_USED_SHARED,
+        #                                 False, True)
+        schema_used = __dbstatus__(SQLITE_DBSTATUS_SCHEMA_USED, False, True)
+        statement_used = __dbstatus__(SQLITE_DBSTATUS_STMT_USED, False, True)
+        cache_hit = __dbstatus__(SQLITE_DBSTATUS_CACHE_HIT, False, True)
+        cache_miss = __dbstatus__(SQLITE_DBSTATUS_CACHE_MISS, False, True)
+        cache_write = __dbstatus__(SQLITE_DBSTATUS_CACHE_WRITE, False, True)
+
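+    # Hook usage sketch (hedged; ``db`` is a CSqliteExtDatabase instance):
+    #
+    #     @db.on_update
+    #     def handle_update(query_type, db_name, table, rowid):
+    #         print(query_type, db_name, table, rowid)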
+
+def match(lhs, rhs):
+    return Expression(lhs, OP.MATCH, rhs)
+
+def _parse_match_info(buf):
+    # See http://sqlite.org/fts3.html#matchinfo
+    bufsize = len(buf)  # Length in bytes.
+    return [struct.unpack('@I', buf[i:i+4])[0] for i in range(0, bufsize, 4)]
+
+def get_weights(ncol, raw_weights):
+    if not raw_weights:
+        return [1] * ncol
+    else:
+        weights = [0] * ncol
+        for i, weight in enumerate(raw_weights):
+            weights[i] = weight
+    return weights
+
+# Ranking implementation, which parses matchinfo.
+def rank(raw_match_info, *raw_weights):
+    # Handle matchinfo called with the default args 'pcx' - based on the
+    # example rank function at http://sqlite.org/fts3.html#appendix_a
+    match_info = _parse_match_info(raw_match_info)
+    score = 0.0
+
+    p, c = match_info[:2]
+    weights = get_weights(c, raw_weights)
+
+    # matchinfo X value corresponds to, for each phrase in the search query, a
+    # list of 3 values for each column in the search table.
+    # So if we have a two-phrase search query and three columns of data, the
+    # following would be the layout:
+    # p0 : c0=[0, 1, 2],   c1=[3, 4, 5],    c2=[6, 7, 8]
+    # p1 : c0=[9, 10, 11], c1=[12, 13, 14], c2=[15, 16, 17]
+    for phrase_num in range(p):
+        phrase_info_idx = 2 + (phrase_num * c * 3)
+        for col_num in range(c):
+            weight = weights[col_num]
+            if not weight:
+                continue
+
+            col_idx = phrase_info_idx + (col_num * 3)
+
+            # The idea is that we count the number of times the phrase appears
+            # in this column of the current row, compared to how many times it
+            # appears in this column across all rows. The ratio of these values
+            # provides a rough way to score based on "high value" terms.
+            row_hits = match_info[col_idx]
+            all_rows_hits = match_info[col_idx + 1]
+            if row_hits > 0:
+                score += weight * (float(row_hits) / all_rows_hits)
+
+    return -score
+
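+# Worked example (hedged): for a one-phrase query over one column, if the
+# phrase occurs twice in the current row's column and ten times in that
+# column across all rows, the score contribution is 1.0 * (2.0 / 10) = 0.2,
+# so rank() returns -0.2 (negated so ascending ORDER BY puts best first).
+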
+# Okapi BM25 ranking implementation (FTS4 only).
+def bm25(raw_match_info, *args):
+    """
+    Usage:
+
+        # Format string *must* be pcnalx
+        # Second parameter to bm25 specifies the index of the column on
+        # the table being queried.
+        bm25(matchinfo(document_tbl, 'pcnalx'), 1) AS rank
+    """
+    match_info = _parse_match_info(raw_match_info)
+    K = 1.2
+    B = 0.75
+    score = 0.0
+
+    P_O, C_O, N_O, A_O = range(4)  # Offsets into the matchinfo buffer.
+    term_count = match_info[P_O]  # n
+    col_count = match_info[C_O]
+    total_docs = match_info[N_O]  # N
+    L_O = A_O + col_count
+    X_O = L_O + col_count
+
+    weights = get_weights(col_count, args)
+
+    for i in range(term_count):
+        for j in range(col_count):
+            weight = weights[j]
+            if weight == 0:
+                continue
+
+            x = X_O + (3 * (j + i * col_count))
+            term_frequency = float(match_info[x])  # f(qi, D)
+            docs_with_term = float(match_info[x + 2])  # n(qi)
+
+            # log( (N - n(qi) + 0.5) / (n(qi) + 0.5) )
+            idf = math.log(
+                    (total_docs - docs_with_term + 0.5) /
+                    (docs_with_term + 0.5))
+            if idf <= 0.0:
+                idf = 1e-6
+
+            doc_length = float(match_info[L_O + j])  # |D|
+            avg_length = float(match_info[A_O + j]) or 1.  # avgdl
+            ratio = doc_length / avg_length
+
+            num = term_frequency * (K + 1)
+            b_part = 1 - B + (B * ratio)
+            denom = term_frequency + (K * b_part)
+
+            pc_score = idf * (num / denom)
+            score += (pc_score * weight)
+
+    return -score
+
+
+def _json_contains(src_json, obj_json):
+    stack = []
+    try:
+        stack.append((json.loads(obj_json), json.loads(src_json)))
+    except (TypeError, ValueError):
+        # Invalid JSON!
+        return False
+
+    while stack:
+        obj, src = stack.pop()
+        if isinstance(src, dict):
+            if isinstance(obj, dict):
+                for key in obj:
+                    if key not in src:
+                        return False
+                    stack.append((obj[key], src[key]))
+            elif isinstance(obj, list):
+                for item in obj:
+                    if item not in src:
+                        return False
+            elif obj not in src:
+                return False
+        elif isinstance(src, list):
+            if isinstance(obj, dict):
+                return False
+            elif isinstance(obj, list):
+                try:
+                    for i in range(len(obj)):
+                        stack.append((obj[i], src[i]))
+                except IndexError:
+                    return False
+            elif obj not in src:
+                return False
+        elif obj != src:
+            return False
+    return True
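+
+# Behavior sketch (hedged): once registered (json_contains=True above), the
+# function is callable from SQL, e.g.
+#
+#     SELECT json_contains(data, '{"k1": {"x": 1}}') FROM kv;
+#
+# Dict needles match when each key is present with a matching value; list
+# needles match lists positionally; scalars match by membership/equality.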
diff --git a/lib/python3.7/site-packages/playhouse/sqlite_udf.py b/lib/python3.7/site-packages/playhouse/sqlite_udf.py
new file mode 100644
index 0000000000000000000000000000000000000000..28dbd8560ee17eb865d4b02385388f7bd2659740
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/sqlite_udf.py
@@ -0,0 +1,522 @@
+import datetime
+import hashlib
+import heapq
+import math
+import os
+import random
+import re
+import sys
+import threading
+import zlib
+try:
+    from collections import Counter
+except ImportError:
+    Counter = None
+try:
+    from urlparse import urlparse
+except ImportError:
+    from urllib.parse import urlparse
+
+try:
+    from playhouse._sqlite_ext import TableFunction
+except ImportError:
+    TableFunction = None
+
+
+SQLITE_DATETIME_FORMATS = (
+    '%Y-%m-%d %H:%M:%S',
+    '%Y-%m-%d %H:%M:%S.%f',
+    '%Y-%m-%d',
+    '%H:%M:%S',
+    '%H:%M:%S.%f',
+    '%H:%M')
+
+from peewee import format_date_time
+
+def format_date_time_sqlite(date_value):
+    return format_date_time(date_value, SQLITE_DATETIME_FORMATS)
+
+try:
+    from playhouse import _sqlite_udf as cython_udf
+except ImportError:
+    cython_udf = None
+
+
+# Group UDFs by functional category.
+CONTROL_FLOW = 'control_flow'
+DATE = 'date'
+FILE = 'file'
+HELPER = 'helpers'
+MATH = 'math'
+STRING = 'string'
+
+AGGREGATE_COLLECTION = {}
+TABLE_FUNCTION_COLLECTION = {}
+UDF_COLLECTION = {}
+
+
+class synchronized_dict(dict):
+    def __init__(self, *args, **kwargs):
+        super(synchronized_dict, self).__init__(*args, **kwargs)
+        self._lock = threading.Lock()
+
+    def __getitem__(self, key):
+        with self._lock:
+            return super(synchronized_dict, self).__getitem__(key)
+
+    def __setitem__(self, key, value):
+        with self._lock:
+            return super(synchronized_dict, self).__setitem__(key, value)
+
+    def __delitem__(self, key):
+        with self._lock:
+            return super(synchronized_dict, self).__delitem__(key)
+
+
+STATE = synchronized_dict()
+SETTINGS = synchronized_dict()
+
+# Class and function decorators.
+def aggregate(*groups):
+    def decorator(klass):
+        for group in groups:
+            AGGREGATE_COLLECTION.setdefault(group, [])
+            AGGREGATE_COLLECTION[group].append(klass)
+        return klass
+    return decorator
+
+def table_function(*groups):
+    def decorator(klass):
+        for group in groups:
+            TABLE_FUNCTION_COLLECTION.setdefault(group, [])
+            TABLE_FUNCTION_COLLECTION[group].append(klass)
+        return klass
+    return decorator
+
+def udf(*groups):
+    def decorator(fn):
+        for group in groups:
+            UDF_COLLECTION.setdefault(group, [])
+            UDF_COLLECTION[group].append(fn)
+        return fn
+    return decorator
+
+# Register aggregates / functions with connection.
+def register_aggregate_groups(db, *groups):
+    seen = set()
+    for group in groups:
+        klasses = AGGREGATE_COLLECTION.get(group, ())
+        for klass in klasses:
+            name = getattr(klass, 'name', klass.__name__)
+            if name not in seen:
+                seen.add(name)
+                db.register_aggregate(klass, name)
+
+def register_table_function_groups(db, *groups):
+    seen = set()
+    for group in groups:
+        klasses = TABLE_FUNCTION_COLLECTION.get(group, ())
+        for klass in klasses:
+            if klass.name not in seen:
+                seen.add(klass.name)
+                db.register_table_function(klass)
+
+def register_udf_groups(db, *groups):
+    seen = set()
+    for group in groups:
+        functions = UDF_COLLECTION.get(group, ())
+        for function in functions:
+            name = function.__name__
+            if name not in seen:
+                seen.add(name)
+                db.register_function(function, name)
+
+def register_groups(db, *groups):
+    register_aggregate_groups(db, *groups)
+    register_table_function_groups(db, *groups)
+    register_udf_groups(db, *groups)
+
+def register_all(db):
+    register_aggregate_groups(db, *AGGREGATE_COLLECTION)
+    register_table_function_groups(db, *TABLE_FUNCTION_COLLECTION)
+    register_udf_groups(db, *UDF_COLLECTION)
+
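+# Registration sketch (hedged): wire a group of UDFs onto a database.
+#
+#     from playhouse.sqlite_ext import SqliteExtDatabase
+#     db = SqliteExtDatabase(':memory:')
+#     register_groups(db, DATE, MATH)   # Or: register_all(db)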
+
+# Begin actual user-defined functions and aggregates.
+
+# Scalar functions.
+@udf(CONTROL_FLOW)
+def if_then_else(cond, truthy, falsey=None):
+    if cond:
+        return truthy
+    return falsey
+
+@udf(DATE)
+def strip_tz(date_str):
+    date_str = date_str.replace('T', ' ')
+    tz_idx1 = date_str.find('+')
+    if tz_idx1 != -1:
+        return date_str[:tz_idx1]
+    tz_idx2 = date_str.find('-')
+    if tz_idx2 > 13:
+        return date_str[:tz_idx2]
+    return date_str
+
+@udf(DATE)
+def human_delta(nseconds, glue=', '):
+    parts = (
+        (86400 * 365, 'year'),
+        (86400 * 30, 'month'),
+        (86400 * 7, 'week'),
+        (86400, 'day'),
+        (3600, 'hour'),
+        (60, 'minute'),
+        (1, 'second'),
+    )
+    accum = []
+    for offset, name in parts:
+        val, nseconds = divmod(nseconds, offset)
+        if val:
+            suffix = 's' if val != 1 else ''
+            accum.append('%s %s%s' % (val, name, suffix))
+    if not accum:
+        return '0 seconds'
+    return glue.join(accum)
+
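+# Example (hedged): human_delta(90061) -> '1 day, 1 hour, 1 minute, 1 second'
+# (90061 = 86400 + 3600 + 60 + 1).
+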
+@udf(FILE)
+def file_ext(filename):
+    try:
+        res = os.path.splitext(filename)
+    except ValueError:
+        return None
+    return res[1]
+
+@udf(FILE)
+def file_read(filename):
+    try:
+        with open(filename) as fh:
+            return fh.read()
+    except (IOError, OSError):
+        pass
+
+if sys.version_info[0] == 2:
+    @udf(HELPER)
+    def gzip(data, compression=9):
+        return buffer(zlib.compress(data, compression))
+
+    @udf(HELPER)
+    def gunzip(data):
+        return zlib.decompress(data)
+else:
+    @udf(HELPER)
+    def gzip(data, compression=9):
+        if isinstance(data, str):
+            data = bytes(data.encode('raw_unicode_escape'))
+        return zlib.compress(data, compression)
+
+    @udf(HELPER)
+    def gunzip(data):
+        return zlib.decompress(data)
+
+@udf(HELPER)
+def hostname(url):
+    parse_result = urlparse(url)
+    if parse_result:
+        return parse_result.netloc
+
+@udf(HELPER)
+def toggle(key):
+    key = key.lower()
+    STATE[key] = ret = not STATE.get(key)
+    return ret
+
+@udf(HELPER)
+def setting(key, value=None):
+    if value is None:
+        return SETTINGS.get(key)
+    else:
+        SETTINGS[key] = value
+        return value
+
+@udf(HELPER)
+def clear_settings():
+    SETTINGS.clear()
+
+@udf(HELPER)
+def clear_toggles():
+    STATE.clear()
+
+@udf(MATH)
+def randomrange(start, end=None, step=None):
+    if end is None:
+        start, end = 0, start
+    elif step is None:
+        step = 1
+    return random.randrange(start, end, step)
+
+@udf(MATH)
+def gauss_distribution(mean, sigma):
+    try:
+        return random.gauss(mean, sigma)
+    except ValueError:
+        return None
+
+@udf(MATH)
+def sqrt(n):
+    try:
+        return math.sqrt(n)
+    except ValueError:
+        return None
+
+@udf(MATH)
+def tonumber(s):
+    try:
+        return int(s)
+    except ValueError:
+        try:
+            return float(s)
+        except (TypeError, ValueError):
+            return None
+
+@udf(STRING)
+def substr_count(haystack, needle):
+    if not haystack or not needle:
+        return 0
+    return haystack.count(needle)
+
+@udf(STRING)
+def strip_chars(haystack, chars):
+    return haystack.strip(chars)
+
+def _hash(constructor, *args):
+    hash_obj = constructor()
+    for arg in args:
+        hash_obj.update(arg)
+    return hash_obj.hexdigest()
+
+# Aggregates.
+class _heap_agg(object):
+    def __init__(self):
+        self.heap = []
+        self.ct = 0
+
+    def process(self, value):
+        return value
+
+    def step(self, value):
+        self.ct += 1
+        heapq.heappush(self.heap, self.process(value))
+
+class _datetime_heap_agg(_heap_agg):
+    def process(self, value):
+        return format_date_time_sqlite(value)
+
+if sys.version_info[:2] == (2, 6):
+    def total_seconds(td):
+        return (td.seconds +
+                (td.days * 86400) +
+                (td.microseconds / (10.**6)))
+else:
+    total_seconds = lambda td: td.total_seconds()
+
+@aggregate(DATE)
+class mintdiff(_datetime_heap_agg):
+    def finalize(self):
+        dtp = min_diff = None
+        while self.heap:
+            if min_diff is None:
+                if dtp is None:
+                    dtp = heapq.heappop(self.heap)
+                    continue
+            dt = heapq.heappop(self.heap)
+            diff = dt - dtp
+            if min_diff is None or min_diff > diff:
+                min_diff = diff
+            dtp = dt
+        if min_diff is not None:
+            return total_seconds(min_diff)
+
+@aggregate(DATE)
+class avgtdiff(_datetime_heap_agg):
+    def finalize(self):
+        if self.ct < 1:
+            return
+        elif self.ct == 1:
+            return 0
+
+        total = ct = 0
+        dtp = None
+        while self.heap:
+            if total == 0:
+                if dtp is None:
+                    dtp = heapq.heappop(self.heap)
+                    continue
+
+            dt = heapq.heappop(self.heap)
+            diff = dt - dtp
+            ct += 1
+            total += total_seconds(diff)
+            dtp = dt
+
+        return float(total) / ct
+
+@aggregate(DATE)
+class duration(object):
+    def __init__(self):
+        self._min = self._max = None
+
+    def step(self, value):
+        dt = format_date_time_sqlite(value)
+        if self._min is None or dt < self._min:
+            self._min = dt
+        if self._max is None or dt > self._max:
+            self._max = dt
+
+    def finalize(self):
+        if self._min and self._max:
+            td = (self._max - self._min)
+            return total_seconds(td)
+        return None
+
+@aggregate(MATH)
+class mode(object):
+    if Counter:
+        def __init__(self):
+            self.items = Counter()
+
+        def step(self, *args):
+            self.items.update(args)
+
+        def finalize(self):
+            if self.items:
+                return self.items.most_common(1)[0][0]
+    else:
+        def __init__(self):
+            self.items = []
+
+        def step(self, item):
+            self.items.append(item)
+
+        def finalize(self):
+            if self.items:
+                return max(set(self.items), key=self.items.count)
+
+@aggregate(MATH)
+class minrange(_heap_agg):
+    def finalize(self):
+        if self.ct == 0:
+            return
+        elif self.ct == 1:
+            return 0
+
+        prev = min_diff = None
+
+        while self.heap:
+            if min_diff is None:
+                if prev is None:
+                    prev = heapq.heappop(self.heap)
+                    continue
+            curr = heapq.heappop(self.heap)
+            diff = curr - prev
+            if min_diff is None or min_diff > diff:
+                min_diff = diff
+            prev = curr
+        return min_diff
+
+@aggregate(MATH)
+class avgrange(_heap_agg):
+    def finalize(self):
+        if self.ct == 0:
+            return
+        elif self.ct == 1:
+            return 0
+
+        total = ct = 0
+        prev = None
+        while self.heap:
+            if total == 0:
+                if prev is None:
+                    prev = heapq.heappop(self.heap)
+                    continue
+
+            curr = heapq.heappop(self.heap)
+            diff = curr - prev
+            ct += 1
+            total += diff
+            prev = curr
+
+        return float(total) / ct
+
+@aggregate(MATH)
+class _range(object):
+    name = 'range'
+
+    def __init__(self):
+        self._min = self._max = None
+
+    def step(self, value):
+        if self._min is None or value < self._min:
+            self._min = value
+        if self._max is None or value > self._max:
+            self._max = value
+
+    def finalize(self):
+        if self._min is not None and self._max is not None:
+            return self._max - self._min
+        return None
+
+
+if cython_udf is not None:
+    damerau_levenshtein_dist = udf(STRING)(cython_udf.damerau_levenshtein_dist)
+    levenshtein_dist = udf(STRING)(cython_udf.levenshtein_dist)
+    str_dist = udf(STRING)(cython_udf.str_dist)
+    median = aggregate(MATH)(cython_udf.median)
+
+
+if TableFunction is not None:
+    @table_function(STRING)
+    class RegexSearch(TableFunction):
+        params = ['regex', 'search_string']
+        columns = ['match']
+        name = 'regex_search'
+
+        def initialize(self, regex=None, search_string=None):
+            self._iter = re.finditer(regex, search_string)
+
+        def iterate(self, idx):
+            return (next(self._iter).group(0),)
+
+    @table_function(DATE)
+    class DateSeries(TableFunction):
+        params = ['start', 'stop', 'step_seconds']
+        columns = ['date']
+        name = 'date_series'
+
+        def initialize(self, start, stop, step_seconds=86400):
+            self.start = format_date_time_sqlite(start)
+            self.stop = format_date_time_sqlite(stop)
+            step_seconds = int(step_seconds)
+            self.step_seconds = datetime.timedelta(seconds=step_seconds)
+
+            if (self.start.hour == 0 and
+                self.start.minute == 0 and
+                self.start.second == 0 and
+                step_seconds >= 86400):
+                self.format = '%Y-%m-%d'
+            elif (self.start.year == 1900 and
+                  self.start.month == 1 and
+                  self.start.day == 1 and
+                  self.stop.year == 1900 and
+                  self.stop.month == 1 and
+                  self.stop.day == 1 and
+                  step_seconds < 86400):
+                self.format = '%H:%M:%S'
+            else:
+                self.format = '%Y-%m-%d %H:%M:%S'
+
+        def iterate(self, idx):
+            if self.start > self.stop:
+                raise StopIteration
+            current = self.start
+            self.start += self.step_seconds
+            return (current.strftime(self.format),)
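+
+# Usage sketch (hedged; assumes both table functions are registered on a
+# connection, e.g. via register_table_function_groups(db, STRING, DATE)):
+#
+#     SELECT date FROM date_series('2024-01-01', '2024-01-03');
+#     -- 2024-01-01, 2024-01-02, 2024-01-03
+#
+#     SELECT match FROM regex_search('[0-9]+', 'a1 b22 c333');
+#     -- 1, 22, 333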
diff --git a/lib/python3.7/site-packages/playhouse/sqliteq.py b/lib/python3.7/site-packages/playhouse/sqliteq.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd213549d5e0777fc099c723d9d91cd457a595fd
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/sqliteq.py
@@ -0,0 +1,330 @@
+import logging
+import weakref
+from threading import local as thread_local
+from threading import Event
+from threading import Thread
+try:
+    from Queue import Queue
+except ImportError:
+    from queue import Queue
+
+try:
+    import gevent
+    from gevent import Greenlet as GThread
+    from gevent.event import Event as GEvent
+    from gevent.local import local as greenlet_local
+    from gevent.queue import Queue as GQueue
+except ImportError:
+    GThread = GQueue = GEvent = None
+
+from peewee import SENTINEL
+from playhouse.sqlite_ext import SqliteExtDatabase
+
+
+logger = logging.getLogger('peewee.sqliteq')
+
+
+class ResultTimeout(Exception):
+    pass
+
+class WriterPaused(Exception):
+    pass
+
+class ShutdownException(Exception):
+    pass
+
+
+class AsyncCursor(object):
+    __slots__ = ('sql', 'params', 'commit', 'timeout',
+                 '_event', '_cursor', '_exc', '_idx', '_rows', '_ready')
+
+    def __init__(self, event, sql, params, commit, timeout):
+        self._event = event
+        self.sql = sql
+        self.params = params
+        self.commit = commit
+        self.timeout = timeout
+        self._cursor = self._exc = self._idx = self._rows = None
+        self._ready = False
+
+    def set_result(self, cursor, exc=None):
+        self._cursor = cursor
+        self._exc = exc
+        self._idx = 0
+        self._rows = cursor.fetchall() if exc is None else []
+        self._event.set()
+        return self
+
+    def _wait(self, timeout=None):
+        timeout = timeout if timeout is not None else self.timeout
+        if not self._event.wait(timeout=timeout) and timeout:
+            raise ResultTimeout('results not ready, timed out.')
+        if self._exc is not None:
+            raise self._exc
+        self._ready = True
+
+    def __iter__(self):
+        if not self._ready:
+            self._wait()
+        if self._exc is not None:
+            raise self._exc
+        return self
+
+    def next(self):
+        if not self._ready:
+            self._wait()
+        try:
+            obj = self._rows[self._idx]
+        except IndexError:
+            raise StopIteration
+        else:
+            self._idx += 1
+            return obj
+    __next__ = next
+
+    @property
+    def lastrowid(self):
+        if not self._ready:
+            self._wait()
+        return self._cursor.lastrowid
+
+    @property
+    def rowcount(self):
+        if not self._ready:
+            self._wait()
+        return self._cursor.rowcount
+
+    @property
+    def description(self):
+        return self._cursor.description
+
+    def close(self):
+        self._cursor.close()
+
+    def fetchall(self):
+        return list(self)  # Iterating implies waiting until populated.
+
+    def fetchone(self):
+        if not self._ready:
+            self._wait()
+        try:
+            return next(self)
+        except StopIteration:
+            return None
+
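+# Sentinel objects passed through the write queue to control the writer thread.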
+SHUTDOWN = StopIteration
+PAUSE = object()
+UNPAUSE = object()
+
+
+class Writer(object):
+    __slots__ = ('database', 'queue')
+
+    def __init__(self, database, queue):
+        self.database = database
+        self.queue = queue
+
+    def run(self):
+        conn = self.database.connection()
+        try:
+            while True:
+                try:
+                    if conn is None:  # Paused.
+                        if self.wait_unpause():
+                            conn = self.database.connection()
+                    else:
+                        conn = self.loop(conn)
+                except ShutdownException:
+                    logger.info('writer received shutdown request, exiting.')
+                    return
+        finally:
+            if conn is not None:
+                self.database._close(conn)
+                self.database._state.reset()
+
+    def wait_unpause(self):
+        obj = self.queue.get()
+        if obj is UNPAUSE:
+            logger.info('writer unpaused - reconnecting to database.')
+            return True
+        elif obj is SHUTDOWN:
+            raise ShutdownException()
+        elif obj is PAUSE:
+            logger.error('writer received pause, but is already paused.')
+        else:
+            obj.set_result(None, WriterPaused())
+            logger.warning('writer paused, not handling %s', obj)
+
+    def loop(self, conn):
+        obj = self.queue.get()
+        if isinstance(obj, AsyncCursor):
+            self.execute(obj)
+        elif obj is PAUSE:
+            logger.info('writer paused - closing database connection.')
+            self.database._close(conn)
+            self.database._state.reset()
+            return
+        elif obj is UNPAUSE:
+            logger.error('writer received unpause, but is already running.')
+        elif obj is SHUTDOWN:
+            raise ShutdownException()
+        else:
+            logger.error('writer received unsupported object: %s', obj)
+        return conn
+
+    def execute(self, obj):
+        logger.debug('received query %s', obj.sql)
+        try:
+            cursor = self.database._execute(obj.sql, obj.params, obj.commit)
+        except Exception as execute_err:
+            cursor = None
+            exc = execute_err  # Python 3 clears the except-block variable on exit, so rebind it.
+        else:
+            exc = None
+        return obj.set_result(cursor, exc)
+
+
+class SqliteQueueDatabase(SqliteExtDatabase):
+    WAL_MODE_ERROR_MESSAGE = ('SQLite must be configured to use the WAL '
+                              'journal mode when using this feature. WAL mode '
+                              'allows one or more readers to continue reading '
+                              'while another connection writes to the '
+                              'database.')
+
+    def __init__(self, database, use_gevent=False, autostart=True,
+                 queue_max_size=None, results_timeout=None, *args, **kwargs):
+        kwargs['check_same_thread'] = False
+
+        # Ensure that journal_mode is WAL. This value is passed to the parent
+        # class constructor below.
+        pragmas = self._validate_journal_mode(kwargs.pop('pragmas', None))
+
+        # Reference to execute_sql on the parent class. Since we've overridden
+        # execute_sql(), this is just a handy way to reference the real
+        # implementation.
+        Parent = super(SqliteQueueDatabase, self)
+        self._execute = Parent.execute_sql
+
+        # Call the parent class constructor with our modified pragmas.
+        Parent.__init__(database, pragmas=pragmas, *args, **kwargs)
+
+        self._autostart = autostart
+        self._results_timeout = results_timeout
+        self._is_stopped = True
+        self._lock = Lock()  # Guards writer start/stop/pause transitions.
+
+        # Get different objects depending on the threading implementation.
+        self._thread_helper = self.get_thread_impl(use_gevent)(queue_max_size)
+
+        # Create the writer thread, optionally starting it.
+        self._create_write_queue()
+        if self._autostart:
+            self.start()
+
+    def get_thread_impl(self, use_gevent):
+        return GreenletHelper if use_gevent else ThreadHelper
+
+    def _validate_journal_mode(self, pragmas=None):
+        if pragmas:
+            pdict = dict((k.lower(), v) for (k, v) in pragmas)
+            if pdict.get('journal_mode', 'wal').lower() != 'wal':
+                raise ValueError(self.WAL_MODE_ERROR_MESSAGE)
+
+            return [(k, v) for (k, v) in pragmas
+                    if k != 'journal_mode'] + [('journal_mode', 'wal')]
+        else:
+            return [('journal_mode', 'wal')]
+
+    def _create_write_queue(self):
+        self._write_queue = self._thread_helper.queue()
+
+    def queue_size(self):
+        return self._write_queue.qsize()
+
+    def execute_sql(self, sql, params=None, commit=SENTINEL, timeout=None):
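+        # Reads execute synchronously on the calling thread; writes are
+        # enqueued for the writer thread and return an AsyncCursor that
+        # blocks until the writer populates it.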
+        if commit is SENTINEL:
+            commit = not sql.lower().startswith('select')
+
+        if not commit:
+            return self._execute(sql, params, commit=commit)
+
+        cursor = AsyncCursor(
+            event=self._thread_helper.event(),
+            sql=sql,
+            params=params,
+            commit=commit,
+            timeout=self._results_timeout if timeout is None else timeout)
+        self._write_queue.put(cursor)
+        return cursor
+
+    def start(self):
+        with self._lock:
+            if not self._is_stopped:
+                return False
+            def run():
+                writer = Writer(self, self._write_queue)
+                writer.run()
+
+            self._writer = self._thread_helper.thread(run)
+            self._writer.start()
+            self._is_stopped = False
+            return True
+
+    def stop(self):
+        logger.debug('environment stop requested.')
+        with self._lock:
+            if self._is_stopped:
+                return False
+            self._write_queue.put(SHUTDOWN)
+            self._writer.join()
+            self._is_stopped = True
+            return True
+
+    def is_stopped(self):
+        with self._lock:
+            return self._is_stopped
+
+    def pause(self):
+        with self._lock:
+            self._write_queue.put(PAUSE)
+
+    def unpause(self):
+        with self._lock:
+            self._write_queue.put(UNPAUSE)
+
+    def __unsupported__(self, *args, **kwargs):
+        raise ValueError('This method is not supported by %r.' % type(self))
+    atomic = transaction = savepoint = __unsupported__
+
+
+class ThreadHelper(object):
+    __slots__ = ('queue_max_size',)
+
+    def __init__(self, queue_max_size=None):
+        self.queue_max_size = queue_max_size
+
+    def event(self): return Event()
+
+    def queue(self, max_size=None):
+        max_size = max_size if max_size is not None else self.queue_max_size
+        return Queue(maxsize=max_size or 0)
+
+    def thread(self, fn, *args, **kwargs):
+        thread = Thread(target=fn, args=args, kwargs=kwargs)
+        thread.daemon = True
+        return thread
+
+
+class GreenletHelper(ThreadHelper):
+    __slots__ = ()
+
+    def event(self): return GEvent()
+
+    def queue(self, max_size=None):
+        max_size = max_size if max_size is not None else self.queue_max_size
+        return GQueue(maxsize=max_size or 0)
+
+    def thread(self, fn, *args, **kwargs):
+        def wrap(*a, **k):
+            gevent.sleep()
+            return fn(*a, **k)
+        return GThread(wrap, *args, **kwargs)
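A minimal usage sketch for SqliteQueueDatabase as defined above; the filename
'app.db' and the User model are illustrative assumptions, not taken from this
diff. Writes are serialized through the single writer thread, while reads run
on the calling thread:

    from peewee import Model, TextField
    from playhouse.sqliteq import SqliteQueueDatabase

    db = SqliteQueueDatabase('app.db', autostart=True, results_timeout=5.0)

    class User(Model):
        username = TextField(unique=True)

        class Meta:
            database = db

    db.create_tables([User])       # DDL counts as a write and is enqueued.
    User.create(username='alice')  # INSERT blocks until the writer commits.
    print(User.select().count())   # SELECT executes synchronously.
    db.stop()                      # Drain the queue and join the writer.

Note that atomic(), transaction() and savepoint() raise ValueError on this
database class, as the code above shows; all writes go through the queue.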
diff --git a/lib/python3.7/site-packages/playhouse/test_utils.py b/lib/python3.7/site-packages/playhouse/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..333dc078b73155af1668a6d4da7093ba3454c434
--- /dev/null
+++ b/lib/python3.7/site-packages/playhouse/test_utils.py
@@ -0,0 +1,62 @@
+from functools import wraps
+import logging
+
+
+logger = logging.getLogger('peewee')
+
+
+class _QueryLogHandler(logging.Handler):
+    def __init__(self, *args, **kwargs):
+        self.queries = []
+        logging.Handler.__init__(self, *args, **kwargs)
+
+    def emit(self, record):
+        self.queries.append(record)
+
+
+class count_queries(object):
+    def __init__(self, only_select=False):
+        self.only_select = only_select
+        self.count = 0
+
+    def get_queries(self):
+        return self._handler.queries
+
+    def __enter__(self):
+        self._handler = _QueryLogHandler()
+        logger.setLevel(logging.DEBUG)
+        logger.addHandler(self._handler)
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        logger.removeHandler(self._handler)
+        if self.only_select:
+            self.count = len([q for q in self._handler.queries
+                              if q.msg[0].startswith('SELECT ')])
+        else:
+            self.count = len(self._handler.queries)
+
+
+class assert_query_count(count_queries):
+    def __init__(self, expected, only_select=False):
+        super(assert_query_count, self).__init__(only_select=only_select)
+        self.expected = expected
+
+    def __call__(self, f):
+        @wraps(f)
+        def decorated(*args, **kwds):
+            with self:
+                ret = f(*args, **kwds)
+
+            self._assert_count()
+            return ret
+
+        return decorated
+
+    def _assert_count(self):
+        error_msg = '%s != %s' % (self.count, self.expected)
+        assert self.count == self.expected, error_msg
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        super(assert_query_count, self).__exit__(exc_type, exc_val, exc_tb)
+        self._assert_count()
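For context, the two helpers above are typically used as follows; the User
model is a stand-in for any peewee model bound to a database:

    from playhouse.test_utils import assert_query_count, count_queries

    with count_queries() as counter:
        users = list(User.select())
    print(counter.count)  # Number of queries logged inside the block.

    @assert_query_count(1)
    def fetch_users():
        # Raises AssertionError unless the body issues exactly one query.
        return list(User.select())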
diff --git a/lib/python3.7/site-packages/pwiz.py b/lib/python3.7/site-packages/pwiz.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd50279fc27078b07f8a313ede8b24fca1bad049
--- /dev/null
+++ b/lib/python3.7/site-packages/pwiz.py
@@ -0,0 +1,221 @@
+#!/usr/bin/env python
+
+import datetime
+import sys
+from getpass import getpass
+from optparse import OptionParser
+
+from peewee import *
+from peewee import print_
+from peewee import __version__ as peewee_version
+from playhouse.reflection import *
+
+
+HEADER = """from peewee import *%s
+
+database = %s('%s'%s)
+"""
+
+BASE_MODEL = """\
+class BaseModel(Model):
+    class Meta:
+        database = database
+"""
+
+UNKNOWN_FIELD = """\
+class UnknownField(object):
+    def __init__(self, *_, **__): pass
+"""
+
+DATABASE_ALIASES = {
+    MySQLDatabase: ['mysql', 'mysqldb'],
+    PostgresqlDatabase: ['postgres', 'postgresql'],
+    SqliteDatabase: ['sqlite', 'sqlite3'],
+}
+
+DATABASE_MAP = dict((value, key)
+                    for key in DATABASE_ALIASES
+                    for value in DATABASE_ALIASES[key])
+
+def make_introspector(database_type, database_name, **kwargs):
+    if database_type not in DATABASE_MAP:
+        err('Unrecognized database, must be one of: %s' %
+            ', '.join(DATABASE_MAP.keys()))
+        sys.exit(1)
+
+    schema = kwargs.pop('schema', None)
+    DatabaseClass = DATABASE_MAP[database_type]
+    db = DatabaseClass(database_name, **kwargs)
+    return Introspector.from_database(db, schema=schema)
+
+def print_models(introspector, tables=None, preserve_order=False,
+                 include_views=False, ignore_unknown=False, snake_case=True):
+    database = introspector.introspect(table_names=tables,
+                                       include_views=include_views,
+                                       snake_case=snake_case)
+
+    db_kwargs = introspector.get_database_kwargs()
+    header = HEADER % (
+        introspector.get_additional_imports(),
+        introspector.get_database_class().__name__,
+        introspector.get_database_name(),
+        ', **%s' % repr(db_kwargs) if db_kwargs else '')
+    print_(header)
+
+    if not ignore_unknown:
+        print_(UNKNOWN_FIELD)
+
+    print_(BASE_MODEL)
+
+    def _print_table(table, seen, accum=None):
+        accum = accum or []
+        foreign_keys = database.foreign_keys[table]
+        for foreign_key in foreign_keys:
+            dest = foreign_key.dest_table
+
+            # In the event the destination table has already been pushed
+            # for printing, then we have a reference cycle.
+            if dest in accum and table not in accum:
+                print_('# Possible reference cycle: %s' % dest)
+
+            # If this is not a self-referential foreign key, and we have
+            # not already processed the destination table, do so now.
+            if dest not in seen and dest not in accum:
+                seen.add(dest)
+                if dest != table:
+                    _print_table(dest, seen, accum + [table])
+
+        print_('class %s(BaseModel):' % database.model_names[table])
+        columns = database.columns[table].items()
+        if not preserve_order:
+            columns = sorted(columns)
+        primary_keys = database.primary_keys[table]
+        for name, column in columns:
+            skip = all([
+                name in primary_keys,
+                name == 'id',
+                len(primary_keys) == 1,
+                column.field_class in introspector.pk_classes])
+            if skip:
+                continue
+            if column.primary_key and len(primary_keys) > 1:
+                # If we have a CompositeKey, then we do not want to explicitly
+                # mark the columns as being primary keys.
+                column.primary_key = False
+
+            is_unknown = column.field_class is UnknownField
+            if is_unknown and ignore_unknown:
+                disp = '%s - %s' % (column.name, column.raw_column_type or '?')
+                print_('    # %s' % disp)
+            else:
+                print_('    %s' % column.get_field())
+
+        print_('')
+        print_('    class Meta:')
+        print_('        table_name = \'%s\'' % table)
+        multi_column_indexes = database.multi_column_indexes(table)
+        if multi_column_indexes:
+            print_('        indexes = (')
+            for fields, unique in sorted(multi_column_indexes):
+                print_('            ((%s), %s),' % (
+                    ', '.join("'%s'" % field for field in fields),
+                    unique,
+                ))
+            print_('        )')
+
+        if introspector.schema:
+            print_('        schema = \'%s\'' % introspector.schema)
+        if len(primary_keys) > 1:
+            pk_field_names = sorted([
+                field.name for col, field in columns
+                if col in primary_keys])
+            pk_list = ', '.join("'%s'" % pk for pk in pk_field_names)
+            print_('        primary_key = CompositeKey(%s)' % pk_list)
+        elif not primary_keys:
+            print_('        primary_key = False')
+        print_('')
+
+        seen.add(table)
+
+    seen = set()
+    for table in sorted(database.model_names.keys()):
+        if table not in seen:
+            if not tables or table in tables:
+                _print_table(table, seen)
+
+def print_header(cmd_line, introspector):
+    timestamp = datetime.datetime.now()
+    print_('# Code generated by:')
+    print_('# python -m pwiz %s' % cmd_line)
+    print_('# Date: %s' % timestamp.strftime('%B %d, %Y %I:%M%p'))
+    print_('# Database: %s' % introspector.get_database_name())
+    print_('# Peewee version: %s' % peewee_version)
+    print_('')
+
+
+def err(msg):
+    sys.stderr.write('\033[91m%s\033[0m\n' % msg)
+    sys.stderr.flush()
+
+def get_option_parser():
+    parser = OptionParser(usage='usage: %prog [options] database_name')
+    ao = parser.add_option
+    ao('-H', '--host', dest='host')
+    ao('-p', '--port', dest='port', type='int')
+    ao('-u', '--user', dest='user')
+    ao('-P', '--password', dest='password', action='store_true')
+    engines = sorted(DATABASE_MAP)
+    ao('-e', '--engine', dest='engine', default='postgresql', choices=engines,
+       help=('Database type, e.g. sqlite, mysql or postgresql. Default '
+             'is "postgresql".'))
+    ao('-s', '--schema', dest='schema')
+    ao('-t', '--tables', dest='tables',
+       help=('Only generate the specified tables. Multiple table names should '
+             'be separated by commas.'))
+    ao('-v', '--views', dest='views', action='store_true',
+       help='Generate model classes for VIEWs in addition to tables.')
+    ao('-i', '--info', dest='info', action='store_true',
+       help=('Add database information and other metadata to top of the '
+             'generated file.'))
+    ao('-o', '--preserve-order', action='store_true', dest='preserve_order',
+       help='Model definition column ordering matches source table.')
+    ao('-I', '--ignore-unknown', action='store_true', dest='ignore_unknown',
+       help='Ignore fields whose type cannot be determined.')
+    ao('-L', '--legacy-naming', action='store_true', dest='legacy_naming',
+       help='Use legacy table- and column-name generation.')
+    return parser
+
+def get_connect_kwargs(options):
+    ops = ('host', 'port', 'user', 'schema')
+    kwargs = dict((o, getattr(options, o)) for o in ops if getattr(options, o))
+    if options.password:
+        kwargs['password'] = getpass()
+    return kwargs
+
+
+if __name__ == '__main__':
+    raw_argv = sys.argv
+
+    parser = get_option_parser()
+    options, args = parser.parse_args()
+
+    if len(args) < 1:
+        err('Missing required parameter "database"')
+        parser.print_help()
+        sys.exit(1)
+
+    connect = get_connect_kwargs(options)
+    database = args[-1]
+
+    tables = None
+    if options.tables:
+        tables = [table.strip() for table in options.tables.split(',')
+                  if table.strip()]
+
+    introspector = make_introspector(options.engine, database, **connect)
+    if options.info:
+        cmd_line = ' '.join(raw_argv[1:])
+        print_header(cmd_line, introspector)
+
+    print_models(introspector, tables, options.preserve_order, options.views,
+                 options.ignore_unknown, not options.legacy_naming)
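pwiz is normally driven from the command line (for example,
python -m pwiz -e sqlite app.db > models.py), but the functions defined above
can also be called directly; 'app.db' is an illustrative filename:

    from pwiz import make_introspector, print_models

    introspector = make_introspector('sqlite', 'app.db')
    print_models(introspector)  # Prints generated model classes to stdout.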
diff --git a/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/INSTALLER b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/LICENSE b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..6e0693b4b010a8c42e34f89ab8fa1e18b5bae1cc
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/LICENSE
@@ -0,0 +1,19 @@
+Copyright (C) 2016 Jason R Coombs <jaraco@jaraco.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/METADATA b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..571a5cb2abb4c06a7ae2656724f45af12737478c
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/METADATA
@@ -0,0 +1,77 @@
+Metadata-Version: 2.1
+Name: setuptools
+Version: 41.0.1
+Summary: Easily download, build, install, upgrade, and uninstall Python packages
+Home-page: https://github.com/pypa/setuptools
+Author: Python Packaging Authority
+Author-email: distutils-sig@python.org
+License: UNKNOWN
+Project-URL: Documentation, https://setuptools.readthedocs.io/
+Keywords: CPAN PyPI distutils eggs package management
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Archiving :: Packaging
+Classifier: Topic :: System :: Systems Administration
+Classifier: Topic :: Utilities
+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*
+Description-Content-Type: text/x-rst; charset=UTF-8
+Provides-Extra: certs
+Requires-Dist: certifi (==2016.9.26) ; extra == 'certs'
+Provides-Extra: ssl
+Requires-Dist: wincertstore (==0.2) ; (sys_platform=='win32') and extra == 'ssl'
+
+.. image:: https://img.shields.io/pypi/v/setuptools.svg
+   :target: https://pypi.org/project/setuptools
+
+.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg
+    :target: https://setuptools.readthedocs.io
+
+.. image:: https://img.shields.io/travis/pypa/setuptools/master.svg?label=Linux%20CI&logo=travis&logoColor=white
+   :target: https://travis-ci.org/pypa/setuptools
+
+.. image:: https://img.shields.io/appveyor/ci/pypa/setuptools/master.svg?label=Windows%20CI&logo=appveyor&logoColor=white
+   :target: https://ci.appveyor.com/project/pypa/setuptools/branch/master
+
+.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white
+   :target: https://codecov.io/gh/pypa/setuptools
+
+.. image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat
+   :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme
+
+.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg
+
+See the `Installation Instructions
+<https://packaging.python.org/installing/>`_ in the Python Packaging
+User's Guide for instructions on installing, upgrading, and uninstalling
+Setuptools.
+
+Questions and comments should be directed to the `distutils-sig
+mailing list <http://mail.python.org/pipermail/distutils-sig/>`_.
+Bug reports and especially tested patches may be
+submitted directly to the `bug tracker
+<https://github.com/pypa/setuptools/issues>`_.
+
+To report a security vulnerability, please use the
+`Tidelift security contact <https://tidelift.com/security>`_.
+Tidelift will coordinate the fix and disclosure.
+
+
+Code of Conduct
+---------------
+
+Everyone interacting in the setuptools project's codebases, issue trackers,
+chat rooms, and mailing lists is expected to follow the
+`PyPA Code of Conduct <https://www.pypa.io/en/latest/code-of-conduct/>`_.
+
+
diff --git a/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/RECORD b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..490b644c56c1dba111ba96fa370f5db8a1645f57
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/RECORD
@@ -0,0 +1,186 @@
+../../../bin/easy_install,sha256=jLRPmL4dd2UvSAqdBEmhIJOypbxy8tVv4BcydCkWksk,301
+../../../bin/easy_install-3.7,sha256=jLRPmL4dd2UvSAqdBEmhIJOypbxy8tVv4BcydCkWksk,301
+__pycache__/easy_install.cpython-37.pyc,,
+easy_install.py,sha256=MDC9vt5AxDsXX5qcKlBz2TnW6Tpuv_AobnfhCJ9X3PM,126
+pkg_resources/__init__.py,sha256=pp8b7Asoaheso-q8lIMS1tpQp88xjAkYgCIRX-JsPlE,107982
+pkg_resources/__pycache__/__init__.cpython-37.pyc,,
+pkg_resources/__pycache__/py31compat.cpython-37.pyc,,
+pkg_resources/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pkg_resources/_vendor/__pycache__/__init__.cpython-37.pyc,,
+pkg_resources/_vendor/__pycache__/appdirs.cpython-37.pyc,,
+pkg_resources/_vendor/__pycache__/pyparsing.cpython-37.pyc,,
+pkg_resources/_vendor/__pycache__/six.cpython-37.pyc,,
+pkg_resources/_vendor/appdirs.py,sha256=MievUEuv3l_mQISH5SF0shDk_BNhHHzYiAPrT3ITN4I,24701
+pkg_resources/_vendor/packaging/__about__.py,sha256=zkcCPTN_6TcLW0Nrlg0176-R1QQ_WVPTm8sz1R4-HjM,720
+pkg_resources/_vendor/packaging/__init__.py,sha256=_vNac5TrzwsrzbOFIbF-5cHqc_Y2aPT2D7zrIR06BOo,513
+pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-37.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-37.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-37.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-37.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/markers.cpython-37.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-37.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/utils.cpython-37.pyc,,
+pkg_resources/_vendor/packaging/__pycache__/version.cpython-37.pyc,,
+pkg_resources/_vendor/packaging/_compat.py,sha256=Vi_A0rAQeHbU-a9X0tt1yQm9RqkgQbDSxzRw8WlU9kA,860
+pkg_resources/_vendor/packaging/_structures.py,sha256=RImECJ4c_wTlaTYYwZYLHEiebDMaAJmK1oPARhw1T5o,1416
+pkg_resources/_vendor/packaging/markers.py,sha256=uEcBBtGvzqltgnArqb9c4RrcInXezDLos14zbBHhWJo,8248
+pkg_resources/_vendor/packaging/requirements.py,sha256=SikL2UynbsT0qtY9ltqngndha_sfo0w6XGFhAhoSoaQ,4355
+pkg_resources/_vendor/packaging/specifiers.py,sha256=SAMRerzO3fK2IkFZCaZkuwZaL_EGqHNOz4pni4vhnN0,28025
+pkg_resources/_vendor/packaging/utils.py,sha256=3m6WvPm6NNxE8rkTGmn0r75B_GZSGg7ikafxHsBN1WA,421
+pkg_resources/_vendor/packaging/version.py,sha256=OwGnxYfr2ghNzYx59qWIBkrK3SnB6n-Zfd1XaLpnnM0,11556
+pkg_resources/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055
+pkg_resources/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098
+pkg_resources/extern/__init__.py,sha256=cHiEfHuLmm6rs5Ve_ztBfMI7Lr31vss-D4wkqF5xzlI,2498
+pkg_resources/extern/__pycache__/__init__.cpython-37.pyc,,
+pkg_resources/py31compat.py,sha256=-WQ0e4c3RG_acdhwC3gLiXhP_lg4G5q7XYkZkQg0gxU,558
+setuptools-41.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+setuptools-41.0.1.dist-info/LICENSE,sha256=wyo6w5WvYyHv0ovnPQagDw22q4h9HCHU_sRhKNIFbVo,1078
+setuptools-41.0.1.dist-info/METADATA,sha256=_i0otxacylu95CLISnMjTjG7DZ2vYZOvwedcoJ06JNE,3303
+setuptools-41.0.1.dist-info/RECORD,,
+setuptools-41.0.1.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110
+setuptools-41.0.1.dist-info/dependency_links.txt,sha256=HlkCFkoK5TbZ5EMLbLKYhLcY_E31kBWD8TqW2EgmatQ,239
+setuptools-41.0.1.dist-info/entry_points.txt,sha256=jBqCYDlVjl__sjYFGXo1JQGIMAYFJE-prYWUtnMZEew,2990
+setuptools-41.0.1.dist-info/top_level.txt,sha256=2HUXVVwA4Pff1xgTFr3GsTXXKaPaO6vlG6oNJ_4u4Tg,38
+setuptools-41.0.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+setuptools/__init__.py,sha256=WBpCcn2lvdckotabeae1TTYonPOcgCIF3raD2zRWzBc,7283
+setuptools/__pycache__/__init__.cpython-37.pyc,,
+setuptools/__pycache__/_deprecation_warning.cpython-37.pyc,,
+setuptools/__pycache__/archive_util.cpython-37.pyc,,
+setuptools/__pycache__/build_meta.cpython-37.pyc,,
+setuptools/__pycache__/config.cpython-37.pyc,,
+setuptools/__pycache__/dep_util.cpython-37.pyc,,
+setuptools/__pycache__/depends.cpython-37.pyc,,
+setuptools/__pycache__/dist.cpython-37.pyc,,
+setuptools/__pycache__/extension.cpython-37.pyc,,
+setuptools/__pycache__/glibc.cpython-37.pyc,,
+setuptools/__pycache__/glob.cpython-37.pyc,,
+setuptools/__pycache__/launch.cpython-37.pyc,,
+setuptools/__pycache__/lib2to3_ex.cpython-37.pyc,,
+setuptools/__pycache__/monkey.cpython-37.pyc,,
+setuptools/__pycache__/msvc.cpython-37.pyc,,
+setuptools/__pycache__/namespaces.cpython-37.pyc,,
+setuptools/__pycache__/package_index.cpython-37.pyc,,
+setuptools/__pycache__/pep425tags.cpython-37.pyc,,
+setuptools/__pycache__/py27compat.cpython-37.pyc,,
+setuptools/__pycache__/py31compat.cpython-37.pyc,,
+setuptools/__pycache__/py33compat.cpython-37.pyc,,
+setuptools/__pycache__/sandbox.cpython-37.pyc,,
+setuptools/__pycache__/site-patch.cpython-37.pyc,,
+setuptools/__pycache__/ssl_support.cpython-37.pyc,,
+setuptools/__pycache__/unicode_utils.cpython-37.pyc,,
+setuptools/__pycache__/version.cpython-37.pyc,,
+setuptools/__pycache__/wheel.cpython-37.pyc,,
+setuptools/__pycache__/windows_support.cpython-37.pyc,,
+setuptools/_deprecation_warning.py,sha256=jU9-dtfv6cKmtQJOXN8nP1mm7gONw5kKEtiPtbwnZyI,218
+setuptools/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+setuptools/_vendor/__pycache__/__init__.cpython-37.pyc,,
+setuptools/_vendor/__pycache__/pyparsing.cpython-37.pyc,,
+setuptools/_vendor/__pycache__/six.cpython-37.pyc,,
+setuptools/_vendor/packaging/__about__.py,sha256=zkcCPTN_6TcLW0Nrlg0176-R1QQ_WVPTm8sz1R4-HjM,720
+setuptools/_vendor/packaging/__init__.py,sha256=_vNac5TrzwsrzbOFIbF-5cHqc_Y2aPT2D7zrIR06BOo,513
+setuptools/_vendor/packaging/__pycache__/__about__.cpython-37.pyc,,
+setuptools/_vendor/packaging/__pycache__/__init__.cpython-37.pyc,,
+setuptools/_vendor/packaging/__pycache__/_compat.cpython-37.pyc,,
+setuptools/_vendor/packaging/__pycache__/_structures.cpython-37.pyc,,
+setuptools/_vendor/packaging/__pycache__/markers.cpython-37.pyc,,
+setuptools/_vendor/packaging/__pycache__/requirements.cpython-37.pyc,,
+setuptools/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc,,
+setuptools/_vendor/packaging/__pycache__/utils.cpython-37.pyc,,
+setuptools/_vendor/packaging/__pycache__/version.cpython-37.pyc,,
+setuptools/_vendor/packaging/_compat.py,sha256=Vi_A0rAQeHbU-a9X0tt1yQm9RqkgQbDSxzRw8WlU9kA,860
+setuptools/_vendor/packaging/_structures.py,sha256=RImECJ4c_wTlaTYYwZYLHEiebDMaAJmK1oPARhw1T5o,1416
+setuptools/_vendor/packaging/markers.py,sha256=Gvpk9EY20yKaMTiKgQZ8yFEEpodqVgVYtfekoic1Yts,8239
+setuptools/_vendor/packaging/requirements.py,sha256=t44M2HVWtr8phIz2OhnILzuGT3rTATaovctV1dpnVIg,4343
+setuptools/_vendor/packaging/specifiers.py,sha256=SAMRerzO3fK2IkFZCaZkuwZaL_EGqHNOz4pni4vhnN0,28025
+setuptools/_vendor/packaging/utils.py,sha256=3m6WvPm6NNxE8rkTGmn0r75B_GZSGg7ikafxHsBN1WA,421
+setuptools/_vendor/packaging/version.py,sha256=OwGnxYfr2ghNzYx59qWIBkrK3SnB6n-Zfd1XaLpnnM0,11556
+setuptools/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055
+setuptools/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098
+setuptools/archive_util.py,sha256=kw8Ib_lKjCcnPKNbS7h8HztRVK0d5RacU3r_KRdVnmM,6592
+setuptools/build_meta.py,sha256=ioZE7tGSWY6vy7KLune0Iq334ugXuO39imcKC8prjBY,9387
+setuptools/cli-32.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536
+setuptools/cli-64.exe,sha256=KLABu5pyrnokJCv6skjXZ6GsXeyYHGcqOUT3oHI3Xpo,74752
+setuptools/cli.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536
+setuptools/command/__init__.py,sha256=NWzJ0A1BEengZpVeqUyWLNm2bk4P3F4iL5QUErHy7kA,594
+setuptools/command/__pycache__/__init__.cpython-37.pyc,,
+setuptools/command/__pycache__/alias.cpython-37.pyc,,
+setuptools/command/__pycache__/bdist_egg.cpython-37.pyc,,
+setuptools/command/__pycache__/bdist_rpm.cpython-37.pyc,,
+setuptools/command/__pycache__/bdist_wininst.cpython-37.pyc,,
+setuptools/command/__pycache__/build_clib.cpython-37.pyc,,
+setuptools/command/__pycache__/build_ext.cpython-37.pyc,,
+setuptools/command/__pycache__/build_py.cpython-37.pyc,,
+setuptools/command/__pycache__/develop.cpython-37.pyc,,
+setuptools/command/__pycache__/dist_info.cpython-37.pyc,,
+setuptools/command/__pycache__/easy_install.cpython-37.pyc,,
+setuptools/command/__pycache__/egg_info.cpython-37.pyc,,
+setuptools/command/__pycache__/install.cpython-37.pyc,,
+setuptools/command/__pycache__/install_egg_info.cpython-37.pyc,,
+setuptools/command/__pycache__/install_lib.cpython-37.pyc,,
+setuptools/command/__pycache__/install_scripts.cpython-37.pyc,,
+setuptools/command/__pycache__/py36compat.cpython-37.pyc,,
+setuptools/command/__pycache__/register.cpython-37.pyc,,
+setuptools/command/__pycache__/rotate.cpython-37.pyc,,
+setuptools/command/__pycache__/saveopts.cpython-37.pyc,,
+setuptools/command/__pycache__/sdist.cpython-37.pyc,,
+setuptools/command/__pycache__/setopt.cpython-37.pyc,,
+setuptools/command/__pycache__/test.cpython-37.pyc,,
+setuptools/command/__pycache__/upload.cpython-37.pyc,,
+setuptools/command/__pycache__/upload_docs.cpython-37.pyc,,
+setuptools/command/alias.py,sha256=KjpE0sz_SDIHv3fpZcIQK-sCkJz-SrC6Gmug6b9Nkc8,2426
+setuptools/command/bdist_egg.py,sha256=be-IBpr1zhS9i6GjKANJgzkbH3ChImdWY7S-j0r2BK8,18167
+setuptools/command/bdist_rpm.py,sha256=B7l0TnzCGb-0nLlm6rS00jWLkojASwVmdhW2w5Qz_Ak,1508
+setuptools/command/bdist_wininst.py,sha256=_6dz3lpB1tY200LxKPLM7qgwTCceOMgaWFF-jW2-pm0,637
+setuptools/command/build_clib.py,sha256=bQ9aBr-5ZSO-9fGsGsDLz0mnnFteHUZnftVLkhvHDq0,4484
+setuptools/command/build_ext.py,sha256=81CTgsqjBjNl_HOgCJ1lQ5vv1NIM3RBpcoVGpqT4N1M,12897
+setuptools/command/build_py.py,sha256=yWyYaaS9F3o9JbIczn064A5g1C5_UiKRDxGaTqYbtLE,9596
+setuptools/command/develop.py,sha256=MQlnGS6uP19erK2JCNOyQYoYyquk3PADrqrrinqqLtA,8184
+setuptools/command/dist_info.py,sha256=5t6kOfrdgALT-P3ogss6PF9k-Leyesueycuk3dUyZnI,960
+setuptools/command/easy_install.py,sha256=telww7CuPsoTtvlpY-ktnZGT85cZ6xGCGZa0vHvFJ-Q,87273
+setuptools/command/egg_info.py,sha256=w73EdxYSOk2gsaAiHGL2dZrCldoPiuRr2eTfqcFvCds,25570
+setuptools/command/install.py,sha256=a0EZpL_A866KEdhicTGbuyD_TYl1sykfzdrri-zazT4,4683
+setuptools/command/install_egg_info.py,sha256=bMgeIeRiXzQ4DAGPV1328kcjwQjHjOWU4FngAWLV78Q,2203
+setuptools/command/install_lib.py,sha256=11mxf0Ch12NsuYwS8PHwXBRvyh671QAM4cTRh7epzG0,3840
+setuptools/command/install_scripts.py,sha256=UD0rEZ6861mTYhIdzcsqKnUl8PozocXWl9VBQ1VTWnc,2439
+setuptools/command/launcher manifest.xml,sha256=xlLbjWrB01tKC0-hlVkOKkiSPbzMml2eOPtJ_ucCnbE,628
+setuptools/command/py36compat.py,sha256=SzjZcOxF7zdFUT47Zv2n7AM3H8koDys_0OpS-n9gIfc,4986
+setuptools/command/register.py,sha256=LO3MvYKPE8dN1m-KkrBRHC68ZFoPvA_vI8Xgp7vv6zI,534
+setuptools/command/rotate.py,sha256=co5C1EkI7P0GGT6Tqz-T2SIj2LBJTZXYELpmao6d4KQ,2164
+setuptools/command/saveopts.py,sha256=za7QCBcQimKKriWcoCcbhxPjUz30gSB74zuTL47xpP4,658
+setuptools/command/sdist.py,sha256=gr5hFrDzUtGfp_0tu0sllzIyr3jMQegIkFmlDauQJxw,7388
+setuptools/command/setopt.py,sha256=NTWDyx-gjDF-txf4dO577s7LOzHVoKR0Mq33rFxaRr8,5085
+setuptools/command/test.py,sha256=oePJ49u17ENKtrM-rOrrLlRhtNnrzcSr0IW-gE9XVq0,9285
+setuptools/command/upload.py,sha256=GxtNkIl7SA0r8mACkbDcSCN1m2_WPppK9gZXJmQSiow,6811
+setuptools/command/upload_docs.py,sha256=oXiGplM_cUKLwE4CWWw98RzCufAu8tBhMC97GegFcms,7311
+setuptools/config.py,sha256=lz19l1AtoHctpp1_tbYZv176nrEj4Gpf7ykNIYTIkAQ,20425
+setuptools/dep_util.py,sha256=fgixvC1R7sH3r13ktyf7N0FALoqEXL1cBarmNpSEoWg,935
+setuptools/depends.py,sha256=hC8QIDcM3VDpRXvRVA6OfL9AaQfxvhxHcN_w6sAyNq8,5837
+setuptools/dist.py,sha256=qYPmmVlLPWCLHrILR0J74bqoYgTSZh5ocLeyRKqnVyU,49913
+setuptools/extension.py,sha256=uc6nHI-MxwmNCNPbUiBnybSyqhpJqjbhvOQ-emdvt_E,1729
+setuptools/extern/__init__.py,sha256=TxeNKFMSfBMzBpBDiHx8Dh3RzsdVmvWaXhtZ03DZMs0,2499
+setuptools/extern/__pycache__/__init__.cpython-37.pyc,,
+setuptools/glibc.py,sha256=X64VvGPL2AbURKwYRsWJOXXGAYOiF_v2qixeTkAULuU,3146
+setuptools/glob.py,sha256=o75cHrOxYsvn854thSxE0x9k8JrKDuhP_rRXlVB00Q4,5084
+setuptools/gui-32.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536
+setuptools/gui-64.exe,sha256=aYKMhX1IJLn4ULHgWX0sE0yREUt6B3TEHf_jOw6yNyE,75264
+setuptools/gui.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536
+setuptools/launch.py,sha256=sd7ejwhBocCDx_wG9rIs0OaZ8HtmmFU8ZC6IR_S0Lvg,787
+setuptools/lib2to3_ex.py,sha256=t5e12hbR2pi9V4ezWDTB4JM-AISUnGOkmcnYHek3xjg,2013
+setuptools/monkey.py,sha256=FGc9fffh7gAxMLFmJs2DW_OYWpBjkdbNS2n14UAK4NA,5264
+setuptools/msvc.py,sha256=uuRFaZzjJt5Fv3ZmyKUUuLtjx12_8G9RILigGec4irI,40838
+setuptools/namespaces.py,sha256=F0Nrbv8KCT2OrO7rwa03om4N4GZKAlnce-rr-cgDQa8,3199
+setuptools/package_index.py,sha256=F9LBC-hQ5fkjeEVflxif0mo_DzRMrepahdFTPenOtGM,40587
+setuptools/pep425tags.py,sha256=o_D_WVeWcXZiI2xjPSg7pouGOvaWRgGRxEDK9DzAXIA,10861
+setuptools/py27compat.py,sha256=3mwxRMDk5Q5O1rSXOERbQDXhFqwDJhhUitfMW_qpUCo,536
+setuptools/py31compat.py,sha256=h2rtZghOfwoGYd8sQ0-auaKiF3TcL3qX0bX3VessqcE,838
+setuptools/py33compat.py,sha256=OubjldHJH1KGE1CKt1kRU-Q55keftHT3ea1YoL0ZSco,1195
+setuptools/sandbox.py,sha256=9UbwfEL5QY436oMI1LtFWohhoZ-UzwHvGyZjUH_qhkw,14276
+setuptools/script (dev).tmpl,sha256=RUzQzCQUaXtwdLtYHWYbIQmOaES5Brqq1FvUA_tu-5I,218
+setuptools/script.tmpl,sha256=WGTt5piezO27c-Dbx6l5Q4T3Ff20A5z7872hv3aAhYY,138
+setuptools/site-patch.py,sha256=OumkIHMuoSenRSW1382kKWI1VAwxNE86E5W8iDd34FY,2302
+setuptools/ssl_support.py,sha256=nLjPUBBw7RTTx6O4RJZ5eAMGgjJG8beiDbkFXDZpLuM,8493
+setuptools/unicode_utils.py,sha256=NOiZ_5hD72A6w-4wVj8awHFM3n51Kmw1Ic_vx15XFqw,996
+setuptools/version.py,sha256=og_cuZQb0QI6ukKZFfZWPlr1HgJBPPn2vO2m_bI9ZTE,144
+setuptools/wheel.py,sha256=94uqXsOaKt91d9hW5z6ZppZmNSs_nO66R4uiwhcr4V0,8094
+setuptools/windows_support.py,sha256=5GrfqSP2-dLGJoZTq2g6dCKkyQxxa2n5IQiXlJCoYEE,714
diff --git a/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/WHEEL b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..c8240f03e87fdc556c4cb9a1f721a73138177cc5
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/dependency_links.txt b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/dependency_links.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e87d02103ede91545d70783dd59653d183424b68
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/dependency_links.txt
@@ -0,0 +1,2 @@
+https://files.pythonhosted.org/packages/source/c/certifi/certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d
+https://files.pythonhosted.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2
diff --git a/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/entry_points.txt b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4159fd0a1b925dc63b5ddafc71dfcdf997e84002
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/entry_points.txt
@@ -0,0 +1,65 @@
+[console_scripts]
+easy_install = setuptools.command.easy_install:main
+easy_install-3.6 = setuptools.command.easy_install:main
+
+[distutils.commands]
+alias = setuptools.command.alias:alias
+bdist_egg = setuptools.command.bdist_egg:bdist_egg
+bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm
+bdist_wininst = setuptools.command.bdist_wininst:bdist_wininst
+build_clib = setuptools.command.build_clib:build_clib
+build_ext = setuptools.command.build_ext:build_ext
+build_py = setuptools.command.build_py:build_py
+develop = setuptools.command.develop:develop
+dist_info = setuptools.command.dist_info:dist_info
+easy_install = setuptools.command.easy_install:easy_install
+egg_info = setuptools.command.egg_info:egg_info
+install = setuptools.command.install:install
+install_egg_info = setuptools.command.install_egg_info:install_egg_info
+install_lib = setuptools.command.install_lib:install_lib
+install_scripts = setuptools.command.install_scripts:install_scripts
+register = setuptools.command.register:register
+rotate = setuptools.command.rotate:rotate
+saveopts = setuptools.command.saveopts:saveopts
+sdist = setuptools.command.sdist:sdist
+setopt = setuptools.command.setopt:setopt
+test = setuptools.command.test:test
+upload = setuptools.command.upload:upload
+upload_docs = setuptools.command.upload_docs:upload_docs
+
+[distutils.setup_keywords]
+convert_2to3_doctests = setuptools.dist:assert_string_list
+dependency_links = setuptools.dist:assert_string_list
+eager_resources = setuptools.dist:assert_string_list
+entry_points = setuptools.dist:check_entry_points
+exclude_package_data = setuptools.dist:check_package_data
+extras_require = setuptools.dist:check_extras
+include_package_data = setuptools.dist:assert_bool
+install_requires = setuptools.dist:check_requirements
+namespace_packages = setuptools.dist:check_nsp
+package_data = setuptools.dist:check_package_data
+packages = setuptools.dist:check_packages
+python_requires = setuptools.dist:check_specifier
+setup_requires = setuptools.dist:check_requirements
+test_loader = setuptools.dist:check_importable
+test_runner = setuptools.dist:check_importable
+test_suite = setuptools.dist:check_test_suite
+tests_require = setuptools.dist:check_requirements
+use_2to3 = setuptools.dist:assert_bool
+use_2to3_exclude_fixers = setuptools.dist:assert_string_list
+use_2to3_fixers = setuptools.dist:assert_string_list
+zip_safe = setuptools.dist:assert_bool
+
+[egg_info.writers]
+PKG-INFO = setuptools.command.egg_info:write_pkg_info
+dependency_links.txt = setuptools.command.egg_info:overwrite_arg
+depends.txt = setuptools.command.egg_info:warn_depends_obsolete
+eager_resources.txt = setuptools.command.egg_info:overwrite_arg
+entry_points.txt = setuptools.command.egg_info:write_entries
+namespace_packages.txt = setuptools.command.egg_info:overwrite_arg
+requires.txt = setuptools.command.egg_info:write_requirements
+top_level.txt = setuptools.command.egg_info:write_toplevel_names
+
+[setuptools.installation]
+eggsecutable = setuptools.command.easy_install:bootstrap
+
diff --git a/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/top_level.txt b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4577c6a795e510bf7578236665f582c3770fb42e
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/top_level.txt
@@ -0,0 +1,3 @@
+easy_install
+pkg_resources
+setuptools
diff --git a/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/zip-safe b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/zip-safe
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools-41.0.1.dist-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/lib/python3.7/site-packages/setuptools/__init__.py b/lib/python3.7/site-packages/setuptools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a71b2bbdc6170963a66959c48080c1dedc7bb703
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/__init__.py
@@ -0,0 +1,228 @@
+"""Extensions to the 'distutils' for large or complex distributions"""
+
+import os
+import sys
+import functools
+import distutils.core
+import distutils.filelist
+import re
+from distutils.errors import DistutilsOptionError
+from distutils.util import convert_path
+from fnmatch import fnmatchcase
+
+from ._deprecation_warning import SetuptoolsDeprecationWarning
+
+from setuptools.extern.six import PY3, string_types
+from setuptools.extern.six.moves import filter, map
+
+import setuptools.version
+from setuptools.extension import Extension
+from setuptools.dist import Distribution, Feature
+from setuptools.depends import Require
+from . import monkey
+
+__metaclass__ = type
+
+
+__all__ = [
+    'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
+    'SetuptoolsDeprecationWarning',
+    'find_packages'
+]
+
+if PY3:
+    __all__.append('find_namespace_packages')
+
+__version__ = setuptools.version.__version__
+
+bootstrap_install_from = None
+
+# If we run 2to3 on .py files, should we also convert docstrings?
+# Default: yes; assume that we can detect doctests reliably
+run_2to3_on_doctests = True
+# Standard package names for fixer packages
+lib2to3_fixer_packages = ['lib2to3.fixes']
+
+
+class PackageFinder:
+    """
+    Generate a list of all Python packages found within a directory
+    """
+
+    @classmethod
+    def find(cls, where='.', exclude=(), include=('*',)):
+        """Return a list all Python packages found within directory 'where'
+
+        'where' is the root directory which will be searched for packages.  It
+        should be supplied as a "cross-platform" (i.e. URL-style) path; it will
+        be converted to the appropriate local path syntax.
+
+        'exclude' is a sequence of package names to exclude; '*' can be used
+        as a wildcard in the names, such that 'foo.*' will exclude all
+        subpackages of 'foo' (but not 'foo' itself).
+
+        'include' is a sequence of package names to include.  If it's
+        specified, only the named packages will be included.  If it's not
+        specified, all found packages will be included.  'include' can contain
+        shell style wildcard patterns just like 'exclude'.
+        """
+
+        return list(cls._find_packages_iter(
+            convert_path(where),
+            cls._build_filter('ez_setup', '*__pycache__', *exclude),
+            cls._build_filter(*include)))
+
+    @classmethod
+    def _find_packages_iter(cls, where, exclude, include):
+        """
+        All the packages found in 'where' that pass the 'include' filter, but
+        not the 'exclude' filter.
+        """
+        for root, dirs, files in os.walk(where, followlinks=True):
+            # Copy dirs to iterate over it, then empty dirs.
+            all_dirs = dirs[:]
+            dirs[:] = []
+
+            for dir in all_dirs:
+                full_path = os.path.join(root, dir)
+                rel_path = os.path.relpath(full_path, where)
+                package = rel_path.replace(os.path.sep, '.')
+
+                # Skip directory trees that are not valid packages
+                if ('.' in dir or not cls._looks_like_package(full_path)):
+                    continue
+
+                # Should this package be included?
+                if include(package) and not exclude(package):
+                    yield package
+
+                # Keep searching subdirectories, as there may be more packages
+                # down there, even if the parent was excluded.
+                dirs.append(dir)
+
+    @staticmethod
+    def _looks_like_package(path):
+        """Does a directory look like a package?"""
+        return os.path.isfile(os.path.join(path, '__init__.py'))
+
+    @staticmethod
+    def _build_filter(*patterns):
+        """
+        Given a list of patterns, return a callable that will be true only if
+        the input matches at least one of the patterns.
+        """
+        return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
+
+
+class PEP420PackageFinder(PackageFinder):
+    @staticmethod
+    def _looks_like_package(path):
+        return True
+
+
+find_packages = PackageFinder.find
+
+if PY3:
+    find_namespace_packages = PEP420PackageFinder.find
+
+
+def _install_setup_requires(attrs):
+    # Note: do not use `setuptools.Distribution` directly, as
+    # our PEP 517 backend patches `distutils.core.Distribution`.
+    dist = distutils.core.Distribution(dict(
+        (k, v) for k, v in attrs.items()
+        if k in ('dependency_links', 'setup_requires')
+    ))
+    # Honor setup.cfg's options.
+    dist.parse_config_files(ignore_option_errors=True)
+    if dist.setup_requires:
+        dist.fetch_build_eggs(dist.setup_requires)
+
+
+def setup(**attrs):
+    # Make sure we have any requirements needed to interpret 'attrs'.
+    _install_setup_requires(attrs)
+    return distutils.core.setup(**attrs)
+
+setup.__doc__ = distutils.core.setup.__doc__
+
+
+_Command = monkey.get_unpatched(distutils.core.Command)
+
+
+class Command(_Command):
+    __doc__ = _Command.__doc__
+
+    command_consumes_arguments = False
+
+    def __init__(self, dist, **kw):
+        """
+        Construct the command for dist, updating
+        vars(self) with any keyword parameters.
+        """
+        _Command.__init__(self, dist)
+        vars(self).update(kw)
+
+    def _ensure_stringlike(self, option, what, default=None):
+        val = getattr(self, option)
+        if val is None:
+            setattr(self, option, default)
+            return default
+        elif not isinstance(val, string_types):
+            raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
+                                       % (option, what, val))
+        return val
+
+    def ensure_string_list(self, option):
+        r"""Ensure that 'option' is a list of strings.  If 'option' is
+        currently a string, we split it either on /,\s*/ or /\s+/, so
+        "foo bar baz", "foo,bar,baz", and "foo,   bar baz" all become
+        ["foo", "bar", "baz"].
+        """
+        val = getattr(self, option)
+        if val is None:
+            return
+        elif isinstance(val, string_types):
+            setattr(self, option, re.split(r',\s*|\s+', val))
+        else:
+            if isinstance(val, list):
+                ok = all(isinstance(v, string_types) for v in val)
+            else:
+                ok = False
+            if not ok:
+                raise DistutilsOptionError(
+                      "'%s' must be a list of strings (got %r)"
+                      % (option, val))
+
+    def reinitialize_command(self, command, reinit_subcommands=0, **kw):
+        cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
+        vars(cmd).update(kw)
+        return cmd
+
+
+def _find_all_simple(path):
+    """
+    Find all files under 'path'
+    """
+    results = (
+        os.path.join(base, file)
+        for base, dirs, files in os.walk(path, followlinks=True)
+        for file in files
+    )
+    return filter(os.path.isfile, results)
+
+
+def findall(dir=os.curdir):
+    """
+    Find all files under 'dir' and return the list of full filenames.
+    Unless dir is '.', return full filenames with dir prepended.
+    """
+    files = _find_all_simple(dir)
+    if dir == os.curdir:
+        make_rel = functools.partial(os.path.relpath, start=dir)
+        files = map(make_rel, files)
+    return list(files)
+
+
+# Apply monkey patches
+monkey.patch_all()
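The PackageFinder above is what backs the exported find_packages helper; a
typical setup.py uses it like this (the metadata values are illustrative):

    from setuptools import setup, find_packages

    setup(
        name='example-package',
        version='0.1.0',
        packages=find_packages(exclude=('tests', 'tests.*')),
    )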
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f4e12b29326d0eda3237a0598942278db071c9f1
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/_deprecation_warning.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/_deprecation_warning.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..200237134ec2f818faf7c2f506ab1e7ec02322ff
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/_deprecation_warning.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/archive_util.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/archive_util.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f06d904ae268dd419279a7f26b9b87cef916af90
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/archive_util.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/build_meta.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/build_meta.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d0191df0efee500bb5eccd70dd687c5dbf417c95
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/build_meta.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/config.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/config.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b03b681d77e29e3d952cccf8bed3c532d5064c35
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/config.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/dep_util.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/dep_util.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9409ec147214bf4a589d87c9d943e213b1990cb
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/dep_util.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/depends.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/depends.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36118b65b28b0d3c91e0da13f8c397ebf60246c0
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/depends.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/dist.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/dist.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67c531b3091acfcddbae07bab2efbfff13d72789
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/dist.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/extension.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/extension.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a30cb634fe45460ab7c7b6355eab6df9b1cb0e34
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/extension.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/glibc.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/glibc.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec95873b20b9dda4e47bfd8fe6ff2cd14ca2eaec
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/glibc.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/glob.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/glob.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31b65e2fec1bdb9a2fcfc470169eb1802e8649a8
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/glob.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/launch.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/launch.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f1b0bf62f1c712ae2313c42ad49f0044501b8792
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/launch.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/lib2to3_ex.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/lib2to3_ex.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..496ce64b5d2e4c2ff8d9ffb8d332103dfa028dff
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/lib2to3_ex.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/monkey.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/monkey.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1922b98ed34b86d7e39af35d841bbd6a481a8120
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/monkey.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/msvc.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/msvc.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..331d3dc50d913ee1946d132c40fffc4912f577bc
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/msvc.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/namespaces.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/namespaces.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf62e1be5722a2264750e8b00af922b6dff1b3bc
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/namespaces.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/package_index.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/package_index.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d32affaf01db5cc9ac46e6e533e5aa8963b0a304
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/package_index.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/pep425tags.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/pep425tags.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..efeaf661cbde6f3804c453b474b79b3ea342a920
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/pep425tags.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/py27compat.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/py27compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6a5826a7ea1a5565d18861897b82be338cb8b4ef
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/py27compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/py31compat.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/py31compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b21556b109337ce3dc67b1f9ac6068bd215a351
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/py31compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/py33compat.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/py33compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a1e256545b47b7c03ff08761167716b4e540784
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/py33compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/sandbox.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/sandbox.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..147cf214e53b54fe71390bbc0c2b2dd32bb0d510
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/sandbox.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/site-patch.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/site-patch.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a18cefa53c1f205c329bde09222678a82ccf570d
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/site-patch.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/ssl_support.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/ssl_support.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9835ff44ded411385835887fc632beba35b4ebf
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/ssl_support.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/unicode_utils.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/unicode_utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..621b937a0e7936ef5f7b4efac9073154d0ff07a3
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/unicode_utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/version.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/version.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47cf29c95761daf68ac90602d8e421f288e23ef7
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/version.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/wheel.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/wheel.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a3d79cdafbe62bc213c1b0fc561c417628d1071
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/wheel.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/__pycache__/windows_support.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/__pycache__/windows_support.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2258577a69df59d9db4578b6dba1b40e1ef959f8
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/__pycache__/windows_support.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_deprecation_warning.py b/lib/python3.7/site-packages/setuptools/_deprecation_warning.py
new file mode 100644
index 0000000000000000000000000000000000000000..086b64dd3817c0c1a194ffc1959eeffdd2695bef
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/_deprecation_warning.py
@@ -0,0 +1,7 @@
+class SetuptoolsDeprecationWarning(Warning):
+    """
+    Base class for deprecation warnings in ``setuptools``.
+
+    This class is not derived from ``DeprecationWarning``, and as such is
+    visible by default.
+    """
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/__init__.py b/lib/python3.7/site-packages/setuptools/_vendor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/_vendor/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b4ee45d33fd23c2606fdd088e475c40809ae4dd8
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/_vendor/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/__pycache__/pyparsing.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/_vendor/__pycache__/pyparsing.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d51c01f392cf2fd291bad118aa9523056c6e03d
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/_vendor/__pycache__/pyparsing.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/__pycache__/six.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/_vendor/__pycache__/six.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b25f3a12ab77962f3e9d5e4fdb0563162ce73831
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/_vendor/__pycache__/six.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/__about__.py b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__about__.py
new file mode 100644
index 0000000000000000000000000000000000000000..95d330ef823aa2e12f7846bc63c0955b25df6029
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__about__.py
@@ -0,0 +1,21 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+__all__ = [
+    "__title__", "__summary__", "__uri__", "__version__", "__author__",
+    "__email__", "__license__", "__copyright__",
+]
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "16.8"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD or Apache License, Version 2.0"
+__copyright__ = "Copyright 2014-2016 %s" % __author__
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/__init__.py b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ee6220203e5425f900fb5a43676c24ea377c2fa
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__init__.py
@@ -0,0 +1,14 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+from .__about__ import (
+    __author__, __copyright__, __email__, __license__, __summary__, __title__,
+    __uri__, __version__
+)
+
+__all__ = [
+    "__title__", "__summary__", "__uri__", "__version__", "__author__",
+    "__email__", "__license__", "__copyright__",
+]
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/__about__.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/__about__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cbb2956dc2107ab7f719c4f33f14ee1b3c0f685f
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/__about__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8f248e0918c80dc300c4c1b5be36578319393a60
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/_compat.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/_compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b224956148009d10b782d0444446010d378e9c9
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/_compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/_structures.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/_structures.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e989c4fc0d45310c648a83623286aa7790371ef8
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/_structures.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/markers.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/markers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..89b0b8b4df487b7f8f128f833f9f5821d2a53f2e
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/markers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/requirements.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/requirements.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9bce68f393ef4baf15d1d5676f2866e93eabd729
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/requirements.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..140a3dd161f0b7aaf371fff424d3030b666dd10c
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/utils.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f5cf29feb2d4f282a5aa4ad7c7ae89b138d41bc3
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/version.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/version.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b7237a6d974be2313d98673d3a27bf6c2eb447c
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/_vendor/packaging/__pycache__/version.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/_compat.py b/lib/python3.7/site-packages/setuptools/_vendor/packaging/_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..210bb80b7e7b64cb79f7e7cdf3e42819fe3471fe
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/_vendor/packaging/_compat.py
@@ -0,0 +1,30 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+# flake8: noqa
+
+if PY3:
+    string_types = str,
+else:
+    string_types = basestring,
+
+
+def with_metaclass(meta, *bases):
+    """
+    Create a base class with a metaclass.
+    """
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(meta):
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+    return type.__new__(metaclass, 'temporary_class', (), {})
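``with_metaclass`` works by creating a throwaway metaclass whose ``__new__`` hands the class body straight to the real metaclass, so a single class statement is metaclass-compatible with both Python 2 and 3. An illustrative sketch (the ``Base`` class is hypothetical; the import path assumes this vendored copy):

    import abc

    from setuptools._vendor.packaging._compat import with_metaclass

    class Base(with_metaclass(abc.ABCMeta, object)):
        # type(Base) is abc.ABCMeta on Python 2 and 3 alike
        @abc.abstractmethod
        def run(self):
            """Subclasses must implement run()."""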
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/_structures.py b/lib/python3.7/site-packages/setuptools/_vendor/packaging/_structures.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccc27861c3a4d9efaa3db753c77c4515a627bd98
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/_vendor/packaging/_structures.py
@@ -0,0 +1,68 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+
+class Infinity(object):
+
+    def __repr__(self):
+        return "Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return False
+
+    def __le__(self, other):
+        return False
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return True
+
+    def __ge__(self, other):
+        return True
+
+    def __neg__(self):
+        return NegativeInfinity
+
+Infinity = Infinity()
+
+
+class NegativeInfinity(object):
+
+    def __repr__(self):
+        return "-Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return True
+
+    def __le__(self, other):
+        return True
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return False
+
+    def __ge__(self, other):
+        return False
+
+    def __neg__(self):
+        return Infinity
+
+NegativeInfinity = NegativeInfinity()
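Both names are rebound to singleton instances of their own classes, yielding two sentinel values that compare above and below every other object (each ``__neg__`` returns the other); the version-comparison code uses them as padding when sorting release segments. A quick illustration, assuming the vendored import path:

    from setuptools._vendor.packaging._structures import (
        Infinity, NegativeInfinity,
    )

    assert Infinity > 10 ** 9 and NegativeInfinity < -10 ** 9
    assert -Infinity is NegativeInfinity  # __neg__ flips between the two
    print(sorted([3, Infinity, NegativeInfinity, 7]))
    # [-Infinity, 3, 7, Infinity]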
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/markers.py b/lib/python3.7/site-packages/setuptools/_vendor/packaging/markers.py
new file mode 100644
index 0000000000000000000000000000000000000000..031332a3058f4bc47eadd320e63a2cdc6b76df25
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/_vendor/packaging/markers.py
@@ -0,0 +1,301 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import operator
+import os
+import platform
+import sys
+
+from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
+from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
+from setuptools.extern.pyparsing import Literal as L  # noqa
+
+from ._compat import string_types
+from .specifiers import Specifier, InvalidSpecifier
+
+
+__all__ = [
+    "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
+    "Marker", "default_environment",
+]
+
+
+class InvalidMarker(ValueError):
+    """
+    An invalid marker was found; users should refer to PEP 508.
+    """
+
+
+class UndefinedComparison(ValueError):
+    """
+    An invalid operation was attempted on a value that doesn't support it.
+    """
+
+
+class UndefinedEnvironmentName(ValueError):
+    """
+    An attempt was made to use a name that does not exist in the
+    environment.
+    """
+
+
+class Node(object):
+
+    def __init__(self, value):
+        self.value = value
+
+    def __str__(self):
+        return str(self.value)
+
+    def __repr__(self):
+        return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
+
+    def serialize(self):
+        raise NotImplementedError
+
+
+class Variable(Node):
+
+    def serialize(self):
+        return str(self)
+
+
+class Value(Node):
+
+    def serialize(self):
+        return '"{0}"'.format(self)
+
+
+class Op(Node):
+
+    def serialize(self):
+        return str(self)
+
+
+VARIABLE = (
+    L("implementation_version") |
+    L("platform_python_implementation") |
+    L("implementation_name") |
+    L("python_full_version") |
+    L("platform_release") |
+    L("platform_version") |
+    L("platform_machine") |
+    L("platform_system") |
+    L("python_version") |
+    L("sys_platform") |
+    L("os_name") |
+    L("os.name") |  # PEP-345
+    L("sys.platform") |  # PEP-345
+    L("platform.version") |  # PEP-345
+    L("platform.machine") |  # PEP-345
+    L("platform.python_implementation") |  # PEP-345
+    L("python_implementation") |  # undocumented setuptools legacy
+    L("extra")
+)
+ALIASES = {
+    'os.name': 'os_name',
+    'sys.platform': 'sys_platform',
+    'platform.version': 'platform_version',
+    'platform.machine': 'platform_machine',
+    'platform.python_implementation': 'platform_python_implementation',
+    'python_implementation': 'platform_python_implementation'
+}
+VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
+
+VERSION_CMP = (
+    L("===") |
+    L("==") |
+    L(">=") |
+    L("<=") |
+    L("!=") |
+    L("~=") |
+    L(">") |
+    L("<")
+)
+
+MARKER_OP = VERSION_CMP | L("not in") | L("in")
+MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
+
+MARKER_VALUE = QuotedString("'") | QuotedString('"')
+MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
+
+BOOLOP = L("and") | L("or")
+
+MARKER_VAR = VARIABLE | MARKER_VALUE
+
+MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
+MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
+
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+
+MARKER_EXPR = Forward()
+MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
+MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
+
+MARKER = stringStart + MARKER_EXPR + stringEnd
+
+
+def _coerce_parse_result(results):
+    if isinstance(results, ParseResults):
+        return [_coerce_parse_result(i) for i in results]
+    else:
+        return results
+
+
+def _format_marker(marker, first=True):
+    assert isinstance(marker, (list, tuple, string_types))
+
+    # Sometimes we have a structure like [[...]] which is a single-item list
+    # where the single item is itself its own list. In that case we want to
+    # skip the rest of this function so that we don't get extraneous () on
+    # the outside.
+    if (isinstance(marker, list) and len(marker) == 1 and
+            isinstance(marker[0], (list, tuple))):
+        return _format_marker(marker[0])
+
+    if isinstance(marker, list):
+        inner = (_format_marker(m, first=False) for m in marker)
+        if first:
+            return " ".join(inner)
+        else:
+            return "(" + " ".join(inner) + ")"
+    elif isinstance(marker, tuple):
+        return " ".join([m.serialize() for m in marker])
+    else:
+        return marker
+
+
+_operators = {
+    "in": lambda lhs, rhs: lhs in rhs,
+    "not in": lambda lhs, rhs: lhs not in rhs,
+    "<": operator.lt,
+    "<=": operator.le,
+    "==": operator.eq,
+    "!=": operator.ne,
+    ">=": operator.ge,
+    ">": operator.gt,
+}
+
+
+def _eval_op(lhs, op, rhs):
+    try:
+        spec = Specifier("".join([op.serialize(), rhs]))
+    except InvalidSpecifier:
+        pass
+    else:
+        return spec.contains(lhs)
+
+    oper = _operators.get(op.serialize())
+    if oper is None:
+        raise UndefinedComparison(
+            "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
+        )
+
+    return oper(lhs, rhs)
+
+
+_undefined = object()
+
+
+def _get_env(environment, name):
+    value = environment.get(name, _undefined)
+
+    if value is _undefined:
+        raise UndefinedEnvironmentName(
+            "{0!r} does not exist in evaluation environment.".format(name)
+        )
+
+    return value
+
+
+def _evaluate_markers(markers, environment):
+    groups = [[]]
+
+    for marker in markers:
+        assert isinstance(marker, (list, tuple, string_types))
+
+        if isinstance(marker, list):
+            groups[-1].append(_evaluate_markers(marker, environment))
+        elif isinstance(marker, tuple):
+            lhs, op, rhs = marker
+
+            if isinstance(lhs, Variable):
+                lhs_value = _get_env(environment, lhs.value)
+                rhs_value = rhs.value
+            else:
+                lhs_value = lhs.value
+                rhs_value = _get_env(environment, rhs.value)
+
+            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+        else:
+            assert marker in ["and", "or"]
+            if marker == "or":
+                groups.append([])
+
+    return any(all(item) for item in groups)
+
+
+def format_full_version(info):
+    version = '{0.major}.{0.minor}.{0.micro}'.format(info)
+    kind = info.releaselevel
+    if kind != 'final':
+        version += kind[0] + str(info.serial)
+    return version
+
+
+def default_environment():
+    if hasattr(sys, 'implementation'):
+        iver = format_full_version(sys.implementation.version)
+        implementation_name = sys.implementation.name
+    else:
+        iver = '0'
+        implementation_name = ''
+
+    return {
+        "implementation_name": implementation_name,
+        "implementation_version": iver,
+        "os_name": os.name,
+        "platform_machine": platform.machine(),
+        "platform_release": platform.release(),
+        "platform_system": platform.system(),
+        "platform_version": platform.version(),
+        "python_full_version": platform.python_version(),
+        "platform_python_implementation": platform.python_implementation(),
+        "python_version": platform.python_version()[:3],
+        "sys_platform": sys.platform,
+    }
+
+
+class Marker(object):
+
+    def __init__(self, marker):
+        try:
+            self._markers = _coerce_parse_result(MARKER.parseString(marker))
+        except ParseException as e:
+            err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
+                marker, marker[e.loc:e.loc + 8])
+            raise InvalidMarker(err_str)
+
+    def __str__(self):
+        return _format_marker(self._markers)
+
+    def __repr__(self):
+        return "<Marker({0!r})>".format(str(self))
+
+    def evaluate(self, environment=None):
+        """Evaluate a marker.
+
+        Return the boolean from evaluating the given marker against the
+        environment. environment is an optional argument to override all or
+        part of the determined environment.
+
+        The environment is determined from the current Python process.
+        """
+        current_environment = default_environment()
+        if environment is not None:
+            current_environment.update(environment)
+
+        return _evaluate_markers(self._markers, current_environment)
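``Marker`` ties the module together: the pyparsing grammar above reduces a PEP 508 marker to nested (Variable, Op, Value) tuples, and ``evaluate()`` resolves the variables against ``default_environment()``, optionally overridden by the caller. A short usage sketch against this vendored copy:

    from setuptools._vendor.packaging.markers import Marker

    m = Marker('python_version >= "3.6" and os_name == "posix"')
    print(m.evaluate())                           # uses the running interpreter
    print(m.evaluate({"python_version": "2.7"}))  # override part of the environment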
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/requirements.py b/lib/python3.7/site-packages/setuptools/_vendor/packaging/requirements.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b493416f215081b46c901d76c3a4b4648a32296
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/_vendor/packaging/requirements.py
@@ -0,0 +1,127 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import string
+import re
+
+from setuptools.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
+from setuptools.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
+from setuptools.extern.pyparsing import Literal as L  # noqa
+from setuptools.extern.six.moves.urllib import parse as urlparse
+
+from .markers import MARKER_EXPR, Marker
+from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+
+class InvalidRequirement(ValueError):
+    """
+    An invalid requirement was found; users should refer to PEP 508.
+    """
+
+
+ALPHANUM = Word(string.ascii_letters + string.digits)
+
+LBRACKET = L("[").suppress()
+RBRACKET = L("]").suppress()
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+COMMA = L(",").suppress()
+SEMICOLON = L(";").suppress()
+AT = L("@").suppress()
+
+PUNCTUATION = Word("-_.")
+IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
+IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
+
+NAME = IDENTIFIER("name")
+EXTRA = IDENTIFIER
+
+URI = Regex(r'[^ ]+')("url")
+URL = (AT + URI)
+
+EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
+EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
+
+VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
+VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
+
+VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
+VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
+                       joinString=",", adjacent=False)("_raw_spec")
+_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
+_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')
+
+VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
+VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
+
+MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
+MARKER_EXPR.setParseAction(
+    lambda s, l, t: Marker(s[t._original_start:t._original_end])
+)
+MARKER_SEPERATOR = SEMICOLON
+MARKER = MARKER_SEPERATOR + MARKER_EXPR
+
+VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
+URL_AND_MARKER = URL + Optional(MARKER)
+
+NAMED_REQUIREMENT = \
+    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
+
+REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+
+
+class Requirement(object):
+    """Parse a requirement.
+
+    Parse a given requirement string into its parts, such as name, specifier,
+    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+    string.
+    """
+
+    # TODO: Can we test whether something is contained within a requirement?
+    #       If so how do we do that? Do we need to test against the _name_ of
+    #       the thing as well as the version? What about the markers?
+    # TODO: Can we normalize the name and extra name?
+
+    def __init__(self, requirement_string):
+        try:
+            req = REQUIREMENT.parseString(requirement_string)
+        except ParseException as e:
+            raise InvalidRequirement(
+                "Invalid requirement, parse error at \"{0!r}\"".format(
+                    requirement_string[e.loc:e.loc + 8]))
+
+        self.name = req.name
+        if req.url:
+            parsed_url = urlparse.urlparse(req.url)
+            if not (parsed_url.scheme and parsed_url.netloc) or (
+                    not parsed_url.scheme and not parsed_url.netloc):
+                raise InvalidRequirement("Invalid URL given")
+            self.url = req.url
+        else:
+            self.url = None
+        self.extras = set(req.extras.asList() if req.extras else [])
+        self.specifier = SpecifierSet(req.specifier)
+        self.marker = req.marker if req.marker else None
+
+    def __str__(self):
+        parts = [self.name]
+
+        if self.extras:
+            parts.append("[{0}]".format(",".join(sorted(self.extras))))
+
+        if self.specifier:
+            parts.append(str(self.specifier))
+
+        if self.url:
+            parts.append("@ {0}".format(self.url))
+
+        if self.marker:
+            parts.append("; {0}".format(self.marker))
+
+        return "".join(parts)
+
+    def __repr__(self):
+        return "<Requirement({0!r})>".format(str(self))
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/specifiers.py b/lib/python3.7/site-packages/setuptools/_vendor/packaging/specifiers.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f5a76cfd63f47dcce29b3ea82f59d10f4e8d771
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/_vendor/packaging/specifiers.py
@@ -0,0 +1,774 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import abc
+import functools
+import itertools
+import re
+
+from ._compat import string_types, with_metaclass
+from .version import Version, LegacyVersion, parse
+
+
+class InvalidSpecifier(ValueError):
+    """
+    An invalid specifier was found; users should refer to PEP 440.
+    """
+
+
+class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
+
+    @abc.abstractmethod
+    def __str__(self):
+        """
+        Returns the str representation of this Specifier-like object. This
+        should be representative of the Specifier itself.
+        """
+
+    @abc.abstractmethod
+    def __hash__(self):
+        """
+        Returns a hash value for this Specifier-like object.
+        """
+
+    @abc.abstractmethod
+    def __eq__(self, other):
+        """
+        Returns a boolean representing whether or not the two Specifier-like
+        objects are equal.
+        """
+
+    @abc.abstractmethod
+    def __ne__(self, other):
+        """
+        Returns a boolean representing whether or not the two Specifier-like
+        objects are not equal.
+        """
+
+    @abc.abstractproperty
+    def prereleases(self):
+        """
+        Returns whether or not pre-releases as a whole are allowed by this
+        specifier.
+        """
+
+    @prereleases.setter
+    def prereleases(self, value):
+        """
+        Sets whether or not pre-releases as a whole are allowed by this
+        specifier.
+        """
+
+    @abc.abstractmethod
+    def contains(self, item, prereleases=None):
+        """
+        Determines if the given item is contained within this specifier.
+        """
+
+    @abc.abstractmethod
+    def filter(self, iterable, prereleases=None):
+        """
+        Takes an iterable of items and filters them so that only items which
+        are contained within this specifier are allowed in it.
+        """
+
+
+class _IndividualSpecifier(BaseSpecifier):
+
+    _operators = {}
+
+    def __init__(self, spec="", prereleases=None):
+        match = self._regex.search(spec)
+        if not match:
+            raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
+
+        self._spec = (
+            match.group("operator").strip(),
+            match.group("version").strip(),
+        )
+
+        # Store whether or not this Specifier should accept prereleases
+        self._prereleases = prereleases
+
+    def __repr__(self):
+        pre = (
+            ", prereleases={0!r}".format(self.prereleases)
+            if self._prereleases is not None
+            else ""
+        )
+
+        return "<{0}({1!r}{2})>".format(
+            self.__class__.__name__,
+            str(self),
+            pre,
+        )
+
+    def __str__(self):
+        return "{0}{1}".format(*self._spec)
+
+    def __hash__(self):
+        return hash(self._spec)
+
+    def __eq__(self, other):
+        if isinstance(other, string_types):
+            try:
+                other = self.__class__(other)
+            except InvalidSpecifier:
+                return NotImplemented
+        elif not isinstance(other, self.__class__):
+            return NotImplemented
+
+        return self._spec == other._spec
+
+    def __ne__(self, other):
+        if isinstance(other, string_types):
+            try:
+                other = self.__class__(other)
+            except InvalidSpecifier:
+                return NotImplemented
+        elif not isinstance(other, self.__class__):
+            return NotImplemented
+
+        return self._spec != other._spec
+
+    def _get_operator(self, op):
+        return getattr(self, "_compare_{0}".format(self._operators[op]))
+
+    def _coerce_version(self, version):
+        if not isinstance(version, (LegacyVersion, Version)):
+            version = parse(version)
+        return version
+
+    @property
+    def operator(self):
+        return self._spec[0]
+
+    @property
+    def version(self):
+        return self._spec[1]
+
+    @property
+    def prereleases(self):
+        return self._prereleases
+
+    @prereleases.setter
+    def prereleases(self, value):
+        self._prereleases = value
+
+    def __contains__(self, item):
+        return self.contains(item)
+
+    def contains(self, item, prereleases=None):
+        # Determine if prereleases are to be allowed or not.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # Normalize item to a Version or LegacyVersion; this allows us to have
+        # a shortcut for ``"2.0" in Specifier(">=2")``.
+        item = self._coerce_version(item)
+
+        # Determine if we should be supporting prereleases in this specifier
+        # or not; if we do not support prereleases, then we can short-circuit
+        # the logic if this version is a prerelease.
+        if item.is_prerelease and not prereleases:
+            return False
+
+        # Actually do the comparison to determine if this item is contained
+        # within this Specifier or not.
+        return self._get_operator(self.operator)(item, self.version)
+
+    def filter(self, iterable, prereleases=None):
+        yielded = False
+        found_prereleases = []
+
+        kw = {"prereleases": prereleases if prereleases is not None else True}
+
+        # Attempt to iterate over all the values in the iterable and if any of
+        # them match, yield them.
+        for version in iterable:
+            parsed_version = self._coerce_version(version)
+
+            if self.contains(parsed_version, **kw):
+                # If our version is a prerelease, and we were not set to allow
+                # prereleases, then we'll store it for later in case nothing
+                # else matches this specifier.
+                if (parsed_version.is_prerelease and not
+                        (prereleases or self.prereleases)):
+                    found_prereleases.append(version)
+                # Either this is not a prerelease, or we should have been
+                # accepting prereleases from the beginning.
+                else:
+                    yielded = True
+                    yield version
+
+        # Now that we've iterated over everything, determine if we've yielded
+        # any values, and if we have not and we have any prereleases stored up
+        # then we will go ahead and yield the prereleases.
+        if not yielded and found_prereleases:
+            for version in found_prereleases:
+                yield version
+
+
+class LegacySpecifier(_IndividualSpecifier):
+
+    _regex_str = (
+        r"""
+        (?P<operator>(==|!=|<=|>=|<|>))
+        \s*
+        (?P<version>
+            [^,;\s)]* # Since this is a "legacy" specifier, and the version
+                      # string can be just about anything, we match everything
+                      # except for whitespace, a semi-colon for marker support,
+                      # a closing paren since versions can be enclosed in
+                      # them, and a comma since it's a version separator.
+        )
+        """
+    )
+
+    _regex = re.compile(
+        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    _operators = {
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+    }
+
+    def _coerce_version(self, version):
+        if not isinstance(version, LegacyVersion):
+            version = LegacyVersion(str(version))
+        return version
+
+    def _compare_equal(self, prospective, spec):
+        return prospective == self._coerce_version(spec)
+
+    def _compare_not_equal(self, prospective, spec):
+        return prospective != self._coerce_version(spec)
+
+    def _compare_less_than_equal(self, prospective, spec):
+        return prospective <= self._coerce_version(spec)
+
+    def _compare_greater_than_equal(self, prospective, spec):
+        return prospective >= self._coerce_version(spec)
+
+    def _compare_less_than(self, prospective, spec):
+        return prospective < self._coerce_version(spec)
+
+    def _compare_greater_than(self, prospective, spec):
+        return prospective > self._coerce_version(spec)
+
+
+def _require_version_compare(fn):
+    @functools.wraps(fn)
+    def wrapped(self, prospective, spec):
+        if not isinstance(prospective, Version):
+            return False
+        return fn(self, prospective, spec)
+    return wrapped
+
+
+class Specifier(_IndividualSpecifier):
+
+    _regex_str = (
+        r"""
+        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+        (?P<version>
+            (?:
+                # The identity operators allow for an escape hatch that will
+                # do an exact string match of the version you wish to install.
+                # This will not be parsed by PEP 440 and we cannot determine
+                # any semantic meaning from it. This operator is discouraged
+                # but included entirely as an escape hatch.
+                (?<====)  # Only match for the identity operator
+                \s*
+                [^\s]*    # We just match everything, except for whitespace
+                          # since we are only testing for strict identity.
+            )
+            |
+            (?:
+                # The (non)equality operators allow for wild card and local
+                # versions to be specified so we have to define these two
+                # operators separately to enable that.
+                (?<===|!=)            # Only match for equals and not equals
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+
+                # You cannot use a wild card and a dev or local version
+                # together so group them with a | and make them optional.
+                (?:
+                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
+                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+                    |
+                    \.\*  # Wild card syntax of .*
+                )?
+            )
+            |
+            (?:
+                # The compatible operator requires at least two digits in the
+                # release segment.
+                (?<=~=)               # Only match for the compatible operator
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+            |
+            (?:
+                # All other operators only allow a sub set of what the
+                # (non)equality operators do. Specifically they do not allow
+                # local versions to be specified nor do they allow the prefix
+                # matching wild cards.
+                (?<!==|!=|~=)         # We have special cases for these
+                                      # operators so we want to make sure they
+                                      # don't match here.
+
+                \s*
+                v?
+                (?:[0-9]+!)?          # epoch
+                [0-9]+(?:\.[0-9]+)*   # release
+                (?:                   # pre release
+                    [-_\.]?
+                    (a|b|c|rc|alpha|beta|pre|preview)
+                    [-_\.]?
+                    [0-9]*
+                )?
+                (?:                                   # post release
+                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                )?
+                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
+            )
+        )
+        """
+    )
+
+    _regex = re.compile(
+        r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    _operators = {
+        "~=": "compatible",
+        "==": "equal",
+        "!=": "not_equal",
+        "<=": "less_than_equal",
+        ">=": "greater_than_equal",
+        "<": "less_than",
+        ">": "greater_than",
+        "===": "arbitrary",
+    }
+
+    @_require_version_compare
+    def _compare_compatible(self, prospective, spec):
+        # Compatible releases have an equivalent combination of >= and ==.
+        # That is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+        # implement this in terms of the other specifiers instead of
+        # implementing it ourselves. The only thing we need to do is construct
+        # the other specifiers.
+
+        # We want everything but the last item in the version, but we want to
+        # ignore post and dev releases and we want to treat the pre-release as
+        # its own separate segment.
+        prefix = ".".join(
+            list(
+                itertools.takewhile(
+                    lambda x: (not x.startswith("post") and not
+                               x.startswith("dev")),
+                    _version_split(spec),
+                )
+            )[:-1]
+        )
+
+        # Add the prefix notation to the end of our string
+        prefix += ".*"
+
+        return (self._get_operator(">=")(prospective, spec) and
+                self._get_operator("==")(prospective, prefix))
+
+    @_require_version_compare
+    def _compare_equal(self, prospective, spec):
+        # We need special logic to handle prefix matching
+        if spec.endswith(".*"):
+            # In the case of prefix matching we want to ignore local segment.
+            prospective = Version(prospective.public)
+            # Split the spec out by dots, and pretend that there is an implicit
+            # dot in between a release segment and a pre-release segment.
+            spec = _version_split(spec[:-2])  # Remove the trailing .*
+
+            # Split the prospective version out by dots, and pretend that there
+            # is an implicit dot in between a release segment and a pre-release
+            # segment.
+            prospective = _version_split(str(prospective))
+
+            # Shorten the prospective version to be the same length as the spec
+            # so that we can determine if the specifier is a prefix of the
+            # prospective version or not.
+            prospective = prospective[:len(spec)]
+
+            # Pad out our two sides with zeros so that they both equal the same
+            # length.
+            spec, prospective = _pad_version(spec, prospective)
+        else:
+            # Convert our spec string into a Version
+            spec = Version(spec)
+
+            # If the specifier does not have a local segment, then we want to
+            # act as if the prospective version also does not have a local
+            # segment.
+            if not spec.local:
+                prospective = Version(prospective.public)
+
+        return prospective == spec
+
+    @_require_version_compare
+    def _compare_not_equal(self, prospective, spec):
+        return not self._compare_equal(prospective, spec)
+
+    @_require_version_compare
+    def _compare_less_than_equal(self, prospective, spec):
+        return prospective <= Version(spec)
+
+    @_require_version_compare
+    def _compare_greater_than_equal(self, prospective, spec):
+        return prospective >= Version(spec)
+
+    @_require_version_compare
+    def _compare_less_than(self, prospective, spec):
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec)
+
+        # Check to see if the prospective version is less than the spec
+        # version. If it's not, we can short-circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective < spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a pre-release version, we do not accept pre-release
+        # versions for the version mentioned in the specifier (e.g. <3.1 should
+        # not match 3.1.dev0, but should match 3.0.dev0).
+        if not spec.is_prerelease and prospective.is_prerelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten here, it means that the prospective version is both
+        # less than the spec version *and* it's not a pre-release of the same
+        # version in the spec.
+        return True
+
+    @_require_version_compare
+    def _compare_greater_than(self, prospective, spec):
+        # Convert our spec to a Version instance, since we'll want to work with
+        # it as a version.
+        spec = Version(spec)
+
+        # Check to see if the prospective version is greater than the spec
+        # version. If it's not, we can short-circuit and just return False now
+        # instead of doing extra unneeded work.
+        if not prospective > spec:
+            return False
+
+        # This special case is here so that, unless the specifier itself
+        # includes a post-release version, we do not accept
+        # post-release versions for the version mentioned in the specifier
+        # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+        if not spec.is_postrelease and prospective.is_postrelease:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # Ensure that we do not allow a local version of the version mentioned
+        # in the specifier, which is technically greater than, to match.
+        if prospective.local is not None:
+            if Version(prospective.base_version) == Version(spec.base_version):
+                return False
+
+        # If we've gotten here, it means that the prospective version is both
+        # greater than the spec version *and* not a post-release or local
+        # version of the same version in the spec.
+        return True
+
+    def _compare_arbitrary(self, prospective, spec):
+        return str(prospective).lower() == str(spec).lower()
+
+    @property
+    def prereleases(self):
+        # If there is an explicit prereleases set for this, then we'll just
+        # blindly use that.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # Look at all of our specifiers and determine if they are inclusive
+        # operators and, if they are, whether they include an explicit
+        # prerelease.
+        operator, version = self._spec
+        if operator in ["==", ">=", "<=", "~=", "==="]:
+            # The == specifier can include a trailing .*; if it does, we
+            # want to remove it before parsing.
+            if operator == "==" and version.endswith(".*"):
+                version = version[:-2]
+
+            # Parse the version, and if it is a pre-release then this
+            # specifier allows pre-releases.
+            if parse(version).is_prerelease:
+                return True
+
+        return False
+
+    @prereleases.setter
+    def prereleases(self, value):
+        self._prereleases = value
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version):
+    result = []
+    for item in version.split("."):
+        match = _prefix_regex.search(item)
+        if match:
+            result.extend(match.groups())
+        else:
+            result.append(item)
+    return result
+
+
+def _pad_version(left, right):
+    left_split, right_split = [], []
+
+    # Get the release segment of our versions
+    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+    # Get the rest of our versions
+    left_split.append(left[len(left_split[0]):])
+    right_split.append(right[len(right_split[0]):])
+
+    # Insert our padding
+    left_split.insert(
+        1,
+        ["0"] * max(0, len(right_split[0]) - len(left_split[0])),
+    )
+    right_split.insert(
+        1,
+        ["0"] * max(0, len(left_split[0]) - len(right_split[0])),
+    )
+
+    return (
+        list(itertools.chain(*left_split)),
+        list(itertools.chain(*right_split)),
+    )
+
+
+class SpecifierSet(BaseSpecifier):
+
+    def __init__(self, specifiers="", prereleases=None):
+        # Split on , to break each individual specifier into its own item, and
+        # strip each item to remove leading/trailing whitespace.
+        specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+        # Parse each individual specifier, attempting first to make it a
+        # Specifier and falling back to a LegacySpecifier.
+        parsed = set()
+        for specifier in specifiers:
+            try:
+                parsed.add(Specifier(specifier))
+            except InvalidSpecifier:
+                parsed.add(LegacySpecifier(specifier))
+
+        # Turn our parsed specifiers into a frozen set and save them for later.
+        self._specs = frozenset(parsed)
+
+        # Store our prereleases value so we can use it later to determine if
+        # we accept prereleases or not.
+        self._prereleases = prereleases
+
+    def __repr__(self):
+        pre = (
+            ", prereleases={0!r}".format(self.prereleases)
+            if self._prereleases is not None
+            else ""
+        )
+
+        return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
+
+    def __str__(self):
+        return ",".join(sorted(str(s) for s in self._specs))
+
+    def __hash__(self):
+        return hash(self._specs)
+
+    def __and__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        specifier = SpecifierSet()
+        specifier._specs = frozenset(self._specs | other._specs)
+
+        if self._prereleases is None and other._prereleases is not None:
+            specifier._prereleases = other._prereleases
+        elif self._prereleases is not None and other._prereleases is None:
+            specifier._prereleases = self._prereleases
+        elif self._prereleases == other._prereleases:
+            specifier._prereleases = self._prereleases
+        else:
+            raise ValueError(
+                "Cannot combine SpecifierSets with True and False prerelease "
+                "overrides."
+            )
+
+        return specifier
+
+    def __eq__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif isinstance(other, _IndividualSpecifier):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs == other._specs
+
+    def __ne__(self, other):
+        if isinstance(other, string_types):
+            other = SpecifierSet(other)
+        elif isinstance(other, _IndividualSpecifier):
+            other = SpecifierSet(str(other))
+        elif not isinstance(other, SpecifierSet):
+            return NotImplemented
+
+        return self._specs != other._specs
+
+    def __len__(self):
+        return len(self._specs)
+
+    def __iter__(self):
+        return iter(self._specs)
+
+    @property
+    def prereleases(self):
+        # If we have been given an explicit prerelease modifier, then we'll
+        # pass that through here.
+        if self._prereleases is not None:
+            return self._prereleases
+
+        # If we don't have any specifiers, and we don't have a forced value,
+        # then we'll just return None since we don't know if this should have
+        # pre-releases or not.
+        if not self._specs:
+            return None
+
+        # Otherwise we'll see if any of the given specifiers accept
+        # prereleases, if any of them do we'll return True, otherwise False.
+        return any(s.prereleases for s in self._specs)
+
+    @prereleases.setter
+    def prereleases(self, value):
+        self._prereleases = value
+
+    def __contains__(self, item):
+        return self.contains(item)
+
+    def contains(self, item, prereleases=None):
+        # Ensure that our item is a Version or LegacyVersion instance.
+        if not isinstance(item, (LegacyVersion, Version)):
+            item = parse(item)
+
+        # Determine if we're forcing a prerelease or not; if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # We can determine if we're going to allow pre-releases by looking to
+        # see if any of the underlying items supports them. If none of them do
+        # and this item is a pre-release then we do not allow it and we can
+        # short circuit that here.
+        # Note: This means that 1.0.dev1 would not be contained in something
+        #       like >=1.0.devabc; however, it would be in >=1.0.devabc,>0.0.dev0
+        if not prereleases and item.is_prerelease:
+            return False
+
+        # We simply dispatch to the underlying specs here to make sure that the
+        # given version is contained within all of them.
+        # Note: This use of all() here means that an empty set of specifiers
+        #       will always return True; this is an explicit design decision.
+        return all(
+            s.contains(item, prereleases=prereleases)
+            for s in self._specs
+        )
+
+    def filter(self, iterable, prereleases=None):
+        # Determine if we're forcing a prerelease or not; if we're not forcing
+        # one for this particular filter call, then we'll use whatever the
+        # SpecifierSet thinks for whether or not we should support prereleases.
+        if prereleases is None:
+            prereleases = self.prereleases
+
+        # If we have any specifiers, then we want to wrap our iterable in the
+        # filter method of each one; this acts as a logical AND across all of
+        # the specifiers.
+        if self._specs:
+            for spec in self._specs:
+                iterable = spec.filter(iterable, prereleases=bool(prereleases))
+            return iterable
+        # If we do not have any specifiers, then we need to apply a rough
+        # filter which will filter out any pre-releases (unless there are no
+        # final releases at all) and which will filter out LegacyVersion
+        # objects in general.
+        else:
+            filtered = []
+            found_prereleases = []
+
+            for item in iterable:
+                # Ensure that we have some kind of Version class for this item.
+                if not isinstance(item, (LegacyVersion, Version)):
+                    parsed_version = parse(item)
+                else:
+                    parsed_version = item
+
+                # Filter out any item which is parsed as a LegacyVersion
+                if isinstance(parsed_version, LegacyVersion):
+                    continue
+
+                # Store any item which is a pre-release for later unless we've
+                # already found a final version or we are accepting prereleases
+                if parsed_version.is_prerelease and not prereleases:
+                    if not filtered:
+                        found_prereleases.append(item)
+                else:
+                    filtered.append(item)
+
+            # If we've found no items except for pre-releases, then we'll go
+            # ahead and use the pre-releases
+            if not filtered and found_prereleases and prereleases is None:
+                return found_prereleases
+
+            return filtered
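[Editor's note] A small sketch of how the prerelease handling above behaves,
going through the public API of the standalone ``packaging`` distribution
(this vendored module mirrors it)::

    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet(">=1.0")
    print("1.1rc1" in spec)                             # False: prereleases excluded by default
    print(spec.contains("1.1rc1", prereleases=True))    # True: explicit per-call override
    print(list(spec.filter(["1.0", "1.1rc1", "2.0"])))  # ['1.0', '2.0']

    # An empty SpecifierSet matches everything, but its rough filter still
    # drops pre-releases whenever at least one final release is present.
    print(list(SpecifierSet("").filter(["1.0", "2.0.dev1"])))  # ['1.0']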
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/utils.py b/lib/python3.7/site-packages/setuptools/_vendor/packaging/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..942387cef5d75f299a769b1eb43b6c7679e7a3a0
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/_vendor/packaging/utils.py
@@ -0,0 +1,14 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import re
+
+
+_canonicalize_regex = re.compile(r"[-_.]+")
+
+
+def canonicalize_name(name):
+    # This is taken from PEP 503.
+    return _canonicalize_regex.sub("-", name).lower()
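[Editor's note] ``canonicalize_name`` implements the PEP 503 normalization:
runs of ``-``, ``_`` and ``.`` collapse to a single dash and the result is
lowercased, so differently spelled names of the same project compare equal::

    from packaging.utils import canonicalize_name

    for name in ("Flask", "flask", "Zope.Interface", "zope_interface"):
        print(canonicalize_name(name))
    # -> flask, flask, zope-interface, zope-interface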
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/packaging/version.py b/lib/python3.7/site-packages/setuptools/_vendor/packaging/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..83b5ee8c5efadf22ce2f16ff08c8a8d75f1eb5df
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/_vendor/packaging/version.py
@@ -0,0 +1,393 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import collections
+import itertools
+import re
+
+from ._structures import Infinity
+
+
+__all__ = [
+    "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
+]
+
+
+_Version = collections.namedtuple(
+    "_Version",
+    ["epoch", "release", "dev", "pre", "post", "local"],
+)
+
+
+def parse(version):
+    """
+    Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object depending on whether the given version
+    is a valid PEP 440 version or a legacy version.
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found; users should refer to PEP 440.
+    """
+
+
+class _BaseVersion(object):
+
+    def __hash__(self):
+        return hash(self._key)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda s, o: s < o)
+
+    def __le__(self, other):
+        return self._compare(other, lambda s, o: s <= o)
+
+    def __eq__(self, other):
+        return self._compare(other, lambda s, o: s == o)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda s, o: s >= o)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda s, o: s > o)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda s, o: s != o)
+
+    def _compare(self, other, method):
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+
+    def __init__(self, version):
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+    def __str__(self):
+        return self._version
+
+    def __repr__(self):
+        return "<LegacyVersion({0})>".format(repr(str(self)))
+
+    @property
+    def public(self):
+        return self._version
+
+    @property
+    def base_version(self):
+        return self._version
+
+    @property
+    def local(self):
+        return None
+
+    @property
+    def is_prerelease(self):
+        return False
+
+    @property
+    def is_postrelease(self):
+        return False
+
+
+_legacy_version_component_re = re.compile(
+    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
+)
+
+_legacy_version_replacement_map = {
+    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version):
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an
+    # epoch greater than or equal to 0. This effectively sorts every
+    # LegacyVersion, which uses the de facto standard originally implemented
+    # by setuptools, before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version in setuptools,
+    # prior to its adoption of the packaging library.
+    parts = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+    parts = tuple(parts)
+
+    return epoch, parts
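+# Editor's illustration (not part of the upstream module): the hardcoded -1
+# epoch means every legacy key compares less than every PEP 440 key, e.g.
+#
+#     >>> _legacy_cmpkey("1.0")
+#     (-1, ('00000001', '*final'))
+#
+# so parse("not a pep440 version") sorts before parse("0.0.1").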
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return "<Version({0})>".format(repr(str(self)))
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(".post{0}".format(self._version.post[1]))
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(".dev{0}".format(self._version.dev[1]))
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                "+{0}".format(".".join(str(x) for x in self._version.local))
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but not a letter, then this
+        # is using the implicit post release syntax (e.g. 1.0-1).
+        letter = "post"
+
+        return letter, int(number)
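+# Editor's illustration (not part of the upstream module):
+#     _parse_letter_version("ALPHA", None) -> ("a", 0)
+#     _parse_letter_version("rev", "3")    -> ("post", 3)
+#     _parse_letter_version(None, "1")     -> ("post", 1)   # implicit 1.0-1 style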
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_seperators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all of the now
+    # leading zeros until we come to something non-zero, re-reverse the rest
+    # back into the correct order, and make it a tuple to use as our sorting
+    # key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP 440:
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
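[Editor's note] A brief sketch of the ordering produced by _cmpkey, through
the public API of the standalone ``packaging`` distribution::

    from packaging.version import Version, parse

    versions = ["1.0", "1.0.dev0", "1.0a1", "1.0.post1", "1.0+local"]
    print([str(v) for v in sorted(Version(v) for v in versions)])
    # -> ['1.0.dev0', '1.0a1', '1.0', '1.0+local', '1.0.post1']

    # Invalid PEP 440 input falls back to LegacyVersion, which sorts before
    # every valid Version because of its hardcoded -1 epoch.
    print(parse("french toast") < Version("0.0.1"))  # True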
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/pyparsing.py b/lib/python3.7/site-packages/setuptools/_vendor/pyparsing.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf75e1e5fcbfe7eac41d2a9e446c5c980741087b
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/_vendor/pyparsing.py
@@ -0,0 +1,5742 @@
+# module pyparsing.py
+#
+# Copyright (c) 2003-2018  Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = \
+"""
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and executing simple grammars,
+vs. the traditional lex/yacc approach, or the use of regular expressions.  With pyparsing, you
+don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
+provides a library of classes that you use to construct the grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form 
+C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements 
+(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
+L{Literal} expressions)::
+
+    from pyparsing import Word, alphas
+
+    # define grammar of a greeting
+    greet = Word(alphas) + "," + Word(alphas) + "!"
+
+    hello = "Hello, World!"
+    print (hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+    Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the self-explanatory
+class names, and the use of '+', '|' and '^' operators.
+
+The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
+object with named attributes.
+
+The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
+ - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello  ,  World  !", etc.)
+ - quoted strings
+ - embedded comments
+
+
+Getting Started -
+-----------------
+Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+ - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
+ - construct character word-group expressions using the L{Word} class
+ - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
+ - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
+ - associate names with your parsed results using L{ParserElement.setResultsName}
+ - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
+ - find more useful common expressions in the L{pyparsing_common} namespace class
+"""
+
+__version__ = "2.2.1"
+__versionTime__ = "18 Sep 2018 00:49 UTC"
+__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
+
+import string
+from weakref import ref as wkref
+import copy
+import sys
+import warnings
+import re
+import sre_constants
+import collections
+import pprint
+import traceback
+import types
+from datetime import datetime
+
+try:
+    from _thread import RLock
+except ImportError:
+    from threading import RLock
+
+try:
+    # Python 3
+    from collections.abc import Iterable
+    from collections.abc import MutableMapping
+except ImportError:
+    # Python 2.7
+    from collections import Iterable
+    from collections import MutableMapping
+
+try:
+    from collections import OrderedDict as _OrderedDict
+except ImportError:
+    try:
+        from ordereddict import OrderedDict as _OrderedDict
+    except ImportError:
+        _OrderedDict = None
+
+#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
+
+__all__ = [
+'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
+'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
+'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
+'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
+'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
+'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', 
+'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
+'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
+'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
+'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
+'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
+'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
+'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
+'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', 
+'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
+'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
+'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
+'CloseMatch', 'tokenMap', 'pyparsing_common',
+]
+
+system_version = tuple(sys.version_info)[:3]
+PY_3 = system_version[0] == 3
+if PY_3:
+    _MAX_INT = sys.maxsize
+    basestring = str
+    unichr = chr
+    _ustr = str
+
+    # build list of single arg builtins, that can be used as parse actions
+    singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
+
+else:
+    _MAX_INT = sys.maxint
+    range = xrange
+
+    def _ustr(obj):
+        """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
+           str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
+           then < returns the unicode object | encodes it with the default encoding | ... >.
+        """
+        if isinstance(obj,unicode):
+            return obj
+
+        try:
+            # If this works, then _ustr(obj) has the same behaviour as str(obj), so
+            # it won't break any existing code.
+            return str(obj)
+
+        except UnicodeEncodeError:
+            # Else encode it
+            ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
+            xmlcharref = Regex(r'&#\d+;')
+            xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
+            return xmlcharref.transformString(ret)
+
+    # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
+    singleArgBuiltins = []
+    import __builtin__
+    for fname in "sum len sorted reversed list tuple set any all min max".split():
+        try:
+            singleArgBuiltins.append(getattr(__builtin__,fname))
+        except AttributeError:
+            continue
+            
+_generatorType = type((y for y in range(1)))
+ 
+def _xml_escape(data):
+    """Escape &, <, >, ", ', etc. in a string of data."""
+
+    # ampersand must be replaced first
+    from_symbols = '&><"\''
+    to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
+    for from_,to_ in zip(from_symbols, to_symbols):
+        data = data.replace(from_, to_)
+    return data
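+# Editor's illustration (not part of the upstream module):
+#     _xml_escape('a < b & "c"') -> 'a &lt; b &amp; &quot;c&quot;'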
+
+class _Constants(object):
+    pass
+
+alphas     = string.ascii_uppercase + string.ascii_lowercase
+nums       = "0123456789"
+hexnums    = nums + "ABCDEFabcdef"
+alphanums  = alphas + nums
+_bslash    = chr(92)
+printables = "".join(c for c in string.printable if c not in string.whitespace)
+
+class ParseBaseException(Exception):
+    """base exception class for all parsing runtime exceptions"""
+    # Performance tuning: we construct a *lot* of these, so keep this
+    # constructor as small and fast as possible
+    def __init__( self, pstr, loc=0, msg=None, elem=None ):
+        self.loc = loc
+        if msg is None:
+            self.msg = pstr
+            self.pstr = ""
+        else:
+            self.msg = msg
+            self.pstr = pstr
+        self.parserElement = elem
+        self.args = (pstr, loc, msg)
+
+    @classmethod
+    def _from_exception(cls, pe):
+        """
+        internal factory method to simplify creating one type of ParseException 
+        from another - avoids having __init__ signature conflicts among subclasses
+        """
+        return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
+
+    def __getattr__( self, aname ):
+        """supported attributes by name are:
+            - lineno - returns the line number of the exception text
+            - col - returns the column number of the exception text
+            - line - returns the line containing the exception text
+        """
+        if( aname == "lineno" ):
+            return lineno( self.loc, self.pstr )
+        elif( aname in ("col", "column") ):
+            return col( self.loc, self.pstr )
+        elif( aname == "line" ):
+            return line( self.loc, self.pstr )
+        else:
+            raise AttributeError(aname)
+
+    def __str__( self ):
+        return "%s (at char %d), (line:%d, col:%d)" % \
+                ( self.msg, self.loc, self.lineno, self.column )
+    def __repr__( self ):
+        return _ustr(self)
+    def markInputline( self, markerString = ">!<" ):
+        """Extracts the exception line from the input string, and marks
+           the location of the exception with a special symbol.
+        """
+        line_str = self.line
+        line_column = self.column - 1
+        if markerString:
+            line_str = "".join((line_str[:line_column],
+                                markerString, line_str[line_column:]))
+        return line_str.strip()
+    def __dir__(self):
+        return "lineno col line".split() + dir(type(self))
+
+class ParseException(ParseBaseException):
+    """
+    Exception thrown when a parse expression doesn't match the input string;
+    supported attributes by name are:
+     - lineno - returns the line number of the exception text
+     - col - returns the column number of the exception text
+     - line - returns the line containing the exception text
+        
+    Example::
+        try:
+            Word(nums).setName("integer").parseString("ABC")
+        except ParseException as pe:
+            print(pe)
+            print("column: {}".format(pe.col))
+            
+    prints::
+        Expected integer (at char 0), (line:1, col:1)
+        column: 1
+    """
+    pass
+
+class ParseFatalException(ParseBaseException):
+    """user-throwable exception thrown when inconsistent parse content
+       is found; stops all parsing immediately"""
+    pass
+
+class ParseSyntaxException(ParseFatalException):
+    """just like L{ParseFatalException}, but thrown internally when an
+       L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop 
+       immediately because an unbacktrackable syntax error has been found"""
+    pass
+
+#~ class ReparseException(ParseBaseException):
+    #~ """Experimental class - parse actions can raise this exception to cause
+       #~ pyparsing to reparse the input string:
+        #~ - with a modified input string, and/or
+        #~ - with a modified start location
+       #~ Set the values of the ReparseException in the constructor, and raise the
+       #~ exception in a parse action to cause pyparsing to use the new string/location.
+       #~ Setting the values as None causes no change to be made.
+       #~ """
+    #~ def __init_( self, newstring, restartLoc ):
+        #~ self.newParseText = newstring
+        #~ self.reparseLoc = restartLoc
+
+class RecursiveGrammarException(Exception):
+    """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
+    def __init__( self, parseElementList ):
+        self.parseElementTrace = parseElementList
+
+    def __str__( self ):
+        return "RecursiveGrammarException: %s" % self.parseElementTrace
+
+class _ParseResultsWithOffset(object):
+    def __init__(self,p1,p2):
+        self.tup = (p1,p2)
+    def __getitem__(self,i):
+        return self.tup[i]
+    def __repr__(self):
+        return repr(self.tup[0])
+    def setOffset(self,i):
+        self.tup = (self.tup[0],i)
+
+class ParseResults(object):
+    """
+    Structured parse results, to provide multiple means of access to the parsed data:
+       - as a list (C{len(results)})
+       - by list index (C{results[0], results[1]}, etc.)
+       - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
+
+    Example::
+        integer = Word(nums)
+        date_str = (integer.setResultsName("year") + '/' 
+                        + integer.setResultsName("month") + '/' 
+                        + integer.setResultsName("day"))
+        # equivalent form:
+        # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+        # parseString returns a ParseResults object
+        result = date_str.parseString("1999/12/31")
+
+        def test(s, fn=repr):
+            print("%s -> %s" % (s, fn(eval(s))))
+        test("list(result)")
+        test("result[0]")
+        test("result['month']")
+        test("result.day")
+        test("'month' in result")
+        test("'minutes' in result")
+        test("result.dump()", str)
+    prints::
+        list(result) -> ['1999', '/', '12', '/', '31']
+        result[0] -> '1999'
+        result['month'] -> '12'
+        result.day -> '31'
+        'month' in result -> True
+        'minutes' in result -> False
+        result.dump() -> ['1999', '/', '12', '/', '31']
+        - day: 31
+        - month: 12
+        - year: 1999
+    """
+    def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
+        if isinstance(toklist, cls):
+            return toklist
+        retobj = object.__new__(cls)
+        retobj.__doinit = True
+        return retobj
+
+    # Performance tuning: we construct a *lot* of these, so keep this
+    # constructor as small and fast as possible
+    def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
+        if self.__doinit:
+            self.__doinit = False
+            self.__name = None
+            self.__parent = None
+            self.__accumNames = {}
+            self.__asList = asList
+            self.__modal = modal
+            if toklist is None:
+                toklist = []
+            if isinstance(toklist, list):
+                self.__toklist = toklist[:]
+            elif isinstance(toklist, _generatorType):
+                self.__toklist = list(toklist)
+            else:
+                self.__toklist = [toklist]
+            self.__tokdict = dict()
+
+        if name is not None and name:
+            if not modal:
+                self.__accumNames[name] = 0
+            if isinstance(name,int):
+                name = _ustr(name) # will always return a str, but use _ustr for consistency
+            self.__name = name
+            if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
+                if isinstance(toklist,basestring):
+                    toklist = [ toklist ]
+                if asList:
+                    if isinstance(toklist,ParseResults):
+                        self[name] = _ParseResultsWithOffset(toklist.copy(),0)
+                    else:
+                        self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
+                    self[name].__name = name
+                else:
+                    try:
+                        self[name] = toklist[0]
+                    except (KeyError,TypeError,IndexError):
+                        self[name] = toklist
+
+    def __getitem__( self, i ):
+        if isinstance( i, (int,slice) ):
+            return self.__toklist[i]
+        else:
+            if i not in self.__accumNames:
+                return self.__tokdict[i][-1][0]
+            else:
+                return ParseResults([ v[0] for v in self.__tokdict[i] ])
+
+    def __setitem__( self, k, v, isinstance=isinstance ):
+        if isinstance(v,_ParseResultsWithOffset):
+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
+            sub = v[0]
+        elif isinstance(k,(int,slice)):
+            self.__toklist[k] = v
+            sub = v
+        else:
+            self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
+            sub = v
+        if isinstance(sub,ParseResults):
+            sub.__parent = wkref(self)
+
+    def __delitem__( self, i ):
+        if isinstance(i,(int,slice)):
+            mylen = len( self.__toklist )
+            del self.__toklist[i]
+
+            # convert int to slice
+            if isinstance(i, int):
+                if i < 0:
+                    i += mylen
+                i = slice(i, i+1)
+            # get removed indices
+            removed = list(range(*i.indices(mylen)))
+            removed.reverse()
+            # fixup indices in token dictionary
+            for name,occurrences in self.__tokdict.items():
+                for j in removed:
+                    for k, (value, position) in enumerate(occurrences):
+                        occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
+        else:
+            del self.__tokdict[i]
+
+    def __contains__( self, k ):
+        return k in self.__tokdict
+
+    def __len__( self ): return len( self.__toklist )
+    def __bool__(self): return ( not not self.__toklist )
+    __nonzero__ = __bool__
+    def __iter__( self ): return iter( self.__toklist )
+    def __reversed__( self ): return iter( self.__toklist[::-1] )
+    def _iterkeys( self ):
+        if hasattr(self.__tokdict, "iterkeys"):
+            return self.__tokdict.iterkeys()
+        else:
+            return iter(self.__tokdict)
+
+    def _itervalues( self ):
+        return (self[k] for k in self._iterkeys())
+            
+    def _iteritems( self ):
+        return ((k, self[k]) for k in self._iterkeys())
+
+    if PY_3:
+        keys = _iterkeys       
+        """Returns an iterator of all named result keys (Python 3.x only)."""
+
+        values = _itervalues
+        """Returns an iterator of all named result values (Python 3.x only)."""
+
+        items = _iteritems
+        """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
+
+    else:
+        iterkeys = _iterkeys
+        """Returns an iterator of all named result keys (Python 2.x only)."""
+
+        itervalues = _itervalues
+        """Returns an iterator of all named result values (Python 2.x only)."""
+
+        iteritems = _iteritems
+        """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
+
+        def keys( self ):
+            """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.iterkeys())
+
+        def values( self ):
+            """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.itervalues())
+                
+        def items( self ):
+            """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
+            return list(self.iteritems())
+
+    def haskeys( self ):
+        """Since keys() returns an iterator, this method is helpful in bypassing
+           code that looks for the existence of any defined results names."""
+        return bool(self.__tokdict)
+        
+    def pop( self, *args, **kwargs):
+        """
+        Removes and returns item at specified index (default=C{last}).
+        Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
+        argument or an integer argument, it will use C{list} semantics
+        and pop tokens from the list of parsed tokens. If passed a 
+        non-integer argument (most likely a string), it will use C{dict}
+        semantics and pop the corresponding value from any defined 
+        results names. A second default return value argument is 
+        supported, just as in C{dict.pop()}.
+
+        Example::
+            def remove_first(tokens):
+                tokens.pop(0)
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+            print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
+
+            label = Word(alphas)
+            patt = label("LABEL") + OneOrMore(Word(nums))
+            print(patt.parseString("AAB 123 321").dump())
+
+            # Use pop() in a parse action to remove named result (note that corresponding value is not
+            # removed from list form of results)
+            def remove_LABEL(tokens):
+                tokens.pop("LABEL")
+                return tokens
+            patt.addParseAction(remove_LABEL)
+            print(patt.parseString("AAB 123 321").dump())
+        prints::
+            ['AAB', '123', '321']
+            - LABEL: AAB
+
+            ['AAB', '123', '321']
+        """
+        if not args:
+            args = [-1]
+        for k,v in kwargs.items():
+            if k == 'default':
+                args = (args[0], v)
+            else:
+                raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
+        if (isinstance(args[0], int) or 
+                        len(args) == 1 or 
+                        args[0] in self):
+            index = args[0]
+            ret = self[index]
+            del self[index]
+            return ret
+        else:
+            defaultvalue = args[1]
+            return defaultvalue
+
+    def get(self, key, defaultValue=None):
+        """
+        Returns named result matching the given key, or if there is no
+        such name, then returns the given C{defaultValue} or C{None} if no
+        C{defaultValue} is specified.
+
+        Similar to C{dict.get()}.
+        
+        Example::
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
+
+            result = date_str.parseString("1999/12/31")
+            print(result.get("year")) # -> '1999'
+            print(result.get("hour", "not specified")) # -> 'not specified'
+            print(result.get("hour")) # -> None
+        """
+        if key in self:
+            return self[key]
+        else:
+            return defaultValue
+
+    def insert( self, index, insStr ):
+        """
+        Inserts new element at location index in the list of parsed tokens.
+        
+        Similar to C{list.insert()}.
+
+        Example::
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+            # use a parse action to insert the parse location in the front of the parsed results
+            def insert_locn(locn, tokens):
+                tokens.insert(0, locn)
+            print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
+        """
+        self.__toklist.insert(index, insStr)
+        # fixup indices in token dictionary
+        for name,occurrences in self.__tokdict.items():
+            for k, (value, position) in enumerate(occurrences):
+                occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
+
+    def append( self, item ):
+        """
+        Add single element to end of ParseResults list of elements.
+
+        Example::
+            print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+            
+            # use a parse action to compute the sum of the parsed integers, and add it to the end
+            def append_sum(tokens):
+                tokens.append(sum(map(int, tokens)))
+            print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
+        """
+        self.__toklist.append(item)
+
+    def extend( self, itemseq ):
+        """
+        Add sequence of elements to end of ParseResults list of elements.
+
+        Example::
+            patt = OneOrMore(Word(alphas))
+            
+            # use a parse action to append the reverse of the matched strings, to make a palindrome
+            def make_palindrome(tokens):
+                tokens.extend(reversed([t[::-1] for t in tokens]))
+                return ''.join(tokens)
+            print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
+        """
+        if isinstance(itemseq, ParseResults):
+            self += itemseq
+        else:
+            self.__toklist.extend(itemseq)
+
+    def clear( self ):
+        """
+        Clear all elements and results names.
+        """
+        del self.__toklist[:]
+        self.__tokdict.clear()
+
+    def __getattr__( self, name ):
+        try:
+            return self[name]
+        except KeyError:
+            return ""
+
+    def __add__( self, other ):
+        ret = self.copy()
+        ret += other
+        return ret
+
+    def __iadd__( self, other ):
+        if other.__tokdict:
+            offset = len(self.__toklist)
+            addoffset = lambda a: offset if a<0 else a+offset
+            otheritems = other.__tokdict.items()
+            otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
+                                for (k,vlist) in otheritems for v in vlist]
+            for k,v in otherdictitems:
+                self[k] = v
+                if isinstance(v[0],ParseResults):
+                    v[0].__parent = wkref(self)
+            
+        self.__toklist += other.__toklist
+        self.__accumNames.update( other.__accumNames )
+        return self
+
+    def __radd__(self, other):
+        if isinstance(other,int) and other == 0:
+            # useful for merging many ParseResults using sum() builtin
+            return self.copy()
+        else:
+            # this may raise a TypeError - so be it
+            return other + self
+        
+    def __repr__( self ):
+        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
+
+    def __str__( self ):
+        return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
+
+    def _asStringList( self, sep='' ):
+        out = []
+        for item in self.__toklist:
+            if out and sep:
+                out.append(sep)
+            if isinstance( item, ParseResults ):
+                out += item._asStringList()
+            else:
+                out.append( _ustr(item) )
+        return out
+
+    def asList( self ):
+        """
+        Returns the parse results as a nested list of matching tokens, all converted to strings.
+
+        Example::
+            patt = OneOrMore(Word(alphas))
+            result = patt.parseString("sldkj lsdkj sldkj")
+            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
+            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
+            
+            # Use asList() to create an actual list
+            result_list = result.asList()
+            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
+        """
+        return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
+
+    def asDict( self ):
+        """
+        Returns the named parse results as a nested dictionary.
+
+        Example::
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+            
+            result = date_str.parseString('12/31/1999')
+            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
+            
+            result_dict = result.asDict()
+            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
+
+            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
+            import json
+            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
+            print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
+        """
+        if PY_3:
+            item_fn = self.items
+        else:
+            item_fn = self.iteritems
+            
+        def toItem(obj):
+            if isinstance(obj, ParseResults):
+                if obj.haskeys():
+                    return obj.asDict()
+                else:
+                    return [toItem(v) for v in obj]
+            else:
+                return obj
+                
+        return dict((k,toItem(v)) for k,v in item_fn())
+
+    def copy( self ):
+        """
+        Returns a new copy of a C{ParseResults} object.
+        """
+        ret = ParseResults( self.__toklist )
+        ret.__tokdict = self.__tokdict.copy()
+        ret.__parent = self.__parent
+        ret.__accumNames.update( self.__accumNames )
+        ret.__name = self.__name
+        return ret
+
+    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
+        """
+        (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
+        """
+        nl = "\n"
+        out = []
+        namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
+                                                            for v in vlist)
+        nextLevelIndent = indent + "  "
+
+        # collapse out indents if formatting is not desired
+        if not formatted:
+            indent = ""
+            nextLevelIndent = ""
+            nl = ""
+
+        selfTag = None
+        if doctag is not None:
+            selfTag = doctag
+        else:
+            if self.__name:
+                selfTag = self.__name
+
+        if not selfTag:
+            if namedItemsOnly:
+                return ""
+            else:
+                selfTag = "ITEM"
+
+        out += [ nl, indent, "<", selfTag, ">" ]
+
+        for i,res in enumerate(self.__toklist):
+            if isinstance(res,ParseResults):
+                if i in namedItems:
+                    out += [ res.asXML(namedItems[i],
+                                        namedItemsOnly and doctag is None,
+                                        nextLevelIndent,
+                                        formatted)]
+                else:
+                    out += [ res.asXML(None,
+                                        namedItemsOnly and doctag is None,
+                                        nextLevelIndent,
+                                        formatted)]
+            else:
+                # individual token, see if there is a name for it
+                resTag = None
+                if i in namedItems:
+                    resTag = namedItems[i]
+                if not resTag:
+                    if namedItemsOnly:
+                        continue
+                    else:
+                        resTag = "ITEM"
+                xmlBodyText = _xml_escape(_ustr(res))
+                out += [ nl, nextLevelIndent, "<", resTag, ">",
+                                                xmlBodyText,
+                                                "</", resTag, ">" ]
+
+        out += [ nl, indent, "</", selfTag, ">" ]
+        return "".join(out)
+
+    def __lookup(self,sub):
+        for k,vlist in self.__tokdict.items():
+            for v,loc in vlist:
+                if sub is v:
+                    return k
+        return None
+
+    def getName(self):
+        r"""
+        Returns the results name for this token expression. Useful when several 
+        different expressions might match at a particular location.
+
+        Example::
+            integer = Word(nums)
+            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
+            house_number_expr = Suppress('#') + Word(nums, alphanums)
+            user_data = (Group(house_number_expr)("house_number") 
+                        | Group(ssn_expr)("ssn")
+                        | Group(integer)("age"))
+            user_info = OneOrMore(user_data)
+            
+            result = user_info.parseString("22 111-22-3333 #221B")
+            for item in result:
+                print(item.getName(), ':', item[0])
+        prints::
+            age : 22
+            ssn : 111-22-3333
+            house_number : 221B
+        """
+        if self.__name:
+            return self.__name
+        elif self.__parent:
+            par = self.__parent()
+            if par:
+                return par.__lookup(self)
+            else:
+                return None
+        elif (len(self) == 1 and
+               len(self.__tokdict) == 1 and
+               next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
+            return next(iter(self.__tokdict.keys()))
+        else:
+            return None
+
+    def dump(self, indent='', depth=0, full=True):
+        """
+        Diagnostic method for listing out the contents of a C{ParseResults}.
+        Accepts an optional C{indent} argument so that this string can be embedded
+        in a nested display of other data.
+
+        Example::
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+            
+            result = date_str.parseString('12/31/1999')
+            print(result.dump())
+        prints::
+            ['12', '/', '31', '/', '1999']
+            - day: 1999
+            - month: 31
+            - year: 12
+        """
+        out = []
+        NL = '\n'
+        out.append( indent+_ustr(self.asList()) )
+        if full:
+            if self.haskeys():
+                items = sorted((str(k), v) for k,v in self.items())
+                for k,v in items:
+                    if out:
+                        out.append(NL)
+                    out.append( "%s%s- %s: " % (indent,('  '*depth), k) )
+                    if isinstance(v,ParseResults):
+                        if v:
+                            out.append( v.dump(indent,depth+1) )
+                        else:
+                            out.append(_ustr(v))
+                    else:
+                        out.append(repr(v))
+            elif any(isinstance(vv,ParseResults) for vv in self):
+                v = self
+                for i,vv in enumerate(v):
+                    if isinstance(vv,ParseResults):
+                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),vv.dump(indent,depth+1) ))
+                    else:
+                        out.append("\n%s%s[%d]:\n%s%s%s" % (indent,('  '*(depth)),i,indent,('  '*(depth+1)),_ustr(vv)))
+            
+        return "".join(out)
+
+    def pprint(self, *args, **kwargs):
+        """
+        Pretty-printer for parsed results as a list, using the C{pprint} module.
+        Accepts additional positional or keyword args as defined for the 
+        C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
+
+        Example::
+            ident = Word(alphas, alphanums)
+            num = Word(nums)
+            func = Forward()
+            term = ident | num | Group('(' + func + ')')
+            func <<= ident + Group(Optional(delimitedList(term)))
+            result = func.parseString("fna a,b,(fnb c,d,200),100")
+            result.pprint(width=40)
+        prints::
+            ['fna',
+             ['a',
+              'b',
+              ['(', 'fnb', ['c', 'd', '200'], ')'],
+              '100']]
+        """
+        pprint.pprint(self.asList(), *args, **kwargs)
+
+    # add support for pickle protocol
+    def __getstate__(self):
+        return ( self.__toklist,
+                 ( self.__tokdict.copy(),
+                   self.__parent is not None and self.__parent() or None,
+                   self.__accumNames,
+                   self.__name ) )
+
+    def __setstate__(self,state):
+        self.__toklist = state[0]
+        (self.__tokdict,
+         par,
+         inAccumNames,
+         self.__name) = state[1]
+        self.__accumNames = {}
+        self.__accumNames.update(inAccumNames)
+        if par is not None:
+            self.__parent = wkref(par)
+        else:
+            self.__parent = None
+
+    def __getnewargs__(self):
+        return self.__toklist, self.__name, self.__asList, self.__modal
+
+    def __dir__(self):
+        return (dir(type(self)) + list(self.keys()))
+
+MutableMapping.register(ParseResults)
+
+def col (loc,strg):
+    """Returns current column within a string, counting newlines as line separators.
+   The first column is number 1.
+
+   Note: the default parsing behavior is to expand tabs in the input string
+   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+   consistent view of the parsed string, the parse location, and line and column
+   positions within the parsed string.
+   """
+    s = strg
+    return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
+
+def lineno(loc,strg):
+    """Returns current line number within a string, counting newlines as line separators.
+   The first line is number 1.
+
+   Note: the default parsing behavior is to expand tabs in the input string
+   before starting the parsing process.  See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+   on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+   consistent view of the parsed string, the parse location, and line and column
+   positions within the parsed string.
+   """
+    return strg.count("\n",0,loc) + 1
+
+def line( loc, strg ):
+    """Returns the line of text containing loc within a string, counting newlines as line separators.
+       """
+    lastCR = strg.rfind("\n", 0, loc)
+    nextCR = strg.find("\n", loc)
+    if nextCR >= 0:
+        return strg[lastCR+1:nextCR]
+    else:
+        return strg[lastCR+1:]
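+# Editor's illustration (not part of the upstream module): with s = "ab\ncd",
+# loc 4 points at 'd', so lineno(4, s) == 2, col(4, s) == 2 and
+# line(4, s) == "cd".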
+
+def _defaultStartDebugAction( instring, loc, expr ):
+    print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
+
+def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
+    print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
+
+def _defaultExceptionDebugAction( instring, loc, expr, exc ):
+    print ("Exception raised:" + _ustr(exc))
+
+def nullDebugAction(*args):
+    """'Do-nothing' debug action, to suppress debugging output during parsing."""
+    pass
+
+# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
+#~ 'decorator to trim function calls to match the arity of the target'
+#~ def _trim_arity(func, maxargs=3):
+    #~ if func in singleArgBuiltins:
+        #~ return lambda s,l,t: func(t)
+    #~ limit = 0
+    #~ foundArity = False
+    #~ def wrapper(*args):
+        #~ nonlocal limit,foundArity
+        #~ while 1:
+            #~ try:
+                #~ ret = func(*args[limit:])
+                #~ foundArity = True
+                #~ return ret
+            #~ except TypeError:
+                #~ if limit == maxargs or foundArity:
+                    #~ raise
+                #~ limit += 1
+                #~ continue
+    #~ return wrapper
+
+# this version is Python 2.x-3.x cross-compatible
+'decorator to trim function calls to match the arity of the target'
+def _trim_arity(func, maxargs=2):
+    if func in singleArgBuiltins:
+        return lambda s,l,t: func(t)
+    limit = [0]
+    foundArity = [False]
+    
+    # traceback return data structure changed in Py3.5 - normalize back to plain tuples
+    if system_version[:2] >= (3,5):
+        def extract_stack(limit=0):
+            # special handling for Python 3.5.0 - extra deep call stack by 1
+            offset = -3 if system_version == (3,5,0) else -2
+            frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
+            return [frame_summary[:2]]
+        def extract_tb(tb, limit=0):
+            frames = traceback.extract_tb(tb, limit=limit)
+            frame_summary = frames[-1]
+            return [frame_summary[:2]]
+    else:
+        extract_stack = traceback.extract_stack
+        extract_tb = traceback.extract_tb
+    
+    # synthesize what would be returned by traceback.extract_stack at the call to 
+    # user's parse action 'func', so that we don't incur call penalty at parse time
+    
+    LINE_DIFF = 6
+    # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND 
+    # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
+    this_line = extract_stack(limit=2)[-1]
+    pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
+
+    def wrapper(*args):
+        while 1:
+            try:
+                ret = func(*args[limit[0]:])
+                foundArity[0] = True
+                return ret
+            except TypeError:
+                # re-raise TypeErrors if they did not come from our arity testing
+                if foundArity[0]:
+                    raise
+                else:
+                    try:
+                        tb = sys.exc_info()[-1]
+                        if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
+                            raise
+                    finally:
+                        del tb
+
+                if limit[0] <= maxargs:
+                    limit[0] += 1
+                    continue
+                raise
+
+    # copy func name to wrapper for sensible debug output
+    func_name = "<parse action>"
+    try:
+        func_name = getattr(func, '__name__', 
+                            getattr(func, '__class__').__name__)
+    except Exception:
+        func_name = str(func)
+    wrapper.__name__ = func_name
+
+    return wrapper
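+
+# Illustrative sketch (editor's addition, not part of pyparsing): _trim_arity
+# lets parse actions be written with 0-3 parameters; the wrapper probes for a
+# working arity on the first call, then reuses it:
+#
+#   wrapped = _trim_arity(lambda toks: toks[0])   # a 1-argument action
+#   wrapped("some instring", 0, ["tok"])          # -> 'tok'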
+
+class ParserElement(object):
+    """Abstract base level parser element class."""
+    DEFAULT_WHITE_CHARS = " \n\t\r"
+    verbose_stacktrace = False
+
+    @staticmethod
+    def setDefaultWhitespaceChars( chars ):
+        r"""
+        Overrides the default whitespace chars
+
+        Example::
+            # default whitespace chars are space, <TAB> and newline
+            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def', 'ghi', 'jkl']
+            
+            # change to just treat newline as significant
+            ParserElement.setDefaultWhitespaceChars(" \t")
+            OneOrMore(Word(alphas)).parseString("abc def\nghi jkl")  # -> ['abc', 'def']
+        """
+        ParserElement.DEFAULT_WHITE_CHARS = chars
+
+    @staticmethod
+    def inlineLiteralsUsing(cls):
+        """
+        Set class to be used for inclusion of string literals into a parser.
+        
+        Example::
+            # default literal class used is Literal
+            integer = Word(nums)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
+
+
+            # change to Suppress
+            ParserElement.inlineLiteralsUsing(Suppress)
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")           
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '12', '31']
+        """
+        ParserElement._literalStringClass = cls
+
+    def __init__( self, savelist=False ):
+        self.parseAction = list()
+        self.failAction = None
+        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
+        self.strRepr = None
+        self.resultsName = None
+        self.saveAsList = savelist
+        self.skipWhitespace = True
+        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+        self.copyDefaultWhiteChars = True
+        self.mayReturnEmpty = False # used when checking for left-recursion
+        self.keepTabs = False
+        self.ignoreExprs = list()
+        self.debug = False
+        self.streamlined = False
+        self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
+        self.errmsg = ""
+        self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
+        self.debugActions = ( None, None, None ) #custom debug actions
+        self.re = None
+        self.callPreparse = True # used to avoid redundant calls to preParse
+        self.callDuringTry = False
+
+    def copy( self ):
+        """
+        Make a copy of this C{ParserElement}.  Useful for defining different parse actions
+        for the same parsing pattern, using copies of the original parse element.
+        
+        Example::
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
+            integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+            
+            print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
+        prints::
+            [5120, 100, 655360, 268435456]
+        Equivalent form of C{expr.copy()} is just C{expr()}::
+            integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+        """
+        cpy = copy.copy( self )
+        cpy.parseAction = self.parseAction[:]
+        cpy.ignoreExprs = self.ignoreExprs[:]
+        if self.copyDefaultWhiteChars:
+            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+        return cpy
+
+    def setName( self, name ):
+        """
+        Define a name for this expression, to make debugging and exception messages clearer.
+        
+        Example::
+            Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
+            Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
+        """
+        self.name = name
+        self.errmsg = "Expected " + self.name
+        if hasattr(self,"exception"):
+            self.exception.msg = self.errmsg
+        return self
+
+    def setResultsName( self, name, listAllMatches=False ):
+        """
+        Define name for referencing matching tokens as a nested attribute
+        of the returned parse results.
+        NOTE: this returns a *copy* of the original C{ParserElement} object;
+        this is so that the client can define a basic element, such as an
+        integer, and reference it in multiple places with different names.
+
+        You can also set results names using the abbreviated syntax,
+        C{expr("name")} in place of C{expr.setResultsName("name")} - 
+        see L{I{__call__}<__call__>}.
+
+        Example::
+            date_str = (integer.setResultsName("year") + '/' 
+                        + integer.setResultsName("month") + '/' 
+                        + integer.setResultsName("day"))
+
+            # equivalent form:
+            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+        """
+        newself = self.copy()
+        if name.endswith("*"):
+            name = name[:-1]
+            listAllMatches=True
+        newself.resultsName = name
+        newself.modalResults = not listAllMatches
+        return newself
+
+    def setBreak(self,breakFlag = True):
+        """Method to invoke the Python pdb debugger when this element is
+           about to be parsed. Set C{breakFlag} to True to enable, False to
+           disable.
+        """
+        if breakFlag:
+            _parseMethod = self._parse
+            def breaker(instring, loc, doActions=True, callPreParse=True):
+                import pdb
+                pdb.set_trace()
+                return _parseMethod( instring, loc, doActions, callPreParse )
+            breaker._originalParseMethod = _parseMethod
+            self._parse = breaker
+        else:
+            if hasattr(self._parse,"_originalParseMethod"):
+                self._parse = self._parse._originalParseMethod
+        return self
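+
+    # Illustrative sketch (editor's addition, not part of pyparsing):
+    #
+    #   integer = Word(nums).setBreak()   # pdb.set_trace() fires just before
+    #   integer.parseString("123")        # each match attempt of 'integer'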
+
+    def setParseAction( self, *fns, **kwargs ):
+        """
+        Define one or more actions to perform when successfully matching parse element definition.
+        Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
+        C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
+         - s   = the original string being parsed (see note below)
+         - loc = the location of the matching substring
+         - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
+        If the functions in fns modify the tokens, they can return them as the return
+        value from fn, and the modified list of tokens will replace the original.
+        Otherwise, fn does not need to return any value.
+
+        Optional keyword arguments:
+         - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
+
+        Note: the default parsing behavior is to expand tabs in the input string
+        before starting the parsing process.  See L{I{parseString}<parseString>} for more information
+        on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+        consistent view of the parsed string, the parse location, and line and column
+        positions within the parsed string.
+        
+        Example::
+            integer = Word(nums)
+            date_str = integer + '/' + integer + '/' + integer
+
+            date_str.parseString("1999/12/31")  # -> ['1999', '/', '12', '/', '31']
+
+            # use parse action to convert to ints at parse time
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            date_str = integer + '/' + integer + '/' + integer
+
+            # note that integer fields are now ints, not strings
+            date_str.parseString("1999/12/31")  # -> [1999, '/', 12, '/', 31]
+        """
+        self.parseAction = list(map(_trim_arity, list(fns)))
+        self.callDuringTry = kwargs.get("callDuringTry", False)
+        return self
+
+    def addParseAction( self, *fns, **kwargs ):
+        """
+        Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
+        
+        See examples in L{I{copy}<copy>}.
+        """
+        self.parseAction += list(map(_trim_arity, list(fns)))
+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+        return self
+
+    def addCondition(self, *fns, **kwargs):
+        """Add a boolean predicate function to expression's list of parse actions. See 
+        L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, 
+        functions passed to C{addCondition} need to return boolean success/fail of the condition.
+
+        Optional keyword arguments:
+         - message = define a custom message to be used in the raised exception
+         - fatal   = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
+         
+        Example::
+            integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+            year_int = integer.copy()
+            year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
+            date_str = year_int + '/' + integer + '/' + integer
+
+            result = date_str.parseString("1999/12/31")  # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
+        """
+        msg = kwargs.get("message", "failed user-defined condition")
+        exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
+        for fn in fns:
+            def pa(s,l,t):
+                if not bool(_trim_arity(fn)(s,l,t)):
+                    raise exc_type(s,l,msg)
+            self.parseAction.append(pa)
+        self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+        return self
+
+    def setFailAction( self, fn ):
+        """Define action to perform if parsing fails at this expression.
+           Fail action fn is a callable function that takes the arguments
+           C{fn(s,loc,expr,err)} where:
+            - s = string being parsed
+            - loc = location where expression match was attempted and failed
+            - expr = the parse expression that failed
+            - err = the exception thrown
+           The function returns no value.  It may throw C{L{ParseFatalException}}
+           if it is desired to stop parsing immediately."""
+        self.failAction = fn
+        return self
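+
+    # Illustrative sketch (editor's addition, not part of pyparsing): report
+    # where a required expression failed to match.
+    #
+    #   def report_failure(s, loc, expr, err):
+    #       print("no match for %s at line %d" % (expr, lineno(loc, s)))
+    #   Literal("begin").setFailAction(report_failure)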
+
+    def _skipIgnorables( self, instring, loc ):
+        exprsFound = True
+        while exprsFound:
+            exprsFound = False
+            for e in self.ignoreExprs:
+                try:
+                    while 1:
+                        loc,dummy = e._parse( instring, loc )
+                        exprsFound = True
+                except ParseException:
+                    pass
+        return loc
+
+    def preParse( self, instring, loc ):
+        if self.ignoreExprs:
+            loc = self._skipIgnorables( instring, loc )
+
+        if self.skipWhitespace:
+            wt = self.whiteChars
+            instrlen = len(instring)
+            while loc < instrlen and instring[loc] in wt:
+                loc += 1
+
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        return loc, []
+
+    def postParse( self, instring, loc, tokenlist ):
+        return tokenlist
+
+    #~ @profile
+    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
+        debugging = ( self.debug ) #and doActions )
+
+        if debugging or self.failAction:
+            #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
+            if (self.debugActions[0] ):
+                self.debugActions[0]( instring, loc, self )
+            if callPreParse and self.callPreparse:
+                preloc = self.preParse( instring, loc )
+            else:
+                preloc = loc
+            tokensStart = preloc
+            try:
+                try:
+                    loc,tokens = self.parseImpl( instring, preloc, doActions )
+                except IndexError:
+                    raise ParseException( instring, len(instring), self.errmsg, self )
+            except ParseBaseException as err:
+                #~ print ("Exception raised:", err)
+                if self.debugActions[2]:
+                    self.debugActions[2]( instring, tokensStart, self, err )
+                if self.failAction:
+                    self.failAction( instring, tokensStart, self, err )
+                raise
+        else:
+            if callPreParse and self.callPreparse:
+                preloc = self.preParse( instring, loc )
+            else:
+                preloc = loc
+            tokensStart = preloc
+            if self.mayIndexError or preloc >= len(instring):
+                try:
+                    loc,tokens = self.parseImpl( instring, preloc, doActions )
+                except IndexError:
+                    raise ParseException( instring, len(instring), self.errmsg, self )
+            else:
+                loc,tokens = self.parseImpl( instring, preloc, doActions )
+
+        tokens = self.postParse( instring, loc, tokens )
+
+        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
+        if self.parseAction and (doActions or self.callDuringTry):
+            if debugging:
+                try:
+                    for fn in self.parseAction:
+                        tokens = fn( instring, tokensStart, retTokens )
+                        if tokens is not None:
+                            retTokens = ParseResults( tokens,
+                                                      self.resultsName,
+                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+                                                      modal=self.modalResults )
+                except ParseBaseException as err:
+                    #~ print "Exception raised in user parse action:", err
+                    if (self.debugActions[2] ):
+                        self.debugActions[2]( instring, tokensStart, self, err )
+                    raise
+            else:
+                for fn in self.parseAction:
+                    tokens = fn( instring, tokensStart, retTokens )
+                    if tokens is not None:
+                        retTokens = ParseResults( tokens,
+                                                  self.resultsName,
+                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+                                                  modal=self.modalResults )
+        if debugging:
+            #~ print ("Matched",self,"->",retTokens.asList())
+            if (self.debugActions[1] ):
+                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
+
+        return loc, retTokens
+
+    def tryParse( self, instring, loc ):
+        try:
+            return self._parse( instring, loc, doActions=False )[0]
+        except ParseFatalException:
+            raise ParseException( instring, loc, self.errmsg, self)
+    
+    def canParseNext(self, instring, loc):
+        try:
+            self.tryParse(instring, loc)
+        except (ParseException, IndexError):
+            return False
+        else:
+            return True
+
+    class _UnboundedCache(object):
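+        # the cache dict lives in __init__'s closure; get/set/clear/cache_len
+        # are bound as instance methods via types.MethodType, giving each
+        # instance private state without per-call attribute lookups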
+        def __init__(self):
+            cache = {}
+            self.not_in_cache = not_in_cache = object()
+
+            def get(self, key):
+                return cache.get(key, not_in_cache)
+
+            def set(self, key, value):
+                cache[key] = value
+
+            def clear(self):
+                cache.clear()
+                
+            def cache_len(self):
+                return len(cache)
+
+            self.get = types.MethodType(get, self)
+            self.set = types.MethodType(set, self)
+            self.clear = types.MethodType(clear, self)
+            self.__len__ = types.MethodType(cache_len, self)
+
+    if _OrderedDict is not None:
+        class _FifoCache(object):
+            def __init__(self, size):
+                self.not_in_cache = not_in_cache = object()
+
+                cache = _OrderedDict()
+
+                def get(self, key):
+                    return cache.get(key, not_in_cache)
+
+                def set(self, key, value):
+                    cache[key] = value
+                    while len(cache) > size:
+                        try:
+                            cache.popitem(False)
+                        except KeyError:
+                            pass
+
+                def clear(self):
+                    cache.clear()
+
+                def cache_len(self):
+                    return len(cache)
+
+                self.get = types.MethodType(get, self)
+                self.set = types.MethodType(set, self)
+                self.clear = types.MethodType(clear, self)
+                self.__len__ = types.MethodType(cache_len, self)
+
+    else:
+        class _FifoCache(object):
+            def __init__(self, size):
+                self.not_in_cache = not_in_cache = object()
+
+                cache = {}
+                key_fifo = collections.deque()
+
+                def get(self, key):
+                    return cache.get(key, not_in_cache)
+
+                def set(self, key, value):
+                    cache[key] = value
+                    key_fifo.append(key)
+                    # evict oldest entries once the cache exceeds its size;
+                    # the deque is left unbounded so it stays in sync with the
+                    # dict (a maxlen deque would silently drop keys and leak
+                    # their cache entries)
+                    while len(cache) > size:
+                        cache.pop(key_fifo.popleft(), None)
+
+                def clear(self):
+                    cache.clear()
+                    key_fifo.clear()
+
+                def cache_len(self):
+                    return len(cache)
+
+                self.get = types.MethodType(get, self)
+                self.set = types.MethodType(set, self)
+                self.clear = types.MethodType(clear, self)
+                self.__len__ = types.MethodType(cache_len, self)
+
+    # argument cache for optimizing repeated calls when backtracking through recursive expressions
+    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
+    packrat_cache_lock = RLock()
+    packrat_cache_stats = [0, 0]
+
+    # this method gets repeatedly called during backtracking with the same arguments -
+    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
+        HIT, MISS = 0, 1
+        lookup = (self, instring, loc, callPreParse, doActions)
+        with ParserElement.packrat_cache_lock:
+            cache = ParserElement.packrat_cache
+            value = cache.get(lookup)
+            if value is cache.not_in_cache:
+                ParserElement.packrat_cache_stats[MISS] += 1
+                try:
+                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
+                except ParseBaseException as pe:
+                    # cache a copy of the exception, without the traceback
+                    cache.set(lookup, pe.__class__(*pe.args))
+                    raise
+                else:
+                    cache.set(lookup, (value[0], value[1].copy()))
+                    return value
+            else:
+                ParserElement.packrat_cache_stats[HIT] += 1
+                if isinstance(value, Exception):
+                    raise value
+                return (value[0], value[1].copy())
+
+    _parse = _parseNoCache
+
+    @staticmethod
+    def resetCache():
+        ParserElement.packrat_cache.clear()
+        ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
+
+    _packratEnabled = False
+    @staticmethod
+    def enablePackrat(cache_size_limit=128):
+        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
+           Repeated parse attempts at the same string location (which happens
+           often in many complex grammars) can immediately return a cached value,
+           instead of re-executing parsing/validating code.  Both valid results
+           and parsing exceptions are memoized.
+           
+           Parameters:
+            - cache_size_limit - (default=C{128}) - if an integer value is provided
+              will limit the size of the packrat cache; if None is passed, then
+              the cache size will be unbounded; if 0 is passed, the cache will
+              be effectively disabled.
+            
+           This speedup may break existing programs that use parse actions that
+           have side-effects.  For this reason, packrat parsing is disabled when
+           you first import pyparsing.  To activate the packrat feature, your
+           program must call the class method C{ParserElement.enablePackrat()}.  If
+           your program uses C{psyco} to "compile as you go", you must call
+           C{enablePackrat} before calling C{psyco.full()}.  If you do not do this,
+           Python will crash.  For best results, call C{enablePackrat()} immediately
+           after importing pyparsing.
+           
+           Example::
+               import pyparsing
+               pyparsing.ParserElement.enablePackrat()
+        """
+        if not ParserElement._packratEnabled:
+            ParserElement._packratEnabled = True
+            if cache_size_limit is None:
+                ParserElement.packrat_cache = ParserElement._UnboundedCache()
+            else:
+                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
+            ParserElement._parse = ParserElement._parseCache
+
+    def parseString( self, instring, parseAll=False ):
+        """
+        Execute the parse expression with the given string.
+        This is the main interface to the client code, once the complete
+        expression has been built.
+
+        If you want the grammar to require that the entire input string be
+        successfully parsed, then set C{parseAll} to True (equivalent to ending
+        the grammar with C{L{StringEnd()}}).
+
+        Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
+        in order to report proper column numbers in parse actions.
+        If the input string contains tabs and
+        the grammar uses parse actions that use the C{loc} argument to index into the
+        string being parsed, you can ensure you have a consistent view of the input
+        string by:
+         - calling C{parseWithTabs} on your grammar before calling C{parseString}
+           (see L{I{parseWithTabs}<parseWithTabs>})
+         - define your parse action using the full C{(s,loc,toks)} signature, and
+           reference the input string using the parse action's C{s} argument
+         - explicitly expand the tabs in your input string before calling
+           C{parseString}
+        
+        Example::
+            Word('a').parseString('aaaaabaaa')  # -> ['aaaaa']
+            Word('a').parseString('aaaaabaaa', parseAll=True)  # -> Exception: Expected end of text
+        """
+        ParserElement.resetCache()
+        if not self.streamlined:
+            self.streamline()
+            #~ self.saveAsList = True
+        for e in self.ignoreExprs:
+            e.streamline()
+        if not self.keepTabs:
+            instring = instring.expandtabs()
+        try:
+            loc, tokens = self._parse( instring, 0 )
+            if parseAll:
+                loc = self.preParse( instring, loc )
+                se = Empty() + StringEnd()
+                se._parse( instring, loc )
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+        else:
+            return tokens
+
+    def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
+        """
+        Scan the input string for expression matches.  Each match will return the
+        matching tokens, start location, and end location.  May be called with optional
+        C{maxMatches} argument, to clip scanning after 'n' matches are found.  If
+        C{overlap} is specified, then overlapping matches will be reported.
+
+        Note that the start and end locations are reported relative to the string
+        being parsed.  See L{I{parseString}<parseString>} for more information on parsing
+        strings with embedded tabs.
+
+        Example::
+            source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
+            print(source)
+            for tokens,start,end in Word(alphas).scanString(source):
+                print(' '*start + '^'*(end-start))
+                print(' '*start + tokens[0])
+        
+        prints::
+        
+            sldjf123lsdjjkf345sldkjf879lkjsfd987
+            ^^^^^
+            sldjf
+                    ^^^^^^^
+                    lsdjjkf
+                              ^^^^^^
+                              sldkjf
+                                       ^^^^^^
+                                       lkjsfd
+        """
+        if not self.streamlined:
+            self.streamline()
+        for e in self.ignoreExprs:
+            e.streamline()
+
+        if not self.keepTabs:
+            instring = _ustr(instring).expandtabs()
+        instrlen = len(instring)
+        loc = 0
+        preparseFn = self.preParse
+        parseFn = self._parse
+        ParserElement.resetCache()
+        matches = 0
+        try:
+            while loc <= instrlen and matches < maxMatches:
+                try:
+                    preloc = preparseFn( instring, loc )
+                    nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
+                except ParseException:
+                    loc = preloc+1
+                else:
+                    if nextLoc > loc:
+                        matches += 1
+                        yield tokens, preloc, nextLoc
+                        if overlap:
+                            nextloc = preparseFn( instring, loc )
+                            if nextloc > loc:
+                                loc = nextLoc
+                            else:
+                                loc += 1
+                        else:
+                            loc = nextLoc
+                    else:
+                        loc = preloc+1
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def transformString( self, instring ):
+        """
+        Extension to C{L{scanString}}, to modify matching text with modified tokens that may
+        be returned from a parse action.  To use C{transformString}, define a grammar and
+        attach a parse action to it that modifies the returned token list.
+        Invoking C{transformString()} on a target string will then scan for matches,
+        and replace the matched text patterns according to the logic in the parse
+        action.  C{transformString()} returns the resulting transformed string.
+        
+        Example::
+            wd = Word(alphas)
+            wd.setParseAction(lambda toks: toks[0].title())
+            
+            print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
+        Prints::
+            Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
+        """
+        out = []
+        lastE = 0
+        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
+        # keep string locs straight between transformString and scanString
+        self.keepTabs = True
+        try:
+            for t,s,e in self.scanString( instring ):
+                out.append( instring[lastE:s] )
+                if t:
+                    if isinstance(t,ParseResults):
+                        out += t.asList()
+                    elif isinstance(t,list):
+                        out += t
+                    else:
+                        out.append(t)
+                lastE = e
+            out.append(instring[lastE:])
+            out = [o for o in out if o]
+            return "".join(map(_ustr,_flatten(out)))
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def searchString( self, instring, maxMatches=_MAX_INT ):
+        """
+        Another extension to C{L{scanString}}, simplifying the access to the tokens found
+        to match the given parse expression.  May be called with optional
+        C{maxMatches} argument, to clip searching after 'n' matches are found.
+        
+        Example::
+            # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
+            cap_word = Word(alphas.upper(), alphas.lower())
+            
+            print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
+
+            # the sum() builtin can be used to merge results into a single ParseResults object
+            print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
+        prints::
+            [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
+            ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
+        """
+        try:
+            return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
+
+    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
+        """
+        Generator method to split a string using the given expression as a separator.
+        May be called with optional C{maxsplit} argument, to limit the number of splits;
+        and the optional C{includeSeparators} argument (default=C{False}), if the separating
+        matching text should be included in the split results.
+        
+        Example::        
+            punc = oneOf(list(".,;:/-!?"))
+            print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
+        prints::
+            ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
+        """
+        splits = 0
+        last = 0
+        for t,s,e in self.scanString(instring, maxMatches=maxsplit):
+            yield instring[last:s]
+            if includeSeparators:
+                yield t[0]
+            last = e
+        yield instring[last:]
+
+    def __add__(self, other ):
+        """
+        Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
+        converts them to L{Literal}s by default.
+        
+        Example::
+            greet = Word(alphas) + "," + Word(alphas) + "!"
+            hello = "Hello, World!"
+            print (hello, "->", greet.parseString(hello))
+        Prints::
+            Hello, World! -> ['Hello', ',', 'World', '!']
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return And( [ self, other ] )
+
+    def __radd__(self, other ):
+        """
+        Implementation of + operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other + self
+
+    def __sub__(self, other):
+        """
+        Implementation of - operator, returns C{L{And}} with error stop
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return self + And._ErrorStop() + other
+
+    def __rsub__(self, other ):
+        """
+        Implementation of - operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other - self
+
+    def __mul__(self,other):
+        """
+        Implementation of * operator, allows use of C{expr * 3} in place of
+        C{expr + expr + expr}.  Expressions may also be multiplied by a 2-integer
+        tuple, similar to C{{min,max}} multipliers in regular expressions.  Tuples
+        may also include C{None} as in:
+         - C{expr*(n,None)} or C{expr*(n,)} is equivalent
+              to C{expr*n + L{ZeroOrMore}(expr)}
+              (read as "at least n instances of C{expr}")
+         - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
+              (read as "0 to n instances of C{expr}")
+         - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
+         - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
+
+        Note that C{expr*(None,n)} does not raise an exception if
+        more than n exprs exist in the input stream; that is,
+        C{expr*(None,n)} does not enforce a maximum number of expr
+        occurrences.  If this behavior is desired, then write
+        C{expr*(None,n) + ~expr}
+        """
+        if isinstance(other,int):
+            minElements, optElements = other,0
+        elif isinstance(other,tuple):
+            other = (other + (None, None))[:2]
+            if other[0] is None:
+                other = (0, other[1])
+            if isinstance(other[0],int) and other[1] is None:
+                if other[0] == 0:
+                    return ZeroOrMore(self)
+                if other[0] == 1:
+                    return OneOrMore(self)
+                else:
+                    return self*other[0] + ZeroOrMore(self)
+            elif isinstance(other[0],int) and isinstance(other[1],int):
+                minElements, optElements = other
+                optElements -= minElements
+            else:
+                raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
+        else:
+            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
+
+        if minElements < 0:
+            raise ValueError("cannot multiply ParserElement by negative value")
+        if optElements < 0:
+            raise ValueError("second tuple value must be greater or equal to first tuple value")
+        if minElements == optElements == 0:
+            raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
+
+        if (optElements):
+            def makeOptionalList(n):
+                if n>1:
+                    return Optional(self + makeOptionalList(n-1))
+                else:
+                    return Optional(self)
+            if minElements:
+                if minElements == 1:
+                    ret = self + makeOptionalList(optElements)
+                else:
+                    ret = And([self]*minElements) + makeOptionalList(optElements)
+            else:
+                ret = makeOptionalList(optElements)
+        else:
+            if minElements == 1:
+                ret = self
+            else:
+                ret = And([self]*minElements)
+        return ret
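+
+    # Illustrative sketch (editor's addition, not part of pyparsing) of the
+    # tuple forms described above:
+    #
+    #   greeting = Word(alphas)*(2,4)          # 2 required + up to 2 optional
+    #   greeting.parseString("hi there you")   # -> ['hi', 'there', 'you']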
+
+    def __rmul__(self, other):
+        return self.__mul__(other)
+
+    def __or__(self, other ):
+        """
+        Implementation of | operator - returns C{L{MatchFirst}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return MatchFirst( [ self, other ] )
+
+    def __ror__(self, other ):
+        """
+        Implementation of | operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other | self
+
+    def __xor__(self, other ):
+        """
+        Implementation of ^ operator - returns C{L{Or}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Or( [ self, other ] )
+
+    def __rxor__(self, other ):
+        """
+        Implementation of ^ operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other ^ self
+
+    def __and__(self, other ):
+        """
+        Implementation of & operator - returns C{L{Each}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return Each( [ self, other ] )
+
+    def __rand__(self, other ):
+        """
+        Implementation of & operator when left operand is not a C{L{ParserElement}}
+        """
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        if not isinstance( other, ParserElement ):
+            warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+                    SyntaxWarning, stacklevel=2)
+            return None
+        return other & self
+
+    def __invert__( self ):
+        """
+        Implementation of ~ operator - returns C{L{NotAny}}
+        """
+        return NotAny( self )
+
+    def __call__(self, name=None):
+        """
+        Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
+        
+        If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
+        passed as C{True}.
+           
+        If C{name} is omitted, same as calling C{L{copy}}.
+
+        Example::
+            # these are equivalent
+            userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
+            userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")             
+        """
+        if name is not None:
+            return self.setResultsName(name)
+        else:
+            return self.copy()
+
+    def suppress( self ):
+        """
+        Suppresses the output of this C{ParserElement}; useful to keep punctuation from
+        cluttering up returned output.
+        """
+        return Suppress( self )
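+
+    # Illustrative sketch (editor's addition, not part of pyparsing):
+    #
+    #   comma = Literal(',').suppress()        # same as Suppress(',')
+    #   csv_row = Word(alphas) + ZeroOrMore(comma + Word(alphas))
+    #   csv_row.parseString("a, b, c")         # -> ['a', 'b', 'c']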
+
+    def leaveWhitespace( self ):
+        """
+        Disables the skipping of whitespace before matching the characters in the
+        C{ParserElement}'s defined pattern.  This is normally only used internally by
+        the pyparsing module, but may be needed in some whitespace-sensitive grammars.
+        """
+        self.skipWhitespace = False
+        return self
+
+    def setWhitespaceChars( self, chars ):
+        """
+        Overrides the default whitespace chars for this expression only.
+        """
+        self.skipWhitespace = True
+        self.whiteChars = chars
+        self.copyDefaultWhiteChars = False
+        return self
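+
+    # Illustrative sketch (editor's addition, not part of pyparsing): make
+    # newlines significant for this one expression (compare the class-level
+    # setDefaultWhitespaceChars above):
+    #
+    #   word = Word(alphas).setWhitespaceChars(" \t")
+    #   OneOrMore(word).parseString("abc def\nghi")   # -> ['abc', 'def']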
+
+    def parseWithTabs( self ):
+        """
+        Overrides the default behavior of expanding C{<TAB>}s to spaces before parsing,
+        leaving tabs intact in the input string.
+        Must be called before C{parseString} when the input grammar contains elements that
+        match C{<TAB>} characters.
+        """
+        self.keepTabs = True
+        return self
+
+    def ignore( self, other ):
+        """
+        Define expression to be ignored (e.g., comments) while doing pattern
+        matching; may be called repeatedly, to define multiple comment or other
+        ignorable patterns.
+        
+        Example::
+            patt = OneOrMore(Word(alphas))
+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
+            
+            patt.ignore(cStyleComment)
+            patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
+        """
+        if isinstance(other, basestring):
+            other = Suppress(other)
+
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                self.ignoreExprs.append(other)
+        else:
+            self.ignoreExprs.append( Suppress( other.copy() ) )
+        return self
+
+    def setDebugActions( self, startAction, successAction, exceptionAction ):
+        """
+        Enable display of debugging messages while doing pattern matching, using the
+        given C{startAction}, C{successAction}, and C{exceptionAction} callables;
+        pass C{None} for any of these to use the corresponding default debug action.
+        """
+        self.debugActions = (startAction or _defaultStartDebugAction,
+                             successAction or _defaultSuccessDebugAction,
+                             exceptionAction or _defaultExceptionDebugAction)
+        self.debug = True
+        return self
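+
+    # Illustrative sketch (editor's addition, not part of pyparsing): install
+    # a custom exception reporter while keeping the default start/success
+    # actions; 'expr' is a hypothetical parser element.
+    #
+    #   def on_fail(instring, loc, expr, exc):
+    #       print("no %s at loc %d" % (expr, loc))
+    #   expr.setDebugActions(None, None, on_fail)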
+
+    def setDebug( self, flag=True ):
+        """
+        Enable display of debugging messages while doing pattern matching.
+        Set C{flag} to True to enable, False to disable.
+
+        Example::
+            wd = Word(alphas).setName("alphaword")
+            integer = Word(nums).setName("numword")
+            term = wd | integer
+            
+            # turn on debugging for wd
+            wd.setDebug()
+
+            OneOrMore(term).parseString("abc 123 xyz 890")
+        
+        prints::
+            Match alphaword at loc 0(1,1)
+            Matched alphaword -> ['abc']
+            Match alphaword at loc 3(1,4)
+            Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+            Match alphaword at loc 7(1,8)
+            Matched alphaword -> ['xyz']
+            Match alphaword at loc 11(1,12)
+            Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+            Match alphaword at loc 15(1,16)
+            Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+        The output shown is that produced by the default debug actions - custom debug actions can be
+        specified using L{setDebugActions}. Prior to attempting
+        to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
+        is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
+        message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
+        which makes debugging and exception messages easier to understand - for instance, the default
+        name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
+        """
+        if flag:
+            self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
+        else:
+            self.debug = False
+        return self
+
+    def __str__( self ):
+        return self.name
+
+    def __repr__( self ):
+        return _ustr(self)
+
+    def streamline( self ):
+        self.streamlined = True
+        self.strRepr = None
+        return self
+
+    def checkRecursion( self, parseElementList ):
+        pass
+
+    def validate( self, validateTrace=[] ):
+        """
+        Check defined expressions for valid structure, check for infinite recursive definitions.
+        """
+        self.checkRecursion( [] )
+
+    def parseFile( self, file_or_filename, parseAll=False ):
+        """
+        Execute the parse expression on the given file or filename.
+        If a filename is specified (instead of a file object),
+        the entire file is opened, read, and closed before parsing.
+        """
+        try:
+            file_contents = file_or_filename.read()
+        except AttributeError:
+            with open(file_or_filename, "r") as f:
+                file_contents = f.read()
+        try:
+            return self.parseString(file_contents, parseAll)
+        except ParseBaseException as exc:
+            if ParserElement.verbose_stacktrace:
+                raise
+            else:
+                # catch and re-raise exception from here, clears out pyparsing internal stack trace
+                raise exc
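+
+    # Illustrative sketch (editor's addition; "config.txt" is hypothetical):
+    #
+    #   grammar.parseFile("config.txt", parseAll=True)   # by filename
+    #   with open("config.txt") as f:
+    #       grammar.parseFile(f)                         # or via file object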
+
+    def __eq__(self,other):
+        if isinstance(other, ParserElement):
+            return self is other or vars(self) == vars(other)
+        elif isinstance(other, basestring):
+            return self.matches(other)
+        else:
+            # comparing against the super proxy itself; this effectively
+            # returns False for unsupported comparison types
+            return super(ParserElement,self)==other
+
+    def __ne__(self,other):
+        return not (self == other)
+
+    def __hash__(self):
+        return hash(id(self))
+
+    def __req__(self,other):
+        return self == other
+
+    def __rne__(self,other):
+        return not (self == other)
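+
+    # Illustrative sketch (editor's addition, not part of pyparsing): comparing
+    # an element to a plain string tests whether the string matches - see
+    # matches() below.
+    #
+    #   Word(nums) == "123"   # -> True
+    #   Word(nums) == "abc"   # -> False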
+
+    def matches(self, testString, parseAll=True):
+        """
+        Method for quick testing of a parser against a test string. Good for simple
+        inline microtests of sub-expressions while building up a larger parser.
+           
+        Parameters:
+         - testString - to test against this expression for a match
+         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
+            
+        Example::
+            expr = Word(nums)
+            assert expr.matches("100")
+        """
+        try:
+            self.parseString(_ustr(testString), parseAll=parseAll)
+            return True
+        except ParseBaseException:
+            return False
+                
+    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
+        """
+        Execute the parse expression on a series of test strings, showing each
+        test, the parsed results or where the parse failed. Quick and easy way to
+        run a parse expression against a list of sample strings.
+           
+        Parameters:
+         - tests - a list of separate test strings, or a multiline string of test strings
+         - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests           
+         - comment - (default=C{'#'}) - expression for indicating embedded comments in the test 
+              string; pass None to disable comment filtering
+         - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
+              if False, only dump nested list
+         - printResults - (default=C{True}) prints test output to stdout
+         - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
+
+        Returns: a (success, results) tuple, where success indicates that all tests succeeded
+        (or failed if C{failureTests} is True), and the results contain a list of lines of each 
+        test's output
+        
+        Example::
+            number_expr = pyparsing_common.number.copy()
+
+            result = number_expr.runTests('''
+                # unsigned integer
+                100
+                # negative integer
+                -100
+                # float with scientific notation
+                6.02e23
+                # integer with scientific notation
+                1e-12
+                ''')
+            print("Success" if result[0] else "Failed!")
+
+            result = number_expr.runTests('''
+                # stray character
+                100Z
+                # missing leading digit before '.'
+                -.100
+                # too many '.'
+                3.14.159
+                ''', failureTests=True)
+            print("Success" if result[0] else "Failed!")
+        prints::
+            # unsigned integer
+            100
+            [100]
+
+            # negative integer
+            -100
+            [-100]
+
+            # float with scientific notation
+            6.02e23
+            [6.02e+23]
+
+            # integer with scientific notation
+            1e-12
+            [1e-12]
+
+            Success
+            
+            # stray character
+            100Z
+               ^
+            FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+            # missing leading digit before '.'
+            -.100
+            ^
+            FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+            # too many '.'
+            3.14.159
+                ^
+            FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+            Success
+
+        Each test string must be on a single line. If you want to test a string that spans multiple
+        lines, create a test like this::
+
+            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
+        
+        (Note that this is a raw string literal, you must include the leading 'r'.)
+        """
+        if isinstance(tests, basestring):
+            tests = list(map(str.strip, tests.rstrip().splitlines()))
+        if isinstance(comment, basestring):
+            comment = Literal(comment)
+        allResults = []
+        comments = []
+        success = True
+        for t in tests:
+            if comment is not None and comment.matches(t, False) or comments and not t:
+                comments.append(t)
+                continue
+            if not t:
+                continue
+            out = ['\n'.join(comments), t]
+            comments = []
+            try:
+                t = t.replace(r'\n','\n')
+                result = self.parseString(t, parseAll=parseAll)
+                out.append(result.dump(full=fullDump))
+                success = success and not failureTests
+            except ParseBaseException as pe:
+                fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
+                if '\n' in t:
+                    out.append(line(pe.loc, t))
+                    out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
+                else:
+                    out.append(' '*pe.loc + '^' + fatal)
+                out.append("FAIL: " + str(pe))
+                success = success and failureTests
+                result = pe
+            except Exception as exc:
+                out.append("FAIL-EXCEPTION: " + str(exc))
+                success = success and failureTests
+                result = exc
+
+            if printResults:
+                if fullDump:
+                    out.append('')
+                print('\n'.join(out))
+
+            allResults.append((t, result))
+        
+        return success, allResults
+
+        
+class Token(ParserElement):
+    """
+    Abstract C{ParserElement} subclass, for defining atomic matching patterns.
+    """
+    def __init__( self ):
+        super(Token,self).__init__( savelist=False )
+
+
+class Empty(Token):
+    """
+    An empty token, will always match.
+    """
+    def __init__( self ):
+        super(Empty,self).__init__()
+        self.name = "Empty"
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+
+class NoMatch(Token):
+    """
+    A token that will never match.
+    """
+    def __init__( self ):
+        super(NoMatch,self).__init__()
+        self.name = "NoMatch"
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.errmsg = "Unmatchable token"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Literal(Token):
+    """
+    Token to exactly match a specified string.
+    
+    Example::
+        Literal('blah').parseString('blah')  # -> ['blah']
+        Literal('blah').parseString('blahfooblah')  # -> ['blah']
+        Literal('blah').parseString('bla')  # -> Exception: Expected "blah"
+    
+    For case-insensitive matching, use L{CaselessLiteral}.
+    
+    For keyword matching (force word break before and after the matched string),
+    use L{Keyword} or L{CaselessKeyword}.
+    """
+    def __init__( self, matchString ):
+        super(Literal,self).__init__()
+        self.match = matchString
+        self.matchLen = len(matchString)
+        try:
+            self.firstMatchChar = matchString[0]
+        except IndexError:
+            warnings.warn("null string passed to Literal; use Empty() instead",
+                            SyntaxWarning, stacklevel=2)
+            self.__class__ = Empty
+        self.name = '"%s"' % _ustr(self.match)
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = False
+        self.mayIndexError = False
+
+    # Performance tuning: this routine gets called a *lot*.
+    # If this is a single-character match string and the first character matches,
+    # short-circuit as quickly as possible, and avoid calling startswith().
+    #~ @profile
+    def parseImpl( self, instring, loc, doActions=True ):
+        if (instring[loc] == self.firstMatchChar and
+            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
+            return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+_L = Literal
+ParserElement._literalStringClass = Literal
+
+class Keyword(Token):
+    """
+    Token to exactly match a specified string as a keyword, that is, it must be
+    immediately followed by a non-keyword character.  Compare with C{L{Literal}}:
+     - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
+     - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
+    Accepts two optional constructor arguments in addition to the keyword string:
+     - C{identChars} is a string of characters that would be valid identifier characters,
+          defaulting to all alphanumerics + "_" and "$"
+     - C{caseless} allows case-insensitive matching, default is C{False}.
+       
+    Example::
+        Keyword("start").parseString("start")  # -> ['start']
+        Keyword("start").parseString("starting")  # -> Exception
+
+    For case-insensitive matching, use L{CaselessKeyword}.
+    """
+    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
+
+    def __init__( self, matchString, identChars=None, caseless=False ):
+        super(Keyword,self).__init__()
+        if identChars is None:
+            identChars = Keyword.DEFAULT_KEYWORD_CHARS
+        self.match = matchString
+        self.matchLen = len(matchString)
+        try:
+            self.firstMatchChar = matchString[0]
+        except IndexError:
+            warnings.warn("null string passed to Keyword; use Empty() instead",
+                            SyntaxWarning, stacklevel=2)
+        self.name = '"%s"' % self.match
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = False
+        self.mayIndexError = False
+        self.caseless = caseless
+        if caseless:
+            self.caselessmatch = matchString.upper()
+            identChars = identChars.upper()
+        self.identChars = set(identChars)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.caseless:
+            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
+                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
+                return loc+self.matchLen, self.match
+        else:
+            if (instring[loc] == self.firstMatchChar and
+                (self.matchLen==1 or instring.startswith(self.match,loc)) and
+                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
+                (loc == 0 or instring[loc-1] not in self.identChars) ):
+                return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+
+    def copy(self):
+        c = super(Keyword,self).copy()
+        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
+        return c
+
+    @staticmethod
+    def setDefaultKeywordChars( chars ):
+        """Overrides the default Keyword chars
+        """
+        Keyword.DEFAULT_KEYWORD_CHARS = chars
+
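+# Usage sketch (illustrative): narrowing the default keyword characters changes
+# what counts as a word break for Keywords created afterward:
+#
+#     Keyword.setDefaultKeywordChars(alphas)
+#     Keyword("start").parseString("start123")  # -> ['start'] ('1' is no longer a keyword char)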
+class CaselessLiteral(Literal):
+    """
+    Token to match a specified string, ignoring case of letters.
+    Note: the matched results will always be in the case of the given
+    match string, NOT the case of the input text.
+
+    Example::
+        OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
+        
+    (Contrast with example for L{CaselessKeyword}.)
+    """
+    def __init__( self, matchString ):
+        super(CaselessLiteral,self).__init__( matchString.upper() )
+        # Preserve the defining literal.
+        self.returnString = matchString
+        self.name = "'%s'" % self.returnString
+        self.errmsg = "Expected " + self.name
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[ loc:loc+self.matchLen ].upper() == self.match:
+            return loc+self.matchLen, self.returnString
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class CaselessKeyword(Keyword):
+    """
+    Caseless version of L{Keyword}.
+
+    Example::
+        OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
+        
+    (Contrast with example for L{CaselessLiteral}.)
+    """
+    def __init__( self, matchString, identChars=None ):
+        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
+            return loc+self.matchLen, self.match
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class CloseMatch(Token):
+    """
+    A variation on L{Literal} which matches "close" matches, that is, 
+    strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
+     - C{match_string} - string to be matched
+     - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
+    
+    The results from a successful parse will contain the matched text from the input string and the following named results:
+     - C{mismatches} - a list of the positions within the match_string where mismatches were found
+     - C{original} - the original match_string used to compare against the input string
+    
+    If C{mismatches} is an empty list, then the match was an exact match.
+    
+    Example::
+        patt = CloseMatch("ATCATCGAATGGA")
+        patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
+        patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
+
+        # exact match
+        patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
+
+        # close match allowing up to 2 mismatches
+        patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
+        patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
+    """
+    def __init__(self, match_string, maxMismatches=1):
+        super(CloseMatch,self).__init__()
+        self.name = match_string
+        self.match_string = match_string
+        self.maxMismatches = maxMismatches
+        self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
+        self.mayIndexError = False
+        self.mayReturnEmpty = False
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        start = loc
+        instrlen = len(instring)
+        maxloc = start + len(self.match_string)
+
+        if maxloc <= instrlen:
+            match_string = self.match_string
+            match_stringloc = 0
+            mismatches = []
+            maxMismatches = self.maxMismatches
+
+            for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
+                src,mat = s_m
+                if src != mat:
+                    mismatches.append(match_stringloc)
+                    if len(mismatches) > maxMismatches:
+                        break
+            else:
+                loc = start + match_stringloc + 1
+                results = ParseResults([instring[start:loc]])
+                results['original'] = self.match_string
+                results['mismatches'] = mismatches
+                return loc, results
+
+        raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Word(Token):
+    """
+    Token for matching words composed of allowed character sets.
+    Defined with string containing all allowed initial characters,
+    an optional string containing allowed body characters (if omitted,
+    defaults to the initial character set), and an optional minimum,
+    maximum, and/or exact length.  The default value for C{min} is 1 (a
+    minimum value < 1 is not valid); the default values for C{max} and C{exact}
+    are 0, meaning no maximum or exact length restriction. An optional
+    C{excludeChars} parameter can list characters to exclude from C{initChars}
+    and C{bodyChars}; useful to define a word of all printables
+    except for one or two characters, for instance.
+    
+    L{srange} is useful for defining custom character set strings for defining 
+    C{Word} expressions, using range notation from regular expression character sets.
+    
+    A common mistake is to use C{Word} to match a specific literal string, as in 
+    C{Word("Address")}. Remember that C{Word} uses the string argument to define
+    I{sets} of matchable characters. This expression would match "Add", "AAA",
+    "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
+    To match an exact literal string, use L{Literal} or L{Keyword}.
+
+    pyparsing includes helper strings for building Words:
+     - L{alphas}
+     - L{nums}
+     - L{alphanums}
+     - L{hexnums}
+     - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
+     - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
+     - L{printables} (any non-whitespace character)
+
+    Example::
+        # a word composed of digits
+        integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
+        
+        # a word with a leading capital, and zero or more lowercase
+        capital_word = Word(alphas.upper(), alphas.lower())
+
+        # hostnames are alphanumeric, with leading alpha, and '-'
+        hostname = Word(alphas, alphanums+'-')
+        
+        # roman numeral (not a strict parser, accepts invalid mix of characters)
+        roman = Word("IVXLCDM")
+        
+        # any string of non-whitespace characters, except for ','
+        csv_value = Word(printables, excludeChars=",")
+    """
+    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
+        super(Word,self).__init__()
+        if excludeChars:
+            initChars = ''.join(c for c in initChars if c not in excludeChars)
+            if bodyChars:
+                bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
+        self.initCharsOrig = initChars
+        self.initChars = set(initChars)
+        if bodyChars :
+            self.bodyCharsOrig = bodyChars
+            self.bodyChars = set(bodyChars)
+        else:
+            self.bodyCharsOrig = initChars
+            self.bodyChars = set(initChars)
+
+        self.maxSpecified = max > 0
+
+        if min < 1:
+            raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.asKeyword = asKeyword
+
+        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
+            if self.bodyCharsOrig == self.initCharsOrig:
+                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
+            elif len(self.initCharsOrig) == 1:
+                self.reString = "%s[%s]*" % \
+                                      (re.escape(self.initCharsOrig),
+                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
+            else:
+                self.reString = "[%s][%s]*" % \
+                                      (_escapeRegexRangeChars(self.initCharsOrig),
+                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
+            if self.asKeyword:
+                self.reString = r"\b"+self.reString+r"\b"
+            try:
+                self.re = re.compile( self.reString )
+            except Exception:
+                self.re = None
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.re:
+            result = self.re.match(instring,loc)
+            if not result:
+                raise ParseException(instring, loc, self.errmsg, self)
+
+            loc = result.end()
+            return loc, result.group()
+
+        if instring[loc] not in self.initChars:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        start = loc
+        loc += 1
+        instrlen = len(instring)
+        bodychars = self.bodyChars
+        maxloc = start + self.maxLen
+        maxloc = min( maxloc, instrlen )
+        while loc < maxloc and instring[loc] in bodychars:
+            loc += 1
+
+        throwException = False
+        if loc - start < self.minLen:
+            throwException = True
+        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
+            throwException = True
+        if self.asKeyword:
+            if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
+                throwException = True
+
+        if throwException:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+    def __str__( self ):
+        try:
+            return super(Word,self).__str__()
+        except Exception:
+            pass
+
+
+        if self.strRepr is None:
+
+            def charsAsStr(s):
+                if len(s)>4:
+                    return s[:4]+"..."
+                else:
+                    return s
+
+            if ( self.initCharsOrig != self.bodyCharsOrig ):
+                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
+            else:
+                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
+
+        return self.strRepr
+
+
+class Regex(Token):
+    r"""
+    Token for matching strings that match a given regular expression.
+    Defined with a string specifying the regular expression, in a form recognized by the built-in Python C{re} module.
+    If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as 
+    named parse results.
+
+    Example::
+        realnum = Regex(r"[+-]?\d+\.\d*")
+        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+        # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
+        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
+    """
+    compiledREtype = type(re.compile("[A-Z]"))
+    def __init__( self, pattern, flags=0):
+        """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
+        super(Regex,self).__init__()
+
+        if isinstance(pattern, basestring):
+            if not pattern:
+                warnings.warn("null string passed to Regex; use Empty() instead",
+                        SyntaxWarning, stacklevel=2)
+
+            self.pattern = pattern
+            self.flags = flags
+
+            try:
+                self.re = re.compile(self.pattern, self.flags)
+                self.reString = self.pattern
+            except sre_constants.error:
+                warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
+                    SyntaxWarning, stacklevel=2)
+                raise
+
+        elif isinstance(pattern, Regex.compiledREtype):
+            self.re = pattern
+            self.pattern = self.reString = str(pattern)
+            self.flags = flags
+            
+        else:
+            raise ValueError("Regex may only be constructed with a string or a compiled RE object")
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        result = self.re.match(instring,loc)
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        d = result.groupdict()
+        ret = ParseResults(result.group())
+        if d:
+            for k in d:
+                ret[k] = d[k]
+        return loc,ret
+
+    def __str__( self ):
+        try:
+            return super(Regex,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "Re:(%s)" % repr(self.pattern)
+
+        return self.strRepr
+
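+# Usage sketch (illustrative): named groups in a Regex pattern come back as
+# named results on the returned ParseResults:
+#
+#     date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+#     result = date.parseString("1999-12-31")
+#     result['year']  # -> '1999'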
+
+class QuotedString(Token):
+    r"""
+    Token for matching strings that are delimited by quoting characters.
+    
+    Defined with the following parameters:
+        - quoteChar - string of one or more characters defining the quote delimiting string
+        - escChar - character to escape quotes, typically backslash (default=C{None})
+        - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
+        - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
+        - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
+        - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
+        - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
+
+    Example::
+        qs = QuotedString('"')
+        print(qs.searchString('lsjdf "This is the quote" sldjf'))
+        complex_qs = QuotedString('{{', endQuoteChar='}}')
+        print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
+        sql_qs = QuotedString('"', escQuote='""')
+        print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+    prints::
+        [['This is the quote']]
+        [['This is the "quote"']]
+        [['This is the quote with "embedded" quotes']]
+    """
+    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
+        super(QuotedString,self).__init__()
+
+        # remove white space from quote chars - won't work anyway
+        quoteChar = quoteChar.strip()
+        if not quoteChar:
+            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+            raise SyntaxError()
+
+        if endQuoteChar is None:
+            endQuoteChar = quoteChar
+        else:
+            endQuoteChar = endQuoteChar.strip()
+            if not endQuoteChar:
+                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+                raise SyntaxError()
+
+        self.quoteChar = quoteChar
+        self.quoteCharLen = len(quoteChar)
+        self.firstQuoteChar = quoteChar[0]
+        self.endQuoteChar = endQuoteChar
+        self.endQuoteCharLen = len(endQuoteChar)
+        self.escChar = escChar
+        self.escQuote = escQuote
+        self.unquoteResults = unquoteResults
+        self.convertWhitespaceEscapes = convertWhitespaceEscapes
+
+        if multiline:
+            self.flags = re.MULTILINE | re.DOTALL
+            self.pattern = r'%s(?:[^%s%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        else:
+            self.flags = 0
+            self.pattern = r'%s(?:[^%s\n\r%s]' % \
+                ( re.escape(self.quoteChar),
+                  _escapeRegexRangeChars(self.endQuoteChar[0]),
+                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+        if len(self.endQuoteChar) > 1:
+            self.pattern += (
+                '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
+                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
+                                    for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
+                )
+        if escQuote:
+            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
+        if escChar:
+            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
+            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
+        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
+
+        try:
+            self.re = re.compile(self.pattern, self.flags)
+            self.reString = self.pattern
+        except sre_constants.error:
+            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
+                SyntaxWarning, stacklevel=2)
+            raise
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayIndexError = False
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        result = self.re.match(instring,loc) if instring[loc] == self.firstQuoteChar else None
+        if not result:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        loc = result.end()
+        ret = result.group()
+
+        if self.unquoteResults:
+
+            # strip off quotes
+            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
+
+            if isinstance(ret,basestring):
+                # replace escaped whitespace
+                if '\\' in ret and self.convertWhitespaceEscapes:
+                    ws_map = {
+                        r'\t' : '\t',
+                        r'\n' : '\n',
+                        r'\f' : '\f',
+                        r'\r' : '\r',
+                    }
+                    for wslit,wschar in ws_map.items():
+                        ret = ret.replace(wslit, wschar)
+
+                # replace escaped characters
+                if self.escChar:
+                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
+
+                # replace escaped quotes
+                if self.escQuote:
+                    ret = ret.replace(self.escQuote, self.endQuoteChar)
+
+        return loc, ret
+
+    def __str__( self ):
+        try:
+            return super(QuotedString,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
+
+        return self.strRepr
+
+
+class CharsNotIn(Token):
+    """
+    Token for matching words composed of characters I{not} in a given set (will
+    include whitespace in matched characters if not listed in the provided exclusion set - see example).
+    Defined with string containing all disallowed characters, and an optional
+    minimum, maximum, and/or exact length.  The default value for C{min} is 1 (a
+    minimum value < 1 is not valid); the default values for C{max} and C{exact}
+    are 0, meaning no maximum or exact length restriction.
+
+    Example::
+        # define a comma-separated-value as anything that is not a ','
+        csv_value = CharsNotIn(',')
+        print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
+    prints::
+        ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
+    """
+    def __init__( self, notChars, min=1, max=0, exact=0 ):
+        super(CharsNotIn,self).__init__()
+        self.skipWhitespace = False
+        self.notChars = notChars
+
+        if min < 1:
+            raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+        self.name = _ustr(self)
+        self.errmsg = "Expected " + self.name
+        self.mayReturnEmpty = ( self.minLen == 0 )
+        self.mayIndexError = False
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[loc] in self.notChars:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        start = loc
+        loc += 1
+        notchars = self.notChars
+        maxlen = min( start+self.maxLen, len(instring) )
+        while loc < maxlen and \
+              (instring[loc] not in notchars):
+            loc += 1
+
+        if loc - start < self.minLen:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
+    def __str__( self ):
+        try:
+            return super(CharsNotIn, self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            if len(self.notChars) > 4:
+                self.strRepr = "!W:(%s...)" % self.notChars[:4]
+            else:
+                self.strRepr = "!W:(%s)" % self.notChars
+
+        return self.strRepr
+
+class White(Token):
+    """
+    Special matching class for matching whitespace.  Normally, whitespace is ignored
+    by pyparsing grammars.  This class is for grammars in which whitespace is
+    significant.  Define with a string containing the whitespace characters to be
+    matched; default is C{" \\t\\r\\n"}.  Also takes optional C{min}, C{max}, and C{exact} arguments,
+    as defined for the C{L{Word}} class.
+    """
+    whiteStrs = {
+        " " : "<SPC>",
+        "\t": "<TAB>",
+        "\n": "<LF>",
+        "\r": "<CR>",
+        "\f": "<FF>",
+        }
+    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
+        super(White,self).__init__()
+        self.matchWhite = ws
+        self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
+        #~ self.leaveWhitespace()
+        self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
+        self.mayReturnEmpty = True
+        self.errmsg = "Expected " + self.name
+
+        self.minLen = min
+
+        if max > 0:
+            self.maxLen = max
+        else:
+            self.maxLen = _MAX_INT
+
+        if exact > 0:
+            self.maxLen = exact
+            self.minLen = exact
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if instring[loc] not in self.matchWhite:
+            raise ParseException(instring, loc, self.errmsg, self)
+        start = loc
+        loc += 1
+        maxloc = start + self.maxLen
+        maxloc = min( maxloc, len(instring) )
+        while loc < maxloc and instring[loc] in self.matchWhite:
+            loc += 1
+
+        if loc - start < self.minLen:
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        return loc, instring[start:loc]
+
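+# Usage sketch (illustrative): White matches whitespace explicitly, for grammars
+# where it is significant - here a tab is kept as its own token:
+#
+#     (Word(alphas) + White("\t") + Word(alphas)).parseString("abc\tdef")
+#     # -> ['abc', '\t', 'def']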
+
+class _PositionToken(Token):
+    def __init__( self ):
+        super(_PositionToken,self).__init__()
+        self.name=self.__class__.__name__
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+
+class GoToColumn(_PositionToken):
+    """
+    Token to advance to a specific column of input text; useful for tabular report scraping.
+    """
+    def __init__( self, colno ):
+        super(GoToColumn,self).__init__()
+        self.col = colno
+
+    def preParse( self, instring, loc ):
+        if col(loc,instring) != self.col:
+            instrlen = len(instring)
+            if self.ignoreExprs:
+                loc = self._skipIgnorables( instring, loc )
+            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
+                loc += 1
+        return loc
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        thiscol = col( loc, instring )
+        if thiscol > self.col:
+            raise ParseException( instring, loc, "Text not in expected column", self )
+        newloc = loc + self.col - thiscol
+        ret = instring[ loc: newloc ]
+        return newloc, ret
+
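+# Usage sketch (illustrative, column number hypothetical): GoToColumn suits
+# fixed-column report scraping - skip ahead to a known column before matching:
+#
+#     row = Word(alphas) + GoToColumn(20) + Word(nums)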
+
+class LineStart(_PositionToken):
+    """
+    Matches if current position is at the beginning of a line within the parse string
+    
+    Example::
+    
+        test = '''\
+        AAA this line
+        AAA and this line
+          AAA but not this one
+        B AAA and definitely not this one
+        '''
+
+        for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
+            print(t)
+    
+    Prints::
+        ['AAA', ' this line']
+        ['AAA', ' and this line']    
+
+    """
+    def __init__( self ):
+        super(LineStart,self).__init__()
+        self.errmsg = "Expected start of line"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if col(loc, instring) == 1:
+            return loc, []
+        raise ParseException(instring, loc, self.errmsg, self)
+
+class LineEnd(_PositionToken):
+    """
+    Matches if current position is at the end of a line within the parse string
+    """
+    def __init__( self ):
+        super(LineEnd,self).__init__()
+        self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
+        self.errmsg = "Expected end of line"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc<len(instring):
+            if instring[loc] == "\n":
+                return loc+1, "\n"
+            else:
+                raise ParseException(instring, loc, self.errmsg, self)
+        elif loc == len(instring):
+            return loc+1, []
+        else:
+            raise ParseException(instring, loc, self.errmsg, self)
+
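+# Usage sketch (illustrative): LineEnd consumes the newline when one is present:
+#
+#     (Word(nums) + LineEnd()).parseString("123\n")  # -> ['123', '\n']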
+class StringStart(_PositionToken):
+    """
+    Matches if current position is at the beginning of the parse string
+    """
+    def __init__( self ):
+        super(StringStart,self).__init__()
+        self.errmsg = "Expected start of text"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc != 0:
+            # see if entire string up to here is just whitespace and ignoreables
+            if loc != self.preParse( instring, 0 ):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+class StringEnd(_PositionToken):
+    """
+    Matches if current position is at the end of the parse string
+    """
+    def __init__( self ):
+        super(StringEnd,self).__init__()
+        self.errmsg = "Expected end of text"
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if loc < len(instring):
+            raise ParseException(instring, loc, self.errmsg, self)
+        elif loc == len(instring):
+            return loc+1, []
+        elif loc > len(instring):
+            return loc, []
+        else:
+            raise ParseException(instring, loc, self.errmsg, self)
+
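+# Usage sketch (illustrative): StringEnd asserts that the whole input has been
+# consumed; parseString(..., parseAll=True) applies it implicitly:
+#
+#     (Word(nums) + StringEnd()).parseString("123")    # -> ['123']
+#     (Word(nums) + StringEnd()).parseString("123 x")  # raises ParseException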
+class WordStart(_PositionToken):
+    """
+    Matches if the current position is at the beginning of a Word, and
+    is not preceded by any character in a given set of C{wordChars}
+    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
+    use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
+    the string being parsed, or at the beginning of a line.
+    """
+    def __init__(self, wordChars = printables):
+        super(WordStart,self).__init__()
+        self.wordChars = set(wordChars)
+        self.errmsg = "Not at the start of a word"
+
+    def parseImpl(self, instring, loc, doActions=True ):
+        if loc != 0:
+            if (instring[loc-1] in self.wordChars or
+                instring[loc] not in self.wordChars):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+class WordEnd(_PositionToken):
+    """
+    Matches if the current position is at the end of a Word, and
+    is not followed by any character in a given set of C{wordChars}
+    (default=C{printables}). To emulate the C{\b} behavior of regular expressions,
+    use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
+    the string being parsed, or at the end of a line.
+    """
+    def __init__(self, wordChars = printables):
+        super(WordEnd,self).__init__()
+        self.wordChars = set(wordChars)
+        self.skipWhitespace = False
+        self.errmsg = "Not at the end of a word"
+
+    def parseImpl(self, instring, loc, doActions=True ):
+        instrlen = len(instring)
+        if instrlen>0 and loc<instrlen:
+            if (instring[loc] in self.wordChars or
+                instring[loc-1] not in self.wordChars):
+                raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
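+# Usage sketch (illustrative): WordStart/WordEnd emulate regex \b word breaks -
+# only the standalone 'ball' below is matched:
+#
+#     whole_ball = WordStart(alphanums) + Literal("ball") + WordEnd(alphanums)
+#     whole_ball.searchString("football ball baller")  # -> [['ball']]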
+
+class ParseExpression(ParserElement):
+    """
+    Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(ParseExpression,self).__init__(savelist)
+        if isinstance( exprs, _generatorType ):
+            exprs = list(exprs)
+
+        if isinstance( exprs, basestring ):
+            self.exprs = [ ParserElement._literalStringClass( exprs ) ]
+        elif isinstance( exprs, Iterable ):
+            exprs = list(exprs)
+            # if sequence of strings provided, wrap with Literal
+            if all(isinstance(expr, basestring) for expr in exprs):
+                exprs = map(ParserElement._literalStringClass, exprs)
+            self.exprs = list(exprs)
+        else:
+            try:
+                self.exprs = list( exprs )
+            except TypeError:
+                self.exprs = [ exprs ]
+        self.callPreparse = False
+
+    def __getitem__( self, i ):
+        return self.exprs[i]
+
+    def append( self, other ):
+        self.exprs.append( other )
+        self.strRepr = None
+        return self
+
+    def leaveWhitespace( self ):
+        """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
+           all contained expressions."""
+        self.skipWhitespace = False
+        self.exprs = [ e.copy() for e in self.exprs ]
+        for e in self.exprs:
+            e.leaveWhitespace()
+        return self
+
+    def ignore( self, other ):
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                super( ParseExpression, self).ignore( other )
+                for e in self.exprs:
+                    e.ignore( self.ignoreExprs[-1] )
+        else:
+            super( ParseExpression, self).ignore( other )
+            for e in self.exprs:
+                e.ignore( self.ignoreExprs[-1] )
+        return self
+
+    def __str__( self ):
+        try:
+            return super(ParseExpression,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None:
+            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
+        return self.strRepr
+
+    def streamline( self ):
+        super(ParseExpression,self).streamline()
+
+        for e in self.exprs:
+            e.streamline()
+
+        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
+        # but only if there are no parse actions or resultsNames on the nested And's
+        # (likewise for Or's and MatchFirst's)
+        if ( len(self.exprs) == 2 ):
+            other = self.exprs[0]
+            if ( isinstance( other, self.__class__ ) and
+                  not(other.parseAction) and
+                  other.resultsName is None and
+                  not other.debug ):
+                self.exprs = other.exprs[:] + [ self.exprs[1] ]
+                self.strRepr = None
+                self.mayReturnEmpty |= other.mayReturnEmpty
+                self.mayIndexError  |= other.mayIndexError
+
+            other = self.exprs[-1]
+            if ( isinstance( other, self.__class__ ) and
+                  not(other.parseAction) and
+                  other.resultsName is None and
+                  not other.debug ):
+                self.exprs = self.exprs[:-1] + other.exprs[:]
+                self.strRepr = None
+                self.mayReturnEmpty |= other.mayReturnEmpty
+                self.mayIndexError  |= other.mayIndexError
+
+        self.errmsg = "Expected " + _ustr(self)
+        
+        return self
+
+    def setResultsName( self, name, listAllMatches=False ):
+        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
+        return ret
+
+    def validate( self, validateTrace=[] ):
+        tmp = validateTrace[:]+[self]
+        for e in self.exprs:
+            e.validate(tmp)
+        self.checkRecursion( [] )
+        
+    def copy(self):
+        ret = super(ParseExpression,self).copy()
+        ret.exprs = [e.copy() for e in self.exprs]
+        return ret
+
+class And(ParseExpression):
+    """
+    Requires all given C{ParseExpression}s to be found in the given order.
+    Expressions may be separated by whitespace.
+    May be constructed using the C{'+'} operator.
+    May also be constructed using the C{'-'} operator, which will suppress backtracking.
+
+    Example::
+        integer = Word(nums)
+        name_expr = OneOrMore(Word(alphas))
+
+        expr = And([integer("id"),name_expr("name"),integer("age")])
+        # more easily written as:
+        expr = integer("id") + name_expr("name") + integer("age")
+    """
+
+    class _ErrorStop(Empty):
+        def __init__(self, *args, **kwargs):
+            super(And._ErrorStop,self).__init__(*args, **kwargs)
+            self.name = '-'
+            self.leaveWhitespace()
+
+    def __init__( self, exprs, savelist = True ):
+        super(And,self).__init__(exprs, savelist)
+        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+        self.setWhitespaceChars( self.exprs[0].whiteChars )
+        self.skipWhitespace = self.exprs[0].skipWhitespace
+        self.callPreparse = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        # pass False as last arg to _parse for first element, since we already
+        # pre-parsed the string as part of our And pre-parsing
+        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
+        errorStop = False
+        for e in self.exprs[1:]:
+            if isinstance(e, And._ErrorStop):
+                errorStop = True
+                continue
+            if errorStop:
+                try:
+                    loc, exprtokens = e._parse( instring, loc, doActions )
+                except ParseSyntaxException:
+                    raise
+                except ParseBaseException as pe:
+                    pe.__traceback__ = None
+                    raise ParseSyntaxException._from_exception(pe)
+                except IndexError:
+                    raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
+            else:
+                loc, exprtokens = e._parse( instring, loc, doActions )
+            if exprtokens or exprtokens.haskeys():
+                resultlist += exprtokens
+        return loc, resultlist
+
+    def __iadd__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #And( [ self, other ] )
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+            if not e.mayReturnEmpty:
+                break
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
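+# Note (sketch, names hypothetical): the '-' operator mentioned above inserts an
+# And._ErrorStop, so once parsing is past the '-', a later failure raises
+# ParseSyntaxException instead of backtracking to other alternatives:
+#
+#     stmt = Keyword("if") - condition + Keyword("then")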
+
+class Or(ParseExpression):
+    """
+    Requires that at least one C{ParseExpression} is found.
+    If two expressions match, the expression that matches the longest string will be used.
+    May be constructed using the C{'^'} operator.
+
+    Example::
+        # construct Or using '^' operator
+        
+        number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
+        print(number.searchString("123 3.1416 789"))
+    prints::
+        [['123'], ['3.1416'], ['789']]
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(Or,self).__init__(exprs, savelist)
+        if self.exprs:
+            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+        else:
+            self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        maxExcLoc = -1
+        maxException = None
+        matches = []
+        for e in self.exprs:
+            try:
+                loc2 = e.tryParse( instring, loc )
+            except ParseException as err:
+                err.__traceback__ = None
+                if err.loc > maxExcLoc:
+                    maxException = err
+                    maxExcLoc = err.loc
+            except IndexError:
+                if len(instring) > maxExcLoc:
+                    maxException = ParseException(instring,len(instring),e.errmsg,self)
+                    maxExcLoc = len(instring)
+            else:
+                # save match among all matches, to retry longest to shortest
+                matches.append((loc2, e))
+
+        if matches:
+            matches.sort(key=lambda x: -x[0])
+            for _,e in matches:
+                try:
+                    return e._parse( instring, loc, doActions )
+                except ParseException as err:
+                    err.__traceback__ = None
+                    if err.loc > maxExcLoc:
+                        maxException = err
+                        maxExcLoc = err.loc
+
+        if maxException is not None:
+            maxException.msg = self.errmsg
+            raise maxException
+        else:
+            raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+
+    def __ixor__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #Or( [ self, other ] )
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class MatchFirst(ParseExpression):
+    """
+    Requires that at least one C{ParseExpression} is found.
+    If two expressions match, the first one listed is the one that will match.
+    May be constructed using the C{'|'} operator.
+
+    Example::
+        # construct MatchFirst using '|' operator
+        
+        # watch the order of expressions to match
+        number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
+        print(number.searchString("123 3.1416 789")) #  Fail! -> [['123'], ['3'], ['1416'], ['789']]
+
+        # put more selective expression first
+        number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
+        print(number.searchString("123 3.1416 789")) #  Better -> [['123'], ['3.1416'], ['789']]
+    """
+    def __init__( self, exprs, savelist = False ):
+        super(MatchFirst,self).__init__(exprs, savelist)
+        if self.exprs:
+            self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+        else:
+            self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        maxExcLoc = -1
+        maxException = None
+        for e in self.exprs:
+            try:
+                ret = e._parse( instring, loc, doActions )
+                return ret
+            except ParseException as err:
+                if err.loc > maxExcLoc:
+                    maxException = err
+                    maxExcLoc = err.loc
+            except IndexError:
+                if len(instring) > maxExcLoc:
+                    maxException = ParseException(instring,len(instring),e.errmsg,self)
+                    maxExcLoc = len(instring)
+
+        # only got here if no expression matched; raise exception for the match that made it the furthest
+        if maxException is not None:
+            maxException.msg = self.errmsg
+            raise maxException
+        else:
+            raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+    def __ior__(self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass( other )
+        return self.append( other ) #MatchFirst( [ self, other ] )
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class Each(ParseExpression):
+    """
+    Requires all given C{ParseExpression}s to be found, but in any order.
+    Expressions may be separated by whitespace.
+    May be constructed using the C{'&'} operator.
+
+    Example::
+        color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
+        shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
+        integer = Word(nums)
+        shape_attr = "shape:" + shape_type("shape")
+        posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
+        color_attr = "color:" + color("color")
+        size_attr = "size:" + integer("size")
+
+        # use Each (using operator '&') to accept attributes in any order 
+        # (shape and posn are required, color and size are optional)
+        shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
+
+        shape_spec.runTests('''
+            shape: SQUARE color: BLACK posn: 100, 120
+            shape: CIRCLE size: 50 color: BLUE posn: 50,80
+            color:GREEN size:20 shape:TRIANGLE posn:20,40
+            '''
+            )
+    prints::
+        shape: SQUARE color: BLACK posn: 100, 120
+        ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
+        - color: BLACK
+        - posn: ['100', ',', '120']
+          - x: 100
+          - y: 120
+        - shape: SQUARE
+
+
+        shape: CIRCLE size: 50 color: BLUE posn: 50,80
+        ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
+        - color: BLUE
+        - posn: ['50', ',', '80']
+          - x: 50
+          - y: 80
+        - shape: CIRCLE
+        - size: 50
+
+
+        color: GREEN size: 20 shape: TRIANGLE posn: 20,40
+        ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
+        - color: GREEN
+        - posn: ['20', ',', '40']
+          - x: 20
+          - y: 40
+        - shape: TRIANGLE
+        - size: 20
+    """
+    def __init__( self, exprs, savelist = True ):
+        super(Each,self).__init__(exprs, savelist)
+        self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+        self.skipWhitespace = True
+        self.initExprGroups = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.initExprGroups:
+            self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
+            opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
+            opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
+            self.optionals = opt1 + opt2
+            self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
+            self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
+            self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
+            self.required += self.multirequired
+            self.initExprGroups = False
+        tmpLoc = loc
+        tmpReqd = self.required[:]
+        tmpOpt  = self.optionals[:]
+        matchOrder = []
+
+        keepMatching = True
+        while keepMatching:
+            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
+            failed = []
+            for e in tmpExprs:
+                try:
+                    tmpLoc = e.tryParse( instring, tmpLoc )
+                except ParseException:
+                    failed.append(e)
+                else:
+                    matchOrder.append(self.opt1map.get(id(e),e))
+                    if e in tmpReqd:
+                        tmpReqd.remove(e)
+                    elif e in tmpOpt:
+                        tmpOpt.remove(e)
+            if len(failed) == len(tmpExprs):
+                keepMatching = False
+
+        if tmpReqd:
+            missing = ", ".join(_ustr(e) for e in tmpReqd)
+            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
+
+        # add any unmatched Optionals, in case they have default values defined
+        matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
+
+        resultlist = []
+        for e in matchOrder:
+            loc,results = e._parse(instring,loc,doActions)
+            resultlist.append(results)
+
+        finalResults = sum(resultlist, ParseResults([]))
+        return loc, finalResults
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
+
+        return self.strRepr
+
+    def checkRecursion( self, parseElementList ):
+        subRecCheckList = parseElementList[:] + [ self ]
+        for e in self.exprs:
+            e.checkRecursion( subRecCheckList )
+
+
+class ParseElementEnhance(ParserElement):
+    """
+    Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
+    """
+    def __init__( self, expr, savelist=False ):
+        super(ParseElementEnhance,self).__init__(savelist)
+        if isinstance( expr, basestring ):
+            if issubclass(ParserElement._literalStringClass, Token):
+                expr = ParserElement._literalStringClass(expr)
+            else:
+                expr = ParserElement._literalStringClass(Literal(expr))
+        self.expr = expr
+        self.strRepr = None
+        if expr is not None:
+            self.mayIndexError = expr.mayIndexError
+            self.mayReturnEmpty = expr.mayReturnEmpty
+            self.setWhitespaceChars( expr.whiteChars )
+            self.skipWhitespace = expr.skipWhitespace
+            self.saveAsList = expr.saveAsList
+            self.callPreparse = expr.callPreparse
+            self.ignoreExprs.extend(expr.ignoreExprs)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.expr is not None:
+            return self.expr._parse( instring, loc, doActions, callPreParse=False )
+        else:
+            raise ParseException("",loc,self.errmsg,self)
+
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        if self.expr is not None:
+            # copy before modifying, so shared references are not affected
+            self.expr = self.expr.copy()
+            self.expr.leaveWhitespace()
+        return self
+
+    def ignore( self, other ):
+        if isinstance( other, Suppress ):
+            if other not in self.ignoreExprs:
+                super( ParseElementEnhance, self).ignore( other )
+                if self.expr is not None:
+                    self.expr.ignore( self.ignoreExprs[-1] )
+        else:
+            super( ParseElementEnhance, self).ignore( other )
+            if self.expr is not None:
+                self.expr.ignore( self.ignoreExprs[-1] )
+        return self
+
+    def streamline( self ):
+        super(ParseElementEnhance,self).streamline()
+        if self.expr is not None:
+            self.expr.streamline()
+        return self
+
+    def checkRecursion( self, parseElementList ):
+        if self in parseElementList:
+            raise RecursiveGrammarException( parseElementList+[self] )
+        subRecCheckList = parseElementList[:] + [ self ]
+        if self.expr is not None:
+            self.expr.checkRecursion( subRecCheckList )
+
+    def validate( self, validateTrace=[] ):
+        tmp = validateTrace[:]+[self]
+        if self.expr is not None:
+            self.expr.validate(tmp)
+        self.checkRecursion( [] )
+
+    def __str__( self ):
+        try:
+            return super(ParseElementEnhance,self).__str__()
+        except Exception:
+            pass
+
+        if self.strRepr is None and self.expr is not None:
+            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
+        return self.strRepr
+
+
+class FollowedBy(ParseElementEnhance):
+    """
+    Lookahead matching of the given parse expression.  C{FollowedBy}
+    does I{not} advance the parsing position within the input string, it only
+    verifies that the specified parse expression matches at the current
+    position.  C{FollowedBy} always returns a null token list.
+
+    Example::
+        # use FollowedBy to match a label only if it is followed by a ':'
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        
+        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
+    prints::
+        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
+    """
+    def __init__( self, expr ):
+        super(FollowedBy,self).__init__(expr)
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        self.expr.tryParse( instring, loc )
+        return loc, []
+
+
+class NotAny(ParseElementEnhance):
+    """
+    Lookahead to disallow matching with the given parse expression.  C{NotAny}
+    does I{not} advance the parsing position within the input string, it only
+    verifies that the specified parse expression does I{not} match at the current
+    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
+    always returns a null token list.  May be constructed using the '~' operator.
+
+    Example::
+        
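+        # illustrative example: match a word only if it is not a reserved keyword
+        keyword = Keyword("if") | Keyword("else")
+        identifier = NotAny(keyword) + Word(alphas)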
+    """
+    def __init__( self, expr ):
+        super(NotAny,self).__init__(expr)
+        #~ self.leaveWhitespace()
+        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
+        self.mayReturnEmpty = True
+        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        if self.expr.canParseNext(instring, loc):
+            raise ParseException(instring, loc, self.errmsg, self)
+        return loc, []
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "~{" + _ustr(self.expr) + "}"
+
+        return self.strRepr
+
+class _MultipleMatch(ParseElementEnhance):
+    def __init__( self, expr, stopOn=None):
+        super(_MultipleMatch, self).__init__(expr)
+        self.saveAsList = True
+        ender = stopOn
+        if isinstance(ender, basestring):
+            ender = ParserElement._literalStringClass(ender)
+        self.not_ender = ~ender if ender is not None else None
+
+    def parseImpl( self, instring, loc, doActions=True ):
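+        # cache bound methods in locals for speed inside the match loop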
+        self_expr_parse = self.expr._parse
+        self_skip_ignorables = self._skipIgnorables
+        check_ender = self.not_ender is not None
+        if check_ender:
+            try_not_ender = self.not_ender.tryParse
+        
+        # must be at least one (but first see if we are the stopOn sentinel;
+        # if so, fail)
+        if check_ender:
+            try_not_ender(instring, loc)
+        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
+        try:
+            hasIgnoreExprs = bool(self.ignoreExprs)
+            while 1:
+                if check_ender:
+                    try_not_ender(instring, loc)
+                if hasIgnoreExprs:
+                    preloc = self_skip_ignorables( instring, loc )
+                else:
+                    preloc = loc
+                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
+                if tmptokens or tmptokens.haskeys():
+                    tokens += tmptokens
+        except (ParseException,IndexError):
+            pass
+
+        return loc, tokens
+        
+class OneOrMore(_MultipleMatch):
+    """
+    Repetition of one or more of the given expression.
+    
+    Parameters:
+     - expr - expression that must match one or more times
+     - stopOn - (default=C{None}) - expression for a terminating sentinel
+          (only required if the sentinel would ordinarily match the repetition 
+          expression)          
+
+    Example::
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: BLACK"
+        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
+
+        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
+        
+        # could also be written as
+        (attr_expr * (1,)).parseString(text).pprint()
+    """
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "{" + _ustr(self.expr) + "}..."
+
+        return self.strRepr
+
+class ZeroOrMore(_MultipleMatch):
+    """
+    Optional repetition of zero or more of the given expression.
+    
+    Parameters:
+     - expr - expression that must match zero or more times
+     - stopOn - (default=C{None}) - expression for a terminating sentinel
+          (only required if the sentinel would ordinarily match the repetition 
+          expression)          
+
+    Example: similar to L{OneOrMore}
+    """
+    def __init__( self, expr, stopOn=None):
+        super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
+        self.mayReturnEmpty = True
+        
+    def parseImpl( self, instring, loc, doActions=True ):
+        try:
+            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
+        except (ParseException,IndexError):
+            return loc, []
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "[" + _ustr(self.expr) + "]..."
+
+        return self.strRepr
+
+class _NullToken(object):
+    def __bool__(self):
+        return False
+    __nonzero__ = __bool__
+    def __str__(self):
+        return ""
+
+_optionalNotMatched = _NullToken()
+class Optional(ParseElementEnhance):
+    """
+    Optional matching of the given expression.
+
+    Parameters:
+     - expr - expression that must match zero or one time
+     - default (optional) - value to be returned if the optional expression is not found.
+
+    Example::
+        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
+        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
+        zip.runTests('''
+            # traditional ZIP code
+            12345
+            
+            # ZIP+4 form
+            12101-0001
+            
+            # invalid ZIP
+            98765-
+            ''')
+    prints::
+        # traditional ZIP code
+        12345
+        ['12345']
+
+        # ZIP+4 form
+        12101-0001
+        ['12101-0001']
+
+        # invalid ZIP
+        98765-
+             ^
+        FAIL: Expected end of text (at char 5), (line:1, col:6)
+    """
+    def __init__( self, expr, default=_optionalNotMatched ):
+        super(Optional,self).__init__( expr, savelist=False )
+        self.saveAsList = self.expr.saveAsList
+        self.defaultValue = default
+        self.mayReturnEmpty = True
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        try:
+            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
+        except (ParseException,IndexError):
+            if self.defaultValue is not _optionalNotMatched:
+                if self.expr.resultsName:
+                    tokens = ParseResults([ self.defaultValue ])
+                    tokens[self.expr.resultsName] = self.defaultValue
+                else:
+                    tokens = [ self.defaultValue ]
+            else:
+                tokens = []
+        return loc, tokens
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+
+        if self.strRepr is None:
+            self.strRepr = "[" + _ustr(self.expr) + "]"
+
+        return self.strRepr
+
+class SkipTo(ParseElementEnhance):
+    """
+    Token for skipping over all undefined text until the matched expression is found.
+
+    Parameters:
+     - expr - target expression marking the end of the data to be skipped
+     - include - (default=C{False}) if True, the target expression is also parsed 
+          (the skipped text and target expression are returned as a 2-element list).
+     - ignore - (default=C{None}) used to define grammars (typically quoted strings and 
+          comments) that might contain false matches to the target expression
+     - failOn - (default=C{None}) define expressions that are not allowed to be 
+          included in the skipped text; if found before the target expression is found,
+          the SkipTo is not a match
+
+    Example::
+        report = '''
+            Outstanding Issues Report - 1 Jan 2000
+
+               # | Severity | Description                               |  Days Open
+            -----+----------+-------------------------------------------+-----------
+             101 | Critical | Intermittent system crash                 |          6
+              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
+              79 | Minor    | System slow when running too many reports |         47
+            '''
+        integer = Word(nums)
+        SEP = Suppress('|')
+        # use SkipTo to simply match everything up until the next SEP
+        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
+        # - parse action will call token.strip() for each matched token, i.e., the description body
+        string_data = SkipTo(SEP, ignore=quotedString)
+        string_data.setParseAction(tokenMap(str.strip))
+        ticket_expr = (integer("issue_num") + SEP 
+                      + string_data("sev") + SEP 
+                      + string_data("desc") + SEP 
+                      + integer("days_open"))
+        
+        for tkt in ticket_expr.searchString(report):
+            print(tkt.dump())
+    prints::
+        ['101', 'Critical', 'Intermittent system crash', '6']
+        - days_open: 6
+        - desc: Intermittent system crash
+        - issue_num: 101
+        - sev: Critical
+        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
+        - days_open: 14
+        - desc: Spelling error on Login ('log|n')
+        - issue_num: 94
+        - sev: Cosmetic
+        ['79', 'Minor', 'System slow when running too many reports', '47']
+        - days_open: 47
+        - desc: System slow when running too many reports
+        - issue_num: 79
+        - sev: Minor
+    """
+    def __init__( self, other, include=False, ignore=None, failOn=None ):
+        super( SkipTo, self ).__init__( other )
+        self.ignoreExpr = ignore
+        self.mayReturnEmpty = True
+        self.mayIndexError = False
+        self.includeMatch = include
+        self.asList = False
+        if isinstance(failOn, basestring):
+            self.failOn = ParserElement._literalStringClass(failOn)
+        else:
+            self.failOn = failOn
+        self.errmsg = "No match found for "+_ustr(self.expr)
+
+    def parseImpl( self, instring, loc, doActions=True ):
+        startloc = loc
+        instrlen = len(instring)
+        expr = self.expr
+        expr_parse = self.expr._parse
+        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
+        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
+        
+        tmploc = loc
+        while tmploc <= instrlen:
+            if self_failOn_canParseNext is not None:
+                # break if failOn expression matches
+                if self_failOn_canParseNext(instring, tmploc):
+                    break
+                    
+            if self_ignoreExpr_tryParse is not None:
+                # advance past ignore expressions
+                while 1:
+                    try:
+                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
+                    except ParseBaseException:
+                        break
+            
+            try:
+                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
+            except (ParseException, IndexError):
+                # no match, advance loc in string
+                tmploc += 1
+            else:
+                # matched skipto expr, done
+                break
+
+        else:
+            # ran off the end of the input string without matching skipto expr, fail
+            raise ParseException(instring, loc, self.errmsg, self)
+
+        # build up return values
+        loc = tmploc
+        skiptext = instring[startloc:loc]
+        skipresult = ParseResults(skiptext)
+        
+        if self.includeMatch:
+            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
+            skipresult += mat
+
+        return loc, skipresult
+
+class Forward(ParseElementEnhance):
+    """
+    Forward declaration of an expression to be defined later -
+    used for recursive grammars, such as algebraic infix notation.
+    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
+
+    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
+    Specifically, '|' has a lower precedence than '<<', so that::
+        fwdExpr << a | b | c
+    will actually be evaluated as::
+        (fwdExpr << a) | b | c
+    thereby leaving b and c out as parseable alternatives.  It is recommended that you
+    explicitly group the values inserted into the C{Forward}::
+        fwdExpr << (a | b | c)
+    Converting to use the '<<=' operator instead will avoid this problem.
+
+    See L{ParseResults.pprint} for an example of a recursive parser created using
+    C{Forward}.
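+
+    Example (a minimal sketch of a recursive grammar)::
+        # nested lists of integers, e.g. "[1 [2 3] 4]"
+        LBRACK, RBRACK = map(Suppress, "[]")
+        item = Forward()
+        item <<= Word(nums) | Group(LBRACK + ZeroOrMore(item) + RBRACK)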
+    """
+    def __init__( self, other=None ):
+        super(Forward,self).__init__( other, savelist=False )
+
+    def __lshift__( self, other ):
+        if isinstance( other, basestring ):
+            other = ParserElement._literalStringClass(other)
+        self.expr = other
+        self.strRepr = None
+        self.mayIndexError = self.expr.mayIndexError
+        self.mayReturnEmpty = self.expr.mayReturnEmpty
+        self.setWhitespaceChars( self.expr.whiteChars )
+        self.skipWhitespace = self.expr.skipWhitespace
+        self.saveAsList = self.expr.saveAsList
+        self.ignoreExprs.extend(self.expr.ignoreExprs)
+        return self
+        
+    def __ilshift__(self, other):
+        return self << other
+    
+    def leaveWhitespace( self ):
+        self.skipWhitespace = False
+        return self
+
+    def streamline( self ):
+        if not self.streamlined:
+            self.streamlined = True
+            if self.expr is not None:
+                self.expr.streamline()
+        return self
+
+    def validate( self, validateTrace=[] ):
+        if self not in validateTrace:
+            tmp = validateTrace[:]+[self]
+            if self.expr is not None:
+                self.expr.validate(tmp)
+        self.checkRecursion([])
+
+    def __str__( self ):
+        if hasattr(self,"name"):
+            return self.name
+        return self.__class__.__name__ + ": ..."
+
+        # stubbed out for now - creates awful memory and perf issues
+        self._revertClass = self.__class__
+        self.__class__ = _ForwardNoRecurse
+        try:
+            if self.expr is not None:
+                retString = _ustr(self.expr)
+            else:
+                retString = "None"
+        finally:
+            self.__class__ = self._revertClass
+        return self.__class__.__name__ + ": " + retString
+
+    def copy(self):
+        if self.expr is not None:
+            return super(Forward,self).copy()
+        else:
+            ret = Forward()
+            ret <<= self
+            return ret
+
+class _ForwardNoRecurse(Forward):
+    def __str__( self ):
+        return "..."
+
+class TokenConverter(ParseElementEnhance):
+    """
+    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
+    """
+    def __init__( self, expr, savelist=False ):
+        super(TokenConverter,self).__init__( expr )#, savelist )
+        self.saveAsList = False
+
+class Combine(TokenConverter):
+    """
+    Converter to concatenate all matching tokens to a single string.
+    By default, the matching patterns must also be contiguous in the input string;
+    this can be disabled by specifying C{'adjacent=False'} in the constructor.
+
+    Example::
+        real = Word(nums) + '.' + Word(nums)
+        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
+        # will also erroneously match the following
+        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
+
+        real = Combine(Word(nums) + '.' + Word(nums))
+        print(real.parseString('3.1416')) # -> ['3.1416']
+        # no match when there are internal spaces
+        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
+    """
+    def __init__( self, expr, joinString="", adjacent=True ):
+        super(Combine,self).__init__( expr )
+        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+        if adjacent:
+            self.leaveWhitespace()
+        self.adjacent = adjacent
+        self.skipWhitespace = True
+        self.joinString = joinString
+        self.callPreparse = True
+
+    def ignore( self, other ):
+        if self.adjacent:
+            ParserElement.ignore(self, other)
+        else:
+            super( Combine, self).ignore( other )
+        return self
+
+    def postParse( self, instring, loc, tokenlist ):
+        retToks = tokenlist.copy()
+        del retToks[:]
+        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
+
+        if self.resultsName and retToks.haskeys():
+            return [ retToks ]
+        else:
+            return retToks
+
+class Group(TokenConverter):
+    """
+    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
+
+    Example::
+        ident = Word(alphas)
+        num = Word(nums)
+        term = ident | num
+        func = ident + Optional(delimitedList(term))
+        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']
+
+        func = ident + Group(Optional(delimitedList(term)))
+        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
+    """
+    def __init__( self, expr ):
+        super(Group,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
+        return [ tokenlist ]
+
+class Dict(TokenConverter):
+    """
+    Converter to return a repetitive expression as a list, but also as a dictionary.
+    Each element can also be referenced using the first token in the expression as its key.
+    Useful for tabular report scraping when the first column can be used as an item key.
+
+    Example::
+        data_word = Word(alphas)
+        label = data_word + FollowedBy(':')
+        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        
+        # print attributes as plain groups
+        print(OneOrMore(attr_expr).parseString(text).dump())
+        
+        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
+        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+        print(result.dump())
+        
+        # access named fields as dict entries, or output as dict
+        print(result['shape'])        
+        print(result.asDict())
+    prints::
+        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+    See more examples at L{ParseResults} of accessing fields by results name.
+    """
+    def __init__( self, expr ):
+        super(Dict,self).__init__( expr )
+        self.saveAsList = True
+
+    def postParse( self, instring, loc, tokenlist ):
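+        # each matched sub-group contributes one entry: its first token is the
+        # key; with no further tokens the value is "", a single plain token is
+        # stored as a scalar, and anything longer is kept as a sub-list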
+        for i,tok in enumerate(tokenlist):
+            if len(tok) == 0:
+                continue
+            ikey = tok[0]
+            if isinstance(ikey,int):
+                ikey = _ustr(tok[0]).strip()
+            if len(tok)==1:
+                tokenlist[ikey] = _ParseResultsWithOffset("",i)
+            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
+                tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
+            else:
+                dictvalue = tok.copy() #ParseResults(i)
+                del dictvalue[0]
+                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
+                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
+                else:
+                    tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
+
+        if self.resultsName:
+            return [ tokenlist ]
+        else:
+            return tokenlist
+
+
+class Suppress(TokenConverter):
+    """
+    Converter for ignoring the results of a parsed expression.
+
+    Example::
+        source = "a, b, c,d"
+        wd = Word(alphas)
+        wd_list1 = wd + ZeroOrMore(',' + wd)
+        print(wd_list1.parseString(source))
+
+        # often, delimiters that are useful during parsing are just in the
+        # way afterward - use Suppress to keep them out of the parsed output
+        wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+        print(wd_list2.parseString(source))
+    prints::
+        ['a', ',', 'b', ',', 'c', ',', 'd']
+        ['a', 'b', 'c', 'd']
+    (See also L{delimitedList}.)
+    """
+    def postParse( self, instring, loc, tokenlist ):
+        return []
+
+    def suppress( self ):
+        return self
+
+
+class OnlyOnce(object):
+    """
+    Wrapper for parse actions, to ensure they are only called once.
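+
+    Example (sketch)::
+        # the wrapped action fires on the first match only; later matches
+        # raise ParseException until reset() is called
+        def announce(s, l, t):
+            print("first match:", t)
+        wd = Word(alphas).setParseAction(OnlyOnce(announce))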
+    """
+    def __init__(self, methodCall):
+        self.callable = _trim_arity(methodCall)
+        self.called = False
+    def __call__(self,s,l,t):
+        if not self.called:
+            results = self.callable(s,l,t)
+            self.called = True
+            return results
+        raise ParseException(s,l,"")
+    def reset(self):
+        self.called = False
+
+def traceParseAction(f):
+    """
+    Decorator for debugging parse actions. 
+    
+    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})"}.
+    When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
+
+    Example::
+        wd = Word(alphas)
+
+        @traceParseAction
+        def remove_duplicate_chars(tokens):
+            return ''.join(sorted(set(''.join(tokens))))
+
+        wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
+        print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
+    prints::
+        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+        <<leaving remove_duplicate_chars (ret: 'dfjkls')
+        ['dfjkls']
+    """
+    f = _trim_arity(f)
+    def z(*paArgs):
+        thisFunc = f.__name__
+        s,l,t = paArgs[-3:]
+        if len(paArgs)>3:
+            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
+        sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
+        try:
+            ret = f(*paArgs)
+        except Exception as exc:
+            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
+            raise
+        sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
+        return ret
+    try:
+        z.__name__ = f.__name__
+    except AttributeError:
+        pass
+    return z
+
+#
+# global helpers
+#
+def delimitedList( expr, delim=",", combine=False ):
+    """
+    Helper to define a delimited list of expressions - the delimiter defaults to ','.
+    By default, the list elements and delimiters can have intervening whitespace and
+    comments, but this can be overridden by passing C{combine=True}.
+    If C{combine} is set to C{True}, the matching tokens are returned as a single token
+    string, with the delimiters included; otherwise, the matching tokens are returned
+    as a list of tokens, with the delimiters suppressed.
+
+    Example::
+        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
+        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
+    """
+    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
+    if combine:
+        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
+    else:
+        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
+
+def countedArray( expr, intExpr=None ):
+    """
+    Helper to define a counted list of expressions.
+    This helper defines a pattern of the form::
+        integer expr expr expr...
+    where the leading integer tells how many expr expressions follow.
+    The matched expr tokens are returned as a list - the leading count token is suppressed.
+    
+    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
+
+    Example::
+        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']
+
+        # in this parser, the leading integer value is given in binary,
+        # '10' indicating that 2 values are in the array
+        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
+        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
+    """
+    arrayExpr = Forward()
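+    # the count token's parse action (below) rewires arrayExpr on the fly to
+    # expect exactly n copies of expr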
+    def countFieldParseAction(s,l,t):
+        n = t[0]
+        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
+        return []
+    if intExpr is None:
+        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
+    else:
+        intExpr = intExpr.copy()
+    intExpr.setName("arrayLen")
+    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
+    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
+
+def _flatten(L):
+    ret = []
+    for i in L:
+        if isinstance(i,list):
+            ret.extend(_flatten(i))
+        else:
+            ret.append(i)
+    return ret
+
+def matchPreviousLiteral(expr):
+    """
+    Helper to define an expression that is indirectly defined from
+    the tokens matched in a previous expression, that is, it looks
+    for a 'repeat' of a previous expression.  For example::
+        first = Word(nums)
+        second = matchPreviousLiteral(first)
+        matchExpr = first + ":" + second
+    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
+    previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
+    If this is not desired, use C{matchPreviousExpr}.
+    Do I{not} use with packrat parsing enabled.
+    """
+    rep = Forward()
+    def copyTokenToRepeater(s,l,t):
+        if t:
+            if len(t) == 1:
+                rep << t[0]
+            else:
+                # flatten t tokens
+                tflat = _flatten(t.asList())
+                rep << And(Literal(tt) for tt in tflat)
+        else:
+            rep << Empty()
+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+    rep.setName('(prev) ' + _ustr(expr))
+    return rep
+
+def matchPreviousExpr(expr):
+    """
+    Helper to define an expression that is indirectly defined from
+    the tokens matched in a previous expression, that is, it looks
+    for a 'repeat' of a previous expression.  For example::
+        first = Word(nums)
+        second = matchPreviousExpr(first)
+        matchExpr = first + ":" + second
+    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
+    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
+    the expressions are evaluated first, and then compared, so
+    C{"1"} is compared with C{"10"}.
+    Do I{not} use with packrat parsing enabled.
+    """
+    rep = Forward()
+    e2 = expr.copy()
+    rep <<= e2
+    def copyTokenToRepeater(s,l,t):
+        matchTokens = _flatten(t.asList())
+        def mustMatchTheseTokens(s,l,t):
+            theseTokens = _flatten(t.asList())
+            if  theseTokens != matchTokens:
+                raise ParseException("",0,"")
+        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
+    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+    rep.setName('(prev) ' + _ustr(expr))
+    return rep
+
+def _escapeRegexRangeChars(s):
+    #~  escape these chars: ^-]
+    for c in r"\^-]":
+        s = s.replace(c,_bslash+c)
+    s = s.replace("\n",r"\n")
+    s = s.replace("\t",r"\t")
+    return _ustr(s)
+
+def oneOf( strs, caseless=False, useRegex=True ):
+    """
+    Helper to quickly define a set of alternative Literals, making sure to do
+    longest-first testing when there is a conflict, regardless of the input order,
+    while returning a C{L{MatchFirst}} for best performance.
+
+    Parameters:
+     - strs - a string of space-delimited literals, or a collection of string literals
+     - caseless - (default=C{False}) - treat all literals as caseless
+     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
+          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
+          if creating a C{Regex} raises an exception)
+
+    Example::
+        comp_oper = oneOf("< = > <= >= !=")
+        var = Word(alphas)
+        number = Word(nums)
+        term = var | number
+        comparison_expr = term + comp_oper + term
+        print(comparison_expr.searchString("B = 12  AA=23 B<=AA AA>12"))
+    prints::
+        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
+    """
+    if caseless:
+        isequal = ( lambda a,b: a.upper() == b.upper() )
+        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
+        parseElementClass = CaselessLiteral
+    else:
+        isequal = ( lambda a,b: a == b )
+        masks = ( lambda a,b: b.startswith(a) )
+        parseElementClass = Literal
+
+    symbols = []
+    if isinstance(strs,basestring):
+        symbols = strs.split()
+    elif isinstance(strs, Iterable):
+        symbols = list(strs)
+    else:
+        warnings.warn("Invalid argument to oneOf, expected string or iterable",
+                SyntaxWarning, stacklevel=2)
+    if not symbols:
+        return NoMatch()
+
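+    # remove duplicate symbols, and move any symbol that is masked by an
+    # earlier, shorter prefix ahead of that prefix (longest-first testing)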
+    i = 0
+    while i < len(symbols)-1:
+        cur = symbols[i]
+        for j,other in enumerate(symbols[i+1:]):
+            if ( isequal(other, cur) ):
+                del symbols[i+j+1]
+                break
+            elif ( masks(cur, other) ):
+                del symbols[i+j+1]
+                symbols.insert(i,other)
+                cur = other
+                break
+        else:
+            i += 1
+
+    if not caseless and useRegex:
+        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
+        try:
+            if len(symbols)==len("".join(symbols)):
+                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
+            else:
+                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
+        except Exception:
+            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
+                    SyntaxWarning, stacklevel=2)
+
+
+    # last resort, just use MatchFirst
+    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
+
+def dictOf( key, value ):
+    """
+    Helper to easily and clearly define a dictionary by specifying the respective patterns
+    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
+    in the proper order.  The key pattern can include delimiting markers or punctuation,
+    as long as they are suppressed, thereby leaving the significant key text.  The value
+    pattern can include named results, so that the C{Dict} results can include named token
+    fields.
+
+    Example::
+        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+        print(OneOrMore(attr_expr).parseString(text).dump())
+        
+        attr_label = label
+        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
+
+        # similar to Dict, but simpler call format
+        result = dictOf(attr_label, attr_value).parseString(text)
+        print(result.dump())
+        print(result['shape'])
+        print(result.shape)  # object attribute access works too
+        print(result.asDict())
+    prints::
+        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+        - color: light blue
+        - posn: upper left
+        - shape: SQUARE
+        - texture: burlap
+        SQUARE
+        SQUARE
+        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+    """
+    return Dict( ZeroOrMore( Group ( key + value ) ) )
+
+def originalTextFor(expr, asString=True):
+    """
+    Helper to return the original, untokenized text for a given expression.  Useful to
+    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
+    revert separate tokens with intervening whitespace back to the original matching
+    input text. By default, returns a string containing the original parsed text.
+       
+    If the optional C{asString} argument is passed as C{False}, then the return value is a 
+    C{L{ParseResults}} containing any results names that were originally matched, and a 
+    single token containing the original matched text from the input string.  So if 
+    the expression passed to C{L{originalTextFor}} contains expressions with defined
+    results names, you must set C{asString} to C{False} if you want to preserve those
+    results name values.
+
+    Example::
+        src = "this is test <b> bold <i>text</i> </b> normal text "
+        for tag in ("b","i"):
+            opener,closer = makeHTMLTags(tag)
+            patt = originalTextFor(opener + SkipTo(closer) + closer)
+            print(patt.searchString(src)[0])
+    prints::
+        ['<b> bold <i>text</i> </b>']
+        ['<i>text</i>']
+    """
+    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
+    endlocMarker = locMarker.copy()
+    endlocMarker.callPreparse = False
+    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+    if asString:
+        extractText = lambda s,l,t: s[t._original_start:t._original_end]
+    else:
+        def extractText(s,l,t):
+            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
+    matchExpr.setParseAction(extractText)
+    matchExpr.ignoreExprs = expr.ignoreExprs
+    return matchExpr
+
+def ungroup(expr): 
+    """
+    Helper to undo pyparsing's default grouping of And expressions, even
+    if all but one are non-empty.
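+
+    Example (sketch)::
+        grouped = Group(Word(alphas) + Word(nums))
+        print(grouped.parseString("abc 123"))           # -> [['abc', '123']]
+        print(ungroup(grouped).parseString("abc 123"))  # -> ['abc', '123']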
+    """
+    return TokenConverter(expr).setParseAction(lambda t:t[0])
+
+def locatedExpr(expr):
+    """
+    Helper to decorate a returned token with its starting and ending locations in the input string.
+    This helper adds the following results names:
+     - locn_start = location where matched expression begins
+     - locn_end = location where matched expression ends
+     - value = the actual parsed results
+
+    Be careful if the input text contains C{<TAB>} characters; you may want to call
+    C{L{ParserElement.parseWithTabs}}
+
+    Example::
+        wd = Word(alphas)
+        for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
+            print(match)
+    prints::
+        [[0, 'ljsdf', 5]]
+        [[8, 'lksdjjf', 15]]
+        [[18, 'lkkjj', 23]]
+    """
+    locator = Empty().setParseAction(lambda s,l,t: l)
+    return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
+
+
+# convenience constants for positional expressions
+empty       = Empty().setName("empty")
+lineStart   = LineStart().setName("lineStart")
+lineEnd     = LineEnd().setName("lineEnd")
+stringStart = StringStart().setName("stringStart")
+stringEnd   = StringEnd().setName("stringEnd")
+
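+# internal expressions used by srange() to parse regex-style [] character sets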
+_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
+_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
+_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
+
+def srange(s):
+    r"""
+    Helper to easily define string ranges for use in Word construction.  Borrows
+    syntax from regexp '[]' string range definitions::
+        srange("[0-9]")   -> "0123456789"
+        srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
+        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+    The input string must be enclosed in []'s, and the returned string is the expanded
+    character set joined into a single string.
+    The values enclosed in the []'s may be:
+     - a single character
+     - an escaped character with a leading backslash (such as C{\-} or C{\]})
+     - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) 
+         (C{\0x##} is also supported for backwards compatibility) 
+     - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
+     - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
+     - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
+    """
+    _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
+    try:
+        return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
+    except Exception:
+        return ""
+
+def matchOnlyAtCol(n):
+    """
+    Helper method for defining parse actions that require matching at a specific
+    column in the input text.
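+
+    Example (sketch)::
+        # only accept a number if it starts in column 5
+        col5_num = Word(nums).setParseAction(matchOnlyAtCol(5))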
+    """
+    def verifyCol(strg,locn,toks):
+        if col(locn,strg) != n:
+            raise ParseException(strg,locn,"matched token not at column %d" % n)
+    return verifyCol
+
+def replaceWith(replStr):
+    """
+    Helper method for common parse actions that simply return a literal value.  Especially
+    useful when used with C{L{transformString<ParserElement.transformString>}()}.
+
+    Example::
+        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
+        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
+        term = na | num
+        
+        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
+    """
+    return lambda s,l,t: [replStr]
+
+def removeQuotes(s,l,t):
+    """
+    Helper parse action for removing quotation marks from parsed quoted strings.
+
+    Example::
+        # by default, quotation marks are included in parsed results
+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+        # use removeQuotes to strip quotation marks from parsed results
+        quotedString.setParseAction(removeQuotes)
+        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+    """
+    return t[0][1:-1]
+
+def tokenMap(func, *args):
+    """
+    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
+    args are passed, they are forwarded to the given function as additional arguments after
+    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
+    parsed data to an integer using base 16.
+
+    Example (compare the last example to that in L{ParserElement.transformString})::
+        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
+        hex_ints.runTests('''
+            00 11 22 aa FF 0a 0d 1a
+            ''')
+        
+        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
+        OneOrMore(upperword).runTests('''
+            my kingdom for a horse
+            ''')
+
+        wd = Word(alphas).setParseAction(tokenMap(str.title))
+        OneOrMore(wd).setParseAction(' '.join).runTests('''
+            now is the winter of our discontent made glorious summer by this sun of york
+            ''')
+    prints::
+        00 11 22 aa FF 0a 0d 1a
+        [0, 17, 34, 170, 255, 10, 13, 26]
+
+        my kingdom for a horse
+        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
+
+        now is the winter of our discontent made glorious summer by this sun of york
+        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
+    """
+    def pa(s,l,t):
+        return [func(tokn, *args) for tokn in t]
+
+    try:
+        func_name = getattr(func, '__name__', 
+                            getattr(func, '__class__').__name__)
+    except Exception:
+        func_name = str(func)
+    pa.__name__ = func_name
+
+    return pa
+
+upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
+"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
+
+downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
+"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
+    
+def _makeTags(tagStr, xml):
+    """Internal helper to construct opening and closing tag expressions, given a tag name"""
+    if isinstance(tagStr,basestring):
+        resname = tagStr
+        tagStr = Keyword(tagStr, caseless=not xml)
+    else:
+        resname = tagStr.name
+
+    tagAttrName = Word(alphas,alphanums+"_-:")
+    if (xml):
+        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
+        openTag = Suppress("<") + tagStr("tag") + \
+                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
+                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+    else:
+        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
+        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
+        openTag = Suppress("<") + tagStr("tag") + \
+                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
+                Optional( Suppress("=") + tagAttrValue ) ))) + \
+                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+    closeTag = Combine(_L("</") + tagStr + ">")
+
+    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
+    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
+    openTag.tag = resname
+    closeTag.tag = resname
+    return openTag, closeTag
+
+def makeHTMLTags(tagStr):
+    """
+    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
+    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
+
+    Example::
+        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
+        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
+        a,a_end = makeHTMLTags("A")
+        link_expr = a + SkipTo(a_end)("link_text") + a_end
+        
+        for link in link_expr.searchString(text):
+            # attributes in the <A> tag (like "href" shown here) are also accessible as named results
+            print(link.link_text, '->', link.href)
+    prints::
+        pyparsing -> http://pyparsing.wikispaces.com
+    """
+    return _makeTags( tagStr, False )
+
+def makeXMLTags(tagStr):
+    """
+    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
+    tags only in the given upper/lower case.
+
+    Example: similar to L{makeHTMLTags}
+    """
+    return _makeTags( tagStr, True )
+
+def withAttribute(*args,**attrDict):
+    """
+    Helper to create a validating parse action to be used with start tags created
+    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
+    with a required attribute value, to avoid false matches on common tags such as
+    C{<TD>} or C{<DIV>}.
+
+    Call C{withAttribute} with a series of attribute names and values. Specify the list
+    of filter attribute names and values as:
+     - keyword arguments, as in C{(align="right")}, or
+     - as an explicit dict with C{**} operator, when an attribute name is also a Python
+          reserved word, as in C{**{"class":"Customer", "align":"right"}}
+     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
+    For attribute names with a namespace prefix, you must use the second form.  Attribute
+    names are matched insensitive to upper/lower case.
+       
+    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
+
+    To verify that the attribute exists, but without specifying a value, pass
+    C{withAttribute.ANY_VALUE} as the value.
+
+    Example::
+        html = '''
+            <div>
+            Some text
+            <div type="grid">1 4 0 1 0</div>
+            <div type="graph">1,3 2,3 1,1</div>
+            <div>this has no type</div>
+            </div>
+                
+        '''
+        div,div_end = makeHTMLTags("div")
+
+        # only match div tag having a type attribute with value "grid"
+        div_grid = div().setParseAction(withAttribute(type="grid"))
+        grid_expr = div_grid + SkipTo(div | div_end)("body")
+        for grid_header in grid_expr.searchString(html):
+            print(grid_header.body)
+        
+        # construct a match with any div tag having a type attribute, regardless of the value
+        div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
+        div_expr = div_any_type + SkipTo(div | div_end)("body")
+        for div_header in div_expr.searchString(html):
+            print(div_header.body)
+    prints::
+        1 4 0 1 0
+
+        1 4 0 1 0
+        1,3 2,3 1,1
+    """
+    if args:
+        attrs = args[:]
+    else:
+        attrs = attrDict.items()
+    attrs = [(k,v) for k,v in attrs]
+    def pa(s,l,tokens):
+        for attrName,attrValue in attrs:
+            if attrName not in tokens:
+                raise ParseException(s,l,"no matching attribute " + attrName)
+            if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
+                raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
+                                            (attrName, tokens[attrName], attrValue))
+    return pa
+withAttribute.ANY_VALUE = object()
+
+def withClass(classname, namespace=''):
+    """
+    Simplified version of C{L{withAttribute}} when matching on a div class - made
+    difficult because C{class} is a reserved word in Python.
+
+    Example::
+        html = '''
+            <div>
+            Some text
+            <div class="grid">1 4 0 1 0</div>
+            <div class="graph">1,3 2,3 1,1</div>
+            <div>this &lt;div&gt; has no class</div>
+            </div>
+                
+        '''
+        div,div_end = makeHTMLTags("div")
+        div_grid = div().setParseAction(withClass("grid"))
+        
+        grid_expr = div_grid + SkipTo(div | div_end)("body")
+        for grid_header in grid_expr.searchString(html):
+            print(grid_header.body)
+        
+        div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
+        div_expr = div_any_type + SkipTo(div | div_end)("body")
+        for div_header in div_expr.searchString(html):
+            print(div_header.body)
+    prints::
+        1 4 0 1 0
+
+        1 4 0 1 0
+        1,3 2,3 1,1
+    """
+    classattr = "%s:class" % namespace if namespace else "class"
+    return withAttribute(**{classattr : classname})        
+
+opAssoc = _Constants()
+opAssoc.LEFT = object()
+opAssoc.RIGHT = object()
+
+def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
+    """
+    Helper method for constructing grammars of expressions made up of
+    operators working in a precedence hierarchy.  Operators may be unary or
+    binary, left- or right-associative.  Parse actions can also be attached
+    to operator expressions. The generated parser will also recognize the use 
+    of parentheses to override operator precedences (see example below).
+    
+    Note: if you define a deep operator list, you may see performance issues
+    when using infixNotation. See L{ParserElement.enablePackrat} for a
+    mechanism to potentially improve your parser performance.
+
+    Parameters:
+     - baseExpr - expression representing the most basic element of the nested expression
+     - opList - list of tuples, one for each operator precedence level in the
+      expression grammar; each tuple is of the form
+      (opExpr, numTerms, rightLeftAssoc, parseAction), where:
+       - opExpr is the pyparsing expression for the operator;
+          may also be a string, which will be converted to a Literal;
+          if numTerms is 3, opExpr is a tuple of two expressions, for the
+          two operators separating the 3 terms
+       - numTerms is the number of terms for this operator (must
+          be 1, 2, or 3)
+       - rightLeftAssoc is the indicator whether the operator is
+          right or left associative, using the pyparsing-defined
+          constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
+       - parseAction is the parse action to be associated with
+          expressions matching this operator expression (the
+          parse action tuple member may be omitted); if the parse action
+          is passed a tuple or list of functions, this is equivalent to
+          calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
+     - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
+     - rpar - expression for matching right-parentheses (default=C{Suppress(')')})
+
+    Example::
+        # simple example of four-function arithmetic with ints and variable names
+        integer = pyparsing_common.signed_integer
+        varname = pyparsing_common.identifier 
+        
+        arith_expr = infixNotation(integer | varname,
+            [
+            ('-', 1, opAssoc.RIGHT),
+            (oneOf('* /'), 2, opAssoc.LEFT),
+            (oneOf('+ -'), 2, opAssoc.LEFT),
+            ])
+        
+        arith_expr.runTests('''
+            5+3*6
+            (5+3)*6
+            -2--11
+            ''', fullDump=False)
+    prints::
+        5+3*6
+        [[5, '+', [3, '*', 6]]]
+
+        (5+3)*6
+        [[[5, '+', 3], '*', 6]]
+
+        -2--11
+        [[['-', 2], '-', ['-', 11]]]
+    """
+    ret = Forward()
+    lastExpr = baseExpr | ( lpar + ret + rpar )
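+    # each precedence level wraps the one below it: thisExpr matches either an
+    # operator expression built from lastExpr, or falls through to lastExpr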
+    for i,operDef in enumerate(opList):
+        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
+        termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
+        if arity == 3:
+            if opExpr is None or len(opExpr) != 2:
+                raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
+            opExpr1, opExpr2 = opExpr
+        thisExpr = Forward().setName(termName)
+        if rightLeftAssoc == opAssoc.LEFT:
+            if arity == 1:
+                matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
+            elif arity == 2:
+                if opExpr is not None:
+                    matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
+                else:
+                    matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
+            elif arity == 3:
+                matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
+                            Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
+            else:
+                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+        elif rightLeftAssoc == opAssoc.RIGHT:
+            if arity == 1:
+                # try to avoid LR with this extra test
+                if not isinstance(opExpr, Optional):
+                    opExpr = Optional(opExpr)
+                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
+            elif arity == 2:
+                if opExpr is not None:
+                    matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
+                else:
+                    matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
+            elif arity == 3:
+                matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
+                            Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
+            else:
+                raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+        else:
+            raise ValueError("operator must indicate right or left associativity")
+        if pa:
+            if isinstance(pa, (tuple, list)):
+                matchExpr.setParseAction(*pa)
+            else:
+                matchExpr.setParseAction(pa)
+        thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
+        lastExpr = thisExpr
+    ret <<= lastExpr
+    return ret
+
+operatorPrecedence = infixNotation
+"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
+
+dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
+sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
+quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
+                       Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
+unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
+
+def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
+    """
+    Helper method for defining nested lists enclosed in opening and closing
+    delimiters ("(" and ")" are the default).
+
+    Parameters:
+     - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
+     - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
+     - content - expression for items within the nested lists (default=C{None})
+     - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
+
+    If an expression is not provided for the content argument, the nested
+    expression will capture all whitespace-delimited content between delimiters
+    as a list of separate values.
+
+    Use the C{ignoreExpr} argument to define expressions that may contain
+    opening or closing characters that should not be treated as opening
+    or closing characters for nesting, such as quotedString or a comment
+    expression.  Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
+    The default is L{quotedString}, but if no expressions are to be ignored,
+    then pass C{None} for this argument.
+
+    Example::
+        data_type = oneOf("void int short long char float double")
+        decl_data_type = Combine(data_type + Optional(Word('*')))
+        ident = Word(alphas+'_', alphanums+'_')
+        number = pyparsing_common.number
+        arg = Group(decl_data_type + ident)
+        LPAR,RPAR = map(Suppress, "()")
+
+        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
+
+        c_function = (decl_data_type("type") 
+                      + ident("name")
+                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR 
+                      + code_body("body"))
+        c_function.ignore(cStyleComment)
+        
+        source_code = '''
+            int is_odd(int x) { 
+                return (x%2); 
+            }
+                
+            int dec_to_hex(char hchar) { 
+                if (hchar >= '0' && hchar <= '9') { 
+                    return (ord(hchar)-ord('0')); 
+                } else { 
+                    return (10+ord(hchar)-ord('A'));
+                } 
+            }
+        '''
+        for func in c_function.searchString(source_code):
+            print("%(name)s (%(type)s) args: %(args)s" % func)
+
+    prints::
+        is_odd (int) args: [['int', 'x']]
+        dec_to_hex (int) args: [['char', 'hchar']]
+    """
+    if opener == closer:
+        raise ValueError("opening and closing strings cannot be the same")
+    if content is None:
+        if isinstance(opener,basestring) and isinstance(closer,basestring):
+            if len(opener) == 1 and len(closer)==1:
+                if ignoreExpr is not None:
+                    content = (Combine(OneOrMore(~ignoreExpr +
+                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+                else:
+                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
+                                ).setParseAction(lambda t:t[0].strip()))
+            else:
+                if ignoreExpr is not None:
+                    content = (Combine(OneOrMore(~ignoreExpr + 
+                                    ~Literal(opener) + ~Literal(closer) +
+                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+                else:
+                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
+                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+                                ).setParseAction(lambda t:t[0].strip()))
+        else:
+            raise ValueError("opening and closing arguments must be strings if no content expression is given")
+    ret = Forward()
+    if ignoreExpr is not None:
+        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
+    else:
+        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content )  + Suppress(closer) )
+    ret.setName('nested %s%s expression' % (opener,closer))
+    return ret
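+
+# Minimal usage sketch (illustrative comment only): with the defaults,
+# nestedExpr parses whitespace-delimited tokens inside matching parentheses
+# into nested lists.
+#
+#     result = nestedExpr().parseString("(a (b c) d)")
+#     print(result.asList())  # -> [['a', ['b', 'c'], 'd']]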
+
+def indentedBlock(blockStatementExpr, indentStack, indent=True):
+    """
+    Helper method for defining space-delimited indentation blocks, such as
+    those used to define block statements in Python source code.
+
+    Parameters:
+     - blockStatementExpr - expression defining syntax of statement that
+            is repeated within the indented block
+     - indentStack - list created by caller to manage indentation stack
+            (multiple statementWithIndentedBlock expressions within a single grammar
+            should share a common indentStack)
+     - indent - boolean indicating whether block must be indented beyond
+            the current level; set to False for a block of left-most statements
+            (default=C{True})
+            (default=C{True})
+
+    A valid block must contain at least one C{blockStatement}.
+
+    Example::
+        data = '''
+        def A(z):
+          A1
+          B = 100
+          G = A2
+          A2
+          A3
+        B
+        def BB(a,b,c):
+          BB1
+          def BBA():
+            bba1
+            bba2
+            bba3
+        C
+        D
+        def spam(x,y):
+             def eggs(z):
+                 pass
+        '''
+
+
+        indentStack = [1]
+        stmt = Forward()
+
+        identifier = Word(alphas, alphanums)
+        funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
+        func_body = indentedBlock(stmt, indentStack)
+        funcDef = Group( funcDecl + func_body )
+
+        rvalue = Forward()
+        funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
+        rvalue << (funcCall | identifier | Word(nums))
+        assignment = Group(identifier + "=" + rvalue)
+        stmt << ( funcDef | assignment | identifier )
+
+        module_body = OneOrMore(stmt)
+
+        parseTree = module_body.parseString(data)
+        parseTree.pprint()
+    prints::
+        [['def',
+          'A',
+          ['(', 'z', ')'],
+          ':',
+          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
+         'B',
+         ['def',
+          'BB',
+          ['(', 'a', 'b', 'c', ')'],
+          ':',
+          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
+         'C',
+         'D',
+         ['def',
+          'spam',
+          ['(', 'x', 'y', ')'],
+          ':',
+          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] 
+    """
+    def checkPeerIndent(s,l,t):
+        if l >= len(s): return
+        curCol = col(l,s)
+        if curCol != indentStack[-1]:
+            if curCol > indentStack[-1]:
+                raise ParseFatalException(s,l,"illegal nesting")
+            raise ParseException(s,l,"not a peer entry")
+
+    def checkSubIndent(s,l,t):
+        curCol = col(l,s)
+        if curCol > indentStack[-1]:
+            indentStack.append( curCol )
+        else:
+            raise ParseException(s,l,"not a subentry")
+
+    def checkUnindent(s,l,t):
+        if l >= len(s): return
+        curCol = col(l,s)
+        if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
+            raise ParseException(s,l,"not an unindent")
+        indentStack.pop()
+
+    NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
+    INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
+    PEER   = Empty().setParseAction(checkPeerIndent).setName('')
+    UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
+    if indent:
+        smExpr = Group( Optional(NL) +
+            #~ FollowedBy(blockStatementExpr) +
+            INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
+    else:
+        smExpr = Group( Optional(NL) +
+            (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
+    blockStatementExpr.ignore(_bslash + LineEnd())
+    return smExpr.setName('indented block')
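+
+# Usage note (illustrative comment only): every indentedBlock expression in
+# a single grammar must share one caller-owned indentStack list, seeded with
+# [1], as in the docstring example above:
+#
+#     indentStack = [1]
+#     func_body = indentedBlock(stmt, indentStack)
+#     class_body = indentedBlock(stmt, indentStack)  # same stack, not a copy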
+
+alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
+punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
+
+anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
+_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
+commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
+def replaceHTMLEntity(t):
+    """Helper parser action to replace common HTML entities with their special characters"""
+    return _htmlEntityMap.get(t.entity)
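+
+# Minimal usage sketch (illustrative comment only): pair replaceHTMLEntity
+# with commonHTMLEntity to decode entities in running text.
+#
+#     commonHTMLEntity.setParseAction(replaceHTMLEntity)
+#     print(commonHTMLEntity.transformString("x &lt; y &amp;&amp; y &gt; z"))
+#     # -> x < y && y > z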
+
+# it's easy to get these comment structures wrong - they're very common, so may as well make them available
+cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
+"Comment of the form C{/* ... */}"
+
+htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
+"Comment of the form C{<!-- ... -->}"
+
+restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
+dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
+"Comment of the form C{// ... (to end of line)}"
+
+cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
+"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
+
+javaStyleComment = cppStyleComment
+"Same as C{L{cppStyleComment}}"
+
+pythonStyleComment = Regex(r"#.*").setName("Python style comment")
+"Comment of the form C{# ... (to end of line)}"
+
+_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
+                                  Optional( Word(" \t") +
+                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
+commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
+"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
+   This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
+
+# some other useful expressions - using lower-case class name since we are really using this as a namespace
+class pyparsing_common:
+    """
+    Here are some common low-level expressions that may be useful in jump-starting parser development:
+     - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
+     - common L{programming identifiers<identifier>}
+     - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
+     - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
+     - L{UUID<uuid>}
+     - L{comma-separated list<comma_separated_list>}
+    Parse actions:
+     - C{L{convertToInteger}}
+     - C{L{convertToFloat}}
+     - C{L{convertToDate}}
+     - C{L{convertToDatetime}}
+     - C{L{stripHTMLTags}}
+     - C{L{upcaseTokens}}
+     - C{L{downcaseTokens}}
+
+    Example::
+        pyparsing_common.number.runTests('''
+            # any int or real number, returned as the appropriate type
+            100
+            -100
+            +100
+            3.14159
+            6.02e23
+            1e-12
+            ''')
+
+        pyparsing_common.fnumber.runTests('''
+            # any int or real number, returned as float
+            100
+            -100
+            +100
+            3.14159
+            6.02e23
+            1e-12
+            ''')
+
+        pyparsing_common.hex_integer.runTests('''
+            # hex numbers
+            100
+            FF
+            ''')
+
+        pyparsing_common.fraction.runTests('''
+            # fractions
+            1/2
+            -3/4
+            ''')
+
+        pyparsing_common.mixed_integer.runTests('''
+            # mixed fractions
+            1
+            1/2
+            -3/4
+            1-3/4
+            ''')
+
+        import uuid
+        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+        pyparsing_common.uuid.runTests('''
+            # uuid
+            12345678-1234-5678-1234-567812345678
+            ''')
+    prints::
+        # any int or real number, returned as the appropriate type
+        100
+        [100]
+
+        -100
+        [-100]
+
+        +100
+        [100]
+
+        3.14159
+        [3.14159]
+
+        6.02e23
+        [6.02e+23]
+
+        1e-12
+        [1e-12]
+
+        # any int or real number, returned as float
+        100
+        [100.0]
+
+        -100
+        [-100.0]
+
+        +100
+        [100.0]
+
+        3.14159
+        [3.14159]
+
+        6.02e23
+        [6.02e+23]
+
+        1e-12
+        [1e-12]
+
+        # hex numbers
+        100
+        [256]
+
+        FF
+        [255]
+
+        # fractions
+        1/2
+        [0.5]
+
+        -3/4
+        [-0.75]
+
+        # mixed fractions
+        1
+        [1]
+
+        1/2
+        [0.5]
+
+        -3/4
+        [-0.75]
+
+        1-3/4
+        [1.75]
+
+        # uuid
+        12345678-1234-5678-1234-567812345678
+        [UUID('12345678-1234-5678-1234-567812345678')]
+    """
+
+    convertToInteger = tokenMap(int)
+    """
+    Parse action for converting parsed integers to Python int
+    """
+
+    convertToFloat = tokenMap(float)
+    """
+    Parse action for converting parsed numbers to Python float
+    """
+
+    integer = Word(nums).setName("integer").setParseAction(convertToInteger)
+    """expression that parses an unsigned integer, returns an int"""
+
+    hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
+    """expression that parses a hexadecimal integer, returns an int"""
+
+    signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
+    """expression that parses an integer with optional leading sign, returns an int"""
+
+    fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
+    """fractional expression of an integer divided by an integer, returns a float"""
+    fraction.addParseAction(lambda t: t[0]/t[-1])
+
+    mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
+    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
+    mixed_integer.addParseAction(sum)
+
+    real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
+    """expression that parses a floating point number and returns a float"""
+
+    sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
+    """expression that parses a floating point number with optional scientific notation and returns a float"""
+
+    # streamlining this expression makes the docs nicer-looking
+    number = (sci_real | real | signed_integer).streamline()
+    """any numeric expression, returns the corresponding Python type"""
+
+    fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
+    """any int or real number, returned as float"""
+    
+    identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
+    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
+    
+    ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
+    "IPv4 address (C{0.0.0.0 - 255.255.255.255})"
+
+    _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
+    _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
+    _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
+    _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
+    _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
+    ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
+    "IPv6 address (long, short, or mixed form)"
+    
+    mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
+    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
+
+    @staticmethod
+    def convertToDate(fmt="%Y-%m-%d"):
+        """
+        Helper to create a parse action for converting parsed date string to Python datetime.date
+
+        Parameters:
+         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
+
+        Example::
+            date_expr = pyparsing_common.iso8601_date.copy()
+            date_expr.setParseAction(pyparsing_common.convertToDate())
+            print(date_expr.parseString("1999-12-31"))
+        prints::
+            [datetime.date(1999, 12, 31)]
+        """
+        def cvt_fn(s,l,t):
+            try:
+                return datetime.strptime(t[0], fmt).date()
+            except ValueError as ve:
+                raise ParseException(s, l, str(ve))
+        return cvt_fn
+
+    @staticmethod
+    def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
+        """
+        Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
+
+        Parameters:
+         - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
+
+        Example::
+            dt_expr = pyparsing_common.iso8601_datetime.copy()
+            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
+            print(dt_expr.parseString("1999-12-31T23:59:59.999"))
+        prints::
+            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
+        """
+        def cvt_fn(s,l,t):
+            try:
+                return datetime.strptime(t[0], fmt)
+            except ValueError as ve:
+                raise ParseException(s, l, str(ve))
+        return cvt_fn
+
+    iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
+    "ISO8601 date (C{yyyy-mm-dd})"
+
+    iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
+    "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
+
+    uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
+    "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
+
+    _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
+    @staticmethod
+    def stripHTMLTags(s, l, tokens):
+        """
+        Parse action to remove HTML tags from web page HTML source
+
+        Example::
+            # strip HTML links from normal text 
+            text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
+            td,td_end = makeHTMLTags("TD")
+            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
+            
+            print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
+        """
+        return pyparsing_common._html_stripper.transformString(tokens[0])
+
+    _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') 
+                                        + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
+    comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
+    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
+
+    upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
+    """Parse action to convert tokens to upper case."""
+
+    downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
+    """Parse action to convert tokens to lower case."""
+
+
+if __name__ == "__main__":
+
+    selectToken    = CaselessLiteral("select")
+    fromToken      = CaselessLiteral("from")
+
+    ident          = Word(alphas, alphanums + "_$")
+
+    columnName     = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+    columnNameList = Group(delimitedList(columnName)).setName("columns")
+    columnSpec     = ('*' | columnNameList)
+
+    tableName      = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+    tableNameList  = Group(delimitedList(tableName)).setName("tables")
+    
+    simpleSQL      = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
+
+    # demo runTests method, including embedded comments in test string
+    simpleSQL.runTests("""
+        # '*' as column list and dotted table name
+        select * from SYS.XYZZY
+
+        # caseless match on "SELECT", and casts back to "select"
+        SELECT * from XYZZY, ABC
+
+        # list of column names, and mixed case SELECT keyword
+        Select AA,BB,CC from Sys.dual
+
+        # multiple tables
+        Select A, B, C from Sys.dual, Table2
+
+        # invalid SELECT keyword - should fail
+        Xelect A, B, C from Sys.dual
+
+        # incomplete command - should fail
+        Select
+
+        # invalid column name - should fail
+        Select ^^^ frox Sys.dual
+
+        """)
+
+    pyparsing_common.number.runTests("""
+        100
+        -100
+        +100
+        3.14159
+        6.02e23
+        1e-12
+        """)
+
+    # any int or real number, returned as float
+    pyparsing_common.fnumber.runTests("""
+        100
+        -100
+        +100
+        3.14159
+        6.02e23
+        1e-12
+        """)
+
+    pyparsing_common.hex_integer.runTests("""
+        100
+        FF
+        """)
+
+    import uuid
+    pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+    pyparsing_common.uuid.runTests("""
+        12345678-1234-5678-1234-567812345678
+        """)
diff --git a/lib/python3.7/site-packages/setuptools/_vendor/six.py b/lib/python3.7/site-packages/setuptools/_vendor/six.py
new file mode 100644
index 0000000000000000000000000000000000000000..190c0239cd7d7af82a6e0cbc8d68053fa2e3dfaf
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/_vendor/six.py
@@ -0,0 +1,868 @@
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+# Copyright (c) 2010-2015 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.10.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+    string_types = str,
+    integer_types = int,
+    class_types = type,
+    text_type = str
+    binary_type = bytes
+
+    MAXSIZE = sys.maxsize
+else:
+    string_types = basestring,
+    integer_types = (int, long)
+    class_types = (type, types.ClassType)
+    text_type = unicode
+    binary_type = str
+
+    if sys.platform.startswith("java"):
+        # Jython always uses 32 bits.
+        MAXSIZE = int((1 << 31) - 1)
+    else:
+        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+        class X(object):
+
+            def __len__(self):
+                return 1 << 31
+        try:
+            len(X())
+        except OverflowError:
+            # 32-bit
+            MAXSIZE = int((1 << 31) - 1)
+        else:
+            # 64-bit
+            MAXSIZE = int((1 << 63) - 1)
+        del X
+
+
+def _add_doc(func, doc):
+    """Add documentation to a function."""
+    func.__doc__ = doc
+
+
+def _import_module(name):
+    """Import module, returning the module after the last dot."""
+    __import__(name)
+    return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+    def __init__(self, name):
+        self.name = name
+
+    def __get__(self, obj, tp):
+        result = self._resolve()
+        setattr(obj, self.name, result)  # Invokes __set__.
+        try:
+            # This is a bit ugly, but it avoids running this again by
+            # removing this descriptor.
+            delattr(obj.__class__, self.name)
+        except AttributeError:
+            pass
+        return result
+
+
+class MovedModule(_LazyDescr):
+
+    def __init__(self, name, old, new=None):
+        super(MovedModule, self).__init__(name)
+        if PY3:
+            if new is None:
+                new = name
+            self.mod = new
+        else:
+            self.mod = old
+
+    def _resolve(self):
+        return _import_module(self.mod)
+
+    def __getattr__(self, attr):
+        _module = self._resolve()
+        value = getattr(_module, attr)
+        setattr(self, attr, value)
+        return value
+
+
+class _LazyModule(types.ModuleType):
+
+    def __init__(self, name):
+        super(_LazyModule, self).__init__(name)
+        self.__doc__ = self.__class__.__doc__
+
+    def __dir__(self):
+        attrs = ["__doc__", "__name__"]
+        attrs += [attr.name for attr in self._moved_attributes]
+        return attrs
+
+    # Subclasses should override this
+    _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+        super(MovedAttribute, self).__init__(name)
+        if PY3:
+            if new_mod is None:
+                new_mod = name
+            self.mod = new_mod
+            if new_attr is None:
+                if old_attr is None:
+                    new_attr = name
+                else:
+                    new_attr = old_attr
+            self.attr = new_attr
+        else:
+            self.mod = old_mod
+            if old_attr is None:
+                old_attr = name
+            self.attr = old_attr
+
+    def _resolve(self):
+        module = _import_module(self.mod)
+        return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+    """
+    A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP 302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python 3.
+    """
+
+    def __init__(self, six_module_name):
+        self.name = six_module_name
+        self.known_modules = {}
+
+    def _add_module(self, mod, *fullnames):
+        for fullname in fullnames:
+            self.known_modules[self.name + "." + fullname] = mod
+
+    def _get_module(self, fullname):
+        return self.known_modules[self.name + "." + fullname]
+
+    def find_module(self, fullname, path=None):
+        if fullname in self.known_modules:
+            return self
+        return None
+
+    def __get_module(self, fullname):
+        try:
+            return self.known_modules[fullname]
+        except KeyError:
+            raise ImportError("This loader does not know module " + fullname)
+
+    def load_module(self, fullname):
+        try:
+            # in case of a reload
+            return sys.modules[fullname]
+        except KeyError:
+            pass
+        mod = self.__get_module(fullname)
+        if isinstance(mod, MovedModule):
+            mod = mod._resolve()
+        else:
+            mod.__loader__ = self
+        sys.modules[fullname] = mod
+        return mod
+
+    def is_package(self, fullname):
+        """
+        Return true if the named module is a package.
+
+        We need this method to get correct spec objects with
+        Python 3.4 (see PEP451)
+        """
+        return hasattr(self.__get_module(fullname), "__path__")
+
+    def get_code(self, fullname):
+        """Return None
+
+        Required, if is_package is implemented"""
+        self.__get_module(fullname)  # eventually raises ImportError
+        return None
+    get_source = get_code  # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+    """Lazy loading of moved objects"""
+    __path__ = []  # mark as package
+
+
+_moved_attributes = [
+    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+    MovedAttribute("intern", "__builtin__", "sys"),
+    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+    MovedAttribute("reduce", "__builtin__", "functools"),
+    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+    MovedAttribute("StringIO", "StringIO", "io"),
+    MovedAttribute("UserDict", "UserDict", "collections"),
+    MovedAttribute("UserList", "UserList", "collections"),
+    MovedAttribute("UserString", "UserString", "collections"),
+    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+    MovedModule("builtins", "__builtin__"),
+    MovedModule("configparser", "ConfigParser"),
+    MovedModule("copyreg", "copy_reg"),
+    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+    MovedModule("http_cookies", "Cookie", "http.cookies"),
+    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+    MovedModule("html_parser", "HTMLParser", "html.parser"),
+    MovedModule("http_client", "httplib", "http.client"),
+    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+    MovedModule("cPickle", "cPickle", "pickle"),
+    MovedModule("queue", "Queue"),
+    MovedModule("reprlib", "repr"),
+    MovedModule("socketserver", "SocketServer"),
+    MovedModule("_thread", "thread", "_thread"),
+    MovedModule("tkinter", "Tkinter"),
+    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+    MovedModule("tkinter_colorchooser", "tkColorChooser",
+                "tkinter.colorchooser"),
+    MovedModule("tkinter_commondialog", "tkCommonDialog",
+                "tkinter.commondialog"),
+    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+                "tkinter.simpledialog"),
+    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+    _moved_attributes += [
+        MovedModule("winreg", "_winreg"),
+    ]
+
+for attr in _moved_attributes:
+    setattr(_MovedItems, attr.name, attr)
+    if isinstance(attr, MovedModule):
+        _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+    MovedAttribute("quote", "urllib", "urllib.parse"),
+    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("unquote", "urllib", "urllib.parse"),
+    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+    MovedAttribute("urlencode", "urllib", "urllib.parse"),
+    MovedAttribute("splitquery", "urllib", "urllib.parse"),
+    MovedAttribute("splittag", "urllib", "urllib.parse"),
+    MovedAttribute("splituser", "urllib", "urllib.parse"),
+    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+    setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+                      "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+    MovedAttribute("URLError", "urllib2", "urllib.error"),
+    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+    setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+                      "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+    MovedAttribute("urlopen", "urllib2", "urllib.request"),
+    MovedAttribute("install_opener", "urllib2", "urllib.request"),
+    MovedAttribute("build_opener", "urllib2", "urllib.request"),
+    MovedAttribute("pathname2url", "urllib", "urllib.request"),
+    MovedAttribute("url2pathname", "urllib", "urllib.request"),
+    MovedAttribute("getproxies", "urllib", "urllib.request"),
+    MovedAttribute("Request", "urllib2", "urllib.request"),
+    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+    MovedAttribute("URLopener", "urllib", "urllib.request"),
+    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+    setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+                      "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+    MovedAttribute("addbase", "urllib", "urllib.response"),
+    MovedAttribute("addclosehook", "urllib", "urllib.response"),
+    MovedAttribute("addinfo", "urllib", "urllib.response"),
+    MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+    setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+                      "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+                      "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+    __path__ = []  # mark as package
+    parse = _importer._get_module("moves.urllib_parse")
+    error = _importer._get_module("moves.urllib_error")
+    request = _importer._get_module("moves.urllib_request")
+    response = _importer._get_module("moves.urllib_response")
+    robotparser = _importer._get_module("moves.urllib_robotparser")
+
+    def __dir__(self):
+        return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+                      "moves.urllib")
+
+
+def add_move(move):
+    """Add an item to six.moves."""
+    setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+    """Remove item from six.moves."""
+    try:
+        delattr(_MovedItems, name)
+    except AttributeError:
+        try:
+            del moves.__dict__[name]
+        except KeyError:
+            raise AttributeError("no such move, %r" % (name,))
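+
+# Usage sketch (illustrative comment only; the move shown here is
+# hypothetical and not registered by default):
+#
+#     add_move(MovedModule("dbm_gnu_alias", "gdbm", "dbm.gnu"))
+#     from six.moves import dbm_gnu_alias
+#     remove_move("dbm_gnu_alias")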
+
+
+if PY3:
+    _meth_func = "__func__"
+    _meth_self = "__self__"
+
+    _func_closure = "__closure__"
+    _func_code = "__code__"
+    _func_defaults = "__defaults__"
+    _func_globals = "__globals__"
+else:
+    _meth_func = "im_func"
+    _meth_self = "im_self"
+
+    _func_closure = "func_closure"
+    _func_code = "func_code"
+    _func_defaults = "func_defaults"
+    _func_globals = "func_globals"
+
+
+try:
+    advance_iterator = next
+except NameError:
+    def advance_iterator(it):
+        return it.next()
+next = advance_iterator
+
+
+try:
+    callable = callable
+except NameError:
+    def callable(obj):
+        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+    def get_unbound_function(unbound):
+        return unbound
+
+    create_bound_method = types.MethodType
+
+    def create_unbound_method(func, cls):
+        return func
+
+    Iterator = object
+else:
+    def get_unbound_function(unbound):
+        return unbound.im_func
+
+    def create_bound_method(func, obj):
+        return types.MethodType(func, obj, obj.__class__)
+
+    def create_unbound_method(func, cls):
+        return types.MethodType(func, None, cls)
+
+    class Iterator(object):
+
+        def next(self):
+            return type(self).__next__(self)
+
+    callable = callable
+_add_doc(get_unbound_function,
+         """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+    def iterkeys(d, **kw):
+        return iter(d.keys(**kw))
+
+    def itervalues(d, **kw):
+        return iter(d.values(**kw))
+
+    def iteritems(d, **kw):
+        return iter(d.items(**kw))
+
+    def iterlists(d, **kw):
+        return iter(d.lists(**kw))
+
+    viewkeys = operator.methodcaller("keys")
+
+    viewvalues = operator.methodcaller("values")
+
+    viewitems = operator.methodcaller("items")
+else:
+    def iterkeys(d, **kw):
+        return d.iterkeys(**kw)
+
+    def itervalues(d, **kw):
+        return d.itervalues(**kw)
+
+    def iteritems(d, **kw):
+        return d.iteritems(**kw)
+
+    def iterlists(d, **kw):
+        return d.iterlists(**kw)
+
+    viewkeys = operator.methodcaller("viewkeys")
+
+    viewvalues = operator.methodcaller("viewvalues")
+
+    viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+         "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+         "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+    def b(s):
+        return s.encode("latin-1")
+
+    def u(s):
+        return s
+    unichr = chr
+    import struct
+    int2byte = struct.Struct(">B").pack
+    del struct
+    byte2int = operator.itemgetter(0)
+    indexbytes = operator.getitem
+    iterbytes = iter
+    import io
+    StringIO = io.StringIO
+    BytesIO = io.BytesIO
+    _assertCountEqual = "assertCountEqual"
+    if sys.version_info[1] <= 1:
+        _assertRaisesRegex = "assertRaisesRegexp"
+        _assertRegex = "assertRegexpMatches"
+    else:
+        _assertRaisesRegex = "assertRaisesRegex"
+        _assertRegex = "assertRegex"
+else:
+    def b(s):
+        return s
+    # Workaround for standalone backslash
+
+    def u(s):
+        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+    unichr = unichr
+    int2byte = chr
+
+    def byte2int(bs):
+        return ord(bs[0])
+
+    def indexbytes(buf, i):
+        return ord(buf[i])
+    iterbytes = functools.partial(itertools.imap, ord)
+    import StringIO
+    StringIO = BytesIO = StringIO.StringIO
+    _assertCountEqual = "assertItemsEqual"
+    _assertRaisesRegex = "assertRaisesRegexp"
+    _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+    return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+    return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+    exec_ = getattr(moves.builtins, "exec")
+
+    def reraise(tp, value, tb=None):
+        if value is None:
+            value = tp()
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+else:
+    def exec_(_code_, _globs_=None, _locs_=None):
+        """Execute code in a namespace."""
+        if _globs_ is None:
+            frame = sys._getframe(1)
+            _globs_ = frame.f_globals
+            if _locs_ is None:
+                _locs_ = frame.f_locals
+            del frame
+        elif _locs_ is None:
+            _locs_ = _globs_
+        exec("""exec _code_ in _globs_, _locs_""")
+
+    exec_("""def reraise(tp, value, tb=None):
+    raise tp, value, tb
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+    exec_("""def raise_from(value, from_value):
+    if from_value is None:
+        raise value
+    raise value from from_value
+""")
+elif sys.version_info[:2] > (3, 2):
+    exec_("""def raise_from(value, from_value):
+    raise value from from_value
+""")
+else:
+    def raise_from(value, from_value):
+        raise value
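+
+# Usage sketch (illustrative comment only): chain a new exception to its
+# cause wherever the running Python supports "raise ... from ...".
+#
+#     try:
+#         {}["missing"]
+#     except KeyError as exc:
+#         raise_from(ValueError("bad configuration key"), exc)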
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+    def print_(*args, **kwargs):
+        """The new-style print function for Python 2.4 and 2.5."""
+        fp = kwargs.pop("file", sys.stdout)
+        if fp is None:
+            return
+
+        def write(data):
+            if not isinstance(data, basestring):
+                data = str(data)
+            # If the file has an encoding, encode unicode with it.
+            if (isinstance(fp, file) and
+                    isinstance(data, unicode) and
+                    fp.encoding is not None):
+                errors = getattr(fp, "errors", None)
+                if errors is None:
+                    errors = "strict"
+                data = data.encode(fp.encoding, errors)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+if sys.version_info[:2] < (3, 3):
+    _print = print_
+
+    def print_(*args, **kwargs):
+        fp = kwargs.get("file", sys.stdout)
+        flush = kwargs.pop("flush", False)
+        _print(*args, **kwargs)
+        if flush and fp is not None:
+            fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+              updated=functools.WRAPPER_UPDATES):
+        def wrapper(f):
+            f = functools.wraps(wrapped, assigned, updated)(f)
+            f.__wrapped__ = wrapped
+            return f
+        return wrapper
+else:
+    wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(meta):
+
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+    return type.__new__(metaclass, 'temporary_class', (), {})
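+
+# Usage sketch (illustrative comment only; Meta is a placeholder name):
+#
+#     class Meta(type):
+#         pass
+#
+#     class MyClass(with_metaclass(Meta, object)):
+#         pass
+#
+#     assert type(MyClass) is Meta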
+
+
+def add_metaclass(metaclass):
+    """Class decorator for creating a class with a metaclass."""
+    def wrapper(cls):
+        orig_vars = cls.__dict__.copy()
+        slots = orig_vars.get('__slots__')
+        if slots is not None:
+            if isinstance(slots, str):
+                slots = [slots]
+            for slots_var in slots:
+                orig_vars.pop(slots_var)
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
+        return metaclass(cls.__name__, cls.__bases__, orig_vars)
+    return wrapper
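+
+# Usage sketch (illustrative comment only): the decorator form avoids the
+# temporary base class that with_metaclass creates.
+#
+#     @add_metaclass(Meta)
+#     class MyOtherClass(object):
+#         pass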
+
+
+def python_2_unicode_compatible(klass):
+    """
+    A decorator that defines __unicode__ and __str__ methods under Python 2.
+    Under Python 3 it does nothing.
+
+    To support Python 2 and 3 with a single code base, define a __str__ method
+    returning text and apply this decorator to the class.
+    """
+    if PY2:
+        if '__str__' not in klass.__dict__:
+            raise ValueError("@python_2_unicode_compatible cannot be applied "
+                             "to %s because it doesn't define __str__()." %
+                             klass.__name__)
+        klass.__unicode__ = klass.__str__
+        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+    return klass
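+
+# Usage sketch (illustrative comment only): define __str__ returning text
+# and let the decorator supply __unicode__/__str__ on Python 2.
+#
+#     @python_2_unicode_compatible
+#     class Greeting(object):
+#         def __str__(self):
+#             return u"hello"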
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = []  # required for PEP 302 and PEP 451
+__package__ = __name__  # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+    for i, importer in enumerate(sys.meta_path):
+        # Here's some real nastiness: Another "instance" of the six module might
+        # be floating around. Therefore, we can't use isinstance() to check for
+        # the six meta path importer, since the other six instance will have
+        # inserted an importer with different class.
+        if (type(importer).__name__ == "_SixMetaPathImporter" and
+                importer.name == __name__):
+            del sys.meta_path[i]
+            break
+    del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/lib/python3.7/site-packages/setuptools/archive_util.py b/lib/python3.7/site-packages/setuptools/archive_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..81436044d995ff430334a7ef324b08e616f4b7a7
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/archive_util.py
@@ -0,0 +1,173 @@
+"""Utilities for extracting common archive formats"""
+
+import zipfile
+import tarfile
+import os
+import shutil
+import posixpath
+import contextlib
+from distutils.errors import DistutilsError
+
+from pkg_resources import ensure_directory
+
+__all__ = [
+    "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
+    "UnrecognizedFormat", "extraction_drivers", "unpack_directory",
+]
+
+
+class UnrecognizedFormat(DistutilsError):
+    """Couldn't recognize the archive type"""
+
+
+def default_filter(src, dst):
+    """The default progress/filter callback; returns True for all files"""
+    return dst
+
+
+def unpack_archive(filename, extract_dir, progress_filter=default_filter,
+        drivers=None):
+    """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
+
+    `progress_filter` is a function taking two arguments: a source path
+    internal to the archive ('/'-separated), and a filesystem path where it
+    will be extracted.  The callback must return the desired extract path
+    (which may be the same as the one passed in), or else ``None`` to skip
+    that file or directory.  The callback can thus be used to report on the
+    progress of the extraction, as well as to filter the items extracted or
+    alter their extraction paths.
+
+    `drivers`, if supplied, must be a non-empty sequence of functions with the
+    same signature as this function (minus the `drivers` argument), that raise
+    ``UnrecognizedFormat`` if they do not support extracting the designated
+    archive type.  The `drivers` are tried in sequence until one is found that
+    does not raise an error, or until all are exhausted (in which case
+    ``UnrecognizedFormat`` is raised).  If you do not supply a sequence of
+    drivers, the module's ``extraction_drivers`` constant will be used, which
+    means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
+    order.
+    """
+    for driver in drivers or extraction_drivers:
+        try:
+            driver(filename, extract_dir, progress_filter)
+        except UnrecognizedFormat:
+            continue
+        else:
+            return
+    else:
+        raise UnrecognizedFormat(
+            "Not a recognized archive type: %s" % filename
+        )
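+
+# Usage sketch (illustrative comment only; the file paths are hypothetical):
+# a progress filter can skip entries by returning None.
+#
+#     def skip_docs(src, dst):
+#         return None if src.startswith("docs/") else dst
+#
+#     unpack_archive("dist/pkg-1.0.tar.gz", "build/pkg", skip_docs)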
+
+
+def unpack_directory(filename, extract_dir, progress_filter=default_filter):
+    """"Unpack" a directory, using the same interface as for archives
+
+    Raises ``UnrecognizedFormat`` if `filename` is not a directory
+    """
+    if not os.path.isdir(filename):
+        raise UnrecognizedFormat("%s is not a directory" % filename)
+
+    paths = {
+        filename: ('', extract_dir),
+    }
+    for base, dirs, files in os.walk(filename):
+        src, dst = paths[base]
+        for d in dirs:
+            paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
+        for f in files:
+            target = os.path.join(dst, f)
+            target = progress_filter(src + f, target)
+            if not target:
+                # skip entries the progress filter rejected
+                continue
+            ensure_directory(target)
+            f = os.path.join(base, f)
+            shutil.copyfile(f, target)
+            shutil.copystat(f, target)
+
+
+def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
+    """Unpack zip `filename` to `extract_dir`
+
+    Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
+    by ``zipfile.is_zipfile()``).  See ``unpack_archive()`` for an explanation
+    of the `progress_filter` argument.
+    """
+
+    if not zipfile.is_zipfile(filename):
+        raise UnrecognizedFormat("%s is not a zip file" % (filename,))
+
+    with zipfile.ZipFile(filename) as z:
+        for info in z.infolist():
+            name = info.filename
+
+            # don't extract absolute paths or ones with .. in them
+            if name.startswith('/') or '..' in name.split('/'):
+                continue
+
+            target = os.path.join(extract_dir, *name.split('/'))
+            target = progress_filter(name, target)
+            if not target:
+                continue
+            if name.endswith('/'):
+                # directory
+                ensure_directory(target)
+            else:
+                # file
+                ensure_directory(target)
+                data = z.read(info.filename)
+                with open(target, 'wb') as f:
+                    f.write(data)
+            unix_attributes = info.external_attr >> 16
+            if unix_attributes:
+                os.chmod(target, unix_attributes)
+
+
+def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
+    """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
+
+    Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
+    by ``tarfile.open()``).  See ``unpack_archive()`` for an explanation
+    of the `progress_filter` argument.
+    """
+    try:
+        tarobj = tarfile.open(filename)
+    except tarfile.TarError:
+        raise UnrecognizedFormat(
+            "%s is not a compressed or uncompressed tar file" % (filename,)
+        )
+    with contextlib.closing(tarobj):
+        # don't do any chowning!
+        tarobj.chown = lambda *args: None
+        for member in tarobj:
+            name = member.name
+            # don't extract absolute paths or ones with .. in them
+            if not name.startswith('/') and '..' not in name.split('/'):
+                prelim_dst = os.path.join(extract_dir, *name.split('/'))
+
+                # resolve any links and extract the link targets as normal
+                # files
+                while member is not None and (member.islnk() or member.issym()):
+                    linkpath = member.linkname
+                    if member.issym():
+                        base = posixpath.dirname(member.name)
+                        linkpath = posixpath.join(base, linkpath)
+                        linkpath = posixpath.normpath(linkpath)
+                    member = tarobj._getmember(linkpath)
+
+                if member is not None and (member.isfile() or member.isdir()):
+                    final_dst = progress_filter(name, prelim_dst)
+                    if final_dst:
+                        if final_dst.endswith(os.sep):
+                            final_dst = final_dst[:-1]
+                        try:
+                            # XXX Ugh
+                            tarobj._extract_member(member, final_dst)
+                        except tarfile.ExtractError:
+                            # chown/chmod/mkfifo/mknode/makedev failed
+                            pass
+        return True
+
+
+extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
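+
+
+# Illustrative sketch: driving ``unpack_archive`` with a custom
+# ``progress_filter``.  The archive and target paths are hypothetical.
+if __name__ == '__main__':
+    def _verbose_filter(src, dst):
+        # Print each member as it is considered; returning the destination
+        # unchanged keeps the entry, returning a falsy value skips it.
+        print('extracting %s -> %s' % (src, dst))
+        return dst
+
+    unpack_archive('example-1.0.tar.gz', 'build/example', _verbose_filter)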
diff --git a/lib/python3.7/site-packages/setuptools/build_meta.py b/lib/python3.7/site-packages/setuptools/build_meta.py
new file mode 100644
index 0000000000000000000000000000000000000000..e40904a5dfdcf60d73df7d68c3cccaa64f8d2076
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/build_meta.py
@@ -0,0 +1,254 @@
+"""A PEP 517 interface to setuptools
+
+Previously, when a user or a command line tool (let's call it a "frontend")
+needed to make a request of setuptools to take a certain action, for
+example, generating a list of installation requirements, the frontend
+would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
+
+PEP 517 defines a different method of interfacing with setuptools. Rather
+than calling "setup.py" directly, the frontend should:
+
+  1. Set the current directory to the directory with a setup.py file
+  2. Import this module into a safe python interpreter (one in which it
+     is acceptable for setuptools to set global variables or crash hard).
+  3. Call one of the functions defined in PEP 517.
+
+What each function does is defined in PEP 517. However, here is a "casual"
+definition of the functions (this definition should not be relied on for
+bug reports or API stability):
+
+  - `build_wheel`: build a wheel in the folder and return the basename
+  - `get_requires_for_build_wheel`: get the `setup_requires` to build
+  - `prepare_metadata_for_build_wheel`: get the `install_requires`
+  - `build_sdist`: build an sdist in the folder and return the basename
+  - `get_requires_for_build_sdist`: get the `setup_requires` to build
+
+Again, this is not a formal definition! Just a "taste" of the module.
+"""
+
+import io
+import os
+import sys
+import tokenize
+import shutil
+import contextlib
+
+import setuptools
+import distutils
+from setuptools.py31compat import TemporaryDirectory
+
+from pkg_resources import parse_requirements
+
+__all__ = ['get_requires_for_build_sdist',
+           'get_requires_for_build_wheel',
+           'prepare_metadata_for_build_wheel',
+           'build_wheel',
+           'build_sdist',
+           '__legacy__',
+           'SetupRequirementsError']
+
+class SetupRequirementsError(BaseException):
+    def __init__(self, specifiers):
+        self.specifiers = specifiers
+
+
+class Distribution(setuptools.dist.Distribution):
+    def fetch_build_eggs(self, specifiers):
+        specifier_list = list(map(str, parse_requirements(specifiers)))
+
+        raise SetupRequirementsError(specifier_list)
+
+    @classmethod
+    @contextlib.contextmanager
+    def patch(cls):
+        """
+        Replace distutils.core.Distribution with this class
+        for the duration of this context.
+        """
+        orig = distutils.core.Distribution
+        distutils.core.Distribution = cls
+        try:
+            yield
+        finally:
+            distutils.core.Distribution = orig
+
+
+def _to_str(s):
+    """
+    Convert a filename to a string (on Python 2, explicitly
+    a byte string, not Unicode) as distutils checks for the
+    exact type str.
+    """
+    if sys.version_info[0] == 2 and not isinstance(s, str):
+        # Assume it's Unicode, as that's what the PEP says
+        # should be provided.
+        return s.encode(sys.getfilesystemencoding())
+    return s
+
+
+def _get_immediate_subdirectories(a_dir):
+    return [name for name in os.listdir(a_dir)
+            if os.path.isdir(os.path.join(a_dir, name))]
+
+
+def _file_with_extension(directory, extension):
+    matching = (
+        f for f in os.listdir(directory)
+        if f.endswith(extension)
+    )
+    file, = matching
+    return file
+
+
+def _open_setup_script(setup_script):
+    if not os.path.exists(setup_script):
+        # Supply a default setup.py
+        return io.StringIO(u"from setuptools import setup; setup()")
+
+    return getattr(tokenize, 'open', open)(setup_script)
+
+
+class _BuildMetaBackend(object):
+
+    def _fix_config(self, config_settings):
+        config_settings = config_settings or {}
+        config_settings.setdefault('--global-option', [])
+        return config_settings
+
+    def _get_build_requires(self, config_settings, requirements):
+        config_settings = self._fix_config(config_settings)
+
+        sys.argv = sys.argv[:1] + ['egg_info'] + \
+            config_settings["--global-option"]
+        try:
+            with Distribution.patch():
+                self.run_setup()
+        except SetupRequirementsError as e:
+            requirements += e.specifiers
+
+        return requirements
+
+    def run_setup(self, setup_script='setup.py'):
+        # Note that we can reuse our build directory between calls
+        # Correctness comes first, then optimization later
+        __file__ = setup_script
+        __name__ = '__main__'
+
+        with _open_setup_script(__file__) as f:
+            code = f.read().replace(r'\r\n', r'\n')
+
+        exec(compile(code, __file__, 'exec'), locals())
+
+    def get_requires_for_build_wheel(self, config_settings=None):
+        config_settings = self._fix_config(config_settings)
+        return self._get_build_requires(config_settings, requirements=['wheel'])
+
+    def get_requires_for_build_sdist(self, config_settings=None):
+        config_settings = self._fix_config(config_settings)
+        return self._get_build_requires(config_settings, requirements=[])
+
+    def prepare_metadata_for_build_wheel(self, metadata_directory,
+                                         config_settings=None):
+        sys.argv = sys.argv[:1] + ['dist_info', '--egg-base',
+                                   _to_str(metadata_directory)]
+        self.run_setup()
+
+        dist_info_directory = metadata_directory
+        while True:
+            dist_infos = [f for f in os.listdir(dist_info_directory)
+                          if f.endswith('.dist-info')]
+
+            if (len(dist_infos) == 0 and
+                len(_get_immediate_subdirectories(dist_info_directory)) == 1):
+
+                dist_info_directory = os.path.join(
+                    dist_info_directory, os.listdir(dist_info_directory)[0])
+                continue
+
+            assert len(dist_infos) == 1
+            break
+
+        # PEP 517 requires that the .dist-info directory be placed in the
+        # metadata_directory. To comply, we MUST copy the directory to the root
+        if dist_info_directory != metadata_directory:
+            shutil.move(
+                os.path.join(dist_info_directory, dist_infos[0]),
+                metadata_directory)
+            shutil.rmtree(dist_info_directory, ignore_errors=True)
+
+        return dist_infos[0]
+
+    def build_wheel(self, wheel_directory, config_settings=None,
+                    metadata_directory=None):
+        config_settings = self._fix_config(config_settings)
+        wheel_directory = os.path.abspath(wheel_directory)
+
+        # Build the wheel in a temporary directory, then copy to the target
+        with TemporaryDirectory(dir=wheel_directory) as tmp_dist_dir:
+            sys.argv = (sys.argv[:1] +
+                        ['bdist_wheel', '--dist-dir', tmp_dist_dir] +
+                        config_settings["--global-option"])
+            self.run_setup()
+
+            wheel_basename = _file_with_extension(tmp_dist_dir, '.whl')
+            wheel_path = os.path.join(wheel_directory, wheel_basename)
+            if os.path.exists(wheel_path):
+                # os.rename cannot overwrite an existing file on non-Unix platforms
+                os.remove(wheel_path)
+            os.rename(os.path.join(tmp_dist_dir, wheel_basename), wheel_path)
+
+        return wheel_basename
+
+    def build_sdist(self, sdist_directory, config_settings=None):
+        config_settings = self._fix_config(config_settings)
+        sdist_directory = os.path.abspath(sdist_directory)
+        sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \
+            config_settings["--global-option"] + \
+            ["--dist-dir", sdist_directory]
+        self.run_setup()
+
+        return _file_with_extension(sdist_directory, '.tar.gz')
+
+
+class _BuildMetaLegacyBackend(_BuildMetaBackend):
+    """Compatibility backend for setuptools
+
+    This is a version of setuptools.build_meta that endeavors to maintain backwards
+    compatibility with pre-PEP 517 modes of invocation. It exists as a temporary
+    bridge between the old packaging mechanism and the new packaging mechanism,
+    and will eventually be removed.
+    """
+    def run_setup(self, setup_script='setup.py'):
+        # In order to maintain compatibility with scripts assuming that
+        # the setup.py script is in a directory on the PYTHONPATH, inject
+        # '' into sys.path. (pypa/setuptools#1642)
+        sys_path = list(sys.path)           # Save the original path
+
+        script_dir = os.path.dirname(os.path.abspath(setup_script))
+        if script_dir not in sys.path:
+            sys.path.insert(0, script_dir)
+
+        try:
+            super(_BuildMetaLegacyBackend,
+                  self).run_setup(setup_script=setup_script)
+        finally:
+            # While PEP 517 frontends should be calling each hook in a fresh
+            # subprocess according to the standard (and thus it should not be
+            # strictly necessary to restore the old sys.path), we'll restore
+            # the original path so that the path manipulation does not persist
+            # within the hook after run_setup is called.
+            sys.path[:] = sys_path
+
+# The primary backend
+_BACKEND = _BuildMetaBackend()
+
+get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel
+get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist
+prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
+build_wheel = _BACKEND.build_wheel
+build_sdist = _BACKEND.build_sdist
+
+
+# The legacy backend
+__legacy__ = _BuildMetaLegacyBackend()
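+
+
+# Illustrative sketch of how a PEP 517 frontend might call these hooks
+# in-process.  Run from a directory containing a setup.py; the 'dist'
+# output directory is a hypothetical location.
+if __name__ == '__main__':
+    # Ask which build requirements are needed before building a wheel.
+    print(get_requires_for_build_wheel())
+    # Build the wheel into ./dist (created here if missing) and report
+    # the resulting basename, as the hook contract requires.
+    if not os.path.isdir('dist'):
+        os.makedirs('dist')
+    print(build_wheel('dist'))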
diff --git a/lib/python3.7/site-packages/setuptools/cli-32.exe b/lib/python3.7/site-packages/setuptools/cli-32.exe
new file mode 100644
index 0000000000000000000000000000000000000000..b1487b7819e7286577a043c7726fbe0ca1543083
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/cli-32.exe differ
diff --git a/lib/python3.7/site-packages/setuptools/cli-64.exe b/lib/python3.7/site-packages/setuptools/cli-64.exe
new file mode 100644
index 0000000000000000000000000000000000000000..675e6bf3743f3d3011c238657e7128ee9960ef7f
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/cli-64.exe differ
diff --git a/lib/python3.7/site-packages/setuptools/cli.exe b/lib/python3.7/site-packages/setuptools/cli.exe
new file mode 100644
index 0000000000000000000000000000000000000000..b1487b7819e7286577a043c7726fbe0ca1543083
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/cli.exe differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__init__.py b/lib/python3.7/site-packages/setuptools/command/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe619e2e676f6e0cf95b2af63cdab33ed386f6b0
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/__init__.py
@@ -0,0 +1,18 @@
+__all__ = [
+    'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop',
+    'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts',
+    'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts',
+    'register', 'bdist_wininst', 'upload_docs', 'upload', 'build_clib',
+    'dist_info',
+]
+
+from distutils.command.bdist import bdist
+import sys
+
+from setuptools.command import install_scripts
+
+if 'egg' not in bdist.format_commands:
+    bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
+    bdist.format_commands.append('egg')
+
+del bdist, sys
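+
+# With the 'egg' format registered above, the generic bdist command can
+# produce eggs directly (illustrative invocation):
+#
+#     python setup.py bdist --formats=egg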
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..30e97bb6e7eae3c3833ea3e6a62f9bc7cfafc3d2
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/alias.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/alias.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a503be51b41ff9ba6d906ab33666ccf7a741cce3
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/alias.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/bdist_egg.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/bdist_egg.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d02519b15f9dbcd0b4a8814e96c29d349c4a908d
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/bdist_egg.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/bdist_rpm.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/bdist_rpm.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d05ea3bc7bf9e0068f4c972117b8fbaffd21d05a
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/bdist_rpm.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/bdist_wininst.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/bdist_wininst.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..86604225e92e9be40cbe5ce2830eac1c189a209f
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/bdist_wininst.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/build_clib.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/build_clib.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3f5743dd5aecbb6895d43e97b0517cba04528e0a
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/build_clib.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/build_ext.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/build_ext.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..145beb5655a341900036a2c52ef3346e52b852bf
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/build_ext.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/build_py.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/build_py.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4942b275ddff3d3ce729809b7825e16c09f84cdb
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/build_py.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/develop.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/develop.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53697356f5ad95f75c8b42ee0e81568d65e35e55
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/develop.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/dist_info.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/dist_info.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f120d8457591e6dbddf5517aaba069b593d6d5a6
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/dist_info.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/easy_install.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/easy_install.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ac3d3fc73cb15c6fb76e4d5f973c2ffefb9cc29c
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/easy_install.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/egg_info.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/egg_info.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6299243a2e85de7998aba3c0286690e64d53fae5
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/egg_info.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/install.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/install.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..636a7ddca6144017fedef1999746669b4dca8d9b
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/install.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/install_egg_info.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/install_egg_info.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e4c88ca105971076a4ea4d42b12265703afb376f
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/install_egg_info.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/install_lib.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/install_lib.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..543430052529dbdd26fa7c8e1bfd52bf5347eeca
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/install_lib.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/install_scripts.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/install_scripts.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3bd00a6d6099528ea9a052f3c2216fb0a89c424e
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/install_scripts.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/py36compat.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/py36compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f00b63a2e9804d950d70bdab855b1a98a9e89e2
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/py36compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/register.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/register.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..902ca5f4e553a6afcbd476cf2a4bfaf5be0bbf82
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/register.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/rotate.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/rotate.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d6da5d3ef9c71494b6035fbaa36c00bb4f02cbc2
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/rotate.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/saveopts.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/saveopts.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e85bc9e09eca51308303c7acb4de361584c78ff7
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/saveopts.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/sdist.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/sdist.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8612788e004ca2b8e6fb90d376aea0e23aef7a63
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/sdist.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/setopt.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/setopt.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..096f7e482690a0bb959a88fc873c1bd5ed962579
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/setopt.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/test.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/test.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b467e296df6cf85c1eb3e4bd075ac36443d2f5f
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/test.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/upload.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/upload.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..723957dc5dc28a0af242c85ccd19c0460644a5dd
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/upload.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/__pycache__/upload_docs.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/command/__pycache__/upload_docs.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3bfb0b276249caba55e35e83047b4d14345ba9dd
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/command/__pycache__/upload_docs.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/command/alias.py b/lib/python3.7/site-packages/setuptools/command/alias.py
new file mode 100644
index 0000000000000000000000000000000000000000..4532b1cc0dca76227927e873f9c64f01008e565a
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/alias.py
@@ -0,0 +1,80 @@
+from distutils.errors import DistutilsOptionError
+
+from setuptools.extern.six.moves import map
+
+from setuptools.command.setopt import edit_config, option_base, config_file
+
+
+def shquote(arg):
+    """Quote an argument for later parsing by shlex.split()"""
+    for c in '"', "'", "\\", "#":
+        if c in arg:
+            return repr(arg)
+    if arg.split() != [arg]:
+        return repr(arg)
+    return arg
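+
+# Illustrative behaviour of ``shquote``:
+#
+#     >>> shquote('simple')
+#     'simple'
+#     >>> shquote('two words')
+#     "'two words'"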
+
+
+class alias(option_base):
+    """Define a shortcut that invokes one or more commands"""
+
+    description = "define a shortcut to invoke one or more commands"
+    command_consumes_arguments = True
+
+    user_options = [
+        ('remove', 'r', 'remove (unset) the alias'),
+    ] + option_base.user_options
+
+    boolean_options = option_base.boolean_options + ['remove']
+
+    def initialize_options(self):
+        option_base.initialize_options(self)
+        self.args = None
+        self.remove = None
+
+    def finalize_options(self):
+        option_base.finalize_options(self)
+        if self.remove and len(self.args) != 1:
+            raise DistutilsOptionError(
+                "Must specify exactly one argument (the alias name) when "
+                "using --remove"
+            )
+
+    def run(self):
+        aliases = self.distribution.get_option_dict('aliases')
+
+        if not self.args:
+            print("Command Aliases")
+            print("---------------")
+            for alias in aliases:
+                print("setup.py alias", format_alias(alias, aliases))
+            return
+
+        elif len(self.args) == 1:
+            alias, = self.args
+            if self.remove:
+                command = None
+            elif alias in aliases:
+                print("setup.py alias", format_alias(alias, aliases))
+                return
+            else:
+                print("No alias definition found for %r" % alias)
+                return
+        else:
+            alias = self.args[0]
+            command = ' '.join(map(shquote, self.args[1:]))
+
+        edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
+
+
+def format_alias(name, aliases):
+    source, command = aliases[name]
+    if source == config_file('global'):
+        source = '--global-config '
+    elif source == config_file('user'):
+        source = '--user-config '
+    elif source == config_file('local'):
+        source = ''
+    else:
+        source = '--filename=%r' % source
+    return source + name + ' ' + command
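+
+# Illustrative usage of the alias command; the alias name and the commands
+# it expands to are hypothetical:
+#
+#     python setup.py alias release egg_info sdist bdist_egg
+#     python setup.py release           # now runs all three commands
+#     python setup.py alias -r release  # removes the alias again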
diff --git a/lib/python3.7/site-packages/setuptools/command/bdist_egg.py b/lib/python3.7/site-packages/setuptools/command/bdist_egg.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f8df917e6445d53d448250f6a298dd8df0f6ce7
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/bdist_egg.py
@@ -0,0 +1,502 @@
+"""setuptools.command.bdist_egg
+
+Build .egg distributions"""
+
+from distutils.errors import DistutilsSetupError
+from distutils.dir_util import remove_tree, mkpath
+from distutils import log
+from types import CodeType
+import sys
+import os
+import re
+import textwrap
+import marshal
+
+from setuptools.extern import six
+
+from pkg_resources import get_build_platform, Distribution, ensure_directory
+from pkg_resources import EntryPoint
+from setuptools.extension import Library
+from setuptools import Command
+
+try:
+    # Python 2.7 or >=3.2
+    from sysconfig import get_path, get_python_version
+
+    def _get_purelib():
+        return get_path("purelib")
+except ImportError:
+    from distutils.sysconfig import get_python_lib, get_python_version
+
+    def _get_purelib():
+        return get_python_lib(False)
+
+
+def strip_module(filename):
+    if '.' in filename:
+        filename = os.path.splitext(filename)[0]
+    if filename.endswith('module'):
+        filename = filename[:-6]
+    return filename
+
+
+def sorted_walk(dir):
+    """Do os.walk in a reproducible way,
+    independent of nondeterministic filesystem readdir order
+    """
+    for base, dirs, files in os.walk(dir):
+        dirs.sort()
+        files.sort()
+        yield base, dirs, files
+
+
+def write_stub(resource, pyfile):
+    _stub_template = textwrap.dedent("""
+        def __bootstrap__():
+            global __bootstrap__, __loader__, __file__
+            import sys, pkg_resources, imp
+            __file__ = pkg_resources.resource_filename(__name__, %r)
+            __loader__ = None; del __bootstrap__, __loader__
+            imp.load_dynamic(__name__,__file__)
+        __bootstrap__()
+        """).lstrip()
+    with open(pyfile, 'w') as f:
+        f.write(_stub_template % resource)
+
+
+class bdist_egg(Command):
+    description = "create an \"egg\" distribution"
+
+    user_options = [
+        ('bdist-dir=', 'b',
+         "temporary directory for creating the distribution"),
+        ('plat-name=', 'p', "platform name to embed in generated filenames "
+                            "(default: %s)" % get_build_platform()),
+        ('exclude-source-files', None,
+         "remove all .py files from the generated egg"),
+        ('keep-temp', 'k',
+         "keep the pseudo-installation tree around after " +
+         "creating the distribution archive"),
+        ('dist-dir=', 'd',
+         "directory to put final built distributions in"),
+        ('skip-build', None,
+         "skip rebuilding everything (for testing/debugging)"),
+    ]
+
+    boolean_options = [
+        'keep-temp', 'skip-build', 'exclude-source-files'
+    ]
+
+    def initialize_options(self):
+        self.bdist_dir = None
+        self.plat_name = None
+        self.keep_temp = 0
+        self.dist_dir = None
+        self.skip_build = 0
+        self.egg_output = None
+        self.exclude_source_files = None
+
+    def finalize_options(self):
+        ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
+        self.egg_info = ei_cmd.egg_info
+
+        if self.bdist_dir is None:
+            bdist_base = self.get_finalized_command('bdist').bdist_base
+            self.bdist_dir = os.path.join(bdist_base, 'egg')
+
+        if self.plat_name is None:
+            self.plat_name = get_build_platform()
+
+        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+        if self.egg_output is None:
+
+            # Compute filename of the output egg
+            basename = Distribution(
+                None, None, ei_cmd.egg_name, ei_cmd.egg_version,
+                get_python_version(),
+                self.distribution.has_ext_modules() and self.plat_name
+            ).egg_name()
+
+            self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
+
+    def do_install_data(self):
+        # Hack for packages that install data to install's --install-lib
+        self.get_finalized_command('install').install_lib = self.bdist_dir
+
+        site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
+        old, self.distribution.data_files = self.distribution.data_files, []
+
+        for item in old:
+            if isinstance(item, tuple) and len(item) == 2:
+                if os.path.isabs(item[0]):
+                    realpath = os.path.realpath(item[0])
+                    normalized = os.path.normcase(realpath)
+                    if normalized == site_packages or normalized.startswith(
+                        site_packages + os.sep
+                    ):
+                        item = realpath[len(site_packages) + 1:], item[1]
+                        # XXX else: raise ???
+            self.distribution.data_files.append(item)
+
+        try:
+            log.info("installing package data to %s", self.bdist_dir)
+            self.call_command('install_data', force=0, root=None)
+        finally:
+            self.distribution.data_files = old
+
+    def get_outputs(self):
+        return [self.egg_output]
+
+    def call_command(self, cmdname, **kw):
+        """Invoke reinitialized command `cmdname` with keyword args"""
+        for dirname in INSTALL_DIRECTORY_ATTRS:
+            kw.setdefault(dirname, self.bdist_dir)
+        kw.setdefault('skip_build', self.skip_build)
+        kw.setdefault('dry_run', self.dry_run)
+        cmd = self.reinitialize_command(cmdname, **kw)
+        self.run_command(cmdname)
+        return cmd
+
+    def run(self):
+        # Generate metadata first
+        self.run_command("egg_info")
+        # We run install_lib before install_data, because some data hacks
+        # pull their data path from the install_lib command.
+        log.info("installing library code to %s", self.bdist_dir)
+        instcmd = self.get_finalized_command('install')
+        old_root = instcmd.root
+        instcmd.root = None
+        if self.distribution.has_c_libraries() and not self.skip_build:
+            self.run_command('build_clib')
+        cmd = self.call_command('install_lib', warn_dir=0)
+        instcmd.root = old_root
+
+        all_outputs, ext_outputs = self.get_ext_outputs()
+        self.stubs = []
+        to_compile = []
+        for (p, ext_name) in enumerate(ext_outputs):
+            filename, ext = os.path.splitext(ext_name)
+            pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
+                                  '.py')
+            self.stubs.append(pyfile)
+            log.info("creating stub loader for %s", ext_name)
+            if not self.dry_run:
+                write_stub(os.path.basename(ext_name), pyfile)
+            to_compile.append(pyfile)
+            ext_outputs[p] = ext_name.replace(os.sep, '/')
+
+        if to_compile:
+            cmd.byte_compile(to_compile)
+        if self.distribution.data_files:
+            self.do_install_data()
+
+        # Make the EGG-INFO directory
+        archive_root = self.bdist_dir
+        egg_info = os.path.join(archive_root, 'EGG-INFO')
+        self.mkpath(egg_info)
+        if self.distribution.scripts:
+            script_dir = os.path.join(egg_info, 'scripts')
+            log.info("installing scripts to %s", script_dir)
+            self.call_command('install_scripts', install_dir=script_dir,
+                              no_ep=1)
+
+        self.copy_metadata_to(egg_info)
+        native_libs = os.path.join(egg_info, "native_libs.txt")
+        if all_outputs:
+            log.info("writing %s", native_libs)
+            if not self.dry_run:
+                ensure_directory(native_libs)
+                libs_file = open(native_libs, 'wt')
+                libs_file.write('\n'.join(all_outputs))
+                libs_file.write('\n')
+                libs_file.close()
+        elif os.path.isfile(native_libs):
+            log.info("removing %s", native_libs)
+            if not self.dry_run:
+                os.unlink(native_libs)
+
+        write_safety_flag(
+            os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
+        )
+
+        if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
+            log.warn(
+                "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
+                "Use the install_requires/extras_require setup() args instead."
+            )
+
+        if self.exclude_source_files:
+            self.zap_pyfiles()
+
+        # Make the archive
+        make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
+                     dry_run=self.dry_run, mode=self.gen_header())
+        if not self.keep_temp:
+            remove_tree(self.bdist_dir, dry_run=self.dry_run)
+
+        # Add to 'Distribution.dist_files' so that the "upload" command works
+        getattr(self.distribution, 'dist_files', []).append(
+            ('bdist_egg', get_python_version(), self.egg_output))
+
+    def zap_pyfiles(self):
+        log.info("Removing .py files from temporary directory")
+        for base, dirs, files in walk_egg(self.bdist_dir):
+            for name in files:
+                path = os.path.join(base, name)
+
+                if name.endswith('.py'):
+                    log.debug("Deleting %s", path)
+                    os.unlink(path)
+
+                if base.endswith('__pycache__'):
+                    path_old = path
+
+                    pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc'
+                    m = re.match(pattern, name)
+                    path_new = os.path.join(
+                        base, os.pardir, m.group('name') + '.pyc')
+                    log.info(
+                        "Renaming file from [%s] to [%s]"
+                        % (path_old, path_new))
+                    try:
+                        os.remove(path_new)
+                    except OSError:
+                        pass
+                    os.rename(path_old, path_new)
+
+    def zip_safe(self):
+        safe = getattr(self.distribution, 'zip_safe', None)
+        if safe is not None:
+            return safe
+        log.warn("zip_safe flag not set; analyzing archive contents...")
+        return analyze_egg(self.bdist_dir, self.stubs)
+
+    def gen_header(self):
+        epm = EntryPoint.parse_map(self.distribution.entry_points or '')
+        ep = epm.get('setuptools.installation', {}).get('eggsecutable')
+        if ep is None:
+            return 'w'  # not an eggsecutable, do it the usual way.
+
+        if not ep.attrs or ep.extras:
+            raise DistutilsSetupError(
+                "eggsecutable entry point (%r) cannot have 'extras' "
+                "or refer to a module" % (ep,)
+            )
+
+        pyver = sys.version[:3]
+        pkg = ep.module_name
+        full = '.'.join(ep.attrs)
+        base = ep.attrs[0]
+        basename = os.path.basename(self.egg_output)
+
+        header = (
+            "#!/bin/sh\n"
+            'if [ `basename $0` = "%(basename)s" ]\n'
+            'then exec python%(pyver)s -c "'
+            "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
+            "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
+            '" "$@"\n'
+            'else\n'
+            '  echo $0 is not the correct name for this egg file.\n'
+            '  echo Please rename it back to %(basename)s and try again.\n'
+            '  exec false\n'
+            'fi\n'
+        ) % locals()
+
+        if not self.dry_run:
+            mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
+            f = open(self.egg_output, 'w')
+            f.write(header)
+            f.close()
+        return 'a'
+
+    def copy_metadata_to(self, target_dir):
+        "Copy metadata (egg info) to the target_dir"
+        # normalize the path (so that a forward-slash in egg_info will
+        # match using startswith below)
+        norm_egg_info = os.path.normpath(self.egg_info)
+        prefix = os.path.join(norm_egg_info, '')
+        for path in self.ei_cmd.filelist.files:
+            if path.startswith(prefix):
+                target = os.path.join(target_dir, path[len(prefix):])
+                ensure_directory(target)
+                self.copy_file(path, target)
+
+    def get_ext_outputs(self):
+        """Get a list of relative paths to C extensions in the output distro"""
+
+        all_outputs = []
+        ext_outputs = []
+
+        paths = {self.bdist_dir: ''}
+        for base, dirs, files in sorted_walk(self.bdist_dir):
+            for filename in files:
+                if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
+                    all_outputs.append(paths[base] + filename)
+            for filename in dirs:
+                paths[os.path.join(base, filename)] = (paths[base] +
+                                                       filename + '/')
+
+        if self.distribution.has_ext_modules():
+            build_cmd = self.get_finalized_command('build_ext')
+            for ext in build_cmd.extensions:
+                if isinstance(ext, Library):
+                    continue
+                fullname = build_cmd.get_ext_fullname(ext.name)
+                filename = build_cmd.get_ext_filename(fullname)
+                if not os.path.basename(filename).startswith('dl-'):
+                    if os.path.exists(os.path.join(self.bdist_dir, filename)):
+                        ext_outputs.append(filename)
+
+        return all_outputs, ext_outputs
+
+
+NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
+
+
+def walk_egg(egg_dir):
+    """Walk an unpacked egg's contents, skipping the metadata directory"""
+    walker = sorted_walk(egg_dir)
+    base, dirs, files = next(walker)
+    if 'EGG-INFO' in dirs:
+        dirs.remove('EGG-INFO')
+    yield base, dirs, files
+    for bdf in walker:
+        yield bdf
+
+
+def analyze_egg(egg_dir, stubs):
+    # check for existing flag in EGG-INFO
+    for flag, fn in safety_flags.items():
+        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
+            return flag
+    if not can_scan():
+        return False
+    safe = True
+    for base, dirs, files in walk_egg(egg_dir):
+        for name in files:
+            if name.endswith('.py') or name.endswith('.pyw'):
+                continue
+            elif name.endswith('.pyc') or name.endswith('.pyo'):
+                # always scan, even if we already know we're not safe
+                safe = scan_module(egg_dir, base, name, stubs) and safe
+    return safe
+
+
+def write_safety_flag(egg_dir, safe):
+    # Write or remove zip safety flag file(s)
+    for flag, fn in safety_flags.items():
+        fn = os.path.join(egg_dir, fn)
+        if os.path.exists(fn):
+            if safe is None or bool(safe) != flag:
+                os.unlink(fn)
+        elif safe is not None and bool(safe) == flag:
+            f = open(fn, 'wt')
+            f.write('\n')
+            f.close()
+
+
+safety_flags = {
+    True: 'zip-safe',
+    False: 'not-zip-safe',
+}
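+# These flags double as marker file names inside EGG-INFO: write_safety_flag()
+# creates e.g. EGG-INFO/zip-safe when an egg is judged safe, and analyze_egg()
+# honours an existing marker before rescanning.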
+
+
+def scan_module(egg_dir, base, name, stubs):
+    """Check whether module possibly uses unsafe-for-zipfile stuff"""
+
+    filename = os.path.join(base, name)
+    if filename[:-1] in stubs:
+        return True  # Extension module
+    pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
+    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
+    if six.PY2:
+        skip = 8  # skip magic & date
+    elif sys.version_info < (3, 7):
+        skip = 12  # skip magic & date & file size
+    else:
+        skip = 16  # skip magic & reserved? & date & file size
+    f = open(filename, 'rb')
+    f.read(skip)
+    code = marshal.load(f)
+    f.close()
+    safe = True
+    symbols = dict.fromkeys(iter_symbols(code))
+    for bad in ['__file__', '__path__']:
+        if bad in symbols:
+            log.warn("%s: module references %s", module, bad)
+            safe = False
+    if 'inspect' in symbols:
+        for bad in [
+            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
+            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
+            'getinnerframes', 'getouterframes', 'stack', 'trace'
+        ]:
+            if bad in symbols:
+                log.warn("%s: module MAY be using inspect.%s", module, bad)
+                safe = False
+    return safe
+
+
+def iter_symbols(code):
+    """Yield names and strings used by `code` and its nested code objects"""
+    for name in code.co_names:
+        yield name
+    for const in code.co_consts:
+        if isinstance(const, six.string_types):
+            yield const
+        elif isinstance(const, CodeType):
+            for name in iter_symbols(const):
+                yield name
+
+
+def can_scan():
+    if not sys.platform.startswith('java') and sys.platform != 'cli':
+        # CPython, PyPy, etc.
+        return True
+    log.warn("Unable to analyze compiled code on this platform.")
+    log.warn("Please ask the author to include a 'zip_safe'"
+             " setting (either True or False) in the package's setup.py")
+
+
+# Attribute names of options for commands that might need to be convinced to
+# install to the egg build directory
+
+INSTALL_DIRECTORY_ATTRS = [
+    'install_lib', 'install_dir', 'install_data', 'install_base'
+]
+
+
+def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
+                 mode='w'):
+    """Create a zip file from all the files under 'base_dir'.  The output
+    zip file is written to 'zip_filename', using the "zipfile" module;
+    'mode' is passed through to zipfile.ZipFile, so an existing archive
+    can be appended to with 'a'.  Returns the name of the output zip file.
+    """
+    import zipfile
+
+    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
+    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
+
+    def visit(z, dirname, names):
+        for name in names:
+            path = os.path.normpath(os.path.join(dirname, name))
+            if os.path.isfile(path):
+                p = path[len(base_dir) + 1:]
+                if not dry_run:
+                    z.write(path, p)
+                log.debug("adding '%s'", p)
+
+    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
+    if not dry_run:
+        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
+        for dirname, dirs, files in sorted_walk(base_dir):
+            visit(z, dirname, files)
+        z.close()
+    else:
+        for dirname, dirs, files in sorted_walk(base_dir):
+            visit(None, dirname, files)
+    return zip_filename
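+
+
+# Illustrative sketch: ``make_zipfile`` can also be used directly; the
+# paths below are hypothetical.
+if __name__ == '__main__':
+    make_zipfile('build/demo.egg', 'build/demo', verbose=1)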
diff --git a/lib/python3.7/site-packages/setuptools/command/bdist_rpm.py b/lib/python3.7/site-packages/setuptools/command/bdist_rpm.py
new file mode 100644
index 0000000000000000000000000000000000000000..70730927ecaed778ebbdee98eb37c24ec3f1a8e6
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/bdist_rpm.py
@@ -0,0 +1,43 @@
+import distutils.command.bdist_rpm as orig
+
+
+class bdist_rpm(orig.bdist_rpm):
+    """
+    Override the default bdist_rpm behavior to do the following:
+
+    1. Run egg_info to ensure the name and version are properly calculated.
+    2. Always run 'install' using --single-version-externally-managed to
+       disable eggs in RPM distributions.
+    3. Replace dash with underscore in the version numbers for better RPM
+       compatibility.
+    """
+
+    def run(self):
+        # ensure distro name is up-to-date
+        self.run_command('egg_info')
+
+        orig.bdist_rpm.run(self)
+
+    def _make_spec_file(self):
+        version = self.distribution.get_version()
+        rpmversion = version.replace('-', '_')
+        spec = orig.bdist_rpm._make_spec_file(self)
+        line23 = '%define version ' + version
+        line24 = '%define version ' + rpmversion
+        spec = [
+            line.replace(
+                "Source0: %{name}-%{version}.tar",
+                "Source0: %{name}-%{unmangled_version}.tar"
+            ).replace(
+                "setup.py install ",
+                "setup.py install --single-version-externally-managed "
+            ).replace(
+                "%setup",
+                "%setup -n %{name}-%{unmangled_version}"
+            ).replace(line23, line24)
+            for line in spec
+        ]
+        insert_loc = spec.index(line24) + 1
+        unmangled_version = "%define unmangled_version " + version
+        spec.insert(insert_loc, unmangled_version)
+        return spec
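+
+# Illustrative effect of the dash-to-underscore rewrite above: a version
+# such as '1.0-2' yields these consecutive lines in the spec file:
+#
+#     %define version 1.0_2
+#     %define unmangled_version 1.0-2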
diff --git a/lib/python3.7/site-packages/setuptools/command/bdist_wininst.py b/lib/python3.7/site-packages/setuptools/command/bdist_wininst.py
new file mode 100644
index 0000000000000000000000000000000000000000..073de97b46c92e2e221cade8c1350ab2c5cff891
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/bdist_wininst.py
@@ -0,0 +1,21 @@
+import distutils.command.bdist_wininst as orig
+
+
+class bdist_wininst(orig.bdist_wininst):
+    def reinitialize_command(self, command, reinit_subcommands=0):
+        """
+        Supplement reinitialize_command to work around
+        http://bugs.python.org/issue20819
+        """
+        cmd = self.distribution.reinitialize_command(
+            command, reinit_subcommands)
+        if command in ('install', 'install_lib'):
+            cmd.install_lib = None
+        return cmd
+
+    def run(self):
+        self._is_running = True
+        try:
+            orig.bdist_wininst.run(self)
+        finally:
+            self._is_running = False
diff --git a/lib/python3.7/site-packages/setuptools/command/build_clib.py b/lib/python3.7/site-packages/setuptools/command/build_clib.py
new file mode 100644
index 0000000000000000000000000000000000000000..09caff6ffde8fc3f368cb635dc3cbbbc8851530d
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/build_clib.py
@@ -0,0 +1,98 @@
+import distutils.command.build_clib as orig
+from distutils.errors import DistutilsSetupError
+from distutils import log
+from setuptools.dep_util import newer_pairwise_group
+
+
+class build_clib(orig.build_clib):
+    """
+    Override the default build_clib behaviour to do the following:
+
+    1. Implement a rudimentary timestamp-based dependency system
+       so 'compile()' doesn't run every time.
+    2. Add more keys to the 'build_info' dictionary:
+        * obj_deps - specify dependencies for each object compiled.
+                     this should be a dictionary mapping a key
+                     with the source filename to a list of
+                     dependencies. Use an empty string for global
+                     dependencies.
+        * cflags   - specify a list of additional flags to pass to
+                     the compiler.
+    """
+
+    def build_libraries(self, libraries):
+        for (lib_name, build_info) in libraries:
+            sources = build_info.get('sources')
+            if sources is None or not isinstance(sources, (list, tuple)):
+                raise DistutilsSetupError(
+                       "in 'libraries' option (library '%s'), "
+                       "'sources' must be present and must be "
+                       "a list of source filenames" % lib_name)
+            sources = list(sources)
+
+            log.info("building '%s' library", lib_name)
+
+            # Make sure everything is the correct type.
+            # obj_deps should be a dictionary of keys as sources
+            # and a list/tuple of files that are its dependencies.
+            obj_deps = build_info.get('obj_deps', dict())
+            if not isinstance(obj_deps, dict):
+                raise DistutilsSetupError(
+                       "in 'libraries' option (library '%s'), "
+                       "'obj_deps' must be a dictionary of "
+                       "type 'source: list'" % lib_name)
+            dependencies = []
+
+            # Get the global dependencies that are specified by the '' key.
+            # These will go into every source's dependency list.
+            global_deps = obj_deps.get('', list())
+            if not isinstance(global_deps, (list, tuple)):
+                raise DistutilsSetupError(
+                       "in 'libraries' option (library '%s'), "
+                       "'obj_deps' must be a dictionary of "
+                       "type 'source: list'" % lib_name)
+
+            # Build the list to be used by newer_pairwise_group
+            # each source will be auto-added to its dependencies.
+            for source in sources:
+                src_deps = [source]
+                src_deps.extend(global_deps)
+                extra_deps = obj_deps.get(source, list())
+                if not isinstance(extra_deps, (list, tuple)):
+                    raise DistutilsSetupError(
+                           "in 'libraries' option (library '%s'), "
+                           "'obj_deps' must be a dictionary of "
+                           "type 'source: list'" % lib_name)
+                src_deps.extend(extra_deps)
+                dependencies.append(src_deps)
+
+            expected_objects = self.compiler.object_filenames(
+                    sources,
+                    output_dir=self.build_temp
+                    )
+
+            if newer_pairwise_group(dependencies, expected_objects) != ([], []):
+                # First, compile the source code to object files in the library
+                # directory.  (This should probably change to putting object
+                # files in a temporary build directory.)
+                macros = build_info.get('macros')
+                include_dirs = build_info.get('include_dirs')
+                cflags = build_info.get('cflags')
+                objects = self.compiler.compile(
+                        sources,
+                        output_dir=self.build_temp,
+                        macros=macros,
+                        include_dirs=include_dirs,
+                        extra_postargs=cflags,
+                        debug=self.debug
+                        )
+
+            # Now "link" the object files together into a static library.
+            # (On Unix at least, this isn't really linking -- it just
+            # builds an archive.  Whatever.)
+            self.compiler.create_static_lib(
+                    expected_objects,
+                    lib_name,
+                    output_dir=self.build_clib,
+                    debug=self.debug
+                    )
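+
+
+# Illustrative sketch: the extra 'obj_deps' and 'cflags' keys documented
+# above are supplied through setup(); every name here is hypothetical.
+#
+#     setup(
+#         ...,
+#         libraries=[
+#             ('demo', {
+#                 'sources': ['src/demo.c'],
+#                 'obj_deps': {
+#                     '': ['src/common.h'],          # global dependency
+#                     'src/demo.c': ['src/demo.h'],  # per-source dependency
+#                 },
+#                 'cflags': ['-O2'],
+#             }),
+#         ],
+#     )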
diff --git a/lib/python3.7/site-packages/setuptools/command/build_ext.py b/lib/python3.7/site-packages/setuptools/command/build_ext.py
new file mode 100644
index 0000000000000000000000000000000000000000..60a8a32f08ce78e0881b2f64df1b98abd92120f1
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/build_ext.py
@@ -0,0 +1,321 @@
+import os
+import sys
+import itertools
+import imp
+from distutils.command.build_ext import build_ext as _du_build_ext
+from distutils.file_util import copy_file
+from distutils.ccompiler import new_compiler
+from distutils.sysconfig import customize_compiler, get_config_var
+from distutils.errors import DistutilsError
+from distutils import log
+
+from setuptools.extension import Library
+from setuptools.extern import six
+
+try:
+    # Attempt to use Cython for building extensions, if available
+    from Cython.Distutils.build_ext import build_ext as _build_ext
+    # Additionally, assert that the compiler module will load
+    # also. Ref #1229.
+    __import__('Cython.Compiler.Main')
+except ImportError:
+    _build_ext = _du_build_ext
+
+# make sure _config_vars is initialized
+get_config_var("LDSHARED")
+from distutils.sysconfig import _config_vars as _CONFIG_VARS
+
+
+def _customize_compiler_for_shlib(compiler):
+    if sys.platform == "darwin":
+        # building .dylib requires additional compiler flags on OSX; here we
+        # temporarily substitute the pyconfig.h variables so that distutils'
+        # 'customize_compiler' uses them before we build the shared libraries.
+        tmp = _CONFIG_VARS.copy()
+        try:
+            # XXX Help!  I don't have any idea whether these are right...
+            _CONFIG_VARS['LDSHARED'] = (
+                "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
+            _CONFIG_VARS['CCSHARED'] = " -dynamiclib"
+            _CONFIG_VARS['SO'] = ".dylib"
+            customize_compiler(compiler)
+        finally:
+            _CONFIG_VARS.clear()
+            _CONFIG_VARS.update(tmp)
+    else:
+        customize_compiler(compiler)
+
+
+have_rtld = False
+use_stubs = False
+libtype = 'shared'
+
+if sys.platform == "darwin":
+    use_stubs = True
+elif os.name != 'nt':
+    try:
+        import dl
+        use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
+    except ImportError:
+        pass
+
+if_dl = lambda s: s if have_rtld else ''
+
+
+def get_abi3_suffix():
+    """Return the file extension for an abi3-compliant Extension()"""
+    for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION):
+        if '.abi3' in suffix:  # Unix
+            return suffix
+        elif suffix == '.pyd':  # Windows
+            return suffix
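+
+# Illustrative note: get_abi3_suffix() feeds the py_limited_api handling
+# in build_ext.get_ext_filename below; a project opts in per extension
+# (hypothetical names):
+#
+#     Extension('demo._speedups', ['src/speedups.c'], py_limited_api=True)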
+
+
+class build_ext(_build_ext):
+    def run(self):
+        """Build extensions in build directory, then copy if --inplace"""
+        old_inplace, self.inplace = self.inplace, 0
+        _build_ext.run(self)
+        self.inplace = old_inplace
+        if old_inplace:
+            self.copy_extensions_to_source()
+
+    def copy_extensions_to_source(self):
+        build_py = self.get_finalized_command('build_py')
+        for ext in self.extensions:
+            fullname = self.get_ext_fullname(ext.name)
+            filename = self.get_ext_filename(fullname)
+            modpath = fullname.split('.')
+            package = '.'.join(modpath[:-1])
+            package_dir = build_py.get_package_dir(package)
+            dest_filename = os.path.join(package_dir,
+                                         os.path.basename(filename))
+            src_filename = os.path.join(self.build_lib, filename)
+
+            # Always copy, even if source is older than destination, to ensure
+            # that the right extensions for the current Python/platform are
+            # used.
+            copy_file(
+                src_filename, dest_filename, verbose=self.verbose,
+                dry_run=self.dry_run
+            )
+            if ext._needs_stub:
+                self.write_stub(package_dir or os.curdir, ext, True)
+
+    def get_ext_filename(self, fullname):
+        filename = _build_ext.get_ext_filename(self, fullname)
+        if fullname in self.ext_map:
+            ext = self.ext_map[fullname]
+            use_abi3 = (
+                six.PY3
+                and getattr(ext, 'py_limited_api')
+                and get_abi3_suffix()
+            )
+            if use_abi3:
+                so_ext = get_config_var('EXT_SUFFIX')
+                filename = filename[:-len(so_ext)]
+                filename = filename + get_abi3_suffix()
+            if isinstance(ext, Library):
+                fn, ext = os.path.splitext(filename)
+                return self.shlib_compiler.library_filename(fn, libtype)
+            elif use_stubs and ext._links_to_dynamic:
+                d, fn = os.path.split(filename)
+                return os.path.join(d, 'dl-' + fn)
+        return filename
+
+    def initialize_options(self):
+        _build_ext.initialize_options(self)
+        self.shlib_compiler = None
+        self.shlibs = []
+        self.ext_map = {}
+
+    def finalize_options(self):
+        _build_ext.finalize_options(self)
+        self.extensions = self.extensions or []
+        self.check_extensions_list(self.extensions)
+        self.shlibs = [ext for ext in self.extensions
+                       if isinstance(ext, Library)]
+        if self.shlibs:
+            self.setup_shlib_compiler()
+        for ext in self.extensions:
+            ext._full_name = self.get_ext_fullname(ext.name)
+        for ext in self.extensions:
+            fullname = ext._full_name
+            self.ext_map[fullname] = ext
+
+            # distutils 3.1 will also ask for module names
+            # XXX what to do with conflicts?
+            self.ext_map[fullname.split('.')[-1]] = ext
+
+            ltd = self.shlibs and self.links_to_dynamic(ext) or False
+            ns = ltd and use_stubs and not isinstance(ext, Library)
+            ext._links_to_dynamic = ltd
+            ext._needs_stub = ns
+            filename = ext._file_name = self.get_ext_filename(fullname)
+            libdir = os.path.dirname(os.path.join(self.build_lib, filename))
+            if ltd and libdir not in ext.library_dirs:
+                ext.library_dirs.append(libdir)
+            if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
+                ext.runtime_library_dirs.append(os.curdir)
+
+    def setup_shlib_compiler(self):
+        compiler = self.shlib_compiler = new_compiler(
+            compiler=self.compiler, dry_run=self.dry_run, force=self.force
+        )
+        _customize_compiler_for_shlib(compiler)
+
+        if self.include_dirs is not None:
+            compiler.set_include_dirs(self.include_dirs)
+        if self.define is not None:
+            # 'define' option is a list of (name,value) tuples
+            for (name, value) in self.define:
+                compiler.define_macro(name, value)
+        if self.undef is not None:
+            for macro in self.undef:
+                compiler.undefine_macro(macro)
+        if self.libraries is not None:
+            compiler.set_libraries(self.libraries)
+        if self.library_dirs is not None:
+            compiler.set_library_dirs(self.library_dirs)
+        if self.rpath is not None:
+            compiler.set_runtime_library_dirs(self.rpath)
+        if self.link_objects is not None:
+            compiler.set_link_objects(self.link_objects)
+
+        # hack so distutils' build_extension() builds a library instead
+        compiler.link_shared_object = link_shared_object.__get__(compiler)
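+        # (link_shared_object is a plain module-level function defined at the
+        # bottom of this file; calling its __get__ with the compiler instance
+        # yields a bound method, so only this one instance is patched.)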
+
+    def get_export_symbols(self, ext):
+        if isinstance(ext, Library):
+            return ext.export_symbols
+        return _build_ext.get_export_symbols(self, ext)
+
+    def build_extension(self, ext):
+        ext._convert_pyx_sources_to_lang()
+        _compiler = self.compiler
+        try:
+            if isinstance(ext, Library):
+                self.compiler = self.shlib_compiler
+            _build_ext.build_extension(self, ext)
+            if ext._needs_stub:
+                cmd = self.get_finalized_command('build_py').build_lib
+                self.write_stub(cmd, ext)
+        finally:
+            self.compiler = _compiler
+
+    def links_to_dynamic(self, ext):
+        """Return true if 'ext' links to a dynamic lib in the same package"""
+        # XXX this should check to ensure the lib is actually being built
+        # XXX as dynamic, and not just using a locally-found version or a
+        # XXX static-compiled version
+        libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
+        pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
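+        # e.g. (hypothetical) ext._full_name == 'mypkg.foo' gives pkg ==
+        # 'mypkg.', so an entry 'bar' in ext.libraries matches a Library
+        # built as 'mypkg.bar'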
+        return any(pkg + libname in libnames for libname in ext.libraries)
+
+    def get_outputs(self):
+        return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
+
+    def __get_stubs_outputs(self):
+        # assemble the base name for each extension that needs a stub
+        ns_ext_bases = (
+            os.path.join(self.build_lib, *ext._full_name.split('.'))
+            for ext in self.extensions
+            if ext._needs_stub
+        )
+        # pair each base with the extension
+        pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
+        return list(base + fnext for base, fnext in pairs)
+
+    def __get_output_extensions(self):
+        yield '.py'
+        yield '.pyc'
+        if self.get_finalized_command('build_py').optimize:
+            yield '.pyo'
+
+    def write_stub(self, output_dir, ext, compile=False):
+        log.info("writing stub loader for %s to %s", ext._full_name,
+                 output_dir)
+        stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
+                     '.py')
+        if compile and os.path.exists(stub_file):
+            raise DistutilsError(stub_file + " already exists! Please delete.")
+        if not self.dry_run:
+            f = open(stub_file, 'w')
+            f.write(
+                '\n'.join([
+                    "def __bootstrap__():",
+                    "   global __bootstrap__, __file__, __loader__",
+                    "   import sys, os, pkg_resources, imp" + if_dl(", dl"),
+                    "   __file__ = pkg_resources.resource_filename"
+                    "(__name__,%r)"
+                    % os.path.basename(ext._file_name),
+                    "   del __bootstrap__",
+                    "   if '__loader__' in globals():",
+                    "       del __loader__",
+                    if_dl("   old_flags = sys.getdlopenflags()"),
+                    "   old_dir = os.getcwd()",
+                    "   try:",
+                    "     os.chdir(os.path.dirname(__file__))",
+                    if_dl("     sys.setdlopenflags(dl.RTLD_NOW)"),
+                    "     imp.load_dynamic(__name__,__file__)",
+                    "   finally:",
+                    if_dl("     sys.setdlopenflags(old_flags)"),
+                    "     os.chdir(old_dir)",
+                    "__bootstrap__()",
+                    ""  # terminal \n
+                ])
+            )
+            f.close()
+        if compile:
+            from distutils.util import byte_compile
+
+            byte_compile([stub_file], optimize=0,
+                         force=True, dry_run=self.dry_run)
+            optimize = self.get_finalized_command('install_lib').optimize
+            if optimize > 0:
+                byte_compile([stub_file], optimize=optimize,
+                             force=True, dry_run=self.dry_run)
+            if os.path.exists(stub_file) and not self.dry_run:
+                os.unlink(stub_file)
+
+
+if use_stubs or os.name == 'nt':
+    # Build shared libraries
+    #
+    def link_shared_object(
+            self, objects, output_libname, output_dir=None, libraries=None,
+            library_dirs=None, runtime_library_dirs=None, export_symbols=None,
+            debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
+            target_lang=None):
+        self.link(
+            self.SHARED_LIBRARY, objects, output_libname,
+            output_dir, libraries, library_dirs, runtime_library_dirs,
+            export_symbols, debug, extra_preargs, extra_postargs,
+            build_temp, target_lang
+        )
+else:
+    # Build static libraries everywhere else
+    libtype = 'static'
+
+    def link_shared_object(
+            self, objects, output_libname, output_dir=None, libraries=None,
+            library_dirs=None, runtime_library_dirs=None, export_symbols=None,
+            debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
+            target_lang=None):
+        # XXX we need to either disallow these attrs on Library instances,
+        # or warn/abort here if set, or something...
+        # libraries=None, library_dirs=None, runtime_library_dirs=None,
+        # export_symbols=None, extra_preargs=None, extra_postargs=None,
+        # build_temp=None
+
+        assert output_dir is None  # distutils build_ext doesn't pass this
+        output_dir, filename = os.path.split(output_libname)
+        basename, ext = os.path.splitext(filename)
+        if self.library_filename("x").startswith('lib'):
+            # strip 'lib' prefix; this is kludgy if some platform uses
+            # a different prefix
+            basename = basename[3:]
+
+        self.create_static_lib(
+            objects, basename, output_dir, debug, target_lang
+        )
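The get_ext_filename override above is what makes py_limited_api take
effect: it strips the interpreter-specific EXT_SUFFIX and substitutes the
stable-ABI suffix from get_abi3_suffix(). A minimal sketch of that rewrite;
both suffix values are assumptions for CPython 3.7 on Linux and vary by
platform and build:

    # illustrative values only
    ext_suffix = '.cpython-37m-x86_64-linux-gnu.so'  # get_config_var('EXT_SUFFIX')
    abi3_suffix = '.abi3.so'                         # get_abi3_suffix() on Unix

    filename = 'mypkg/_speedups' + ext_suffix
    filename = filename[:-len(ext_suffix)] + abi3_suffix
    assert filename == 'mypkg/_speedups.abi3.so'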
diff --git a/lib/python3.7/site-packages/setuptools/command/build_py.py b/lib/python3.7/site-packages/setuptools/command/build_py.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0314fd413ae7f8c1027ccde0b092fd493fb104b
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/build_py.py
@@ -0,0 +1,270 @@
+from glob import glob
+from distutils.util import convert_path
+import distutils.command.build_py as orig
+import os
+import fnmatch
+import textwrap
+import io
+import distutils.errors
+import itertools
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import map, filter, filterfalse
+
+try:
+    from setuptools.lib2to3_ex import Mixin2to3
+except ImportError:
+
+    class Mixin2to3:
+        def run_2to3(self, files, doctests=True):
+            "do nothing"
+
+
+class build_py(orig.build_py, Mixin2to3):
+    """Enhanced 'build_py' command that includes data files with packages
+
+    The data files are specified via a 'package_data' argument to 'setup()'.
+    See 'setuptools.dist.Distribution' for more details.
+
+    Also, this version of the 'build_py' command allows you to specify both
+    'py_modules' and 'packages' in the same setup operation.
+    """
+
+    def finalize_options(self):
+        orig.build_py.finalize_options(self)
+        self.package_data = self.distribution.package_data
+        self.exclude_package_data = (self.distribution.exclude_package_data or
+                                     {})
+        if 'data_files' in self.__dict__:
+            del self.__dict__['data_files']
+        self.__updated_files = []
+        self.__doctests_2to3 = []
+
+    def run(self):
+        """Build modules, packages, and copy data files to build directory"""
+        if not self.py_modules and not self.packages:
+            return
+
+        if self.py_modules:
+            self.build_modules()
+
+        if self.packages:
+            self.build_packages()
+            self.build_package_data()
+
+        self.run_2to3(self.__updated_files, False)
+        self.run_2to3(self.__updated_files, True)
+        self.run_2to3(self.__doctests_2to3, True)
+
+        # Only compile actual .py files, using our base class' idea of what our
+        # output files are.
+        self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
+
+    def __getattr__(self, attr):
+        "lazily compute data files"
+        if attr == 'data_files':
+            self.data_files = self._get_data_files()
+            return self.data_files
+        return orig.build_py.__getattr__(self, attr)
+
+    def build_module(self, module, module_file, package):
+        if six.PY2 and isinstance(package, six.string_types):
+            # avoid errors on Python 2 when unicode is passed (#190)
+            package = package.split('.')
+        outfile, copied = orig.build_py.build_module(self, module, module_file,
+                                                     package)
+        if copied:
+            self.__updated_files.append(outfile)
+        return outfile, copied
+
+    def _get_data_files(self):
+        """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
+        self.analyze_manifest()
+        return list(map(self._get_pkg_data_files, self.packages or ()))
+
+    def _get_pkg_data_files(self, package):
+        # Locate package source directory
+        src_dir = self.get_package_dir(package)
+
+        # Compute package build directory
+        build_dir = os.path.join(*([self.build_lib] + package.split('.')))
+
+        # Strip directory from globbed filenames
+        filenames = [
+            os.path.relpath(file, src_dir)
+            for file in self.find_data_files(package, src_dir)
+        ]
+        return package, src_dir, build_dir, filenames
+
+    def find_data_files(self, package, src_dir):
+        """Return filenames for package's data files in 'src_dir'"""
+        patterns = self._get_platform_patterns(
+            self.package_data,
+            package,
+            src_dir,
+        )
+        globs_expanded = map(glob, patterns)
+        # flatten the expanded globs into an iterable of matches
+        globs_matches = itertools.chain.from_iterable(globs_expanded)
+        glob_files = filter(os.path.isfile, globs_matches)
+        files = itertools.chain(
+            self.manifest_files.get(package, []),
+            glob_files,
+        )
+        return self.exclude_data_files(package, src_dir, files)
+
+    def build_package_data(self):
+        """Copy data files into build directory"""
+        for package, src_dir, build_dir, filenames in self.data_files:
+            for filename in filenames:
+                target = os.path.join(build_dir, filename)
+                self.mkpath(os.path.dirname(target))
+                srcfile = os.path.join(src_dir, filename)
+                outf, copied = self.copy_file(srcfile, target)
+                srcfile = os.path.abspath(srcfile)
+                if (copied and
+                        srcfile in self.distribution.convert_2to3_doctests):
+                    self.__doctests_2to3.append(outf)
+
+    def analyze_manifest(self):
+        self.manifest_files = mf = {}
+        if not self.distribution.include_package_data:
+            return
+        src_dirs = {}
+        for package in self.packages or ():
+            # Locate package source directory
+            src_dirs[assert_relative(self.get_package_dir(package))] = package
+
+        self.run_command('egg_info')
+        ei_cmd = self.get_finalized_command('egg_info')
+        for path in ei_cmd.filelist.files:
+            d, f = os.path.split(assert_relative(path))
+            prev = None
+            oldf = f
+            while d and d != prev and d not in src_dirs:
+                prev = d
+                d, df = os.path.split(d)
+                f = os.path.join(df, f)
+            if d in src_dirs:
+                if path.endswith('.py') and f == oldf:
+                    continue  # it's a module, not data
+                mf.setdefault(src_dirs[d], []).append(path)
+
+    def get_data_files(self):
+        pass  # Lazily compute data files in _get_data_files() function.
+
+    def check_package(self, package, package_dir):
+        """Check namespace packages' __init__ for declare_namespace"""
+        try:
+            return self.packages_checked[package]
+        except KeyError:
+            pass
+
+        init_py = orig.build_py.check_package(self, package, package_dir)
+        self.packages_checked[package] = init_py
+
+        if not init_py or not self.distribution.namespace_packages:
+            return init_py
+
+        for pkg in self.distribution.namespace_packages:
+            if pkg == package or pkg.startswith(package + '.'):
+                break
+        else:
+            return init_py
+
+        with io.open(init_py, 'rb') as f:
+            contents = f.read()
+        if b'declare_namespace' not in contents:
+            raise distutils.errors.DistutilsError(
+                "Namespace package problem: %s is a namespace package, but "
+                "its\n__init__.py does not call declare_namespace()! Please "
+                'fix it.\n(See the setuptools manual under '
+                '"Namespace Packages" for details.)\n"' % (package,)
+            )
+        return init_py
+
+    def initialize_options(self):
+        self.packages_checked = {}
+        orig.build_py.initialize_options(self)
+
+    def get_package_dir(self, package):
+        res = orig.build_py.get_package_dir(self, package)
+        if self.distribution.src_root is not None:
+            return os.path.join(self.distribution.src_root, res)
+        return res
+
+    def exclude_data_files(self, package, src_dir, files):
+        """Filter filenames for package's data files in 'src_dir'"""
+        files = list(files)
+        patterns = self._get_platform_patterns(
+            self.exclude_package_data,
+            package,
+            src_dir,
+        )
+        match_groups = (
+            fnmatch.filter(files, pattern)
+            for pattern in patterns
+        )
+        # flatten the groups of matches into an iterable of matches
+        matches = itertools.chain.from_iterable(match_groups)
+        bad = set(matches)
+        keepers = (
+            fn
+            for fn in files
+            if fn not in bad
+        )
+        # ditch dupes
+        return list(_unique_everseen(keepers))
+
+    @staticmethod
+    def _get_platform_patterns(spec, package, src_dir):
+        """
+        yield platform-specific path patterns (suitable for glob
+        or fnmatch) from a glob-based spec (such as
+        self.package_data or self.exclude_package_data)
+        matching package in src_dir.
+        """
+        raw_patterns = itertools.chain(
+            spec.get('', []),
+            spec.get(package, []),
+        )
+        return (
+            # Each pattern has to be converted to a platform-specific path
+            os.path.join(src_dir, convert_path(pattern))
+            for pattern in raw_patterns
+        )
+
+
+# from Python docs
+def _unique_everseen(iterable, key=None):
+    "List unique elements, preserving order. Remember all elements ever seen."
+    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+    # unique_everseen('ABBCcAD', str.lower) --> A B C D
+    seen = set()
+    seen_add = seen.add
+    if key is None:
+        for element in filterfalse(seen.__contains__, iterable):
+            seen_add(element)
+            yield element
+    else:
+        for element in iterable:
+            k = key(element)
+            if k not in seen:
+                seen_add(k)
+                yield element
+
+
+def assert_relative(path):
+    if not os.path.isabs(path):
+        return path
+    from distutils.errors import DistutilsSetupError
+
+    msg = textwrap.dedent("""
+        Error: setup script specifies an absolute path:
+
+            %s
+
+        setup() arguments must *always* be /-separated paths relative to the
+        setup.py directory, *never* absolute paths.
+        """).lstrip() % path
+    raise DistutilsSetupError(msg)
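_get_platform_patterns above merges the global ('') and per-package
package_data entries and rebases each glob onto the package's source
directory; find_data_files then expands those globs and unions in any
manifest-listed files. A small sketch under invented names (a package
'mypkg' living in src/mypkg):

    import os
    from distutils.util import convert_path

    package_data = {'': ['*.txt'], 'mypkg': ['data/*.json']}  # hypothetical
    src_dir = os.path.join('src', 'mypkg')                    # hypothetical

    raw = list(package_data.get('', [])) + list(package_data.get('mypkg', []))
    patterns = [os.path.join(src_dir, convert_path(p)) for p in raw]
    # on POSIX: ['src/mypkg/*.txt', 'src/mypkg/data/*.json']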
diff --git a/lib/python3.7/site-packages/setuptools/command/develop.py b/lib/python3.7/site-packages/setuptools/command/develop.py
new file mode 100644
index 0000000000000000000000000000000000000000..009e4f9368f5b29fffd160f3b712fb0cd19807bd
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/develop.py
@@ -0,0 +1,221 @@
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsError, DistutilsOptionError
+import os
+import glob
+import io
+
+from setuptools.extern import six
+
+import pkg_resources
+from setuptools.command.easy_install import easy_install
+from setuptools import namespaces
+import setuptools
+
+__metaclass__ = type
+
+
+class develop(namespaces.DevelopInstaller, easy_install):
+    """Set up package for development"""
+
+    description = "install package in 'development mode'"
+
+    user_options = easy_install.user_options + [
+        ("uninstall", "u", "Uninstall this source package"),
+        ("egg-path=", None, "Set the path to be used in the .egg-link file"),
+    ]
+
+    boolean_options = easy_install.boolean_options + ['uninstall']
+
+    command_consumes_arguments = False  # override base
+
+    def run(self):
+        if self.uninstall:
+            self.multi_version = True
+            self.uninstall_link()
+            self.uninstall_namespaces()
+        else:
+            self.install_for_development()
+        self.warn_deprecated_options()
+
+    def initialize_options(self):
+        self.uninstall = None
+        self.egg_path = None
+        easy_install.initialize_options(self)
+        self.setup_path = None
+        self.always_copy_from = '.'  # always copy eggs installed in curdir
+
+    def finalize_options(self):
+        ei = self.get_finalized_command("egg_info")
+        if ei.broken_egg_info:
+            template = "Please rename %r to %r before using 'develop'"
+            args = ei.egg_info, ei.broken_egg_info
+            raise DistutilsError(template % args)
+        self.args = [ei.egg_name]
+
+        easy_install.finalize_options(self)
+        self.expand_basedirs()
+        self.expand_dirs()
+        # pick up setup-dir .egg files only: no .egg-info
+        self.package_index.scan(glob.glob('*.egg'))
+
+        egg_link_fn = ei.egg_name + '.egg-link'
+        self.egg_link = os.path.join(self.install_dir, egg_link_fn)
+        self.egg_base = ei.egg_base
+        if self.egg_path is None:
+            self.egg_path = os.path.abspath(ei.egg_base)
+
+        target = pkg_resources.normalize_path(self.egg_base)
+        egg_path = pkg_resources.normalize_path(
+            os.path.join(self.install_dir, self.egg_path))
+        if egg_path != target:
+            raise DistutilsOptionError(
+                "--egg-path must be a relative path from the install"
+                " directory to " + target
+            )
+
+        # Make a distribution for the package's source
+        self.dist = pkg_resources.Distribution(
+            target,
+            pkg_resources.PathMetadata(target, os.path.abspath(ei.egg_info)),
+            project_name=ei.egg_name
+        )
+
+        self.setup_path = self._resolve_setup_path(
+            self.egg_base,
+            self.install_dir,
+            self.egg_path,
+        )
+
+    @staticmethod
+    def _resolve_setup_path(egg_base, install_dir, egg_path):
+        """
+        Generate a relative path from egg_base back to '.', where the
+        setup script resides, and ensure that the path, applied from
+        $install_dir/$egg_path, resolves back to the setup directory.
+        """
+        path_to_setup = egg_base.replace(os.sep, '/').rstrip('/')
+        if path_to_setup != os.curdir:
+            path_to_setup = '../' * (path_to_setup.count('/') + 1)
+        resolved = pkg_resources.normalize_path(
+            os.path.join(install_dir, egg_path, path_to_setup)
+        )
+        if resolved != pkg_resources.normalize_path(os.curdir):
+            raise DistutilsOptionError(
+                "Can't get a consistent path to setup script from"
+                " installation directory", resolved,
+                pkg_resources.normalize_path(os.curdir))
+        return path_to_setup
+
+    def install_for_development(self):
+        if six.PY3 and getattr(self.distribution, 'use_2to3', False):
+            # If we run 2to3 we can not do this inplace:
+
+            # Ensure metadata is up-to-date
+            self.reinitialize_command('build_py', inplace=0)
+            self.run_command('build_py')
+            bpy_cmd = self.get_finalized_command("build_py")
+            build_path = pkg_resources.normalize_path(bpy_cmd.build_lib)
+
+            # Build extensions
+            self.reinitialize_command('egg_info', egg_base=build_path)
+            self.run_command('egg_info')
+
+            self.reinitialize_command('build_ext', inplace=0)
+            self.run_command('build_ext')
+
+            # Fixup egg-link and easy-install.pth
+            ei_cmd = self.get_finalized_command("egg_info")
+            self.egg_path = build_path
+            self.dist.location = build_path
+            # XXX
+            self.dist._provider = pkg_resources.PathMetadata(
+                build_path, ei_cmd.egg_info)
+        else:
+            # Without 2to3 inplace works fine:
+            self.run_command('egg_info')
+
+            # Build extensions in-place
+            self.reinitialize_command('build_ext', inplace=1)
+            self.run_command('build_ext')
+
+        self.install_site_py()  # ensure that target dir is site-safe
+        if setuptools.bootstrap_install_from:
+            self.easy_install(setuptools.bootstrap_install_from)
+            setuptools.bootstrap_install_from = None
+
+        self.install_namespaces()
+
+        # create an .egg-link in the installation dir, pointing to our egg
+        log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
+        if not self.dry_run:
+            with open(self.egg_link, "w") as f:
+                f.write(self.egg_path + "\n" + self.setup_path)
+        # postprocess the installed distro, fixing up .pth, installing scripts,
+        # and handling requirements
+        self.process_distribution(None, self.dist, not self.no_deps)
+
+    def uninstall_link(self):
+        if os.path.exists(self.egg_link):
+            log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
+            egg_link_file = open(self.egg_link)
+            contents = [line.rstrip() for line in egg_link_file]
+            egg_link_file.close()
+            if contents not in ([self.egg_path],
+                                [self.egg_path, self.setup_path]):
+                log.warn("Link points to %s: uninstall aborted", contents)
+                return
+            if not self.dry_run:
+                os.unlink(self.egg_link)
+        if not self.dry_run:
+            self.update_pth(self.dist)  # remove any .pth link to us
+        if self.distribution.scripts:
+            # XXX should also check for entry point scripts!
+            log.warn("Note: you must uninstall or replace scripts manually!")
+
+    def install_egg_scripts(self, dist):
+        if dist is not self.dist:
+            # Installing a dependency, so fall back to normal behavior
+            return easy_install.install_egg_scripts(self, dist)
+
+        # create wrapper scripts in the script dir, pointing to dist.scripts
+
+        # new-style...
+        self.install_wrapper_scripts(dist)
+
+        # ...and old-style
+        for script_name in self.distribution.scripts or []:
+            script_path = os.path.abspath(convert_path(script_name))
+            script_name = os.path.basename(script_path)
+            with io.open(script_path) as strm:
+                script_text = strm.read()
+            self.install_script(dist, script_name, script_text, script_path)
+
+    def install_wrapper_scripts(self, dist):
+        dist = VersionlessRequirement(dist)
+        return easy_install.install_wrapper_scripts(self, dist)
+
+
+class VersionlessRequirement:
+    """
+    Adapt a pkg_resources.Distribution to simply return the project
+    name as the 'requirement' so that scripts will work across
+    multiple versions.
+
+    >>> from pkg_resources import Distribution
+    >>> dist = Distribution(project_name='foo', version='1.0')
+    >>> str(dist.as_requirement())
+    'foo==1.0'
+    >>> adapted_dist = VersionlessRequirement(dist)
+    >>> str(adapted_dist.as_requirement())
+    'foo'
+    """
+
+    def __init__(self, dist):
+        self.__dist = dist
+
+    def __getattr__(self, name):
+        return getattr(self.__dist, name)
+
+    def as_requirement(self):
+        return self.project_name
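For orientation, the .egg-link file that install_for_development writes
(and uninstall_link later validates) is just two lines: the egg path and
the relative setup path. A sketch with made-up paths:

    # both paths are illustrative
    egg_path = '/home/user/project'   # directory containing the .egg-info
    setup_path = '.'                  # path from there back to setup.py

    with open('Example.egg-link', 'w') as f:
        f.write(egg_path + '\n' + setup_path)
    # uninstall_link() accepts either [egg_path] or [egg_path, setup_path]
    # as the stripped lines of this file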
diff --git a/lib/python3.7/site-packages/setuptools/command/dist_info.py b/lib/python3.7/site-packages/setuptools/command/dist_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..c45258fa03a3ddd6a73db4514365f8741d16ca86
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/dist_info.py
@@ -0,0 +1,36 @@
+"""
+Create a .dist-info directory,
+as defined in the wheel specification.
+"""
+
+import os
+
+from distutils.core import Command
+from distutils import log
+
+
+class dist_info(Command):
+
+    description = 'create a .dist-info directory'
+
+    user_options = [
+        ('egg-base=', 'e', "directory containing .egg-info directories"
+                           " (default: top of the source tree)"),
+    ]
+
+    def initialize_options(self):
+        self.egg_base = None
+
+    def finalize_options(self):
+        pass
+
+    def run(self):
+        egg_info = self.get_finalized_command('egg_info')
+        egg_info.egg_base = self.egg_base
+        egg_info.finalize_options()
+        egg_info.run()
+        dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info'
+        log.info("creating '{}'".format(os.path.abspath(dist_info_dir)))
+
+        bdist_wheel = self.get_finalized_command('bdist_wheel')
+        bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir)
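The directory name handed to bdist_wheel.egg2dist is derived purely by
swapping suffixes on the egg-info directory; a one-line illustration with a
hypothetical project name:

    egg_info_dir = 'example-1.0.egg-info'  # hypothetical egg_info.egg_info
    dist_info_dir = egg_info_dir[:-len('.egg-info')] + '.dist-info'
    assert dist_info_dir == 'example-1.0.dist-info'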
diff --git a/lib/python3.7/site-packages/setuptools/command/easy_install.py b/lib/python3.7/site-packages/setuptools/command/easy_install.py
new file mode 100644
index 0000000000000000000000000000000000000000..06c98271ba0b2cfd1e9c5a4e5a2590a6b52b382b
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/easy_install.py
@@ -0,0 +1,2342 @@
+#!/usr/bin/env python
+"""
+Easy Install
+------------
+
+A tool for doing automatic download/extract/build of distutils-based Python
+packages.  For detailed documentation, see the accompanying EasyInstall.txt
+file, or visit the `EasyInstall home page`__.
+
+__ https://setuptools.readthedocs.io/en/latest/easy_install.html
+
+"""
+
+from glob import glob
+from distutils.util import get_platform
+from distutils.util import convert_path, subst_vars
+from distutils.errors import (
+    DistutilsArgError, DistutilsOptionError,
+    DistutilsError, DistutilsPlatformError,
+)
+from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
+from distutils import log, dir_util
+from distutils.command.build_scripts import first_line_re
+from distutils.spawn import find_executable
+import sys
+import os
+import zipimport
+import shutil
+import tempfile
+import zipfile
+import re
+import stat
+import random
+import textwrap
+import warnings
+import site
+import struct
+import contextlib
+import subprocess
+import shlex
+import io
+
+
+from sysconfig import get_config_vars, get_path
+
+from setuptools import SetuptoolsDeprecationWarning
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import configparser, map
+
+from setuptools import Command
+from setuptools.sandbox import run_setup
+from setuptools.py27compat import rmtree_safe
+from setuptools.command import setopt
+from setuptools.archive_util import unpack_archive
+from setuptools.package_index import (
+    PackageIndex, parse_requirement_arg, URL_SCHEME,
+)
+from setuptools.command import bdist_egg, egg_info
+from setuptools.wheel import Wheel
+from pkg_resources import (
+    yield_lines, normalize_path, resource_string, ensure_directory,
+    get_distribution, find_distributions, Environment, Requirement,
+    Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
+    VersionConflict, DEVELOP_DIST,
+)
+import pkg_resources.py31compat
+
+__metaclass__ = type
+
+# Turn on PEP440Warnings
+warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
+
+__all__ = [
+    'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
+    'main', 'get_exe_prefixes',
+]
+
+
+def is_64bit():
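+    # struct.calcsize("P") is the size in bytes of a C pointer: 8 on a
+    # 64-bit interpreter, 4 on a 32-bit one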
+    return struct.calcsize("P") == 8
+
+
+def samefile(p1, p2):
+    """
+    Determine if two paths reference the same file.
+
+    Augments os.path.samefile to work on Windows and
+    suppresses errors if the path doesn't exist.
+    """
+    both_exist = os.path.exists(p1) and os.path.exists(p2)
+    use_samefile = hasattr(os.path, 'samefile') and both_exist
+    if use_samefile:
+        return os.path.samefile(p1, p2)
+    norm_p1 = os.path.normpath(os.path.normcase(p1))
+    norm_p2 = os.path.normpath(os.path.normcase(p2))
+    return norm_p1 == norm_p2
+
+
+if six.PY2:
+
+    def _to_bytes(s):
+        return s
+
+    def isascii(s):
+        try:
+            six.text_type(s, 'ascii')
+            return True
+        except UnicodeError:
+            return False
+else:
+
+    def _to_bytes(s):
+        return s.encode('utf8')
+
+    def isascii(s):
+        try:
+            s.encode('ascii')
+            return True
+        except UnicodeError:
+            return False
+
+
+_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
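+# e.g. (illustrative):
+# _one_liner("""
+#     import os
+#     print(os.getcwd())
+#     """) == 'import os; print(os.getcwd())'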
+
+
+class easy_install(Command):
+    """Manage a download/build/install process"""
+    description = "Find/get/install Python packages"
+    command_consumes_arguments = True
+
+    user_options = [
+        ('prefix=', None, "installation prefix"),
+        ("zip-ok", "z", "install package as a zipfile"),
+        ("multi-version", "m", "make apps have to require() a version"),
+        ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
+        ("install-dir=", "d", "install package to DIR"),
+        ("script-dir=", "s", "install scripts to DIR"),
+        ("exclude-scripts", "x", "Don't install scripts"),
+        ("always-copy", "a", "Copy all needed packages to install dir"),
+        ("index-url=", "i", "base URL of Python Package Index"),
+        ("find-links=", "f", "additional URL(s) to search for packages"),
+        ("build-directory=", "b",
+         "download/extract/build in DIR; keep the results"),
+        ('optimize=', 'O',
+         "also compile with optimization: -O1 for \"python -O\", "
+         "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+        ('record=', None,
+         "filename in which to record list of installed files"),
+        ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
+        ('site-dirs=', 'S', "list of directories where .pth files work"),
+        ('editable', 'e', "Install specified packages in editable form"),
+        ('no-deps', 'N', "don't install dependencies"),
+        ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
+        ('local-snapshots-ok', 'l',
+         "allow building eggs from local checkouts"),
+        ('version', None, "print version information and exit"),
+        ('no-find-links', None,
+         "Don't load find-links defined in packages being installed")
+    ]
+    boolean_options = [
+        'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
+        'editable',
+        'no-deps', 'local-snapshots-ok', 'version'
+    ]
+
+    if site.ENABLE_USER_SITE:
+        help_msg = "install in user site-package '%s'" % site.USER_SITE
+        user_options.append(('user', None, help_msg))
+        boolean_options.append('user')
+
+    negative_opt = {'always-unzip': 'zip-ok'}
+    create_index = PackageIndex
+
+    def initialize_options(self):
+        # the --user option seems to be an opt-in one,
+        # so the default should be False.
+        self.user = 0
+        self.zip_ok = self.local_snapshots_ok = None
+        self.install_dir = self.script_dir = self.exclude_scripts = None
+        self.index_url = None
+        self.find_links = None
+        self.build_directory = None
+        self.args = None
+        self.optimize = self.record = None
+        self.upgrade = self.always_copy = self.multi_version = None
+        self.editable = self.no_deps = self.allow_hosts = None
+        self.root = self.prefix = self.no_report = None
+        self.version = None
+        self.install_purelib = None  # for pure module distributions
+        self.install_platlib = None  # non-pure (dists w/ extensions)
+        self.install_headers = None  # for C/C++ headers
+        self.install_lib = None  # set to either purelib or platlib
+        self.install_scripts = None
+        self.install_data = None
+        self.install_base = None
+        self.install_platbase = None
+        if site.ENABLE_USER_SITE:
+            self.install_userbase = site.USER_BASE
+            self.install_usersite = site.USER_SITE
+        else:
+            self.install_userbase = None
+            self.install_usersite = None
+        self.no_find_links = None
+
+        # Options not specifiable via command line
+        self.package_index = None
+        self.pth_file = self.always_copy_from = None
+        self.site_dirs = None
+        self.installed_projects = {}
+        self.sitepy_installed = False
+        # Always read easy_install options, even if we are subclassed, or have
+        # an independent instance created.  This ensures that defaults will
+        # always come from the standard configuration file(s)' "easy_install"
+        # section, even if this is a "develop" or "install" command, or some
+        # other embedding.
+        self._dry_run = None
+        self.verbose = self.distribution.verbose
+        self.distribution._set_command_options(
+            self, self.distribution.get_option_dict('easy_install')
+        )
+
+    def delete_blockers(self, blockers):
+        extant_blockers = (
+            filename for filename in blockers
+            if os.path.exists(filename) or os.path.islink(filename)
+        )
+        list(map(self._delete_path, extant_blockers))
+
+    def _delete_path(self, path):
+        log.info("Deleting %s", path)
+        if self.dry_run:
+            return
+
+        is_tree = os.path.isdir(path) and not os.path.islink(path)
+        remover = rmtree if is_tree else os.unlink
+        remover(path)
+
+    @staticmethod
+    def _render_version():
+        """
+        Render the Setuptools version and installation details, then exit.
+        """
+        ver = sys.version[:3]
+        dist = get_distribution('setuptools')
+        tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
+        print(tmpl.format(**locals()))
+        raise SystemExit()
+
+    def finalize_options(self):
+        self.version and self._render_version()
+
+        py_version = sys.version.split()[0]
+        prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
+
+        self.config_vars = {
+            'dist_name': self.distribution.get_name(),
+            'dist_version': self.distribution.get_version(),
+            'dist_fullname': self.distribution.get_fullname(),
+            'py_version': py_version,
+            'py_version_short': py_version[0:3],
+            'py_version_nodot': py_version[0] + py_version[2],
+            'sys_prefix': prefix,
+            'prefix': prefix,
+            'sys_exec_prefix': exec_prefix,
+            'exec_prefix': exec_prefix,
+            # Only python 3.2+ has abiflags
+            'abiflags': getattr(sys, 'abiflags', ''),
+        }
+
+        if site.ENABLE_USER_SITE:
+            self.config_vars['userbase'] = self.install_userbase
+            self.config_vars['usersite'] = self.install_usersite
+
+        self._fix_install_dir_for_user_site()
+
+        self.expand_basedirs()
+        self.expand_dirs()
+
+        self._expand(
+            'install_dir', 'script_dir', 'build_directory',
+            'site_dirs',
+        )
+        # If a non-default installation directory was specified, default the
+        # script directory to match it.
+        if self.script_dir is None:
+            self.script_dir = self.install_dir
+
+        if self.no_find_links is None:
+            self.no_find_links = False
+
+        # Let install_dir get set by install_lib command, which in turn
+        # gets its info from the install command, and takes into account
+        # --prefix and --home and all that other crud.
+        self.set_undefined_options(
+            'install_lib', ('install_dir', 'install_dir')
+        )
+        # Likewise, set default script_dir from 'install_scripts.install_dir'
+        self.set_undefined_options(
+            'install_scripts', ('install_dir', 'script_dir')
+        )
+
+        if self.user and self.install_purelib:
+            self.install_dir = self.install_purelib
+            self.script_dir = self.install_scripts
+        # default --record from the install command
+        self.set_undefined_options('install', ('record', 'record'))
+        # Should this be moved to the if statement below? It's not used
+        # elsewhere
+        # materialized with set() so the repeated membership tests below
+        # don't exhaust a bare map() iterator
+        normpath = set(map(normalize_path, sys.path))
+        self.all_site_dirs = get_site_dirs()
+        if self.site_dirs is not None:
+            site_dirs = [
+                os.path.expanduser(s.strip()) for s in
+                self.site_dirs.split(',')
+            ]
+            for d in site_dirs:
+                if not os.path.isdir(d):
+                    log.warn("%s (in --site-dirs) does not exist", d)
+                elif normalize_path(d) not in normpath:
+                    raise DistutilsOptionError(
+                        d + " (in --site-dirs) is not on sys.path"
+                    )
+                else:
+                    self.all_site_dirs.append(normalize_path(d))
+        if not self.editable:
+            self.check_site_dir()
+        self.index_url = self.index_url or "https://pypi.org/simple/"
+        self.shadow_path = self.all_site_dirs[:]
+        for path_item in self.install_dir, normalize_path(self.script_dir):
+            if path_item not in self.shadow_path:
+                self.shadow_path.insert(0, path_item)
+
+        if self.allow_hosts is not None:
+            hosts = [s.strip() for s in self.allow_hosts.split(',')]
+        else:
+            hosts = ['*']
+        if self.package_index is None:
+            self.package_index = self.create_index(
+                self.index_url, search_path=self.shadow_path, hosts=hosts,
+            )
+        self.local_index = Environment(self.shadow_path + sys.path)
+
+        if self.find_links is not None:
+            if isinstance(self.find_links, six.string_types):
+                self.find_links = self.find_links.split()
+        else:
+            self.find_links = []
+        if self.local_snapshots_ok:
+            self.package_index.scan_egg_links(self.shadow_path + sys.path)
+        if not self.no_find_links:
+            self.package_index.add_find_links(self.find_links)
+        self.set_undefined_options('install_lib', ('optimize', 'optimize'))
+        if not isinstance(self.optimize, int):
+            try:
+                self.optimize = int(self.optimize)
+                if not (0 <= self.optimize <= 2):
+                    raise ValueError
+            except ValueError:
+                raise DistutilsOptionError("--optimize must be 0, 1, or 2")
+
+        if self.editable and not self.build_directory:
+            raise DistutilsArgError(
+                "Must specify a build directory (-b) when using --editable"
+            )
+        if not self.args:
+            raise DistutilsArgError(
+                "No urls, filenames, or requirements specified (see --help)")
+
+        self.outputs = []
+
+    def _fix_install_dir_for_user_site(self):
+        """
+        Fix the install_dir if "--user" was used.
+        """
+        if not self.user or not site.ENABLE_USER_SITE:
+            return
+
+        self.create_home_path()
+        if self.install_userbase is None:
+            msg = "User base directory is not specified"
+            raise DistutilsPlatformError(msg)
+        self.install_base = self.install_platbase = self.install_userbase
+        scheme_name = os.name.replace('posix', 'unix') + '_user'
+        self.select_scheme(scheme_name)
+
+    def _expand_attrs(self, attrs):
+        for attr in attrs:
+            val = getattr(self, attr)
+            if val is not None:
+                if os.name == 'posix' or os.name == 'nt':
+                    val = os.path.expanduser(val)
+                val = subst_vars(val, self.config_vars)
+                setattr(self, attr, val)
+
+    def expand_basedirs(self):
+        """Calls `os.path.expanduser` on install_base, install_platbase and
+        root."""
+        self._expand_attrs(['install_base', 'install_platbase', 'root'])
+
+    def expand_dirs(self):
+        """Calls `os.path.expanduser` on install dirs."""
+        dirs = [
+            'install_purelib',
+            'install_platlib',
+            'install_lib',
+            'install_headers',
+            'install_scripts',
+            'install_data',
+        ]
+        self._expand_attrs(dirs)
+
+    def run(self):
+        if self.verbose != self.distribution.verbose:
+            log.set_verbosity(self.verbose)
+        try:
+            for spec in self.args:
+                self.easy_install(spec, not self.no_deps)
+            if self.record:
+                outputs = self.outputs
+                if self.root:  # strip any package prefix
+                    root_len = len(self.root)
+                    for counter in range(len(outputs)):
+                        outputs[counter] = outputs[counter][root_len:]
+                from distutils import file_util
+
+                self.execute(
+                    file_util.write_file, (self.record, outputs),
+                    "writing list of installed files to '%s'" %
+                    self.record
+                )
+            self.warn_deprecated_options()
+        finally:
+            log.set_verbosity(self.distribution.verbose)
+
+    def pseudo_tempname(self):
+        """Return a pseudo-tempname base in the install directory.
+        This code is intentionally naive; if a malicious party can write to
+        the target directory you're already in deep doodoo.
+        """
+        try:
+            pid = os.getpid()
+        except Exception:
+            pid = random.randint(0, sys.maxsize)
+        return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
+
+    def warn_deprecated_options(self):
+        pass
+
+    def check_site_dir(self):
+        """Verify that self.install_dir is .pth-capable dir, if needed"""
+
+        instdir = normalize_path(self.install_dir)
+        pth_file = os.path.join(instdir, 'easy-install.pth')
+
+        # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
+        is_site_dir = instdir in self.all_site_dirs
+
+        if not is_site_dir and not self.multi_version:
+            # No?  Then directly test whether it does .pth file processing
+            is_site_dir = self.check_pth_processing()
+        else:
+            # make sure we can write to target dir
+            testfile = self.pseudo_tempname() + '.write-test'
+            test_exists = os.path.exists(testfile)
+            try:
+                if test_exists:
+                    os.unlink(testfile)
+                open(testfile, 'w').close()
+                os.unlink(testfile)
+            except (OSError, IOError):
+                self.cant_write_to_target()
+
+        if not is_site_dir and not self.multi_version:
+            # Can't install non-multi to non-site dir
+            raise DistutilsError(self.no_default_version_msg())
+
+        if is_site_dir:
+            if self.pth_file is None:
+                self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
+        else:
+            self.pth_file = None
+
+        if instdir not in map(normalize_path, _pythonpath()):
+            # only PYTHONPATH dirs need a site.py, so pretend it's there
+            self.sitepy_installed = True
+        elif self.multi_version and not os.path.exists(pth_file):
+            self.sitepy_installed = True  # don't need site.py in this case
+            self.pth_file = None  # and don't create a .pth file
+        self.install_dir = instdir
+
+    __cant_write_msg = textwrap.dedent("""
+        can't create or remove files in install directory
+
+        The following error occurred while trying to add or remove files in the
+        installation directory:
+
+            %s
+
+        The installation directory you specified (via --install-dir, --prefix, or
+        the distutils default setting) was:
+
+            %s
+        """).lstrip()
+
+    __not_exists_id = textwrap.dedent("""
+        This directory does not currently exist.  Please create it and try again, or
+        choose a different installation directory (using the -d or --install-dir
+        option).
+        """).lstrip()
+
+    __access_msg = textwrap.dedent("""
+        Perhaps your account does not have write access to this directory?  If the
+        installation directory is a system-owned directory, you may need to sign in
+        as the administrator or "root" account.  If you do not have administrative
+        access to this machine, you may wish to choose a different installation
+        directory, preferably one that is listed in your PYTHONPATH environment
+        variable.
+
+        For information on other options, you may wish to consult the
+        documentation at:
+
+          https://setuptools.readthedocs.io/en/latest/easy_install.html
+
+        Please make the appropriate changes for your system and try again.
+        """).lstrip()
+
+    def cant_write_to_target(self):
+        msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
+
+        if not os.path.exists(self.install_dir):
+            msg += '\n' + self.__not_exists_id
+        else:
+            msg += '\n' + self.__access_msg
+        raise DistutilsError(msg)
+
+    def check_pth_processing(self):
+        """Empirically verify whether .pth files are supported in inst. dir"""
+        instdir = self.install_dir
+        log.info("Checking .pth file support in %s", instdir)
+        pth_file = self.pseudo_tempname() + ".pth"
+        ok_file = pth_file + '.ok'
+        ok_exists = os.path.exists(ok_file)
+        tmpl = _one_liner("""
+            import os
+            f = open({ok_file!r}, 'w')
+            f.write('OK')
+            f.close()
+            """) + '\n'
+        try:
+            if ok_exists:
+                os.unlink(ok_file)
+            dirname = os.path.dirname(ok_file)
+            pkg_resources.py31compat.makedirs(dirname, exist_ok=True)
+            f = open(pth_file, 'w')
+        except (OSError, IOError):
+            self.cant_write_to_target()
+        else:
+            try:
+                f.write(tmpl.format(**locals()))
+                f.close()
+                f = None
+                executable = sys.executable
+                if os.name == 'nt':
+                    dirname, basename = os.path.split(executable)
+                    alt = os.path.join(dirname, 'pythonw.exe')
+                    use_alt = (
+                        basename.lower() == 'python.exe' and
+                        os.path.exists(alt)
+                    )
+                    if use_alt:
+                        # use pythonw.exe to avoid opening a console window
+                        executable = alt
+
+                from distutils.spawn import spawn
+
+                spawn([executable, '-E', '-c', 'pass'], 0)
+
+                if os.path.exists(ok_file):
+                    log.info(
+                        "TEST PASSED: %s appears to support .pth files",
+                        instdir
+                    )
+                    return True
+            finally:
+                if f:
+                    f.close()
+                if os.path.exists(ok_file):
+                    os.unlink(ok_file)
+                if os.path.exists(pth_file):
+                    os.unlink(pth_file)
+        if not self.multi_version:
+            log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
+        return False
+
+    def install_egg_scripts(self, dist):
+        """Write all the scripts for `dist`, unless scripts are excluded"""
+        if not self.exclude_scripts and dist.metadata_isdir('scripts'):
+            for script_name in dist.metadata_listdir('scripts'):
+                if dist.metadata_isdir('scripts/' + script_name):
+                    # The "script" is a directory, likely a Python 3
+                    # __pycache__ directory, so skip it.
+                    continue
+                self.install_script(
+                    dist, script_name,
+                    dist.get_metadata('scripts/' + script_name)
+                )
+        self.install_wrapper_scripts(dist)
+
+    def add_output(self, path):
+        if os.path.isdir(path):
+            for base, dirs, files in os.walk(path):
+                for filename in files:
+                    self.outputs.append(os.path.join(base, filename))
+        else:
+            self.outputs.append(path)
+
+    def not_editable(self, spec):
+        if self.editable:
+            raise DistutilsArgError(
+                "Invalid argument %r: you can't use filenames or URLs "
+                "with --editable (except via the --find-links option)."
+                % (spec,)
+            )
+
+    def check_editable(self, spec):
+        if not self.editable:
+            return
+
+        if os.path.exists(os.path.join(self.build_directory, spec.key)):
+            raise DistutilsArgError(
+                "%r already exists in %s; can't do a checkout there" %
+                (spec.key, self.build_directory)
+            )
+
+    @contextlib.contextmanager
+    def _tmpdir(self):
+        tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
+        try:
+            # cast to str as workaround for #709 and #710 and #712
+            yield str(tmpdir)
+        finally:
+            os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir))
+
+    def easy_install(self, spec, deps=False):
+        if not self.editable:
+            self.install_site_py()
+
+        with self._tmpdir() as tmpdir:
+            if not isinstance(spec, Requirement):
+                if URL_SCHEME(spec):
+                    # It's a url, download it to tmpdir and process
+                    self.not_editable(spec)
+                    dl = self.package_index.download(spec, tmpdir)
+                    return self.install_item(None, dl, tmpdir, deps, True)
+
+                elif os.path.exists(spec):
+                    # Existing file or directory, just process it directly
+                    self.not_editable(spec)
+                    return self.install_item(None, spec, tmpdir, deps, True)
+                else:
+                    spec = parse_requirement_arg(spec)
+
+            self.check_editable(spec)
+            dist = self.package_index.fetch_distribution(
+                spec, tmpdir, self.upgrade, self.editable,
+                not self.always_copy, self.local_index
+            )
+            if dist is None:
+                msg = "Could not find suitable distribution for %r" % spec
+                if self.always_copy:
+                    msg += " (--always-copy skips system and development eggs)"
+                raise DistutilsError(msg)
+            elif dist.precedence == DEVELOP_DIST:
+                # .egg-info dists don't need installing, just process deps
+                self.process_distribution(spec, dist, deps, "Using")
+                return dist
+            else:
+                return self.install_item(spec, dist.location, tmpdir, deps)
+
+    def install_item(self, spec, download, tmpdir, deps, install_needed=False):
+
+        # Installation is also needed if the file is in tmpdir or isn't an egg
+        install_needed = install_needed or self.always_copy
+        install_needed = install_needed or os.path.dirname(download) == tmpdir
+        install_needed = install_needed or not download.endswith('.egg')
+        install_needed = install_needed or (
+            self.always_copy_from is not None and
+            os.path.dirname(normalize_path(download)) ==
+            normalize_path(self.always_copy_from)
+        )
+
+        if spec and not install_needed:
+            # At this point we know it's a local .egg; we just don't know
+            # whether it's already installed.
+            for dist in self.local_index[spec.project_name]:
+                if dist.location == download:
+                    break
+            else:
+                install_needed = True  # it's not in the local index
+
+        log.info("Processing %s", os.path.basename(download))
+
+        if install_needed:
+            dists = self.install_eggs(spec, download, tmpdir)
+            for dist in dists:
+                self.process_distribution(spec, dist, deps)
+        else:
+            dists = [self.egg_distribution(download)]
+            self.process_distribution(spec, dists[0], deps, "Using")
+
+        if spec is not None:
+            for dist in dists:
+                if dist in spec:
+                    return dist
+
+    def select_scheme(self, name):
+        """Sets the install directories by applying the install schemes."""
+        # it's the caller's problem if they supply a bad name!
+        scheme = INSTALL_SCHEMES[name]
+        for key in SCHEME_KEYS:
+            attrname = 'install_' + key
+            if getattr(self, attrname) is None:
+                setattr(self, attrname, scheme[key])
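+
+    # A sketch of the effect (hypothetical scheme values; the real tables and
+    # SCHEME_KEYS come from distutils.command.install):
+    #
+    #   scheme = {'purelib': '$base/lib/site-packages',
+    #             'scripts': '$base/bin'}
+    #   for key in ('purelib', 'scripts'):
+    #       if getattr(cmd, 'install_' + key) is None:
+    #           setattr(cmd, 'install_' + key, scheme[key])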
+
+    def process_distribution(self, requirement, dist, deps=True, *info):
+        self.update_pth(dist)
+        self.package_index.add(dist)
+        if dist in self.local_index[dist.key]:
+            self.local_index.remove(dist)
+        self.local_index.add(dist)
+        self.install_egg_scripts(dist)
+        self.installed_projects[dist.key] = dist
+        log.info(self.installation_report(requirement, dist, *info))
+        if (dist.has_metadata('dependency_links.txt') and
+                not self.no_find_links):
+            self.package_index.add_find_links(
+                dist.get_metadata_lines('dependency_links.txt')
+            )
+        if not deps and not self.always_copy:
+            return
+        elif requirement is not None and dist.key != requirement.key:
+            log.warn("Skipping dependencies for %s", dist)
+            return  # XXX this is not the distribution we were looking for
+        elif requirement is None or dist not in requirement:
+            # if we wound up with a different version, resolve what we've got
+            distreq = dist.as_requirement()
+            requirement = Requirement(str(distreq))
+        log.info("Processing dependencies for %s", requirement)
+        try:
+            distros = WorkingSet([]).resolve(
+                [requirement], self.local_index, self.easy_install
+            )
+        except DistributionNotFound as e:
+            raise DistutilsError(str(e))
+        except VersionConflict as e:
+            raise DistutilsError(e.report())
+        if self.always_copy or self.always_copy_from:
+            # Force all the relevant distros to be copied or activated
+            for dist in distros:
+                if dist.key not in self.installed_projects:
+                    self.easy_install(dist.as_requirement())
+        log.info("Finished processing dependencies for %s", requirement)
+
+    def should_unzip(self, dist):
+        if self.zip_ok is not None:
+            return not self.zip_ok
+        if dist.has_metadata('not-zip-safe'):
+            return True
+        if not dist.has_metadata('zip-safe'):
+            return True
+        return False
+
+    def maybe_move(self, spec, dist_filename, setup_base):
+        dst = os.path.join(self.build_directory, spec.key)
+        if os.path.exists(dst):
+            msg = (
+                "%r already exists in %s; build directory %s will not be kept"
+            )
+            log.warn(msg, spec.key, self.build_directory, setup_base)
+            return setup_base
+        if os.path.isdir(dist_filename):
+            setup_base = dist_filename
+        else:
+            if os.path.dirname(dist_filename) == setup_base:
+                os.unlink(dist_filename)  # get it out of the tmp dir
+            contents = os.listdir(setup_base)
+            if len(contents) == 1:
+                dist_filename = os.path.join(setup_base, contents[0])
+                if os.path.isdir(dist_filename):
+                    # if the only thing there is a directory, move it instead
+                    setup_base = dist_filename
+        ensure_directory(dst)
+        shutil.move(setup_base, dst)
+        return dst
+
+    def install_wrapper_scripts(self, dist):
+        if self.exclude_scripts:
+            return
+        for args in ScriptWriter.best().get_args(dist):
+            self.write_script(*args)
+
+    def install_script(self, dist, script_name, script_text, dev_path=None):
+        """Generate a legacy script wrapper and install it"""
+        spec = str(dist.as_requirement())
+        is_script = is_python_script(script_text, script_name)
+
+        if is_script:
+            body = self._load_template(dev_path) % locals()
+            script_text = ScriptWriter.get_header(script_text) + body
+        self.write_script(script_name, _to_bytes(script_text), 'b')
+
+    @staticmethod
+    def _load_template(dev_path):
+        """
+        There are a couple of template scripts in the package. This
+        function loads one of them and prepares it for use.
+        """
+        # See https://github.com/pypa/setuptools/issues/134 for info
+        # on script file naming and downstream issues with SVR4
+        name = 'script.tmpl'
+        if dev_path:
+            name = name.replace('.tmpl', ' (dev).tmpl')
+
+        raw_bytes = resource_string('setuptools', name)
+        return raw_bytes.decode('utf-8')
+
+    def write_script(self, script_name, contents, mode="t", blockers=()):
+        """Write an executable file to the scripts directory"""
+        self.delete_blockers(  # clean up old .py/.pyw w/o a script
+            [os.path.join(self.script_dir, x) for x in blockers]
+        )
+        log.info("Installing %s script to %s", script_name, self.script_dir)
+        target = os.path.join(self.script_dir, script_name)
+        self.add_output(target)
+
+        if self.dry_run:
+            return
+
+        mask = current_umask()
+        ensure_directory(target)
+        if os.path.exists(target):
+            os.unlink(target)
+        with open(target, "w" + mode) as f:
+            f.write(contents)
+        chmod(target, 0o777 - mask)
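+        # e.g. with a typical umask of 0o022, the resulting mode is
+        # 0o777 - 0o022 == 0o755 (rwxr-xr-x)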
+
+    def install_eggs(self, spec, dist_filename, tmpdir):
+        # .egg dirs or files are already built, so just return them
+        if dist_filename.lower().endswith('.egg'):
+            return [self.install_egg(dist_filename, tmpdir)]
+        elif dist_filename.lower().endswith('.exe'):
+            return [self.install_exe(dist_filename, tmpdir)]
+        elif dist_filename.lower().endswith('.whl'):
+            return [self.install_wheel(dist_filename, tmpdir)]
+
+        # Anything else, try to extract and build
+        setup_base = tmpdir
+        if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
+            unpack_archive(dist_filename, tmpdir, self.unpack_progress)
+        elif os.path.isdir(dist_filename):
+            setup_base = os.path.abspath(dist_filename)
+
+        if (setup_base.startswith(tmpdir)  # something we downloaded
+                and self.build_directory and spec is not None):
+            setup_base = self.maybe_move(spec, dist_filename, setup_base)
+
+        # Find the setup.py file
+        setup_script = os.path.join(setup_base, 'setup.py')
+
+        if not os.path.exists(setup_script):
+            setups = glob(os.path.join(setup_base, '*', 'setup.py'))
+            if not setups:
+                raise DistutilsError(
+                    "Couldn't find a setup script in %s" %
+                    os.path.abspath(dist_filename)
+                )
+            if len(setups) > 1:
+                raise DistutilsError(
+                    "Multiple setup scripts in %s" %
+                    os.path.abspath(dist_filename)
+                )
+            setup_script = setups[0]
+
+        # Now run it, and return the result
+        if self.editable:
+            log.info(self.report_editable(spec, setup_script))
+            return []
+        else:
+            return self.build_and_install(setup_script, setup_base)
+
+    def egg_distribution(self, egg_path):
+        if os.path.isdir(egg_path):
+            metadata = PathMetadata(egg_path, os.path.join(egg_path,
+                                                           'EGG-INFO'))
+        else:
+            metadata = EggMetadata(zipimport.zipimporter(egg_path))
+        return Distribution.from_filename(egg_path, metadata=metadata)
+
+    def install_egg(self, egg_path, tmpdir):
+        destination = os.path.join(
+            self.install_dir,
+            os.path.basename(egg_path),
+        )
+        destination = os.path.abspath(destination)
+        if not self.dry_run:
+            ensure_directory(destination)
+
+        dist = self.egg_distribution(egg_path)
+        if not samefile(egg_path, destination):
+            if os.path.isdir(destination) and not os.path.islink(destination):
+                dir_util.remove_tree(destination, dry_run=self.dry_run)
+            elif os.path.exists(destination):
+                self.execute(
+                    os.unlink,
+                    (destination,),
+                    "Removing " + destination,
+                )
+            try:
+                new_dist_is_zipped = False
+                if os.path.isdir(egg_path):
+                    if egg_path.startswith(tmpdir):
+                        f, m = shutil.move, "Moving"
+                    else:
+                        f, m = shutil.copytree, "Copying"
+                elif self.should_unzip(dist):
+                    self.mkpath(destination)
+                    f, m = self.unpack_and_compile, "Extracting"
+                else:
+                    new_dist_is_zipped = True
+                    if egg_path.startswith(tmpdir):
+                        f, m = shutil.move, "Moving"
+                    else:
+                        f, m = shutil.copy2, "Copying"
+                self.execute(
+                    f,
+                    (egg_path, destination),
+                    (m + " %s to %s") % (
+                        os.path.basename(egg_path),
+                        os.path.dirname(destination)
+                    ),
+                )
+                update_dist_caches(
+                    destination,
+                    fix_zipimporter_caches=new_dist_is_zipped,
+                )
+            except Exception:
+                update_dist_caches(destination, fix_zipimporter_caches=False)
+                raise
+
+        self.add_output(destination)
+        return self.egg_distribution(destination)
+
+    def install_exe(self, dist_filename, tmpdir):
+        # See if it's valid, get data
+        cfg = extract_wininst_cfg(dist_filename)
+        if cfg is None:
+            raise DistutilsError(
+                "%s is not a valid distutils Windows .exe" % dist_filename
+            )
+        # Create a dummy distribution object until we build the real distro
+        dist = Distribution(
+            None,
+            project_name=cfg.get('metadata', 'name'),
+            version=cfg.get('metadata', 'version'), platform=get_platform(),
+        )
+
+        # Convert the .exe to an unpacked egg
+        egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
+        dist.location = egg_path
+        egg_tmp = egg_path + '.tmp'
+        _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
+        pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
+        ensure_directory(pkg_inf)  # make sure EGG-INFO dir exists
+        dist._provider = PathMetadata(egg_tmp, _egg_info)  # XXX
+        self.exe_to_egg(dist_filename, egg_tmp)
+
+        # Write EGG-INFO/PKG-INFO
+        if not os.path.exists(pkg_inf):
+            f = open(pkg_inf, 'w')
+            f.write('Metadata-Version: 1.0\n')
+            for k, v in cfg.items('metadata'):
+                if k != 'target_version':
+                    f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
+            f.close()
+        script_dir = os.path.join(_egg_info, 'scripts')
+        # delete entry-point scripts to avoid duping
+        self.delete_blockers([
+            os.path.join(script_dir, args[0])
+            for args in ScriptWriter.get_args(dist)
+        ])
+        # Build .egg file from tmpdir
+        bdist_egg.make_zipfile(
+            egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
+        )
+        # install the .egg
+        return self.install_egg(egg_path, tmpdir)
+
+    def exe_to_egg(self, dist_filename, egg_tmp):
+        """Extract a bdist_wininst to the directories an egg would use"""
+        # Check for .pth file and set up prefix translations
+        prefixes = get_exe_prefixes(dist_filename)
+        to_compile = []
+        native_libs = []
+        top_level = {}
+
+        def process(src, dst):
+            s = src.lower()
+            for old, new in prefixes:
+                if s.startswith(old):
+                    src = new + src[len(old):]
+                    parts = src.split('/')
+                    dst = os.path.join(egg_tmp, *parts)
+                    dl = dst.lower()
+                    if dl.endswith('.pyd') or dl.endswith('.dll'):
+                        parts[-1] = bdist_egg.strip_module(parts[-1])
+                        top_level[os.path.splitext(parts[0])[0]] = 1
+                        native_libs.append(src)
+                    elif dl.endswith('.py') and old != 'SCRIPTS/':
+                        top_level[os.path.splitext(parts[0])[0]] = 1
+                        to_compile.append(dst)
+                    return dst
+            if not src.endswith('.pth'):
+                log.warn("WARNING: can't process %s", src)
+            return None
+
+        # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
+        unpack_archive(dist_filename, egg_tmp, process)
+        stubs = []
+        for res in native_libs:
+            if res.lower().endswith('.pyd'):  # create stubs for .pyd's
+                parts = res.split('/')
+                resource = parts[-1]
+                parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
+                pyfile = os.path.join(egg_tmp, *parts)
+                to_compile.append(pyfile)
+                stubs.append(pyfile)
+                bdist_egg.write_stub(resource, pyfile)
+        self.byte_compile(to_compile)  # compile .py's
+        bdist_egg.write_safety_flag(
+            os.path.join(egg_tmp, 'EGG-INFO'),
+            bdist_egg.analyze_egg(egg_tmp, stubs))  # write zip-safety flag
+
+        for name in 'top_level', 'native_libs':
+            if locals()[name]:
+                txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
+                if not os.path.exists(txt):
+                    f = open(txt, 'w')
+                    f.write('\n'.join(locals()[name]) + '\n')
+                    f.close()
+
+    def install_wheel(self, wheel_path, tmpdir):
+        wheel = Wheel(wheel_path)
+        assert wheel.is_compatible()
+        destination = os.path.join(self.install_dir, wheel.egg_name())
+        destination = os.path.abspath(destination)
+        if not self.dry_run:
+            ensure_directory(destination)
+        if os.path.isdir(destination) and not os.path.islink(destination):
+            dir_util.remove_tree(destination, dry_run=self.dry_run)
+        elif os.path.exists(destination):
+            self.execute(
+                os.unlink,
+                (destination,),
+                "Removing " + destination,
+            )
+        try:
+            self.execute(
+                wheel.install_as_egg,
+                (destination,),
+                ("Installing %s to %s") % (
+                    os.path.basename(wheel_path),
+                    os.path.dirname(destination)
+                ),
+            )
+        finally:
+            update_dist_caches(destination, fix_zipimporter_caches=False)
+        self.add_output(destination)
+        return self.egg_distribution(destination)
+
+    __mv_warning = textwrap.dedent("""
+        Because this distribution was installed --multi-version, before you can
+        import modules from this package in an application, you will need to
+        'import pkg_resources' and then use a 'require()' call similar to one of
+        these examples, in order to select the desired version:
+
+            pkg_resources.require("%(name)s")  # latest installed version
+            pkg_resources.require("%(name)s==%(version)s")  # this exact version
+            pkg_resources.require("%(name)s>=%(version)s")  # this version or higher
+        """).lstrip()
+
+    __id_warning = textwrap.dedent("""
+        Note also that the installation directory must be on sys.path at runtime for
+        this to work.  (e.g. by being the application's script directory, by being on
+        PYTHONPATH, or by being added to sys.path by your code.)
+        """)
+
+    def installation_report(self, req, dist, what="Installed"):
+        """Helpful installation message for display to package users"""
+        msg = "\n%(what)s %(eggloc)s%(extras)s"
+        if self.multi_version and not self.no_report:
+            msg += '\n' + self.__mv_warning
+            if self.install_dir not in map(normalize_path, sys.path):
+                msg += '\n' + self.__id_warning
+
+        eggloc = dist.location
+        name = dist.project_name
+        version = dist.version
+        extras = ''  # TODO: self.report_extras(req, dist)
+        return msg % locals()
+
+    __editable_msg = textwrap.dedent("""
+        Extracted editable version of %(spec)s to %(dirname)s
+
+        If it uses setuptools in its setup script, you can activate it in
+        "development" mode by going to that directory and running::
+
+            %(python)s setup.py develop
+
+        See the setuptools documentation for the "develop" command for more info.
+        """).lstrip()
+
+    def report_editable(self, spec, setup_script):
+        dirname = os.path.dirname(setup_script)
+        python = sys.executable
+        return '\n' + self.__editable_msg % locals()
+
+    def run_setup(self, setup_script, setup_base, args):
+        sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
+        sys.modules.setdefault('distutils.command.egg_info', egg_info)
+
+        args = list(args)
+        if self.verbose > 2:
+            v = 'v' * (self.verbose - 1)
+            args.insert(0, '-' + v)
+        elif self.verbose < 2:
+            args.insert(0, '-q')
+        if self.dry_run:
+            args.insert(0, '-n')
+        log.info(
+            "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
+        )
+        try:
+            run_setup(setup_script, args)
+        except SystemExit as v:
+            raise DistutilsError("Setup script exited with %s" % (v.args[0],))
+
+    def build_and_install(self, setup_script, setup_base):
+        args = ['bdist_egg', '--dist-dir']
+
+        dist_dir = tempfile.mkdtemp(
+            prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
+        )
+        try:
+            self._set_fetcher_options(os.path.dirname(setup_script))
+            args.append(dist_dir)
+
+            self.run_setup(setup_script, setup_base, args)
+            all_eggs = Environment([dist_dir])
+            eggs = []
+            for key in all_eggs:
+                for dist in all_eggs[key]:
+                    eggs.append(self.install_egg(dist.location, setup_base))
+            if not eggs and not self.dry_run:
+                log.warn("No eggs found in %s (setup script problem?)",
+                         dist_dir)
+            return eggs
+        finally:
+            rmtree(dist_dir)
+            log.set_verbosity(self.verbose)  # restore our log verbosity
+
+    def _set_fetcher_options(self, base):
+        """
+        When easy_install is about to run bdist_egg on a source dist, that
+        source dist might have 'setup_requires' directives, requiring
+        additional fetching. Ensure the fetcher options given to easy_install
+        are available to that command as well.
+        """
+        # find the fetch options from easy_install and write them out
+        # to the setup.cfg file.
+        ei_opts = self.distribution.get_option_dict('easy_install').copy()
+        fetch_directives = (
+            'find_links', 'site_dirs', 'index_url', 'optimize',
+            'allow_hosts',
+        )
+        fetch_options = {}
+        for key, val in ei_opts.items():
+            if key not in fetch_directives:
+                continue
+            fetch_options[key.replace('_', '-')] = val[1]
+        # create a settings dictionary suitable for `edit_config`
+        settings = dict(easy_install=fetch_options)
+        cfg_filename = os.path.join(base, 'setup.cfg')
+        setopt.edit_config(cfg_filename, settings)
+
+    def update_pth(self, dist):
+        if self.pth_file is None:
+            return
+
+        for d in self.pth_file[dist.key]:  # drop old entries
+            if self.multi_version or d.location != dist.location:
+                log.info("Removing %s from easy-install.pth file", d)
+                self.pth_file.remove(d)
+                if d.location in self.shadow_path:
+                    self.shadow_path.remove(d.location)
+
+        if not self.multi_version:
+            if dist.location in self.pth_file.paths:
+                log.info(
+                    "%s is already the active version in easy-install.pth",
+                    dist,
+                )
+            else:
+                log.info("Adding %s to easy-install.pth file", dist)
+                self.pth_file.add(dist)  # add new entry
+                if dist.location not in self.shadow_path:
+                    self.shadow_path.append(dist.location)
+
+        if not self.dry_run:
+
+            self.pth_file.save()
+
+            if dist.key == 'setuptools':
+                # Ensure that setuptools itself never becomes unavailable!
+                # XXX should this check for latest version?
+                filename = os.path.join(self.install_dir, 'setuptools.pth')
+                if os.path.islink(filename):
+                    os.unlink(filename)
+                f = open(filename, 'wt')
+                f.write(self.pth_file.make_relative(dist.location) + '\n')
+                f.close()
+
+    def unpack_progress(self, src, dst):
+        # Progress filter for unpacking
+        log.debug("Unpacking %s to %s", src, dst)
+        return dst  # only unpack-and-compile skips files for dry run
+
+    def unpack_and_compile(self, egg_path, destination):
+        to_compile = []
+        to_chmod = []
+
+        def pf(src, dst):
+            if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
+                to_compile.append(dst)
+            elif dst.endswith('.dll') or dst.endswith('.so'):
+                to_chmod.append(dst)
+            self.unpack_progress(src, dst)
+            return not self.dry_run and dst or None
+
+        unpack_archive(egg_path, destination, pf)
+        self.byte_compile(to_compile)
+        if not self.dry_run:
+            for f in to_chmod:
+                mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
+                chmod(f, mode)
+
+    def byte_compile(self, to_compile):
+        if sys.dont_write_bytecode:
+            return
+
+        from distutils.util import byte_compile
+
+        try:
+            # try to make the byte compile messages quieter
+            log.set_verbosity(self.verbose - 1)
+
+            byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
+            if self.optimize:
+                byte_compile(
+                    to_compile, optimize=self.optimize, force=1,
+                    dry_run=self.dry_run,
+                )
+        finally:
+            log.set_verbosity(self.verbose)  # restore original verbosity
+
+    __no_default_msg = textwrap.dedent("""
+        bad install directory or PYTHONPATH
+
+        You are attempting to install a package to a directory that is not
+        on PYTHONPATH and which Python does not read ".pth" files from.  The
+        installation directory you specified (via --install-dir, --prefix, or
+        the distutils default setting) was:
+
+            %s
+
+        and your PYTHONPATH environment variable currently contains:
+
+            %r
+
+        Here are some of your options for correcting the problem:
+
+        * You can choose a different installation directory, i.e., one that is
+          on PYTHONPATH or supports .pth files
+
+        * You can add the installation directory to the PYTHONPATH environment
+          variable.  (It must then also be on PYTHONPATH whenever you run
+          Python and want to use the package(s) you are installing.)
+
+        * You can set up the installation directory to support ".pth" files by
+          using one of the approaches described here:
+
+          https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations
+
+
+        Please make the appropriate changes for your system and try again.""").lstrip()
+
+    def no_default_version_msg(self):
+        template = self.__no_default_msg
+        return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
+
+    def install_site_py(self):
+        """Make sure there's a site.py in the target dir, if needed"""
+
+        if self.sitepy_installed:
+            return  # already did it, or don't need to
+
+        sitepy = os.path.join(self.install_dir, "site.py")
+        source = resource_string("setuptools", "site-patch.py")
+        source = source.decode('utf-8')
+        current = ""
+
+        if os.path.exists(sitepy):
+            log.debug("Checking existing site.py in %s", self.install_dir)
+            with io.open(sitepy) as strm:
+                current = strm.read()
+
+            if not current.startswith('def __boot():'):
+                raise DistutilsError(
+                    "%s is not a setuptools-generated site.py; please"
+                    " remove it." % sitepy
+                )
+
+        if current != source:
+            log.info("Creating %s", sitepy)
+            if not self.dry_run:
+                ensure_directory(sitepy)
+                with io.open(sitepy, 'w', encoding='utf-8') as strm:
+                    strm.write(source)
+            self.byte_compile([sitepy])
+
+        self.sitepy_installed = True
+
+    def create_home_path(self):
+        """Create directories under ~."""
+        if not self.user:
+            return
+        home = convert_path(os.path.expanduser("~"))
+        for name, path in six.iteritems(self.config_vars):
+            if path.startswith(home) and not os.path.isdir(path):
+                self.debug_print("os.makedirs('%s', 0o700)" % path)
+                os.makedirs(path, 0o700)
+
+    INSTALL_SCHEMES = dict(
+        posix=dict(
+            install_dir='$base/lib/python$py_version_short/site-packages',
+            script_dir='$base/bin',
+        ),
+    )
+
+    DEFAULT_SCHEME = dict(
+        install_dir='$base/Lib/site-packages',
+        script_dir='$base/Scripts',
+    )
+
+    def _expand(self, *attrs):
+        config_vars = self.get_finalized_command('install').config_vars
+
+        if self.prefix:
+            # Set default install_dir/scripts from --prefix
+            config_vars = config_vars.copy()
+            config_vars['base'] = self.prefix
+            scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
+            for attr, val in scheme.items():
+                if getattr(self, attr, None) is None:
+                    setattr(self, attr, val)
+
+        from distutils.util import subst_vars
+
+        for attr in attrs:
+            val = getattr(self, attr)
+            if val is not None:
+                val = subst_vars(val, config_vars)
+                if os.name == 'posix':
+                    val = os.path.expanduser(val)
+                setattr(self, attr, val)
+
+
+def _pythonpath():
+    items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
+    return filter(None, items)
+
+
+def get_site_dirs():
+    """
+    Return a list of 'site' dirs
+    """
+
+    sitedirs = []
+
+    # start with PYTHONPATH
+    sitedirs.extend(_pythonpath())
+
+    prefixes = [sys.prefix]
+    if sys.exec_prefix != sys.prefix:
+        prefixes.append(sys.exec_prefix)
+    for prefix in prefixes:
+        if prefix:
+            if sys.platform in ('os2emx', 'riscos'):
+                sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
+            elif os.sep == '/':
+                sitedirs.extend([
+                    os.path.join(
+                        prefix,
+                        "lib",
+                        "python" + sys.version[:3],
+                        "site-packages",
+                    ),
+                    os.path.join(prefix, "lib", "site-python"),
+                ])
+            else:
+                sitedirs.extend([
+                    prefix,
+                    os.path.join(prefix, "lib", "site-packages"),
+                ])
+            if sys.platform == 'darwin':
+                # for framework builds *only* we add the standard Apple
+                # locations. Currently only per-user, but /Library and
+                # /Network/Library could be added too
+                if 'Python.framework' in prefix:
+                    home = os.environ.get('HOME')
+                    if home:
+                        home_sp = os.path.join(
+                            home,
+                            'Library',
+                            'Python',
+                            sys.version[:3],
+                            'site-packages',
+                        )
+                        sitedirs.append(home_sp)
+    lib_paths = get_path('purelib'), get_path('platlib')
+    for site_lib in lib_paths:
+        if site_lib not in sitedirs:
+            sitedirs.append(site_lib)
+
+    if site.ENABLE_USER_SITE:
+        sitedirs.append(site.USER_SITE)
+
+    try:
+        sitedirs.extend(site.getsitepackages())
+    except AttributeError:
+        pass
+
+    sitedirs = list(map(normalize_path, sitedirs))
+
+    return sitedirs
+
+
+def expand_paths(inputs):
+    """Yield sys.path directories that might contain "old-style" packages"""
+
+    seen = {}
+
+    for dirname in inputs:
+        dirname = normalize_path(dirname)
+        if dirname in seen:
+            continue
+
+        seen[dirname] = 1
+        if not os.path.isdir(dirname):
+            continue
+
+        files = os.listdir(dirname)
+        yield dirname, files
+
+        for name in files:
+            if not name.endswith('.pth'):
+                # We only care about the .pth files
+                continue
+            if name in ('easy-install.pth', 'setuptools.pth'):
+                # Ignore .pth files that we control
+                continue
+
+            # Read the .pth file
+            f = open(os.path.join(dirname, name))
+            lines = list(yield_lines(f))
+            f.close()
+
+            # Yield existing non-dupe, non-import directory lines from it
+            for line in lines:
+                if not line.startswith("import"):
+                    line = normalize_path(line.rstrip())
+                    if line not in seen:
+                        seen[line] = 1
+                        if not os.path.isdir(line):
+                            continue
+                        yield line, os.listdir(line)
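+
+
+# A runnable sketch (helper name hypothetical, not part of setuptools) that
+# exercises expand_paths() on a throwaway directory holding one .pth file:
+def _demo_expand_paths():
+    base = tempfile.mkdtemp()
+    extra = os.path.join(base, 'extra')
+    os.mkdir(extra)
+    with open(os.path.join(base, 'legacy.pth'), 'w') as f:
+        # the 'import' line is skipped; the bare path line is expanded
+        f.write('import sys\n' + extra + '\n')
+    for dirname, files in expand_paths([base]):
+        print(dirname, sorted(files))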
+
+
+def extract_wininst_cfg(dist_filename):
+    """Extract configuration data from a bdist_wininst .exe
+
+    Returns a configparser.RawConfigParser, or None
+    """
+    f = open(dist_filename, 'rb')
+    try:
+        endrec = zipfile._EndRecData(f)
+        if endrec is None:
+            return None
+
+        prepended = (endrec[9] - endrec[5]) - endrec[6]
+        if prepended < 12:  # no wininst data here
+            return None
+        f.seek(prepended - 12)
+
+        tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
+        if tag not in (0x1234567A, 0x1234567B):
+            return None  # not a valid tag
+
+        f.seek(prepended - (12 + cfglen))
+        init = {'version': '', 'target_version': ''}
+        cfg = configparser.RawConfigParser(init)
+        try:
+            part = f.read(cfglen)
+            # Read up to the first null byte.
+            config = part.split(b'\0', 1)[0]
+            # Now the config is in bytes, but for RawConfigParser, it should
+            #  be text, so decode it.
+            config = config.decode(sys.getfilesystemencoding())
+            cfg.readfp(six.StringIO(config))
+        except configparser.Error:
+            return None
+        if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
+            return None
+        return cfg
+
+    finally:
+        f.close()
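+
+
+# A minimal sketch (not part of setuptools) of the 12-byte trailer layout the
+# parser above expects: tag, config length, and bitmap length, little-endian:
+def _demo_wininst_trailer():
+    cfg_data = b"[metadata]\r\nname=demo\r\n\0"
+    trailer = struct.pack("<iii", 0x1234567A, len(cfg_data), 0)
+    tag, cfglen, bmlen = struct.unpack("<iii", trailer)
+    assert (tag, cfglen, bmlen) == (0x1234567A, len(cfg_data), 0)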
+
+
+def get_exe_prefixes(exe_filename):
+    """Get exe->egg path translations for a given .exe file"""
+
+    prefixes = [
+        ('PURELIB/', ''),
+        ('PLATLIB/pywin32_system32', ''),
+        ('PLATLIB/', ''),
+        ('SCRIPTS/', 'EGG-INFO/scripts/'),
+        ('DATA/lib/site-packages', ''),
+    ]
+    z = zipfile.ZipFile(exe_filename)
+    try:
+        for info in z.infolist():
+            name = info.filename
+            parts = name.split('/')
+            if len(parts) == 3 and parts[2] == 'PKG-INFO':
+                if parts[1].endswith('.egg-info'):
+                    prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
+                    break
+            if len(parts) != 2 or not name.endswith('.pth'):
+                continue
+            if name.endswith('-nspkg.pth'):
+                continue
+            if parts[0].upper() in ('PURELIB', 'PLATLIB'):
+                contents = z.read(name)
+                if six.PY3:
+                    contents = contents.decode()
+                for pth in yield_lines(contents):
+                    pth = pth.strip().replace('\\', '/')
+                    if not pth.startswith('import'):
+                        prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
+    finally:
+        z.close()
+    prefixes = [(x.lower(), y) for x, y in prefixes]
+    prefixes.sort()
+    prefixes.reverse()
+    return prefixes
+
+
+class PthDistributions(Environment):
+    """A .pth file with Distribution paths in it"""
+
+    dirty = False
+
+    def __init__(self, filename, sitedirs=()):
+        self.filename = filename
+        self.sitedirs = list(map(normalize_path, sitedirs))
+        self.basedir = normalize_path(os.path.dirname(self.filename))
+        self._load()
+        Environment.__init__(self, [], None, None)
+        for path in yield_lines(self.paths):
+            list(map(self.add, find_distributions(path, True)))
+
+    def _load(self):
+        self.paths = []
+        saw_import = False
+        seen = dict.fromkeys(self.sitedirs)
+        if os.path.isfile(self.filename):
+            f = open(self.filename, 'rt')
+            for line in f:
+                if line.startswith('import'):
+                    saw_import = True
+                    continue
+                path = line.rstrip()
+                self.paths.append(path)
+                if not path.strip() or path.strip().startswith('#'):
+                    continue
+                # skip non-existent paths, in case somebody deleted a package
+                # manually, and duplicate paths as well
+                path = self.paths[-1] = normalize_path(
+                    os.path.join(self.basedir, path)
+                )
+                if not os.path.exists(path) or path in seen:
+                    self.paths.pop()  # skip it
+                    self.dirty = True  # we cleaned up, so we're dirty now :)
+                    continue
+                seen[path] = 1
+            f.close()
+
+        if self.paths and not saw_import:
+            self.dirty = True  # ensure anything we touch has import wrappers
+        while self.paths and not self.paths[-1].strip():
+            self.paths.pop()
+
+    def save(self):
+        """Write changed .pth file back to disk"""
+        if not self.dirty:
+            return
+
+        rel_paths = list(map(self.make_relative, self.paths))
+        if rel_paths:
+            log.debug("Saving %s", self.filename)
+            lines = self._wrap_lines(rel_paths)
+            data = '\n'.join(lines) + '\n'
+
+            if os.path.islink(self.filename):
+                os.unlink(self.filename)
+            with open(self.filename, 'wt') as f:
+                f.write(data)
+
+        elif os.path.exists(self.filename):
+            log.debug("Deleting empty %s", self.filename)
+            os.unlink(self.filename)
+
+        self.dirty = False
+
+    @staticmethod
+    def _wrap_lines(lines):
+        return lines
+
+    def add(self, dist):
+        """Add `dist` to the distribution map"""
+        new_path = (
+            dist.location not in self.paths and (
+                dist.location not in self.sitedirs or
+                # account for '.' being in PYTHONPATH
+                dist.location == os.getcwd()
+            )
+        )
+        if new_path:
+            self.paths.append(dist.location)
+            self.dirty = True
+        Environment.add(self, dist)
+
+    def remove(self, dist):
+        """Remove `dist` from the distribution map"""
+        while dist.location in self.paths:
+            self.paths.remove(dist.location)
+            self.dirty = True
+        Environment.remove(self, dist)
+
+    def make_relative(self, path):
+        npath, last = os.path.split(normalize_path(path))
+        baselen = len(self.basedir)
+        parts = [last]
+        sep = os.altsep == '/' and '/' or os.sep
+        while len(npath) >= baselen:
+            if npath == self.basedir:
+                parts.append(os.curdir)
+                parts.reverse()
+                return sep.join(parts)
+            npath, last = os.path.split(npath)
+            parts.append(last)
+        else:
+            return path
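+
+
+# A runnable sketch (helper name hypothetical, not part of setuptools): build
+# a PthDistributions over a scratch file, record one path, and save it.
+def _demo_pth_distributions():
+    scratch = tempfile.mkdtemp()
+    pth = PthDistributions(os.path.join(scratch, 'easy-install.pth'))
+    pth.paths.append(scratch)  # pretend a dist was installed here
+    pth.dirty = True
+    pth.save()  # writes the path back out, made relative where possible
+    print(open(pth.filename).read())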
+
+
+class RewritePthDistributions(PthDistributions):
+    @classmethod
+    def _wrap_lines(cls, lines):
+        yield cls.prelude
+        for line in lines:
+            yield line
+        yield cls.postlude
+
+    prelude = _one_liner("""
+        import sys
+        sys.__plen = len(sys.path)
+        """)
+    postlude = _one_liner("""
+        import sys
+        new = sys.path[sys.__plen:]
+        del sys.path[sys.__plen:]
+        p = getattr(sys, '__egginsert', 0)
+        sys.path[p:p] = new
+        sys.__egginsert = p + len(new)
+        """)
+
+
+if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
+    PthDistributions = RewritePthDistributions
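+
+
+# A quick sketch (not part of setuptools) of how the 'rewrite' technique
+# wraps the saved paths between the prelude and postlude one-liners:
+def _demo_rewrite_wrapping():
+    for line in RewritePthDistributions._wrap_lines(['./demo-1.0.egg']):
+        print(line)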
+
+
+def _first_line_re():
+    """
+    Return a regular expression based on first_line_re suitable for matching
+    strings.
+    """
+    if isinstance(first_line_re.pattern, str):
+        return first_line_re
+
+    # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
+    return re.compile(first_line_re.pattern.decode())
+
+
+def auto_chmod(func, arg, exc):
+    if func in [os.unlink, os.remove] and os.name == 'nt':
+        chmod(arg, stat.S_IWRITE)
+        return func(arg)
+    et, ev, _ = sys.exc_info()
+    six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
+
+
+def update_dist_caches(dist_path, fix_zipimporter_caches):
+    """
+    Fix any globally cached `dist_path` related data
+
+    `dist_path` should be a path of a newly installed egg distribution (zipped
+    or unzipped).
+
+    sys.path_importer_cache contains finder objects that have been cached when
+    importing data from the original distribution. Any such finders need to be
+    cleared since the replacement distribution might be packaged differently,
+    e.g. a zipped egg distribution might get replaced with an unzipped egg
+    folder or vice versa. Having the old finders cached may then cause Python
+    to attempt loading modules from the replacement distribution using an
+    incorrect loader.
+
+    zipimport.zipimporter objects are Python loaders charged with importing
+    data packaged inside zip archives. If stale loaders referencing the
+    original distribution are left behind, they can fail to load modules from
+    the replacement distribution. E.g. if an old zipimport.zipimporter instance
+    is used to load data from a new zipped egg archive, it may cause the
+    operation to attempt to locate the requested data in the wrong location -
+    one indicated by the original distribution's zip archive directory
+    information. Such an operation may then fail outright, e.g. report having
+    read a 'bad local file header', or, even worse, it may fail silently and
+    return invalid data.
+
+    zipimport._zip_directory_cache contains cached zip archive directory
+    information for all existing zipimport.zipimporter instances and all such
+    instances connected to the same archive share the same cached directory
+    information.
+
+    If asked, and the underlying Python implementation allows it, we can fix
+    all existing zipimport.zipimporter instances instead of having to track
+    them down and remove them one by one, by updating their shared cached zip
+    archive directory information. This, of course, assumes that the
+    replacement distribution is packaged as a zipped egg.
+
+    If not asked to fix existing zipimport.zipimporter instances, we still do
+    our best to clear any remaining zipimport.zipimporter related cached data
+    that might somehow later get used when attempting to load data from the new
+    distribution and thus cause such load operations to fail. Note that when
+    tracking down such remaining stale data, we cannot catch every conceivable
+    usage from here, and we clear only those that we know of and have found to
+    cause problems if left alive. Any remaining caches should be updated by
+    whoever is in charge of maintaining them, i.e. they should be ready to
+    handle us replacing their zip archives with new distributions at runtime.
+
+    """
+    # There are several other known sources of stale zipimport.zipimporter
+    # instances that we do not clear here, but might if ever given a reason to
+    # do so:
+    # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
+    #   set') may contain distributions which may in turn contain their
+    #   zipimport.zipimporter loaders.
+    # * Several zipimport.zipimporter loaders held by local variables further
+    #   up the function call stack when running the setuptools installation.
+    # * Already loaded modules may have their __loader__ attribute set to the
+    #   exact loader instance used when importing them. Python 3.4 docs state
+    #   that this information is intended mostly for introspection and so is
+    #   not expected to cause us problems.
+    normalized_path = normalize_path(dist_path)
+    _uncache(normalized_path, sys.path_importer_cache)
+    if fix_zipimporter_caches:
+        _replace_zip_directory_cache_data(normalized_path)
+    else:
+        # Here, even though we do not want to fix existing and now stale
+        # zipimporter cache information, we still want to remove it. Related to
+        # Python's zip archive directory information cache, we clear each of
+        # its stale entries in two phases:
+        #   1. Clear the entry so attempting to access zip archive information
+        #      via any existing stale zipimport.zipimporter instances fails.
+        #   2. Remove the entry from the cache so any newly constructed
+        #      zipimport.zipimporter instances do not end up using old stale
+        #      zip archive directory information.
+        # This whole stale data removal step does not seem strictly necessary,
+        # but has been left in because it was done before we started replacing
+        # the zip archive directory information cache content if possible, and
+        # there are no relevant unit tests that we can depend on to tell us if
+        # this is really needed.
+        _remove_and_clear_zip_directory_cache_data(normalized_path)
+
+
+def _collect_zipimporter_cache_entries(normalized_path, cache):
+    """
+    Return zipimporter cache entry keys related to a given normalized path.
+
+    Alternative path spellings (e.g. those using different character case or
+    those using alternative path separators) related to the same path are
+    included. Any sub-path entries are included as well, i.e. those
+    corresponding to zip archives embedded in other zip archives.
+
+    """
+    result = []
+    prefix_len = len(normalized_path)
+    for p in cache:
+        np = normalize_path(p)
+        if (np.startswith(normalized_path) and
+                np[prefix_len:prefix_len + 1] in (os.sep, '')):
+            result.append(p)
+    return result
+
+
+def _update_zipimporter_cache(normalized_path, cache, updater=None):
+    """
+    Update zipimporter cache data for a given normalized path.
+
+    Any sub-path entries are processed as well, i.e. those corresponding to zip
+    archives embedded in other zip archives.
+
+    Given updater is a callable taking a cache entry key and the original entry
+    (after already removing the entry from the cache), and expected to update
+    the entry and possibly return a new one to be inserted in its place.
+    Returning None indicates that the entry should not be replaced with a new
+    one. If no updater is given, the cache entries are simply removed without
+    any additional processing, the same as if the updater simply returned None.
+
+    """
+    for p in _collect_zipimporter_cache_entries(normalized_path, cache):
+        # N.B. pypy's custom zipimport._zip_directory_cache implementation does
+        # not support the complete dict interface:
+        # * Does not support item assignment, thus not allowing this function
+        #   to be used only for removing existing cache entries.
+        # * Does not support the dict.pop() method, forcing us to use the
+        #   get/del patterns instead. For more detailed information see the
+        #   following links:
+        #   https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
+        #   http://bit.ly/2h9itJX
+        old_entry = cache[p]
+        del cache[p]
+        new_entry = updater and updater(p, old_entry)
+        if new_entry is not None:
+            cache[p] = new_entry
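+
+
+# A runnable sketch (not part of setuptools) of the updater contract above,
+# using a plain dict in place of zipimport._zip_directory_cache:
+def _demo_cache_updater():
+    cache = {'/eggs/demo.egg': 'old', '/eggs/demo.egg/inner.zip': 'old'}
+    _update_zipimporter_cache(
+        normalize_path('/eggs/demo.egg'), cache,
+        updater=lambda path, entry: entry.upper(),
+    )
+    print(cache)  # both entries removed, updated, and reinserted as 'OLD'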
+
+
+def _uncache(normalized_path, cache):
+    _update_zipimporter_cache(normalized_path, cache)
+
+
+def _remove_and_clear_zip_directory_cache_data(normalized_path):
+    def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
+        old_entry.clear()
+
+    _update_zipimporter_cache(
+        normalized_path, zipimport._zip_directory_cache,
+        updater=clear_and_remove_cached_zip_archive_directory_data)
+
+
+# The PyPy Python implementation does not allow directly writing to the
+# zipimport._zip_directory_cache and so prevents us from attempting to correct
+# its content. The best we can do there is clear the problematic cache content
+# and have PyPy repopulate it as needed. The downside is that if there are any
+# stale zipimport.zipimporter instances lying around, attempting to use them
+# will fail due to not having their zip archive directory information
+# available, instead of being automatically corrected to use the new, correct
+# zip archive directory information.
+if '__pypy__' in sys.builtin_module_names:
+    _replace_zip_directory_cache_data = \
+        _remove_and_clear_zip_directory_cache_data
+else:
+
+    def _replace_zip_directory_cache_data(normalized_path):
+        def replace_cached_zip_archive_directory_data(path, old_entry):
+            # N.B. In theory, we could load the zip directory information just
+            # once for all updated path spellings, and then copy it locally and
+            # update its contained path strings to contain the correct
+            # spelling, but that seems like a way too invasive move (this cache
+            # structure is not officially documented anywhere and could in
+            # theory change with new Python releases) for no significant
+            # benefit.
+            old_entry.clear()
+            zipimport.zipimporter(path)
+            old_entry.update(zipimport._zip_directory_cache[path])
+            return old_entry
+
+        _update_zipimporter_cache(
+            normalized_path, zipimport._zip_directory_cache,
+            updater=replace_cached_zip_archive_directory_data)
+
+
+def is_python(text, filename='<string>'):
+    "Is this string a valid Python script?"
+    try:
+        compile(text, filename, 'exec')
+    except (SyntaxError, TypeError):
+        return False
+    else:
+        return True
+
+
+def is_sh(executable):
+    """Determine if the specified executable is a .sh (contains a #! line)"""
+    try:
+        with io.open(executable, encoding='latin-1') as fp:
+            magic = fp.read(2)
+    except (OSError, IOError):
+        return executable
+    return magic == '#!'
+
+
+def nt_quote_arg(arg):
+    """Quote a command line argument according to Windows parsing rules"""
+    return subprocess.list2cmdline([arg])
+
+
+def is_python_script(script_text, filename):
+    """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
+    """
+    if filename.endswith('.py') or filename.endswith('.pyw'):
+        return True  # extension says it's Python
+    if is_python(script_text, filename):
+        return True  # it's syntactically valid Python
+    if script_text.startswith('#!'):
+        # It begins with a '#!' line, so check if 'python' is in it somewhere
+        return 'python' in script_text.splitlines()[0].lower()
+
+    return False  # Not any Python I can recognize
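+
+
+# A few hedged, illustrative checks for the heuristics above (all values
+# hypothetical):
+def _demo_script_detection():
+    assert is_python("print('hi')\n")
+    assert is_python_script("#!/usr/bin/env python\nprint('hi')\n", 'demo')
+    # shell syntax neither compiles as Python nor names python in its #! line
+    assert not is_python_script('#!/bin/sh\ncase "$1" in esac\n', 'demo')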
+
+
+try:
+    from os import chmod as _chmod
+except ImportError:
+    # Jython compatibility
+    def _chmod(*args):
+        pass
+
+
+def chmod(path, mode):
+    log.debug("changing mode of %s to %o", path, mode)
+    try:
+        _chmod(path, mode)
+    except os.error as e:
+        log.debug("chmod failed: %s", e)
+
+
+class CommandSpec(list):
+    """
+    A command spec for a #! header, specified as a list of arguments akin to
+    those passed to Popen.
+    """
+
+    options = []
+    split_args = dict()
+
+    @classmethod
+    def best(cls):
+        """
+        Choose the best CommandSpec class based on environmental conditions.
+        """
+        return cls
+
+    @classmethod
+    def _sys_executable(cls):
+        _default = os.path.normpath(sys.executable)
+        return os.environ.get('__PYVENV_LAUNCHER__', _default)
+
+    @classmethod
+    def from_param(cls, param):
+        """
+        Construct a CommandSpec from a parameter to build_scripts, which may
+        be None.
+        """
+        if isinstance(param, cls):
+            return param
+        if isinstance(param, list):
+            return cls(param)
+        if param is None:
+            return cls.from_environment()
+        # otherwise, assume it's a string.
+        return cls.from_string(param)
+
+    @classmethod
+    def from_environment(cls):
+        return cls([cls._sys_executable()])
+
+    @classmethod
+    def from_string(cls, string):
+        """
+        Construct a command spec from a simple string representing a command
+        line parseable by shlex.split.
+        """
+        items = shlex.split(string, **cls.split_args)
+        return cls(items)
+
+    def install_options(self, script_text):
+        self.options = shlex.split(self._extract_options(script_text))
+        cmdline = subprocess.list2cmdline(self)
+        if not isascii(cmdline):
+            self.options[:0] = ['-x']
+
+    @staticmethod
+    def _extract_options(orig_script):
+        """
+        Extract any options from the first line of the script.
+        """
+        first = (orig_script + '\n').splitlines()[0]
+        match = _first_line_re().match(first)
+        options = match.group(1) or '' if match else ''
+        return options.strip()
+
+    def as_header(self):
+        return self._render(self + list(self.options))
+
+    @staticmethod
+    def _strip_quotes(item):
+        _QUOTES = '"\''
+        for q in _QUOTES:
+            if item.startswith(q) and item.endswith(q):
+                return item[1:-1]
+        return item
+
+    @staticmethod
+    def _render(items):
+        cmdline = subprocess.list2cmdline(
+            CommandSpec._strip_quotes(item.strip()) for item in items)
+        return '#!' + cmdline + '\n'
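+
+
+# A short sketch (not part of setuptools) of the round trip from a plain
+# command string to a #! header:
+def _demo_command_spec():
+    cmd = CommandSpec.from_string('/usr/bin/python -S')
+    cmd.install_options('')  # an empty script contributes no #! options
+    print(cmd.as_header())  # '#!/usr/bin/python -S\n'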
+
+
+# For pbr compat; will be removed in a future version.
+sys_executable = CommandSpec._sys_executable()
+
+
+class WindowsCommandSpec(CommandSpec):
+    split_args = dict(posix=False)
+
+
+class ScriptWriter:
+    """
+    Encapsulates behavior around writing entry point scripts for console and
+    gui apps.
+    """
+
+    template = textwrap.dedent(r"""
+        # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
+        __requires__ = %(spec)r
+        import re
+        import sys
+        from pkg_resources import load_entry_point
+
+        if __name__ == '__main__':
+            sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+            sys.exit(
+                load_entry_point(%(spec)r, %(group)r, %(name)r)()
+            )
+    """).lstrip()
+
+    command_spec_class = CommandSpec
+
+    @classmethod
+    def get_script_args(cls, dist, executable=None, wininst=False):
+        # for backward compatibility
+        warnings.warn("Use get_args", EasyInstallDeprecationWarning)
+        writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
+        header = cls.get_script_header("", executable, wininst)
+        return writer.get_args(dist, header)
+
+    @classmethod
+    def get_script_header(cls, script_text, executable=None, wininst=False):
+        # for backward compatibility
+        warnings.warn("Use get_header", EasyInstallDeprecationWarning, stacklevel=2)
+        if wininst:
+            executable = "python.exe"
+        return cls.get_header(script_text, executable)
+
+    @classmethod
+    def get_args(cls, dist, header=None):
+        """
+        Yield write_script() argument tuples for a distribution's
+        console_scripts and gui_scripts entry points.
+        """
+        if header is None:
+            header = cls.get_header()
+        spec = str(dist.as_requirement())
+        for type_ in 'console', 'gui':
+            group = type_ + '_scripts'
+            for name, ep in dist.get_entry_map(group).items():
+                cls._ensure_safe_name(name)
+                script_text = cls.template % locals()
+                args = cls._get_script_args(type_, name, header, script_text)
+                for res in args:
+                    yield res
+
+    @staticmethod
+    def _ensure_safe_name(name):
+        """
+        Prevent paths in *_scripts entry point names.
+        """
+        has_path_sep = re.search(r'[\\/]', name)
+        if has_path_sep:
+            raise ValueError("Path separators not allowed in script names")
+
+    @classmethod
+    def get_writer(cls, force_windows):
+        # for backward compatibility
+        warnings.warn("Use best", EasyInstallDeprecationWarning)
+        return WindowsScriptWriter.best() if force_windows else cls.best()
+
+    @classmethod
+    def best(cls):
+        """
+        Select the best ScriptWriter for this environment.
+        """
+        if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
+            return WindowsScriptWriter.best()
+        else:
+            return cls
+
+    @classmethod
+    def _get_script_args(cls, type_, name, header, script_text):
+        # Simply write the stub with no extension.
+        yield (name, header + script_text)
+
+    @classmethod
+    def get_header(cls, script_text="", executable=None):
+        """Create a #! line, getting options (if any) from script_text"""
+        cmd = cls.command_spec_class.best().from_param(executable)
+        cmd.install_options(script_text)
+        return cmd.as_header()
+
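+# A minimal usage sketch (illustrative only; `dist` is assumed to be a
+# pkg_resources.Distribution exposing console_scripts entry points):
+#
+#     writer = ScriptWriter.best()
+#     header = writer.get_header()
+#     for name, text, *rest in writer.get_args(dist, header):
+#         ...  # persist each stub, e.g. via write_script(name, text, *rest)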
+
+class WindowsScriptWriter(ScriptWriter):
+    command_spec_class = WindowsCommandSpec
+
+    @classmethod
+    def get_writer(cls):
+        # for backward compatibility
+        warnings.warn("Use best", EasyInstallDeprecationWarning)
+        return cls.best()
+
+    @classmethod
+    def best(cls):
+        """
+        Select the best ScriptWriter suitable for Windows
+        """
+        writer_lookup = dict(
+            executable=WindowsExecutableLauncherWriter,
+            natural=cls,
+        )
+        # for compatibility, use the executable launcher by default
+        launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
+        return writer_lookup[launcher]
+
+    @classmethod
+    def _get_script_args(cls, type_, name, header, script_text):
+        "For Windows, add a .py extension"
+        ext = dict(console='.pya', gui='.pyw')[type_]
+        if ext not in os.environ['PATHEXT'].lower().split(';'):
+            msg = (
+                "{ext} not listed in PATHEXT; scripts will not be "
+                "recognized as executables."
+            ).format(**locals())
+            warnings.warn(msg, UserWarning)
+        old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
+        old.remove(ext)
+        header = cls._adjust_header(type_, header)
+        blockers = [name + x for x in old]
+        yield name + ext, header + script_text, 't', blockers
+
+    @classmethod
+    def _adjust_header(cls, type_, orig_header):
+        """
+        Make sure 'pythonw' is used for gui and 'python' is used for
+        console (regardless of what sys.executable is).
+        """
+        pattern = 'pythonw.exe'
+        repl = 'python.exe'
+        if type_ == 'gui':
+            pattern, repl = repl, pattern
+        pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
+        new_header = pattern_ob.sub(string=orig_header, repl=repl)
+        return new_header if cls._use_header(new_header) else orig_header
+
+    @staticmethod
+    def _use_header(new_header):
+        """
+        Should _adjust_header use the replaced header?
+
+        On non-Windows systems, always use the replaced header. On
+        Windows systems, only use it if it resolves to an executable
+        on the system.
+        """
+        clean_header = new_header[2:-1].strip('"')
+        return sys.platform != 'win32' or find_executable(clean_header)
+
+
+class WindowsExecutableLauncherWriter(WindowsScriptWriter):
+    @classmethod
+    def _get_script_args(cls, type_, name, header, script_text):
+        """
+        For Windows, add a .py extension and an .exe launcher
+        """
+        if type_ == 'gui':
+            launcher_type = 'gui'
+            ext = '-script.pyw'
+            old = ['.pyw']
+        else:
+            launcher_type = 'cli'
+            ext = '-script.py'
+            old = ['.py', '.pyc', '.pyo']
+        hdr = cls._adjust_header(type_, header)
+        blockers = [name + x for x in old]
+        yield (name + ext, hdr + script_text, 't', blockers)
+        yield (
+            name + '.exe', get_win_launcher(launcher_type),
+            'b'  # write in binary mode
+        )
+        if not is_64bit():
+            # install a manifest for the launcher to prevent Windows
+            # from detecting it as an installer (which it will for
+            #  launchers like easy_install.exe). Consider only
+            #  adding a manifest for launchers detected as installers.
+            #  See Distribute #143 for details.
+            m_name = name + '.exe.manifest'
+            yield (m_name, load_launcher_manifest(name), 't')
+
+
+# for backward-compatibility
+get_script_args = ScriptWriter.get_script_args
+get_script_header = ScriptWriter.get_script_header
+
+
+def get_win_launcher(type):
+    """
+    Load the Windows launcher (executable) suitable for launching a script.
+
+    `type` should be either 'cli' or 'gui'
+
+    Returns the executable as a byte string.
+    """
+    launcher_fn = '%s.exe' % type
+    if is_64bit():
+        launcher_fn = launcher_fn.replace(".", "-64.")
+    else:
+        launcher_fn = launcher_fn.replace(".", "-32.")
+    return resource_string('setuptools', launcher_fn)
+
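+# Illustrative sketch (assumes a hypothetical console script 'myscript';
+# on a 32-bit Python this loads the bundled 'cli-32.exe'):
+#
+#     launcher = get_win_launcher('cli')
+#     with open('myscript.exe', 'wb') as f:
+#         f.write(launcher)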
+
+def load_launcher_manifest(name):
+    manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
+    if six.PY2:
+        return manifest % vars()
+    else:
+        return manifest.decode('utf-8') % vars()
+
+
+def rmtree(path, ignore_errors=False, onerror=auto_chmod):
+    return shutil.rmtree(path, ignore_errors, onerror)
+
+
+def current_umask():
+    tmp = os.umask(0o022)
+    os.umask(tmp)
+    return tmp
+
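+# os.umask() both sets and returns the previous mask, so current_umask()
+# restores the original value immediately and merely reports it. With a
+# typical umask of 0o022, scripts are written with 0o777 - 0o022 == 0o755
+# (see write_script in install_scripts below).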
+
+def bootstrap():
+    # This function is called when setuptools*.egg is run using /bin/sh
+    import setuptools
+
+    argv0 = os.path.dirname(setuptools.__path__[0])
+    sys.argv[0] = argv0
+    sys.argv.append(argv0)
+    main()
+
+
+def main(argv=None, **kw):
+    from setuptools import setup
+    from setuptools.dist import Distribution
+
+    class DistributionWithoutHelpCommands(Distribution):
+        common_usage = ""
+
+        def _show_help(self, *args, **kw):
+            with _patch_usage():
+                Distribution._show_help(self, *args, **kw)
+
+    if argv is None:
+        argv = sys.argv[1:]
+
+    with _patch_usage():
+        setup(
+            script_args=['-q', 'easy_install', '-v'] + argv,
+            script_name=sys.argv[0] or 'easy_install',
+            distclass=DistributionWithoutHelpCommands,
+            **kw
+        )
+
+
+@contextlib.contextmanager
+def _patch_usage():
+    import distutils.core
+    USAGE = textwrap.dedent("""
+        usage: %(script)s [options] requirement_or_url ...
+           or: %(script)s --help
+        """).lstrip()
+
+    def gen_usage(script_name):
+        return USAGE % dict(
+            script=os.path.basename(script_name),
+        )
+
+    saved = distutils.core.gen_usage
+    distutils.core.gen_usage = gen_usage
+    try:
+        yield
+    finally:
+        distutils.core.gen_usage = saved
+
+class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning):
+    """Class for warning about deprecations in EasyInstall in setuptools. Not ignored by default, unlike DeprecationWarning."""
+    
diff --git a/lib/python3.7/site-packages/setuptools/command/egg_info.py b/lib/python3.7/site-packages/setuptools/command/egg_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d8f451ee4555449177821ece5ad9807ffefed11
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/egg_info.py
@@ -0,0 +1,717 @@
+"""setuptools.command.egg_info
+
+Create a distribution's .egg-info directory and contents"""
+
+from distutils.filelist import FileList as _FileList
+from distutils.errors import DistutilsInternalError
+from distutils.util import convert_path
+from distutils import log
+import distutils.errors
+import distutils.filelist
+import os
+import re
+import sys
+import io
+import warnings
+import time
+import collections
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import map
+
+from setuptools import Command
+from setuptools.command.sdist import sdist
+from setuptools.command.sdist import walk_revctrl
+from setuptools.command.setopt import edit_config
+from setuptools.command import bdist_egg
+from pkg_resources import (
+    parse_requirements, safe_name, parse_version,
+    safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
+import setuptools.unicode_utils as unicode_utils
+from setuptools.glob import glob
+
+from setuptools.extern import packaging
+from setuptools import SetuptoolsDeprecationWarning
+
+def translate_pattern(glob):
+    """
+    Translate a file path glob like '*.txt' into a regular expression.
+    This differs from fnmatch.translate which allows wildcards to match
+    directory separators. It also knows about '**/' which matches any number of
+    directories.
+    """
+    pat = ''
+
+    # This will split on '/' within [character classes]. This is deliberate.
+    chunks = glob.split(os.path.sep)
+
+    sep = re.escape(os.sep)
+    valid_char = '[^%s]' % (sep,)
+
+    for c, chunk in enumerate(chunks):
+        last_chunk = c == len(chunks) - 1
+
+        # Chunks that are a literal ** are globstars. They match anything.
+        if chunk == '**':
+            if last_chunk:
+                # Match anything if this is the last component
+                pat += '.*'
+            else:
+                # Match '(name/)*'
+                pat += '(?:%s+%s)*' % (valid_char, sep)
+            continue  # The whole path component has been handled
+
+        # Find any special characters in the remainder
+        i = 0
+        chunk_len = len(chunk)
+        while i < chunk_len:
+            char = chunk[i]
+            if char == '*':
+                # Match any number of name characters
+                pat += valid_char + '*'
+            elif char == '?':
+                # Match a name character
+                pat += valid_char
+            elif char == '[':
+                # Character class
+                inner_i = i + 1
+                # Skip initial !/] chars
+                if inner_i < chunk_len and chunk[inner_i] == '!':
+                    inner_i = inner_i + 1
+                if inner_i < chunk_len and chunk[inner_i] == ']':
+                    inner_i = inner_i + 1
+
+                # Loop till the closing ] is found
+                while inner_i < chunk_len and chunk[inner_i] != ']':
+                    inner_i = inner_i + 1
+
+                if inner_i >= chunk_len:
+                    # Got to the end of the string without finding a closing ]
+                    # Do not treat this as a matching group, but as a literal [
+                    pat += re.escape(char)
+                else:
+                    # Grab the insides of the [brackets]
+                    inner = chunk[i + 1:inner_i]
+                    char_class = ''
+
+                    # Class negation
+                    if inner[0] == '!':
+                        char_class = '^'
+                        inner = inner[1:]
+
+                    char_class += re.escape(inner)
+                    pat += '[%s]' % (char_class,)
+
+                    # Skip to the end ]
+                    i = inner_i
+            else:
+                pat += re.escape(char)
+            i += 1
+
+        # Join each chunk with the dir separator
+        if not last_chunk:
+            pat += sep
+
+    pat += r'\Z'
+    return re.compile(pat, flags=re.MULTILINE|re.DOTALL)
+
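+# A brief doctest-style sketch (POSIX path separators assumed):
+#
+#     >>> bool(translate_pattern('docs/**/*.txt').match('docs/a/b/c.txt'))
+#     True
+#     >>> bool(translate_pattern('*.txt').match('docs/notes.txt'))
+#     False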
+
+class InfoCommon:
+    tag_build = None
+    tag_date = None
+
+    @property
+    def name(self):
+        return safe_name(self.distribution.get_name())
+
+    def tagged_version(self):
+        version = self.distribution.get_version()
+        # egg_info may be called more than once for a distribution,
+        # in which case the version string already contains all tags.
+        if self.vtags and version.endswith(self.vtags):
+            return safe_version(version)
+        return safe_version(version + self.vtags)
+
+    def tags(self):
+        version = ''
+        if self.tag_build:
+            version += self.tag_build
+        if self.tag_date:
+            version += time.strftime("-%Y%m%d")
+        return version
+    vtags = property(tags)
+
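+# For example, with tag_build='.dev' and tag_date set, tags() returns
+# something like '.dev-20190528', which tagged_version() appends to the
+# distribution version (normalized via safe_version()).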
+
+class egg_info(InfoCommon, Command):
+    description = "create a distribution's .egg-info directory"
+
+    user_options = [
+        ('egg-base=', 'e', "directory containing .egg-info directories"
+                           " (default: top of the source tree)"),
+        ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
+        ('tag-build=', 'b', "Specify explicit tag to add to version number"),
+        ('no-date', 'D', "Don't include date stamp [default]"),
+    ]
+
+    boolean_options = ['tag-date']
+    negative_opt = {
+        'no-date': 'tag-date',
+    }
+
+    def initialize_options(self):
+        self.egg_base = None
+        self.egg_name = None
+        self.egg_info = None
+        self.egg_version = None
+        self.broken_egg_info = False
+
+    ####################################
+    # allow the 'tag_svn_revision' to be detected and
+    # set, supporting sdists built on older Setuptools.
+    @property
+    def tag_svn_revision(self):
+        pass
+
+    @tag_svn_revision.setter
+    def tag_svn_revision(self, value):
+        pass
+    ####################################
+
+    def save_version_info(self, filename):
+        """
+        Materialize the value of date into the
+        build tag. Install build keys in a deterministic order
+        to avoid arbitrary reordering on subsequent builds.
+        """
+        egg_info = collections.OrderedDict()
+        # follow the order these keys would have been added
+        # when PYTHONHASHSEED=0
+        egg_info['tag_build'] = self.tags()
+        egg_info['tag_date'] = 0
+        edit_config(filename, dict(egg_info=egg_info))
+
+    def finalize_options(self):
+        # Note: we need to capture the current value returned
+        # by `self.tagged_version()`, so we can later update
+        # `self.distribution.metadata.version` without
+        # repercussions.
+        self.egg_name = self.name
+        self.egg_version = self.tagged_version()
+        parsed_version = parse_version(self.egg_version)
+
+        try:
+            is_version = isinstance(parsed_version, packaging.version.Version)
+            spec = (
+                "%s==%s" if is_version else "%s===%s"
+            )
+            list(
+                parse_requirements(spec % (self.egg_name, self.egg_version))
+            )
+        except ValueError:
+            raise distutils.errors.DistutilsOptionError(
+                "Invalid distribution name or version syntax: %s-%s" %
+                (self.egg_name, self.egg_version)
+            )
+
+        if self.egg_base is None:
+            dirs = self.distribution.package_dir
+            self.egg_base = (dirs or {}).get('', os.curdir)
+
+        self.ensure_dirname('egg_base')
+        self.egg_info = to_filename(self.egg_name) + '.egg-info'
+        if self.egg_base != os.curdir:
+            self.egg_info = os.path.join(self.egg_base, self.egg_info)
+        if '-' in self.egg_name:
+            self.check_broken_egg_info()
+
+        # Set package version for the benefit of dumber commands
+        # (e.g. sdist, bdist_wininst, etc.)
+        #
+        self.distribution.metadata.version = self.egg_version
+
+        # If we bootstrapped around the lack of a PKG-INFO, as might be the
+        # case in a fresh checkout, make sure that any special tags get added
+        # to the version info
+        #
+        pd = self.distribution._patched_dist
+        if pd is not None and pd.key == self.egg_name.lower():
+            pd._version = self.egg_version
+            pd._parsed_version = parse_version(self.egg_version)
+            self.distribution._patched_dist = None
+
+    def write_or_delete_file(self, what, filename, data, force=False):
+        """Write `data` to `filename` or delete if empty
+
+        If `data` is non-empty, this routine is the same as ``write_file()``.
+        If `data` is empty but not ``None``, this is the same as calling
+    ``delete_file(filename)``.  If `data` is ``None``, then this is a no-op
+        unless `filename` exists, in which case a warning is issued about the
+        orphaned file (if `force` is false), or deleted (if `force` is true).
+        """
+        if data:
+            self.write_file(what, filename, data)
+        elif os.path.exists(filename):
+            if data is None and not force:
+                log.warn(
+                    "%s not set in setup(), but %s exists", what, filename
+                )
+                return
+            else:
+                self.delete_file(filename)
+
+    def write_file(self, what, filename, data):
+        """Write `data` to `filename` (if not a dry run) after announcing it
+
+        `what` is used in a log message to identify what is being written
+        to the file.
+        """
+        log.info("writing %s to %s", what, filename)
+        if six.PY3:
+            data = data.encode("utf-8")
+        if not self.dry_run:
+            f = open(filename, 'wb')
+            f.write(data)
+            f.close()
+
+    def delete_file(self, filename):
+        """Delete `filename` (if not a dry run) after announcing it"""
+        log.info("deleting %s", filename)
+        if not self.dry_run:
+            os.unlink(filename)
+
+    def run(self):
+        self.mkpath(self.egg_info)
+        os.utime(self.egg_info, None)
+        installer = self.distribution.fetch_build_egg
+        for ep in iter_entry_points('egg_info.writers'):
+            ep.require(installer=installer)
+            writer = ep.resolve()
+            writer(self, ep.name, os.path.join(self.egg_info, ep.name))
+
+        # Get rid of native_libs.txt if it was put there by older bdist_egg
+        nl = os.path.join(self.egg_info, "native_libs.txt")
+        if os.path.exists(nl):
+            self.delete_file(nl)
+
+        self.find_sources()
+
+    def find_sources(self):
+        """Generate SOURCES.txt manifest file"""
+        manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
+        mm = manifest_maker(self.distribution)
+        mm.manifest = manifest_filename
+        mm.run()
+        self.filelist = mm.filelist
+
+    def check_broken_egg_info(self):
+        bei = self.egg_name + '.egg-info'
+        if self.egg_base != os.curdir:
+            bei = os.path.join(self.egg_base, bei)
+        if os.path.exists(bei):
+            log.warn(
+                "-" * 78 + '\n'
+                "Note: Your current .egg-info directory has a '-' in its name;"
+                '\nthis will not work correctly with "setup.py develop".\n\n'
+                'Please rename %s to %s to correct this problem.\n' + '-' * 78,
+                bei, self.egg_info
+            )
+            self.broken_egg_info = self.egg_info
+            self.egg_info = bei  # make it work for now
+
+
+class FileList(_FileList):
+    # Implementations of the various MANIFEST.in commands
+
+    def process_template_line(self, line):
+        # Parse the line: split it up, make sure the right number of words
+        # is there, and return the relevant words.  'action' is always
+        # defined: it's the first word of the line.  Which of the other
+        # three are defined depends on the action; it'll be either
+        # patterns, (dir and patterns), or (dir_pattern).
+        (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
+
+        # OK, now we know that the action is valid and we have the
+        # right number of words on the line for that action -- so we
+        # can proceed with minimal error-checking.
+        if action == 'include':
+            self.debug_print("include " + ' '.join(patterns))
+            for pattern in patterns:
+                if not self.include(pattern):
+                    log.warn("warning: no files found matching '%s'", pattern)
+
+        elif action == 'exclude':
+            self.debug_print("exclude " + ' '.join(patterns))
+            for pattern in patterns:
+                if not self.exclude(pattern):
+                    log.warn(("warning: no previously-included files "
+                              "found matching '%s'"), pattern)
+
+        elif action == 'global-include':
+            self.debug_print("global-include " + ' '.join(patterns))
+            for pattern in patterns:
+                if not self.global_include(pattern):
+                    log.warn(("warning: no files found matching '%s' "
+                              "anywhere in distribution"), pattern)
+
+        elif action == 'global-exclude':
+            self.debug_print("global-exclude " + ' '.join(patterns))
+            for pattern in patterns:
+                if not self.global_exclude(pattern):
+                    log.warn(("warning: no previously-included files matching "
+                              "'%s' found anywhere in distribution"),
+                             pattern)
+
+        elif action == 'recursive-include':
+            self.debug_print("recursive-include %s %s" %
+                             (dir, ' '.join(patterns)))
+            for pattern in patterns:
+                if not self.recursive_include(dir, pattern):
+                    log.warn(("warning: no files found matching '%s' "
+                              "under directory '%s'"),
+                             pattern, dir)
+
+        elif action == 'recursive-exclude':
+            self.debug_print("recursive-exclude %s %s" %
+                             (dir, ' '.join(patterns)))
+            for pattern in patterns:
+                if not self.recursive_exclude(dir, pattern):
+                    log.warn(("warning: no previously-included files matching "
+                              "'%s' found under directory '%s'"),
+                             pattern, dir)
+
+        elif action == 'graft':
+            self.debug_print("graft " + dir_pattern)
+            if not self.graft(dir_pattern):
+                log.warn("warning: no directories found matching '%s'",
+                         dir_pattern)
+
+        elif action == 'prune':
+            self.debug_print("prune " + dir_pattern)
+            if not self.prune(dir_pattern):
+                log.warn(("no previously-included directories found "
+                          "matching '%s'"), dir_pattern)
+
+        else:
+            raise DistutilsInternalError(
+                "this cannot happen: invalid action '%s'" % action)
+
+    def _remove_files(self, predicate):
+        """
+        Remove all files from the file list that match the predicate.
+        Return True if any matching files were removed
+        """
+        found = False
+        for i in range(len(self.files) - 1, -1, -1):
+            if predicate(self.files[i]):
+                self.debug_print(" removing " + self.files[i])
+                del self.files[i]
+                found = True
+        return found
+
+    def include(self, pattern):
+        """Include files that match 'pattern'."""
+        found = [f for f in glob(pattern) if not os.path.isdir(f)]
+        self.extend(found)
+        return bool(found)
+
+    def exclude(self, pattern):
+        """Exclude files that match 'pattern'."""
+        match = translate_pattern(pattern)
+        return self._remove_files(match.match)
+
+    def recursive_include(self, dir, pattern):
+        """
+        Include all files anywhere in 'dir/' that match the pattern.
+        """
+        full_pattern = os.path.join(dir, '**', pattern)
+        found = [f for f in glob(full_pattern, recursive=True)
+                 if not os.path.isdir(f)]
+        self.extend(found)
+        return bool(found)
+
+    def recursive_exclude(self, dir, pattern):
+        """
+        Exclude any file anywhere in 'dir/' that matches the pattern.
+        """
+        match = translate_pattern(os.path.join(dir, '**', pattern))
+        return self._remove_files(match.match)
+
+    def graft(self, dir):
+        """Include all files from 'dir/'."""
+        found = [
+            item
+            for match_dir in glob(dir)
+            for item in distutils.filelist.findall(match_dir)
+        ]
+        self.extend(found)
+        return bool(found)
+
+    def prune(self, dir):
+        """Filter out files from 'dir/'."""
+        match = translate_pattern(os.path.join(dir, '**'))
+        return self._remove_files(match.match)
+
+    def global_include(self, pattern):
+        """
+        Include all files anywhere in the current directory that match the
+        pattern. This is very inefficient on large file trees.
+        """
+        if self.allfiles is None:
+            self.findall()
+        match = translate_pattern(os.path.join('**', pattern))
+        found = [f for f in self.allfiles if match.match(f)]
+        self.extend(found)
+        return bool(found)
+
+    def global_exclude(self, pattern):
+        """
+        Exclude all files anywhere that match the pattern.
+        """
+        match = translate_pattern(os.path.join('**', pattern))
+        return self._remove_files(match.match)
+
+    def append(self, item):
+        if item.endswith('\r'):  # Fix older sdists built on Windows
+            item = item[:-1]
+        path = convert_path(item)
+
+        if self._safe_path(path):
+            self.files.append(path)
+
+    def extend(self, paths):
+        self.files.extend(filter(self._safe_path, paths))
+
+    def _repair(self):
+        """
+        Replace self.files with only safe paths
+
+        Because some owners of FileList manipulate the underlying
+        ``files`` attribute directly, this method must be called to
+        repair those paths.
+        """
+        self.files = list(filter(self._safe_path, self.files))
+
+    def _safe_path(self, path):
+        enc_warn = "'%s' not %s encodable -- skipping"
+
+        # To avoid accidental transcoding errors, decode to unicode first
+        u_path = unicode_utils.filesys_decode(path)
+        if u_path is None:
+            log.warn("'%s' in unexpected encoding -- skipping" % path)
+            return False
+
+        # Must ensure utf-8 encodability
+        utf8_path = unicode_utils.try_encode(u_path, "utf-8")
+        if utf8_path is None:
+            log.warn(enc_warn, path, 'utf-8')
+            return False
+
+        try:
+            # accept if either way checks out
+            if os.path.exists(u_path) or os.path.exists(utf8_path):
+                return True
+        # this will catch any encode errors raised while checking u_path
+        except UnicodeEncodeError:
+            log.warn(enc_warn, path, sys.getfilesystemencoding())
+
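+# A minimal sketch of how these MANIFEST.in primitives compose
+# (illustrative paths only):
+#
+#     fl = FileList()
+#     fl.include('README.rst')               # include README.rst
+#     fl.recursive_include('docs', '*.txt')  # include docs/**/*.txt
+#     fl.global_exclude('*.pyc')             # prune **/*.pyc everywhere
+#     fl.sort(); fl.remove_duplicates()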
+
+class manifest_maker(sdist):
+    template = "MANIFEST.in"
+
+    def initialize_options(self):
+        self.use_defaults = 1
+        self.prune = 1
+        self.manifest_only = 1
+        self.force_manifest = 1
+
+    def finalize_options(self):
+        pass
+
+    def run(self):
+        self.filelist = FileList()
+        if not os.path.exists(self.manifest):
+            self.write_manifest()  # it must exist so it'll get in the list
+        self.add_defaults()
+        if os.path.exists(self.template):
+            self.read_template()
+        self.prune_file_list()
+        self.filelist.sort()
+        self.filelist.remove_duplicates()
+        self.write_manifest()
+
+    def _manifest_normalize(self, path):
+        path = unicode_utils.filesys_decode(path)
+        return path.replace(os.sep, '/')
+
+    def write_manifest(self):
+        """
+        Write the file list in 'self.filelist' to the manifest file
+        named by 'self.manifest'.
+        """
+        self.filelist._repair()
+
+        # _repair ensures the paths are encodable, but they may not yet be
+        # unicode; _manifest_normalize decodes and normalizes separators
+        files = [self._manifest_normalize(f) for f in self.filelist.files]
+        msg = "writing manifest file '%s'" % self.manifest
+        self.execute(write_file, (self.manifest, files), msg)
+
+    def warn(self, msg):
+        if not self._should_suppress_warning(msg):
+            sdist.warn(self, msg)
+
+    @staticmethod
+    def _should_suppress_warning(msg):
+        """
+        suppress missing-file warnings from sdist
+        """
+        return re.match(r"standard file .*not found", msg)
+
+    def add_defaults(self):
+        sdist.add_defaults(self)
+        self.check_license()
+        self.filelist.append(self.template)
+        self.filelist.append(self.manifest)
+        rcfiles = list(walk_revctrl())
+        if rcfiles:
+            self.filelist.extend(rcfiles)
+        elif os.path.exists(self.manifest):
+            self.read_manifest()
+
+        if os.path.exists("setup.py"):
+            # setup.py should be included by default, even if it's not
+            # the script called to create the sdist
+            self.filelist.append("setup.py")
+
+        ei_cmd = self.get_finalized_command('egg_info')
+        self.filelist.graft(ei_cmd.egg_info)
+
+    def prune_file_list(self):
+        build = self.get_finalized_command('build')
+        base_dir = self.distribution.get_fullname()
+        self.filelist.prune(build.build_base)
+        self.filelist.prune(base_dir)
+        sep = re.escape(os.sep)
+        self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
+                                      is_regex=1)
+
+
+def write_file(filename, contents):
+    """Create a file with the specified name and write 'contents' (a
+    sequence of strings without line terminators) to it.
+    """
+    contents = "\n".join(contents)
+
+    # assuming the contents has been vetted for utf-8 encoding
+    contents = contents.encode("utf-8")
+
+    with open(filename, "wb") as f:  # always write POSIX-style manifest
+        f.write(contents)
+
+
+def write_pkg_info(cmd, basename, filename):
+    log.info("writing %s", filename)
+    if not cmd.dry_run:
+        metadata = cmd.distribution.metadata
+        metadata.version, oldver = cmd.egg_version, metadata.version
+        metadata.name, oldname = cmd.egg_name, metadata.name
+
+        try:
+            # write unescaped data to PKG-INFO, so older pkg_resources
+            # can still parse it
+            metadata.write_pkg_info(cmd.egg_info)
+        finally:
+            metadata.name, metadata.version = oldname, oldver
+
+        safe = getattr(cmd.distribution, 'zip_safe', None)
+
+        bdist_egg.write_safety_flag(cmd.egg_info, safe)
+
+
+def warn_depends_obsolete(cmd, basename, filename):
+    if os.path.exists(filename):
+        log.warn(
+            "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
+            "Use the install_requires/extras_require setup() args instead."
+        )
+
+
+def _write_requirements(stream, reqs):
+    lines = yield_lines(reqs or ())
+    append_cr = lambda line: line + '\n'
+    lines = map(append_cr, lines)
+    stream.writelines(lines)
+
+
+def write_requirements(cmd, basename, filename):
+    dist = cmd.distribution
+    data = six.StringIO()
+    _write_requirements(data, dist.install_requires)
+    extras_require = dist.extras_require or {}
+    for extra in sorted(extras_require):
+        data.write('\n[{extra}]\n'.format(**vars()))
+        _write_requirements(data, extras_require[extra])
+    cmd.write_or_delete_file("requirements", filename, data.getvalue())
+
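+# The resulting requires.txt lists install_requires first, followed by one
+# [extra] section per extras_require key, e.g.:
+#
+#     requests>=2.0
+#
+#     [dev]
+#     pytest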
+
+def write_setup_requirements(cmd, basename, filename):
+    data = io.StringIO()
+    _write_requirements(data, cmd.distribution.setup_requires)
+    cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
+
+
+def write_toplevel_names(cmd, basename, filename):
+    pkgs = dict.fromkeys(
+        [
+            k.split('.', 1)[0]
+            for k in cmd.distribution.iter_distribution_names()
+        ]
+    )
+    cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
+
+
+def overwrite_arg(cmd, basename, filename):
+    write_arg(cmd, basename, filename, True)
+
+
+def write_arg(cmd, basename, filename, force=False):
+    argname = os.path.splitext(basename)[0]
+    value = getattr(cmd.distribution, argname, None)
+    if value is not None:
+        value = '\n'.join(value) + '\n'
+    cmd.write_or_delete_file(argname, filename, value, force)
+
+
+def write_entries(cmd, basename, filename):
+    ep = cmd.distribution.entry_points
+
+    if isinstance(ep, six.string_types) or ep is None:
+        data = ep
+    else:
+        data = []
+        for section, contents in sorted(ep.items()):
+            if not isinstance(contents, six.string_types):
+                contents = EntryPoint.parse_group(section, contents)
+                contents = '\n'.join(sorted(map(str, contents.values())))
+            data.append('[%s]\n%s\n\n' % (section, contents))
+        data = ''.join(data)
+
+    cmd.write_or_delete_file('entry points', filename, data, True)
+
+
+def get_pkg_info_revision():
+    """
+    Extract the Subversion revision number (-r###) from the PKG-INFO
+    Version field, in case this is an sdist of a subversion revision.
+    """
+    warnings.warn("get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning)
+    if os.path.exists('PKG-INFO'):
+        with io.open('PKG-INFO') as f:
+            for line in f:
+                match = re.match(r"Version:.*-r(\d+)\s*$", line)
+                if match:
+                    return int(match.group(1))
+    return 0
+
+
+class EggInfoDeprecationWarning(SetuptoolsDeprecationWarning):
+    """Class for warning about deprecations in egg_info in setuptools. Not ignored by default, unlike DeprecationWarning."""
diff --git a/lib/python3.7/site-packages/setuptools/command/install.py b/lib/python3.7/site-packages/setuptools/command/install.py
new file mode 100644
index 0000000000000000000000000000000000000000..31a5ddb57739f8dc132cef1b9f5d252befb67ca7
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/install.py
@@ -0,0 +1,125 @@
+from distutils.errors import DistutilsArgError
+import inspect
+import glob
+import warnings
+import platform
+import distutils.command.install as orig
+
+import setuptools
+
+# Prior to numpy 1.9, NumPy relied on the '_install' name, so provide it for
+# now. See https://github.com/pypa/setuptools/issues/199/
+_install = orig.install
+
+
+class install(orig.install):
+    """Use easy_install to install the package, w/dependencies"""
+
+    user_options = orig.install.user_options + [
+        ('old-and-unmanageable', None, "Try not to use this!"),
+        ('single-version-externally-managed', None,
+         "used by system package builders to create 'flat' eggs"),
+    ]
+    boolean_options = orig.install.boolean_options + [
+        'old-and-unmanageable', 'single-version-externally-managed',
+    ]
+    new_commands = [
+        ('install_egg_info', lambda self: True),
+        ('install_scripts', lambda self: True),
+    ]
+    _nc = dict(new_commands)
+
+    def initialize_options(self):
+        orig.install.initialize_options(self)
+        self.old_and_unmanageable = None
+        self.single_version_externally_managed = None
+
+    def finalize_options(self):
+        orig.install.finalize_options(self)
+        if self.root:
+            self.single_version_externally_managed = True
+        elif self.single_version_externally_managed:
+            if not self.root and not self.record:
+                raise DistutilsArgError(
+                    "You must specify --record or --root when building system"
+                    " packages"
+                )
+
+    def handle_extra_path(self):
+        if self.root or self.single_version_externally_managed:
+            # explicit backward-compatibility mode, allow extra_path to work
+            return orig.install.handle_extra_path(self)
+
+        # Ignore extra_path when installing an egg (or being run by another
+        # command without --root or --single-version-externally-managed)
+        self.path_file = None
+        self.extra_dirs = ''
+
+    def run(self):
+        # Explicit request for old-style install?  Just do it
+        if self.old_and_unmanageable or self.single_version_externally_managed:
+            return orig.install.run(self)
+
+        if not self._called_from_setup(inspect.currentframe()):
+            # Run in backward-compatibility mode to support bdist_* commands.
+            orig.install.run(self)
+        else:
+            self.do_egg_install()
+
+    @staticmethod
+    def _called_from_setup(run_frame):
+        """
+        Attempt to detect whether run() was called from setup() or by another
+        command.  If called by setup(), the parent caller will be the
+        'run_command' method in 'distutils.dist', and *its* caller will be
+        the 'run_commands' method.  If called any other way, the
+        immediate caller *might* be 'run_command', but it won't have been
+        called by 'run_commands'. Return True in that case or if a call stack
+        is unavailable. Return False otherwise.
+        """
+        if run_frame is None:
+            msg = "Call stack not available. bdist_* commands may fail."
+            warnings.warn(msg)
+            if platform.python_implementation() == 'IronPython':
+                msg = "For best results, pass -X:Frames to enable call stack."
+                warnings.warn(msg)
+            return True
+        res = inspect.getouterframes(run_frame)[2]
+        caller, = res[:1]
+        info = inspect.getframeinfo(caller)
+        caller_module = caller.f_globals.get('__name__', '')
+        return (
+            caller_module == 'distutils.dist'
+            and info.function == 'run_commands'
+        )
+
+    def do_egg_install(self):
+
+        easy_install = self.distribution.get_command_class('easy_install')
+
+        cmd = easy_install(
+            self.distribution, args="x", root=self.root, record=self.record,
+        )
+        cmd.ensure_finalized()  # finalize before bdist_egg munges install cmd
+        cmd.always_copy_from = '.'  # make sure local-dir eggs get installed
+
+        # pick up setup-dir .egg files only: no .egg-info
+        cmd.package_index.scan(glob.glob('*.egg'))
+
+        self.run_command('bdist_egg')
+        args = [self.distribution.get_command_obj('bdist_egg').egg_output]
+
+        if setuptools.bootstrap_install_from:
+            # Bootstrap self-installation of setuptools
+            args.insert(0, setuptools.bootstrap_install_from)
+
+        cmd.args = args
+        cmd.run()
+        setuptools.bootstrap_install_from = None
+
+
+# XXX Python 3.1 doesn't see _nc if this is inside the class
+install.sub_commands = (
+    [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
+    install.new_commands
+)
diff --git a/lib/python3.7/site-packages/setuptools/command/install_egg_info.py b/lib/python3.7/site-packages/setuptools/command/install_egg_info.py
new file mode 100644
index 0000000000000000000000000000000000000000..edc4718b686203312cea4487a30d8f7801f49b08
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/install_egg_info.py
@@ -0,0 +1,62 @@
+from distutils import log, dir_util
+import os
+
+from setuptools import Command
+from setuptools import namespaces
+from setuptools.archive_util import unpack_archive
+import pkg_resources
+
+
+class install_egg_info(namespaces.Installer, Command):
+    """Install an .egg-info directory for the package"""
+
+    description = "Install an .egg-info directory for the package"
+
+    user_options = [
+        ('install-dir=', 'd', "directory to install to"),
+    ]
+
+    def initialize_options(self):
+        self.install_dir = None
+
+    def finalize_options(self):
+        self.set_undefined_options('install_lib',
+                                   ('install_dir', 'install_dir'))
+        ei_cmd = self.get_finalized_command("egg_info")
+        basename = pkg_resources.Distribution(
+            None, None, ei_cmd.egg_name, ei_cmd.egg_version
+        ).egg_name() + '.egg-info'
+        self.source = ei_cmd.egg_info
+        self.target = os.path.join(self.install_dir, basename)
+        self.outputs = []
+
+    def run(self):
+        self.run_command('egg_info')
+        if os.path.isdir(self.target) and not os.path.islink(self.target):
+            dir_util.remove_tree(self.target, dry_run=self.dry_run)
+        elif os.path.exists(self.target):
+            self.execute(os.unlink, (self.target,), "Removing " + self.target)
+        if not self.dry_run:
+            pkg_resources.ensure_directory(self.target)
+        self.execute(
+            self.copytree, (), "Copying %s to %s" % (self.source, self.target)
+        )
+        self.install_namespaces()
+
+    def get_outputs(self):
+        return self.outputs
+
+    def copytree(self):
+        # Copy the .egg-info tree to site-packages
+        def skimmer(src, dst):
+            # filter out source-control directories; note that 'src' is always
+            # a '/'-separated path, regardless of platform.  'dst' is a
+            # platform-specific path.
+            for skip in '.svn/', 'CVS/':
+                if src.startswith(skip) or '/' + skip in src:
+                    return None
+            self.outputs.append(dst)
+            log.debug("Copying %s to %s", src, dst)
+            return dst
+
+        unpack_archive(self.source, self.target, skimmer)
diff --git a/lib/python3.7/site-packages/setuptools/command/install_lib.py b/lib/python3.7/site-packages/setuptools/command/install_lib.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b31c3e38b573701e2a49e54e5267170ccd7ed4b
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/install_lib.py
@@ -0,0 +1,121 @@
+import os
+import imp
+from itertools import product, starmap
+import distutils.command.install_lib as orig
+
+
+class install_lib(orig.install_lib):
+    """Don't add compiled flags to filenames of non-Python files"""
+
+    def run(self):
+        self.build()
+        outfiles = self.install()
+        if outfiles is not None:
+            # always compile, in case we have any extension stubs to deal with
+            self.byte_compile(outfiles)
+
+    def get_exclusions(self):
+        """
+        Return a collections.Sized collections.Container of paths to be
+        excluded for single_version_externally_managed installations.
+        """
+        all_packages = (
+            pkg
+            for ns_pkg in self._get_SVEM_NSPs()
+            for pkg in self._all_packages(ns_pkg)
+        )
+
+        excl_specs = product(all_packages, self._gen_exclusion_paths())
+        return set(starmap(self._exclude_pkg_path, excl_specs))
+
+    def _exclude_pkg_path(self, pkg, exclusion_path):
+        """
+        Given a package name and exclusion path within that package,
+        compute the full exclusion path.
+        """
+        parts = pkg.split('.') + [exclusion_path]
+        return os.path.join(self.install_dir, *parts)
+
+    @staticmethod
+    def _all_packages(pkg_name):
+        """
+        >>> list(install_lib._all_packages('foo.bar.baz'))
+        ['foo.bar.baz', 'foo.bar', 'foo']
+        """
+        while pkg_name:
+            yield pkg_name
+            pkg_name, sep, child = pkg_name.rpartition('.')
+
+    def _get_SVEM_NSPs(self):
+        """
+        Get namespace packages (list) but only for
+        single_version_externally_managed installations and empty otherwise.
+        """
+        # TODO: is it necessary to short-circuit here? i.e. what's the cost
+        # if get_finalized_command is called even when namespace_packages is
+        # False?
+        if not self.distribution.namespace_packages:
+            return []
+
+        install_cmd = self.get_finalized_command('install')
+        svem = install_cmd.single_version_externally_managed
+
+        return self.distribution.namespace_packages if svem else []
+
+    @staticmethod
+    def _gen_exclusion_paths():
+        """
+        Generate file paths to be excluded for namespace packages (bytecode
+        cache files).
+        """
+        # always exclude the package module itself
+        yield '__init__.py'
+
+        yield '__init__.pyc'
+        yield '__init__.pyo'
+
+        if not hasattr(imp, 'get_tag'):
+            return
+
+        base = os.path.join('__pycache__', '__init__.' + imp.get_tag())
+        yield base + '.pyc'
+        yield base + '.pyo'
+        yield base + '.opt-1.pyc'
+        yield base + '.opt-2.pyc'
+
+    def copy_tree(
+            self, infile, outfile,
+            preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
+    ):
+        assert preserve_mode and preserve_times and not preserve_symlinks
+        exclude = self.get_exclusions()
+
+        if not exclude:
+            return orig.install_lib.copy_tree(self, infile, outfile)
+
+        # Exclude namespace package __init__.py* files from the output
+
+        from setuptools.archive_util import unpack_directory
+        from distutils import log
+
+        outfiles = []
+
+        def pf(src, dst):
+            if dst in exclude:
+                log.warn("Skipping installation of %s (namespace package)",
+                         dst)
+                return False
+
+            log.info("copying %s -> %s", src, os.path.dirname(dst))
+            outfiles.append(dst)
+            return dst
+
+        unpack_directory(infile, outfile, pf)
+        return outfiles
+
+    def get_outputs(self):
+        outputs = orig.install_lib.get_outputs(self)
+        exclude = self.get_exclusions()
+        if exclude:
+            return [f for f in outputs if f not in exclude]
+        return outputs
diff --git a/lib/python3.7/site-packages/setuptools/command/install_scripts.py b/lib/python3.7/site-packages/setuptools/command/install_scripts.py
new file mode 100644
index 0000000000000000000000000000000000000000..16234273a2d36b0b3d821a7a97bf8f03cf3f2948
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/install_scripts.py
@@ -0,0 +1,65 @@
+from distutils import log
+import distutils.command.install_scripts as orig
+import os
+import sys
+
+from pkg_resources import Distribution, PathMetadata, ensure_directory
+
+
+class install_scripts(orig.install_scripts):
+    """Do normal script install, plus any egg_info wrapper scripts"""
+
+    def initialize_options(self):
+        orig.install_scripts.initialize_options(self)
+        self.no_ep = False
+
+    def run(self):
+        import setuptools.command.easy_install as ei
+
+        self.run_command("egg_info")
+        if self.distribution.scripts:
+            orig.install_scripts.run(self)  # run first to set up self.outfiles
+        else:
+            self.outfiles = []
+        if self.no_ep:
+            # don't install entry point scripts into .egg file!
+            return
+
+        ei_cmd = self.get_finalized_command("egg_info")
+        dist = Distribution(
+            ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
+            ei_cmd.egg_name, ei_cmd.egg_version,
+        )
+        bs_cmd = self.get_finalized_command('build_scripts')
+        exec_param = getattr(bs_cmd, 'executable', None)
+        bw_cmd = self.get_finalized_command("bdist_wininst")
+        is_wininst = getattr(bw_cmd, '_is_running', False)
+        writer = ei.ScriptWriter
+        if is_wininst:
+            exec_param = "python.exe"
+            writer = ei.WindowsScriptWriter
+        if exec_param == sys.executable:
+            # In case the path to the Python executable contains a space, wrap
+            # it so it's not split up.
+            exec_param = [exec_param]
+        # resolve the writer to the environment
+        writer = writer.best()
+        cmd = writer.command_spec_class.best().from_param(exec_param)
+        for args in writer.get_args(dist, cmd.as_header()):
+            self.write_script(*args)
+
+    def write_script(self, script_name, contents, mode="t", *ignored):
+        """Write an executable file to the scripts directory"""
+        from setuptools.command.easy_install import chmod, current_umask
+
+        log.info("Installing %s script to %s", script_name, self.install_dir)
+        target = os.path.join(self.install_dir, script_name)
+        self.outfiles.append(target)
+
+        mask = current_umask()
+        if not self.dry_run:
+            ensure_directory(target)
+            f = open(target, "w" + mode)
+            f.write(contents)
+            f.close()
+            chmod(target, 0o777 - mask)
diff --git a/lib/python3.7/site-packages/setuptools/command/launcher manifest.xml b/lib/python3.7/site-packages/setuptools/command/launcher manifest.xml
new file mode 100644
index 0000000000000000000000000000000000000000..5972a96d8ded85cc14147ffc1400ec67c3b5a578
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/launcher manifest.xml	
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+    <assemblyIdentity version="1.0.0.0"
+                      processorArchitecture="X86"
+                      name="%(name)s"
+                      type="win32"/>
+    <!-- Identify the application security requirements. -->
+    <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+        <security>
+            <requestedPrivileges>
+                <requestedExecutionLevel level="asInvoker" uiAccess="false"/>
+            </requestedPrivileges>
+        </security>
+    </trustInfo>
+</assembly>
diff --git a/lib/python3.7/site-packages/setuptools/command/py36compat.py b/lib/python3.7/site-packages/setuptools/command/py36compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..61063e7542586c05c3af21d31cd917ebd1118272
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/py36compat.py
@@ -0,0 +1,136 @@
+import os
+from glob import glob
+from distutils.util import convert_path
+from distutils.command import sdist
+
+from setuptools.extern.six.moves import filter
+
+
+class sdist_add_defaults:
+    """
+    Mix-in providing forward-compatibility for functionality as found in
+    distutils on Python 3.7.
+
+    Do not edit the code in this class except to update functionality
+    as implemented in distutils. Instead, override in the subclass.
+    """
+
+    def add_defaults(self):
+        """Add all the default files to self.filelist:
+          - README or README.txt
+          - setup.py
+          - test/test*.py
+          - all pure Python modules mentioned in setup script
+          - all files pointed by package_data (build_py)
+          - all files defined in data_files.
+          - all files defined as scripts.
+          - all C sources listed as part of extensions or C libraries
+            in the setup script (doesn't catch C headers!)
+        Warns if (README or README.txt) or setup.py are missing; everything
+        else is optional.
+        """
+        self._add_defaults_standards()
+        self._add_defaults_optional()
+        self._add_defaults_python()
+        self._add_defaults_data_files()
+        self._add_defaults_ext()
+        self._add_defaults_c_libs()
+        self._add_defaults_scripts()
+
+    @staticmethod
+    def _cs_path_exists(fspath):
+        """
+        Case-sensitive path existence check
+
+        >>> sdist_add_defaults._cs_path_exists(__file__)
+        True
+        >>> sdist_add_defaults._cs_path_exists(__file__.upper())
+        False
+        """
+        if not os.path.exists(fspath):
+            return False
+        # make absolute so we always have a directory
+        abspath = os.path.abspath(fspath)
+        directory, filename = os.path.split(abspath)
+        return filename in os.listdir(directory)
+
+    def _add_defaults_standards(self):
+        standards = [self.READMES, self.distribution.script_name]
+        for fn in standards:
+            if isinstance(fn, tuple):
+                alts = fn
+                got_it = False
+                for fn in alts:
+                    if self._cs_path_exists(fn):
+                        got_it = True
+                        self.filelist.append(fn)
+                        break
+
+                if not got_it:
+                    self.warn("standard file not found: should have one of " +
+                              ', '.join(alts))
+            else:
+                if self._cs_path_exists(fn):
+                    self.filelist.append(fn)
+                else:
+                    self.warn("standard file '%s' not found" % fn)
+
+    def _add_defaults_optional(self):
+        optional = ['test/test*.py', 'setup.cfg']
+        for pattern in optional:
+            files = filter(os.path.isfile, glob(pattern))
+            self.filelist.extend(files)
+
+    def _add_defaults_python(self):
+        # build_py is used to get:
+        #  - python modules
+        #  - files defined in package_data
+        build_py = self.get_finalized_command('build_py')
+
+        # getting python files
+        if self.distribution.has_pure_modules():
+            self.filelist.extend(build_py.get_source_files())
+
+        # getting package_data files
+        # (computed in build_py.data_files by build_py.finalize_options)
+        for pkg, src_dir, build_dir, filenames in build_py.data_files:
+            for filename in filenames:
+                self.filelist.append(os.path.join(src_dir, filename))
+
+    def _add_defaults_data_files(self):
+        # getting distribution.data_files
+        if self.distribution.has_data_files():
+            for item in self.distribution.data_files:
+                if isinstance(item, str):
+                    # plain file
+                    item = convert_path(item)
+                    if os.path.isfile(item):
+                        self.filelist.append(item)
+                else:
+                    # a (dirname, filenames) tuple
+                    dirname, filenames = item
+                    for f in filenames:
+                        f = convert_path(f)
+                        if os.path.isfile(f):
+                            self.filelist.append(f)
+
+    def _add_defaults_ext(self):
+        if self.distribution.has_ext_modules():
+            build_ext = self.get_finalized_command('build_ext')
+            self.filelist.extend(build_ext.get_source_files())
+
+    def _add_defaults_c_libs(self):
+        if self.distribution.has_c_libraries():
+            build_clib = self.get_finalized_command('build_clib')
+            self.filelist.extend(build_clib.get_source_files())
+
+    def _add_defaults_scripts(self):
+        if self.distribution.has_scripts():
+            build_scripts = self.get_finalized_command('build_scripts')
+            self.filelist.extend(build_scripts.get_source_files())
+
+
+if hasattr(sdist.sdist, '_add_defaults_standards'):
+    # disable the functionality already available upstream
+    class sdist_add_defaults:
+        pass
diff --git a/lib/python3.7/site-packages/setuptools/command/register.py b/lib/python3.7/site-packages/setuptools/command/register.py
new file mode 100644
index 0000000000000000000000000000000000000000..98bc01566f42b289e08cc180fb6e00cc5f4824a3
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/register.py
@@ -0,0 +1,18 @@
+from distutils import log
+import distutils.command.register as orig
+
+
+class register(orig.register):
+    __doc__ = orig.register.__doc__
+
+    def run(self):
+        try:
+            # Make sure that we are using valid current name/version info
+            self.run_command('egg_info')
+            orig.register.run(self)
+        finally:
+            self.announce(
+                "WARNING: Registering is deprecated, use twine to "
+                "upload instead (https://pypi.org/p/twine/)",
+                log.WARN
+            )
diff --git a/lib/python3.7/site-packages/setuptools/command/rotate.py b/lib/python3.7/site-packages/setuptools/command/rotate.py
new file mode 100644
index 0000000000000000000000000000000000000000..b89353f529b3d08e768dea69a9dc8b5e7403003d
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/rotate.py
@@ -0,0 +1,66 @@
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import os
+import shutil
+
+from setuptools.extern import six
+
+from setuptools import Command
+
+
+class rotate(Command):
+    """Delete older distributions"""
+
+    description = "delete older distributions, keeping N newest files"
+    user_options = [
+        ('match=', 'm', "patterns to match (required)"),
+        ('dist-dir=', 'd', "directory where the distributions are"),
+        ('keep=', 'k', "number of matching distributions to keep"),
+    ]
+
+    boolean_options = []
+
+    def initialize_options(self):
+        self.match = None
+        self.dist_dir = None
+        self.keep = None
+
+    def finalize_options(self):
+        if self.match is None:
+            raise DistutilsOptionError(
+                "Must specify one or more (comma-separated) match patterns "
+                "(e.g. '.zip' or '.egg')"
+            )
+        if self.keep is None:
+            raise DistutilsOptionError("Must specify number of files to keep")
+        try:
+            self.keep = int(self.keep)
+        except ValueError:
+            raise DistutilsOptionError("--keep must be an integer")
+        if isinstance(self.match, six.string_types):
+            self.match = [
+                convert_path(p.strip()) for p in self.match.split(',')
+            ]
+        self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+    def run(self):
+        self.run_command("egg_info")
+        from glob import glob
+
+        for pattern in self.match:
+            pattern = self.distribution.get_name() + '*' + pattern
+            files = glob(os.path.join(self.dist_dir, pattern))
+            files = [(os.path.getmtime(f), f) for f in files]
+            files.sort()
+            files.reverse()
+
+            log.info("%d file(s) matching %s", len(files), pattern)
+            files = files[self.keep:]
+            for (t, f) in files:
+                log.info("Deleting %s", f)
+                if not self.dry_run:
+                    if os.path.isdir(f):
+                        shutil.rmtree(f)
+                    else:
+                        os.unlink(f)
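+
+# Hedged CLI sketch (pattern and count illustrative): keep only the two
+# newest .egg files in dist/ and delete the rest:
+#
+#     python setup.py rotate --match=.egg --keep=2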
diff --git a/lib/python3.7/site-packages/setuptools/command/saveopts.py b/lib/python3.7/site-packages/setuptools/command/saveopts.py
new file mode 100644
index 0000000000000000000000000000000000000000..611cec552867a6d50b7edd700c86c7396d906ea2
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/saveopts.py
@@ -0,0 +1,22 @@
+from setuptools.command.setopt import edit_config, option_base
+
+
+class saveopts(option_base):
+    """Save command-line options to a file"""
+
+    description = "save supplied options to setup.cfg or other config file"
+
+    def run(self):
+        dist = self.distribution
+        settings = {}
+
+        for cmd in dist.command_options:
+
+            if cmd == 'saveopts':
+                continue  # don't save our own options!
+
+            for opt, (src, val) in dist.get_option_dict(cmd).items():
+                if src == "command line":
+                    settings.setdefault(cmd, {})[opt] = val
+
+        edit_config(self.filename, settings, self.dry_run)
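+
+# Hedged usage sketch: options supplied on the same command line are the
+# ones captured, e.g.
+#
+#     python setup.py build_ext --inplace saveopts
+#
+# would record build_ext's inplace flag in setup.cfg ('saveopts' skips its
+# own options, per the loop above).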
diff --git a/lib/python3.7/site-packages/setuptools/command/sdist.py b/lib/python3.7/site-packages/setuptools/command/sdist.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc25398147cfedcbc01be33ee25b693361a685cf
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/sdist.py
@@ -0,0 +1,221 @@
+from distutils import log
+import distutils.command.sdist as orig
+import os
+import sys
+import io
+import contextlib
+
+from setuptools.extern import six
+
+from .py36compat import sdist_add_defaults
+
+import pkg_resources
+
+_default_revctrl = list
+
+
+def walk_revctrl(dirname=''):
+    """Find all files under revision control"""
+    for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
+        for item in ep.load()(dirname):
+            yield item
+
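+# Hedged sketch: plugins contribute finders through the
+# 'setuptools.file_finders' entry-point group; a plugin's setup.py might
+# declare (names illustrative):
+#
+#     entry_points={
+#         'setuptools.file_finders': [
+#             'my_vcs = my_vcs.integration:find_files',
+#         ],
+#     }
+#
+# walk_revctrl() then yields every path each registered finder reports.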
+
+class sdist(sdist_add_defaults, orig.sdist):
+    """Smart sdist that finds anything supported by revision control"""
+
+    user_options = [
+        ('formats=', None,
+         "formats for source distribution (comma-separated list)"),
+        ('keep-temp', 'k',
+         "keep the distribution tree around after creating " +
+         "archive file(s)"),
+        ('dist-dir=', 'd',
+         "directory to put the source distribution archive(s) in "
+         "[default: dist]"),
+    ]
+
+    negative_opt = {}
+
+    README_EXTENSIONS = ['', '.rst', '.txt', '.md']
+    READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS)
+
+    def run(self):
+        self.run_command('egg_info')
+        ei_cmd = self.get_finalized_command('egg_info')
+        self.filelist = ei_cmd.filelist
+        self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
+        self.check_readme()
+
+        # Run sub commands
+        for cmd_name in self.get_sub_commands():
+            self.run_command(cmd_name)
+
+        self.make_distribution()
+
+        dist_files = getattr(self.distribution, 'dist_files', [])
+        for file in self.archive_files:
+            data = ('sdist', '', file)
+            if data not in dist_files:
+                dist_files.append(data)
+
+    def initialize_options(self):
+        orig.sdist.initialize_options(self)
+
+        self._default_to_gztar()
+
+    def _default_to_gztar(self):
+        # only needed on Python prior to 3.6.
+        if sys.version_info >= (3, 6, 0, 'beta', 1):
+            return
+        self.formats = ['gztar']
+
+    def make_distribution(self):
+        """
+        Workaround for #516
+        """
+        with self._remove_os_link():
+            orig.sdist.make_distribution(self)
+
+    @staticmethod
+    @contextlib.contextmanager
+    def _remove_os_link():
+        """
+        In a context, remove and restore os.link if it exists
+        """
+
+        class NoValue:
+            pass
+
+        orig_val = getattr(os, 'link', NoValue)
+        try:
+            del os.link
+        except Exception:
+            pass
+        try:
+            yield
+        finally:
+            if orig_val is not NoValue:
+                setattr(os, 'link', orig_val)
+
+    def __read_template_hack(self):
+        # This grody hack closes the template file (MANIFEST.in) if an
+        #  exception occurs during read_template.
+        # Doing so prevents an error when easy_install attempts to delete the
+        #  file.
+        try:
+            orig.sdist.read_template(self)
+        except Exception:
+            _, _, tb = sys.exc_info()
+            tb.tb_next.tb_frame.f_locals['template'].close()
+            raise
+
+    # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle
+    #  has been fixed, so only override the method if we're using an earlier
+    #  Python.
+    has_leaky_handle = (
+        sys.version_info < (2, 7, 2)
+        or (3, 0) <= sys.version_info < (3, 1, 4)
+        or (3, 2) <= sys.version_info < (3, 2, 1)
+    )
+    if has_leaky_handle:
+        read_template = __read_template_hack
+
+    def _add_defaults_python(self):
+        """getting python files"""
+        if self.distribution.has_pure_modules():
+            build_py = self.get_finalized_command('build_py')
+            self.filelist.extend(build_py.get_source_files())
+            # This functionality is incompatible with include_package_data,
+            # and will in fact create an infinite recursion if
+            # include_package_data is True. Use of include_package_data
+            # implies that distutils-style automatic handling of
+            # package_data is disabled.
+            if not self.distribution.include_package_data:
+                for _, src_dir, _, filenames in build_py.data_files:
+                    self.filelist.extend([os.path.join(src_dir, filename)
+                                          for filename in filenames])
+
+    def _add_defaults_data_files(self):
+        try:
+            if six.PY2:
+                sdist_add_defaults._add_defaults_data_files(self)
+            else:
+                super()._add_defaults_data_files()
+        except TypeError:
+            log.warn("data_files contains unexpected objects")
+
+    def check_readme(self):
+        for f in self.READMES:
+            if os.path.exists(f):
+                return
+        else:
+            self.warn(
+                "standard file not found: should have one of " +
+                ', '.join(self.READMES)
+            )
+
+    def make_release_tree(self, base_dir, files):
+        orig.sdist.make_release_tree(self, base_dir, files)
+
+        # Save any egg_info command line options used to create this sdist
+        dest = os.path.join(base_dir, 'setup.cfg')
+        if hasattr(os, 'link') and os.path.exists(dest):
+            # unlink and re-copy, since it might be hard-linked, and
+            # we don't want to change the source version
+            os.unlink(dest)
+            self.copy_file('setup.cfg', dest)
+
+        self.get_finalized_command('egg_info').save_version_info(dest)
+
+    def _manifest_is_not_generated(self):
+        # check for special comment used in 2.7.1 and higher
+        if not os.path.isfile(self.manifest):
+            return False
+
+        with io.open(self.manifest, 'rb') as fp:
+            first_line = fp.readline()
+        return (first_line !=
+                '# file GENERATED by distutils, do NOT edit\n'.encode())
+
+    def read_manifest(self):
+        """Read the manifest file (named by 'self.manifest') and use it to
+        fill in 'self.filelist', the list of files to include in the source
+        distribution.
+        """
+        log.info("reading manifest file '%s'", self.manifest)
+        manifest = open(self.manifest, 'rb')
+        for line in manifest:
+            # The manifest must contain UTF-8. See #303.
+            if six.PY3:
+                try:
+                    line = line.decode('UTF-8')
+                except UnicodeDecodeError:
+                    log.warn("%r not UTF-8 decodable -- skipping" % line)
+                    continue
+            # ignore comments and blank lines
+            line = line.strip()
+            if line.startswith('#') or not line:
+                continue
+            self.filelist.append(line)
+        manifest.close()
+
+    def check_license(self):
+        """Checks if license_file' is configured and adds it to
+        'self.filelist' if the value contains a valid path.
+        """
+
+        opts = self.distribution.get_option_dict('metadata')
+
+        # ignore the source of the value
+        _, license_file = opts.get('license_file', (None, None))
+
+        if license_file is None:
+            log.debug("'license_file' option was not specified")
+            return
+
+        if not os.path.exists(license_file):
+            log.warn("warning: Failed to find the configured license file '%s'",
+                    license_file)
+            return
+
+        self.filelist.append(license_file)
diff --git a/lib/python3.7/site-packages/setuptools/command/setopt.py b/lib/python3.7/site-packages/setuptools/command/setopt.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e57cc02627fc3c3bb49613731a51c72452f96ba
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/setopt.py
@@ -0,0 +1,149 @@
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import distutils
+import os
+
+from setuptools.extern.six.moves import configparser
+
+from setuptools import Command
+
+__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
+
+
+def config_file(kind="local"):
+    """Get the filename of the distutils, local, global, or per-user config
+
+    `kind` must be one of "local", "global", or "user"
+    """
+    if kind == 'local':
+        return 'setup.cfg'
+    if kind == 'global':
+        return os.path.join(
+            os.path.dirname(distutils.__file__), 'distutils.cfg'
+        )
+    if kind == 'user':
+        dot = '.' if os.name == 'posix' else ''
+        return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
+    raise ValueError(
+        "config_file() type must be 'local', 'global', or 'user'", kind
+    )
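+
+# For reference: config_file('local') returns 'setup.cfg', while
+# config_file('user') expands to ~/.pydistutils.cfg on POSIX
+# (~/pydistutils.cfg elsewhere).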
+
+
+def edit_config(filename, settings, dry_run=False):
+    """Edit a configuration file to include `settings`
+
+    `settings` is a dictionary of dictionaries or ``None`` values, keyed by
+    command/section name.  A ``None`` value means to delete the entire section,
+    while a dictionary lists settings to be changed or deleted in that section.
+    A setting of ``None`` means to delete that setting.
+    """
+    log.debug("Reading configuration from %s", filename)
+    opts = configparser.RawConfigParser()
+    opts.read([filename])
+    for section, options in settings.items():
+        if options is None:
+            log.info("Deleting section [%s] from %s", section, filename)
+            opts.remove_section(section)
+        else:
+            if not opts.has_section(section):
+                log.debug("Adding new section [%s] to %s", section, filename)
+                opts.add_section(section)
+            for option, value in options.items():
+                if value is None:
+                    log.debug(
+                        "Deleting %s.%s from %s",
+                        section, option, filename
+                    )
+                    opts.remove_option(section, option)
+                    if not opts.options(section):
+                        log.info("Deleting empty [%s] section from %s",
+                                 section, filename)
+                        opts.remove_section(section)
+                else:
+                    log.debug(
+                        "Setting %s.%s to %r in %s",
+                        section, option, value, filename
+                    )
+                    opts.set(section, option, value)
+
+    log.info("Writing %s", filename)
+    if not dry_run:
+        with open(filename, 'w') as f:
+            opts.write(f)
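+
+# Minimal usage sketch (section and option names illustrative):
+#
+#     edit_config('setup.cfg', {
+#         'metadata': {'license_file': 'LICENSE'},  # set metadata.license_file
+#         'aliases': None,                          # drop the whole section
+#     })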
+
+
+class option_base(Command):
+    """Abstract base class for commands that mess with config files"""
+
+    user_options = [
+        ('global-config', 'g',
+         "save options to the site-wide distutils.cfg file"),
+        ('user-config', 'u',
+         "save options to the current user's pydistutils.cfg file"),
+        ('filename=', 'f',
+         "configuration file to use (default=setup.cfg)"),
+    ]
+
+    boolean_options = [
+        'global-config', 'user-config',
+    ]
+
+    def initialize_options(self):
+        self.global_config = None
+        self.user_config = None
+        self.filename = None
+
+    def finalize_options(self):
+        filenames = []
+        if self.global_config:
+            filenames.append(config_file('global'))
+        if self.user_config:
+            filenames.append(config_file('user'))
+        if self.filename is not None:
+            filenames.append(self.filename)
+        if not filenames:
+            filenames.append(config_file('local'))
+        if len(filenames) > 1:
+            raise DistutilsOptionError(
+                "Must specify only one configuration file option",
+                filenames
+            )
+        self.filename, = filenames
+
+
+class setopt(option_base):
+    """Save command-line options to a file"""
+
+    description = "set an option in setup.cfg or another config file"
+
+    user_options = [
+        ('command=', 'c', 'command to set an option for'),
+        ('option=', 'o', 'option to set'),
+        ('set-value=', 's', 'value of the option'),
+        ('remove', 'r', 'remove (unset) the value'),
+    ] + option_base.user_options
+
+    boolean_options = option_base.boolean_options + ['remove']
+
+    def initialize_options(self):
+        option_base.initialize_options(self)
+        self.command = None
+        self.option = None
+        self.set_value = None
+        self.remove = None
+
+    def finalize_options(self):
+        option_base.finalize_options(self)
+        if self.command is None or self.option is None:
+            raise DistutilsOptionError("Must specify --command *and* --option")
+        if self.set_value is None and not self.remove:
+            raise DistutilsOptionError("Must specify --set-value or --remove")
+
+    def run(self):
+        edit_config(
+            self.filename, {
+                self.command: {self.option.replace('-', '_'): self.set_value}
+            },
+            self.dry_run
+        )
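+
+# Hedged CLI sketch: write metadata.description into setup.cfg:
+#
+#     python setup.py setopt -c metadata -o description -s "Example package"
+#
+# Passing --remove instead of --set-value deletes the option (edit_config
+# treats a None value as a deletion).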
diff --git a/lib/python3.7/site-packages/setuptools/command/test.py b/lib/python3.7/site-packages/setuptools/command/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..973e4eb214b1305aadf771617b2b4ec162feea1f
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/test.py
@@ -0,0 +1,271 @@
+import os
+import operator
+import sys
+import contextlib
+import itertools
+import unittest
+from distutils.errors import DistutilsError, DistutilsOptionError
+from distutils import log
+from unittest import TestLoader
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import map, filter
+
+from pkg_resources import (resource_listdir, resource_exists, normalize_path,
+                           working_set, _namespace_packages, evaluate_marker,
+                           add_activation_listener, require, EntryPoint)
+from setuptools import Command
+from .build_py import _unique_everseen
+
+__metaclass__ = type
+
+
+class ScanningLoader(TestLoader):
+
+    def __init__(self):
+        TestLoader.__init__(self)
+        self._visited = set()
+
+    def loadTestsFromModule(self, module, pattern=None):
+        """Return a suite of all tests cases contained in the given module
+
+        If the module is a package, load tests from all the modules in it.
+        If the module has an ``additional_tests`` function, call it and add
+        the return value to the tests.
+        """
+        if module in self._visited:
+            return None
+        self._visited.add(module)
+
+        tests = []
+        tests.append(TestLoader.loadTestsFromModule(self, module))
+
+        if hasattr(module, "additional_tests"):
+            tests.append(module.additional_tests())
+
+        if hasattr(module, '__path__'):
+            for file in resource_listdir(module.__name__, ''):
+                if file.endswith('.py') and file != '__init__.py':
+                    submodule = module.__name__ + '.' + file[:-3]
+                else:
+                    if resource_exists(module.__name__, file + '/__init__.py'):
+                        submodule = module.__name__ + '.' + file
+                    else:
+                        continue
+                tests.append(self.loadTestsFromName(submodule))
+
+        if len(tests) != 1:
+            return self.suiteClass(tests)
+        else:
+            return tests[0]  # don't create a nested suite for only one return
+
+
+# adapted from jaraco.classes.properties:NonDataProperty
+class NonDataProperty:
+    def __init__(self, fget):
+        self.fget = fget
+
+    def __get__(self, obj, objtype=None):
+        if obj is None:
+            return self
+        return self.fget(obj)
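+
+# Being a non-data descriptor (it defines no __set__), a NonDataProperty is
+# computed lazily but can be overridden by plain assignment, e.g. (sketch):
+#
+#     class C:
+#         @NonDataProperty
+#         def x(self):
+#             return 1
+#
+#     c = C()
+#     c.x        # -> 1 (computed via fget)
+#     c.x = 5
+#     c.x        # -> 5 (the instance attribute now shadows the descriptor)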
+
+
+class test(Command):
+    """Command to run unit tests after in-place build"""
+
+    description = "run unit tests after in-place build"
+
+    user_options = [
+        ('test-module=', 'm', "Run 'test_suite' in specified module"),
+        ('test-suite=', 's',
+         "Run single test, case or suite (e.g. 'module.test_suite')"),
+        ('test-runner=', 'r', "Test runner to use"),
+    ]
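+
+    # Hedged CLI sketch (suite name illustrative): run one suite explicitly:
+    #
+    #     python setup.py test -s mypkg.tests.test_suite
+    #
+    # With neither -s nor -m, the suite falls back to the distribution's
+    # test_suite (see finalize_options below).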
+
+    def initialize_options(self):
+        self.test_suite = None
+        self.test_module = None
+        self.test_loader = None
+        self.test_runner = None
+
+    def finalize_options(self):
+
+        if self.test_suite and self.test_module:
+            msg = "You may specify a module or a suite, but not both"
+            raise DistutilsOptionError(msg)
+
+        if self.test_suite is None:
+            if self.test_module is None:
+                self.test_suite = self.distribution.test_suite
+            else:
+                self.test_suite = self.test_module + ".test_suite"
+
+        if self.test_loader is None:
+            self.test_loader = getattr(self.distribution, 'test_loader', None)
+        if self.test_loader is None:
+            self.test_loader = "setuptools.command.test:ScanningLoader"
+        if self.test_runner is None:
+            self.test_runner = getattr(self.distribution, 'test_runner', None)
+
+    @NonDataProperty
+    def test_args(self):
+        return list(self._test_args())
+
+    def _test_args(self):
+        if not self.test_suite and sys.version_info >= (2, 7):
+            yield 'discover'
+        if self.verbose:
+            yield '--verbose'
+        if self.test_suite:
+            yield self.test_suite
+
+    def with_project_on_sys_path(self, func):
+        """
+        Backward compatibility for project_on_sys_path context.
+        """
+        with self.project_on_sys_path():
+            func()
+
+    @contextlib.contextmanager
+    def project_on_sys_path(self, include_dists=[]):
+        with_2to3 = six.PY3 and getattr(self.distribution, 'use_2to3', False)
+
+        if with_2to3:
+            # If we run 2to3 we cannot do this inplace:
+
+            # Ensure metadata is up-to-date
+            self.reinitialize_command('build_py', inplace=0)
+            self.run_command('build_py')
+            bpy_cmd = self.get_finalized_command("build_py")
+            build_path = normalize_path(bpy_cmd.build_lib)
+
+            # Build extensions
+            self.reinitialize_command('egg_info', egg_base=build_path)
+            self.run_command('egg_info')
+
+            self.reinitialize_command('build_ext', inplace=0)
+            self.run_command('build_ext')
+        else:
+            # Without 2to3 inplace works fine:
+            self.run_command('egg_info')
+
+            # Build extensions in-place
+            self.reinitialize_command('build_ext', inplace=1)
+            self.run_command('build_ext')
+
+        ei_cmd = self.get_finalized_command("egg_info")
+
+        old_path = sys.path[:]
+        old_modules = sys.modules.copy()
+
+        try:
+            project_path = normalize_path(ei_cmd.egg_base)
+            sys.path.insert(0, project_path)
+            working_set.__init__()
+            add_activation_listener(lambda dist: dist.activate())
+            require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
+            with self.paths_on_pythonpath([project_path]):
+                yield
+        finally:
+            sys.path[:] = old_path
+            sys.modules.clear()
+            sys.modules.update(old_modules)
+            working_set.__init__()
+
+    @staticmethod
+    @contextlib.contextmanager
+    def paths_on_pythonpath(paths):
+        """
+        Add the indicated paths to the head of the PYTHONPATH environment
+        variable so that subprocesses will also see the packages at
+        these paths.
+
+        Do this in a context that restores the value on exit.
+        """
+        nothing = object()
+        orig_pythonpath = os.environ.get('PYTHONPATH', nothing)
+        current_pythonpath = os.environ.get('PYTHONPATH', '')
+        try:
+            prefix = os.pathsep.join(_unique_everseen(paths))
+            to_join = filter(None, [prefix, current_pythonpath])
+            new_path = os.pathsep.join(to_join)
+            if new_path:
+                os.environ['PYTHONPATH'] = new_path
+            yield
+        finally:
+            if orig_pythonpath is nothing:
+                os.environ.pop('PYTHONPATH', None)
+            else:
+                os.environ['PYTHONPATH'] = orig_pythonpath
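+
+    # Minimal usage sketch (path and package name illustrative):
+    #
+    #     with test.paths_on_pythonpath(['/tmp/build/lib']):
+    #         subprocess.check_call([sys.executable, '-c', 'import mypkg'])
+    #
+    # Subprocesses started inside the block see the prepended entries.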
+
+    @staticmethod
+    def install_dists(dist):
+        """
+        Install the requirements indicated by the given distribution and
+        return an iterable of the dists that were built.
+        """
+        ir_d = dist.fetch_build_eggs(dist.install_requires)
+        tr_d = dist.fetch_build_eggs(dist.tests_require or [])
+        er_d = dist.fetch_build_eggs(
+            v for k, v in dist.extras_require.items()
+            if k.startswith(':') and evaluate_marker(k[1:])
+        )
+        return itertools.chain(ir_d, tr_d, er_d)
+
+    def run(self):
+        installed_dists = self.install_dists(self.distribution)
+
+        cmd = ' '.join(self._argv)
+        if self.dry_run:
+            self.announce('skipping "%s" (dry run)' % cmd)
+            return
+
+        self.announce('running "%s"' % cmd)
+
+        paths = map(operator.attrgetter('location'), installed_dists)
+        with self.paths_on_pythonpath(paths):
+            with self.project_on_sys_path():
+                self.run_tests()
+
+    def run_tests(self):
+        # Purge modules under test from sys.modules. The test loader will
+        # re-import them from the build location. Required when 2to3 is used
+        # with namespace packages.
+        if six.PY3 and getattr(self.distribution, 'use_2to3', False):
+            module = self.test_suite.split('.')[0]
+            if module in _namespace_packages:
+                del_modules = []
+                if module in sys.modules:
+                    del_modules.append(module)
+                module += '.'
+                for name in sys.modules:
+                    if name.startswith(module):
+                        del_modules.append(name)
+                list(map(sys.modules.__delitem__, del_modules))
+
+        test = unittest.main(
+            None, None, self._argv,
+            testLoader=self._resolve_as_ep(self.test_loader),
+            testRunner=self._resolve_as_ep(self.test_runner),
+            exit=False,
+        )
+        if not test.result.wasSuccessful():
+            msg = 'Test failed: %s' % test.result
+            self.announce(msg, log.ERROR)
+            raise DistutilsError(msg)
+
+    @property
+    def _argv(self):
+        return ['unittest'] + self.test_args
+
+    @staticmethod
+    def _resolve_as_ep(val):
+        """
+        Load the indicated attribute value and call it, as if it had been
+        specified as an entry point.
+        """
+        if val is None:
+            return
+        parsed = EntryPoint.parse("x=" + val)
+        return parsed.resolve()()
diff --git a/lib/python3.7/site-packages/setuptools/command/upload.py b/lib/python3.7/site-packages/setuptools/command/upload.py
new file mode 100644
index 0000000000000000000000000000000000000000..6db8888bb29cc59ef23e42a4b3335c311a6ef0ad
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/upload.py
@@ -0,0 +1,196 @@
+import io
+import os
+import hashlib
+import getpass
+
+from base64 import standard_b64encode
+
+from distutils import log
+from distutils.command import upload as orig
+from distutils.spawn import spawn
+
+from distutils.errors import DistutilsError
+
+from setuptools.extern.six.moves.urllib.request import urlopen, Request
+from setuptools.extern.six.moves.urllib.error import HTTPError
+from setuptools.extern.six.moves.urllib.parse import urlparse
+
+
+class upload(orig.upload):
+    """
+    Override default upload behavior to obtain password
+    in a variety of ways.
+    """
+    def run(self):
+        try:
+            orig.upload.run(self)
+        finally:
+            self.announce(
+                "WARNING: Uploading via this command is deprecated, use twine "
+                "to upload instead (https://pypi.org/p/twine/)",
+                log.WARN
+            )
+
+    def finalize_options(self):
+        orig.upload.finalize_options(self)
+        self.username = (
+            self.username or
+            getpass.getuser()
+        )
+        # Attempt to obtain a password, short-circuiting at the first
+        # sign of success.
+        self.password = (
+            self.password or
+            self._load_password_from_keyring() or
+            self._prompt_for_password()
+        )
+
+    def upload_file(self, command, pyversion, filename):
+        # Makes sure the repository URL is compliant
+        schema, netloc, url, params, query, fragments = \
+            urlparse(self.repository)
+        if params or query or fragments:
+            raise AssertionError("Incompatible url %s" % self.repository)
+
+        if schema not in ('http', 'https'):
+            raise AssertionError("unsupported schema " + schema)
+
+        # Sign if requested
+        if self.sign:
+            gpg_args = ["gpg", "--detach-sign", "-a", filename]
+            if self.identity:
+                gpg_args[2:2] = ["--local-user", self.identity]
+            spawn(gpg_args,
+                  dry_run=self.dry_run)
+
+        # Fill in the data - send all the meta-data in case we need to
+        # register a new release
+        with open(filename, 'rb') as f:
+            content = f.read()
+
+        meta = self.distribution.metadata
+
+        data = {
+            # action
+            ':action': 'file_upload',
+            'protocol_version': '1',
+
+            # identify release
+            'name': meta.get_name(),
+            'version': meta.get_version(),
+
+            # file content
+            'content': (os.path.basename(filename), content),
+            'filetype': command,
+            'pyversion': pyversion,
+            'md5_digest': hashlib.md5(content).hexdigest(),
+
+            # additional meta-data
+            'metadata_version': str(meta.get_metadata_version()),
+            'summary': meta.get_description(),
+            'home_page': meta.get_url(),
+            'author': meta.get_contact(),
+            'author_email': meta.get_contact_email(),
+            'license': meta.get_licence(),
+            'description': meta.get_long_description(),
+            'keywords': meta.get_keywords(),
+            'platform': meta.get_platforms(),
+            'classifiers': meta.get_classifiers(),
+            'download_url': meta.get_download_url(),
+            # PEP 314
+            'provides': meta.get_provides(),
+            'requires': meta.get_requires(),
+            'obsoletes': meta.get_obsoletes(),
+            }
+
+        data['comment'] = ''
+
+        if self.sign:
+            data['gpg_signature'] = (os.path.basename(filename) + ".asc",
+                                     open(filename+".asc", "rb").read())
+
+        # set up the authentication
+        user_pass = (self.username + ":" + self.password).encode('ascii')
+        # The exact encoding of the authentication string is debated;
+        # in any case, PyPI only accepts ASCII for both username and password.
+        auth = "Basic " + standard_b64encode(user_pass).decode('ascii')
+
+        # Build up the MIME payload for the POST data
+        boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
+        sep_boundary = b'\r\n--' + boundary.encode('ascii')
+        end_boundary = sep_boundary + b'--\r\n'
+        body = io.BytesIO()
+        for key, value in data.items():
+            title = '\r\nContent-Disposition: form-data; name="%s"' % key
+            # handle multiple entries for the same name
+            values = value if isinstance(value, list) else [value]
+            for value in values:
+                if type(value) is tuple:
+                    title += '; filename="%s"' % value[0]
+                    value = value[1]
+                else:
+                    value = str(value).encode('utf-8')
+                body.write(sep_boundary)
+                body.write(title.encode('utf-8'))
+                body.write(b"\r\n\r\n")
+                body.write(value)
+        body.write(end_boundary)
+        body = body.getvalue()
+
+        msg = "Submitting %s to %s" % (filename, self.repository)
+        self.announce(msg, log.INFO)
+
+        # build the Request
+        headers = {
+            'Content-type': 'multipart/form-data; boundary=%s' % boundary,
+            'Content-length': str(len(body)),
+            'Authorization': auth,
+        }
+
+        request = Request(self.repository, data=body,
+                          headers=headers)
+        # send the data
+        try:
+            result = urlopen(request)
+            status = result.getcode()
+            reason = result.msg
+        except HTTPError as e:
+            status = e.code
+            reason = e.msg
+        except OSError as e:
+            self.announce(str(e), log.ERROR)
+            raise
+
+        if status == 200:
+            self.announce('Server response (%s): %s' % (status, reason),
+                          log.INFO)
+            if self.show_response:
+                text = getattr(self, '_read_pypi_response',
+                               lambda x: None)(result)
+                if text is not None:
+                    msg = '\n'.join(('-' * 75, text, '-' * 75))
+                    self.announce(msg, log.INFO)
+        else:
+            msg = 'Upload failed (%s): %s' % (status, reason)
+            self.announce(msg, log.ERROR)
+            raise DistutilsError(msg)
+
+    def _load_password_from_keyring(self):
+        """
+        Attempt to load the password from keyring, suppressing exceptions.
+        """
+        try:
+            keyring = __import__('keyring')
+            return keyring.get_password(self.repository, self.username)
+        except Exception:
+            pass
+
+    def _prompt_for_password(self):
+        """
+        Prompt for a password on the tty, suppressing exceptions.
+        """
+        try:
+            return getpass.getpass()
+        except (Exception, KeyboardInterrupt):
+            pass
diff --git a/lib/python3.7/site-packages/setuptools/command/upload_docs.py b/lib/python3.7/site-packages/setuptools/command/upload_docs.py
new file mode 100644
index 0000000000000000000000000000000000000000..07aa564af451ce41d818d72f8ee93cb46887cecf
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/command/upload_docs.py
@@ -0,0 +1,206 @@
+# -*- coding: utf-8 -*-
+"""upload_docs
+
+Implements a Distutils 'upload_docs' subcommand (upload documentation to
+PyPI's pythonhosted.org).
+"""
+
+from base64 import standard_b64encode
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import os
+import socket
+import zipfile
+import tempfile
+import shutil
+import itertools
+import functools
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import http_client, urllib
+
+from pkg_resources import iter_entry_points
+from .upload import upload
+
+
+def _encode(s):
+    errors = 'surrogateescape' if six.PY3 else 'strict'
+    return s.encode('utf-8', errors)
+
+
+class upload_docs(upload):
+    # override the default repository as upload_docs isn't
+    # supported by Warehouse (and won't be).
+    DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
+
+    description = 'Upload documentation to PyPI'
+
+    user_options = [
+        ('repository=', 'r',
+         "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
+        ('show-response', None,
+         'display full response text from server'),
+        ('upload-dir=', None, 'directory to upload'),
+    ]
+    boolean_options = upload.boolean_options
+
+    def has_sphinx(self):
+        if self.upload_dir is None:
+            for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
+                return True
+
+    sub_commands = [('build_sphinx', has_sphinx)]
+
+    def initialize_options(self):
+        upload.initialize_options(self)
+        self.upload_dir = None
+        self.target_dir = None
+
+    def finalize_options(self):
+        upload.finalize_options(self)
+        if self.upload_dir is None:
+            if self.has_sphinx():
+                build_sphinx = self.get_finalized_command('build_sphinx')
+                self.target_dir = build_sphinx.builder_target_dir
+            else:
+                build = self.get_finalized_command('build')
+                self.target_dir = os.path.join(build.build_base, 'docs')
+        else:
+            self.ensure_dirname('upload_dir')
+            self.target_dir = self.upload_dir
+        if 'pypi.python.org' in self.repository:
+            log.warn("Upload_docs command is deprecated. Use RTD instead.")
+        self.announce('Using upload directory %s' % self.target_dir)
+
+    def create_zipfile(self, filename):
+        zip_file = zipfile.ZipFile(filename, "w")
+        try:
+            self.mkpath(self.target_dir)  # just in case
+            for root, dirs, files in os.walk(self.target_dir):
+                if root == self.target_dir and not files:
+                    tmpl = "no files found in upload directory '%s'"
+                    raise DistutilsOptionError(tmpl % self.target_dir)
+                for name in files:
+                    full = os.path.join(root, name)
+                    relative = root[len(self.target_dir):].lstrip(os.path.sep)
+                    dest = os.path.join(relative, name)
+                    zip_file.write(full, dest)
+        finally:
+            zip_file.close()
+
+    def run(self):
+        # Run sub commands
+        for cmd_name in self.get_sub_commands():
+            self.run_command(cmd_name)
+
+        tmp_dir = tempfile.mkdtemp()
+        name = self.distribution.metadata.get_name()
+        zip_file = os.path.join(tmp_dir, "%s.zip" % name)
+        try:
+            self.create_zipfile(zip_file)
+            self.upload_file(zip_file)
+        finally:
+            shutil.rmtree(tmp_dir)
+
+    @staticmethod
+    def _build_part(item, sep_boundary):
+        key, values = item
+        title = '\nContent-Disposition: form-data; name="%s"' % key
+        # handle multiple entries for the same name
+        if not isinstance(values, list):
+            values = [values]
+        for value in values:
+            if isinstance(value, tuple):
+                title += '; filename="%s"' % value[0]
+                value = value[1]
+            else:
+                value = _encode(value)
+            yield sep_boundary
+            yield _encode(title)
+            yield b"\n\n"
+            yield value
+            if value and value[-1:] == b'\r':
+                yield b'\n'  # write an extra newline (lurve Macs)
+
+    @classmethod
+    def _build_multipart(cls, data):
+        """
+        Build up the MIME payload for the POST data
+        """
+        boundary = b'--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
+        sep_boundary = b'\n--' + boundary
+        end_boundary = sep_boundary + b'--'
+        end_items = end_boundary, b"\n",
+        builder = functools.partial(
+            cls._build_part,
+            sep_boundary=sep_boundary,
+        )
+        part_groups = map(builder, data.items())
+        parts = itertools.chain.from_iterable(part_groups)
+        body_items = itertools.chain(parts, end_items)
+        content_type = 'multipart/form-data; boundary=%s' % boundary.decode('ascii')
+        return b''.join(body_items), content_type
+
+    def upload_file(self, filename):
+        with open(filename, 'rb') as f:
+            content = f.read()
+        meta = self.distribution.metadata
+        data = {
+            ':action': 'doc_upload',
+            'name': meta.get_name(),
+            'content': (os.path.basename(filename), content),
+        }
+        # set up the authentication
+        credentials = _encode(self.username + ':' + self.password)
+        credentials = standard_b64encode(credentials)
+        if six.PY3:
+            credentials = credentials.decode('ascii')
+        auth = "Basic " + credentials
+
+        body, ct = self._build_multipart(data)
+
+        msg = "Submitting documentation to %s" % (self.repository)
+        self.announce(msg, log.INFO)
+
+        # build the Request
+        # We can't use urllib2 since we need to send the Basic
+        # auth right with the first request
+        schema, netloc, url, params, query, fragments = \
+            urllib.parse.urlparse(self.repository)
+        assert not params and not query and not fragments
+        if schema == 'http':
+            conn = http_client.HTTPConnection(netloc)
+        elif schema == 'https':
+            conn = http_client.HTTPSConnection(netloc)
+        else:
+            raise AssertionError("unsupported schema " + schema)
+
+        data = ''
+        try:
+            conn.connect()
+            conn.putrequest("POST", url)
+            content_type = ct
+            conn.putheader('Content-type', content_type)
+            conn.putheader('Content-length', str(len(body)))
+            conn.putheader('Authorization', auth)
+            conn.endheaders()
+            conn.send(body)
+        except socket.error as e:
+            self.announce(str(e), log.ERROR)
+            return
+
+        r = conn.getresponse()
+        if r.status == 200:
+            msg = 'Server response (%s): %s' % (r.status, r.reason)
+            self.announce(msg, log.INFO)
+        elif r.status == 301:
+            location = r.getheader('Location')
+            if location is None:
+                location = 'https://pythonhosted.org/%s/' % meta.get_name()
+            msg = 'Upload successful. Visit %s' % location
+            self.announce(msg, log.INFO)
+        else:
+            msg = 'Upload failed (%s): %s' % (r.status, r.reason)
+            self.announce(msg, log.ERROR)
+        if self.show_response:
+            print('-' * 75, r.read(), '-' * 75)
diff --git a/lib/python3.7/site-packages/setuptools/config.py b/lib/python3.7/site-packages/setuptools/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6626043fce15e07dbe89ff1f06bb97d77e80c06
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/config.py
@@ -0,0 +1,656 @@
+from __future__ import absolute_import, unicode_literals
+import io
+import os
+import sys
+
+import warnings
+import functools
+from collections import defaultdict
+from functools import partial
+from functools import wraps
+from importlib import import_module
+
+from distutils.errors import DistutilsOptionError, DistutilsFileError
+from setuptools.extern.packaging.version import LegacyVersion, parse
+from setuptools.extern.six import string_types, PY3
+
+
+__metaclass__ = type
+
+
+def read_configuration(
+        filepath, find_others=False, ignore_option_errors=False):
+    """Read given configuration file and returns options from it as a dict.
+
+    :param str|unicode filepath: Path to configuration file
+        to get options from.
+
+    :param bool find_others: Whether to search for other configuration files
+        which could be in various places.
+
+    :param bool ignore_option_errors: Whether to silently ignore
+        options whose values could not be resolved (e.g. due to exceptions
+        in directives such as file:, attr:, etc.).
+        If False, exceptions are propagated as expected.
+
+    :rtype: dict
+    """
+    from setuptools.dist import Distribution, _Distribution
+
+    filepath = os.path.abspath(filepath)
+
+    if not os.path.isfile(filepath):
+        raise DistutilsFileError(
+            'Configuration file %s does not exist.' % filepath)
+
+    current_directory = os.getcwd()
+    os.chdir(os.path.dirname(filepath))
+
+    try:
+        dist = Distribution()
+
+        filenames = dist.find_config_files() if find_others else []
+        if filepath not in filenames:
+            filenames.append(filepath)
+
+        _Distribution.parse_config_files(dist, filenames=filenames)
+
+        handlers = parse_configuration(
+            dist, dist.command_options,
+            ignore_option_errors=ignore_option_errors)
+
+    finally:
+        os.chdir(current_directory)
+
+    return configuration_to_dict(handlers)
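+
+# Hedged usage sketch (assumes a setup.cfg in the current directory):
+#
+#     conf = read_configuration('setup.cfg')
+#     conf['metadata'].get('version')
+#     conf['options'].get('install_requires')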
+
+
+def _get_option(target_obj, key):
+    """
+    Given a target object and option key, get that option from
+    the target object, either through a get_{key} method or
+    from an attribute directly.
+    """
+    getter_name = 'get_{key}'.format(**locals())
+    by_attribute = functools.partial(getattr, target_obj, key)
+    getter = getattr(target_obj, getter_name, by_attribute)
+    return getter()
+
+
+def configuration_to_dict(handlers):
+    """Returns configuration data gathered by given handlers as a dict.
+
+    :param list[ConfigHandler] handlers: Handlers list,
+        usually from parse_configuration()
+
+    :rtype: dict
+    """
+    config_dict = defaultdict(dict)
+
+    for handler in handlers:
+        for option in handler.set_options:
+            value = _get_option(handler.target_obj, option)
+            config_dict[handler.section_prefix][option] = value
+
+    return config_dict
+
+
+def parse_configuration(
+        distribution, command_options, ignore_option_errors=False):
+    """Performs additional parsing of configuration options
+    for a distribution.
+
+    Returns a list of used option handlers.
+
+    :param Distribution distribution:
+    :param dict command_options:
+    :param bool ignore_option_errors: Whether to silently ignore
+        options whose values could not be resolved (e.g. due to exceptions
+        in directives such as file:, attr:, etc.).
+        If False, exceptions are propagated as expected.
+    :rtype: list
+    """
+    options = ConfigOptionsHandler(
+        distribution, command_options, ignore_option_errors)
+    options.parse()
+
+    meta = ConfigMetadataHandler(
+        distribution.metadata, command_options, ignore_option_errors,
+        distribution.package_dir)
+    meta.parse()
+
+    return meta, options
+
+
+class ConfigHandler:
+    """Handles metadata supplied in configuration files."""
+
+    section_prefix = None
+    """Prefix for config sections handled by this handler.
+    Must be provided by subclasses.
+
+    """
+
+    aliases = {}
+    """Options aliases.
+    For compatibility with various packages. E.g.: d2to1 and pbr.
+    Note: `-` in keys is replaced with `_` by config parser.
+
+    """
+
+    def __init__(self, target_obj, options, ignore_option_errors=False):
+        sections = {}
+
+        section_prefix = self.section_prefix
+        for section_name, section_options in options.items():
+            if not section_name.startswith(section_prefix):
+                continue
+
+            section_name = section_name.replace(section_prefix, '').strip('.')
+            sections[section_name] = section_options
+
+        self.ignore_option_errors = ignore_option_errors
+        self.target_obj = target_obj
+        self.sections = sections
+        self.set_options = []
+
+    @property
+    def parsers(self):
+        """Metadata item name to parser function mapping."""
+        raise NotImplementedError(
+            '%s must provide .parsers property' % self.__class__.__name__)
+
+    def __setitem__(self, option_name, value):
+        unknown = tuple()
+        target_obj = self.target_obj
+
+        # Translate alias into real name.
+        option_name = self.aliases.get(option_name, option_name)
+
+        current_value = getattr(target_obj, option_name, unknown)
+
+        if current_value is unknown:
+            raise KeyError(option_name)
+
+        if current_value:
+            # Option already has a value; skip it.
+            return
+
+        skip_option = False
+        parser = self.parsers.get(option_name)
+        if parser:
+            try:
+                value = parser(value)
+
+            except Exception:
+                skip_option = True
+                if not self.ignore_option_errors:
+                    raise
+
+        if skip_option:
+            return
+
+        setter = getattr(target_obj, 'set_%s' % option_name, None)
+        if setter is None:
+            setattr(target_obj, option_name, value)
+        else:
+            setter(value)
+
+        self.set_options.append(option_name)
+
+    @classmethod
+    def _parse_list(cls, value, separator=','):
+        """Represents value as a list.
+
+        Value is split either by separator (defaults to comma) or by lines.
+
+        :param value:
+        :param separator: List items separator character.
+        :rtype: list
+        """
+        if isinstance(value, list):  # _get_parser_compound case
+            return value
+
+        if '\n' in value:
+            value = value.splitlines()
+        else:
+            value = value.split(separator)
+
+        return [chunk.strip() for chunk in value if chunk.strip()]
+
+    @classmethod
+    def _parse_dict(cls, value):
+        """Represents value as a dict.
+
+        :param value:
+        :rtype: dict
+        """
+        separator = '='
+        result = {}
+        for line in cls._parse_list(value):
+            key, sep, val = line.partition(separator)
+            if sep != separator:
+                raise DistutilsOptionError(
+                    'Unable to parse option value to dict: %s' % value)
+            result[key.strip()] = val.strip()
+
+        return result
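+
+    # Sketches of the two parsers above:
+    #
+    #     _parse_list('a, b, c')   # -> ['a', 'b', 'c']
+    #     _parse_list('a\nb')      # -> ['a', 'b'] (newlines win over commas)
+    #     _parse_dict('x=1\ny=2')  # -> {'x': '1', 'y': '2'}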
+
+    @classmethod
+    def _parse_bool(cls, value):
+        """Represents value as boolean.
+
+        :param value:
+        :rtype: bool
+        """
+        value = value.lower()
+        return value in ('1', 'true', 'yes')
+
+    @classmethod
+    def _exclude_files_parser(cls, key):
+        """Returns a parser function to make sure field inputs
+        are not files.
+
+        Parses a value after getting the key so error messages are
+        more informative.
+
+        :param key:
+        :rtype: callable
+        """
+        def parser(value):
+            exclude_directive = 'file:'
+            if value.startswith(exclude_directive):
+                raise ValueError(
+                    'Only strings are accepted for the {0} field, '
+                    'files are not accepted'.format(key))
+            return value
+        return parser
+
+    @classmethod
+    def _parse_file(cls, value):
+        """Represents value as a string, allowing including text
+        from nearest files using `file:` directive.
+
+        Directive is sandboxed and won't reach anything outside
+        directory with setup.py.
+
+        Examples:
+            file: README.rst, CHANGELOG.md, src/file.txt
+
+        :param str value:
+        :rtype: str
+        """
+        include_directive = 'file:'
+
+        if not isinstance(value, string_types):
+            return value
+
+        if not value.startswith(include_directive):
+            return value
+
+        spec = value[len(include_directive):]
+        filepaths = (os.path.abspath(path.strip()) for path in spec.split(','))
+        return '\n'.join(
+            cls._read_file(path)
+            for path in filepaths
+            if (cls._assert_local(path) or True)
+            and os.path.isfile(path)
+        )
+
+    @staticmethod
+    def _assert_local(filepath):
+        if not filepath.startswith(os.getcwd()):
+            raise DistutilsOptionError(
+                '`file:` directive can not access %s' % filepath)
+
+    @staticmethod
+    def _read_file(filepath):
+        with io.open(filepath, encoding='utf-8') as f:
+            return f.read()
+
+    @classmethod
+    def _parse_attr(cls, value, package_dir=None):
+        """Represents value as a module attribute.
+
+        Examples:
+            attr: package.attr
+            attr: package.module.attr
+
+        :param str value:
+        :rtype: str
+        """
+        attr_directive = 'attr:'
+        if not value.startswith(attr_directive):
+            return value
+
+        attrs_path = value.replace(attr_directive, '').strip().split('.')
+        attr_name = attrs_path.pop()
+
+        module_name = '.'.join(attrs_path)
+        module_name = module_name or '__init__'
+
+        parent_path = os.getcwd()
+        if package_dir:
+            # Guard against an empty attrs_path (e.g. 'attr: VERSION')
+            if attrs_path and attrs_path[0] in package_dir:
+                # A custom path was specified for the module we want to import
+                custom_path = package_dir[attrs_path[0]]
+                parts = custom_path.rsplit('/', 1)
+                if len(parts) > 1:
+                    parent_path = os.path.join(os.getcwd(), parts[0])
+                    module_name = parts[1]
+                else:
+                    module_name = custom_path
+            elif '' in package_dir:
+                # A custom parent directory was specified for all root modules
+                parent_path = os.path.join(os.getcwd(), package_dir[''])
+        sys.path.insert(0, parent_path)
+        try:
+            module = import_module(module_name)
+            value = getattr(module, attr_name)
+
+        finally:
+            sys.path = sys.path[1:]
+
+        return value
+
+    @classmethod
+    def _get_parser_compound(cls, *parse_methods):
+        """Returns parser function to represents value as a list.
+
+        Parses a value applying given methods one after another.
+
+        :param parse_methods:
+        :rtype: callable
+        """
+        def parse(value):
+            parsed = value
+
+            for method in parse_methods:
+                parsed = method(parsed)
+
+            return parsed
+
+        return parse
+
+    @classmethod
+    def _parse_section_to_dict(cls, section_options, values_parser=None):
+        """Parses section options into a dictionary.
+
+        Optionally applies a given parser to values.
+
+        :param dict section_options:
+        :param callable values_parser:
+        :rtype: dict
+        """
+        value = {}
+        values_parser = values_parser or (lambda val: val)
+        for key, (_, val) in section_options.items():
+            value[key] = values_parser(val)
+        return value
+
+    def parse_section(self, section_options):
+        """Parses configuration file section.
+
+        :param dict section_options:
+        """
+        for (name, (_, value)) in section_options.items():
+            try:
+                self[name] = value
+
+            except KeyError:
+                pass  # Keep silent, as a new option may appear at any time.
+
+    def parse(self):
+        """Parses configuration file items from one
+        or more related sections.
+
+        """
+        for section_name, section_options in self.sections.items():
+
+            method_postfix = ''
+            if section_name:  # [section.option] variant
+                method_postfix = '_%s' % section_name
+
+            section_parser_method = getattr(
+                self,
+                # Dots in section names are translated into double underscores.
+                ('parse_section%s' % method_postfix).replace('.', '__'),
+                None)
+
+            if section_parser_method is None:
+                raise DistutilsOptionError(
+                    'Unsupported distribution option section: [%s.%s]' % (
+                        self.section_prefix, section_name))
+
+            section_parser_method(section_options)
+
+    def _deprecated_config_handler(self, func, msg, warning_class):
+        """ this function will wrap around parameters that are deprecated
+
+        :param msg: deprecation message
+        :param warning_class: class of warning exception to be raised
+        :param func: function to be wrapped around
+        """
+        @wraps(func)
+        def config_handler(*args, **kwargs):
+            warnings.warn(msg, warning_class)
+            return func(*args, **kwargs)
+
+        return config_handler
+
+
+class ConfigMetadataHandler(ConfigHandler):
+
+    section_prefix = 'metadata'
+
+    aliases = {
+        'home_page': 'url',
+        'summary': 'description',
+        'classifier': 'classifiers',
+        'platform': 'platforms',
+    }
+
+    strict_mode = False
+    """We need to keep it loose, to be partially compatible with
+    `pbr` and `d2to1` packages, which also use the `metadata` section.
+
+    """
+
+    def __init__(self, target_obj, options, ignore_option_errors=False,
+                 package_dir=None):
+        super(ConfigMetadataHandler, self).__init__(target_obj, options,
+                                                    ignore_option_errors)
+        self.package_dir = package_dir
+
+    @property
+    def parsers(self):
+        """Metadata item name to parser function mapping."""
+        parse_list = self._parse_list
+        parse_file = self._parse_file
+        parse_dict = self._parse_dict
+        exclude_files_parser = self._exclude_files_parser
+
+        return {
+            'platforms': parse_list,
+            'keywords': parse_list,
+            'provides': parse_list,
+            'requires': self._deprecated_config_handler(
+                parse_list,
+                "The requires parameter is deprecated, please use "
+                "install_requires for runtime dependencies.",
+                DeprecationWarning),
+            'obsoletes': parse_list,
+            'classifiers': self._get_parser_compound(parse_file, parse_list),
+            'license': exclude_files_parser('license'),
+            'description': parse_file,
+            'long_description': parse_file,
+            'version': self._parse_version,
+            'project_urls': parse_dict,
+        }
+
+    def _parse_version(self, value):
+        """Parses `version` option value.
+
+        :param value:
+        :rtype: str
+
+        """
+        version = self._parse_file(value)
+
+        if version != value:
+            version = version.strip()
+            # Be strict about versions loaded from file because it's easy to
+            # accidentally include newlines and other unintended content
+            if isinstance(parse(version), LegacyVersion):
+                tmpl = (
+                    'Version loaded from {value} does not '
+                    'comply with PEP 440: {version}'
+                )
+                raise DistutilsOptionError(tmpl.format(**locals()))
+
+            return version
+
+        version = self._parse_attr(value, self.package_dir)
+
+        if callable(version):
+            version = version()
+
+        if not isinstance(version, string_types):
+            if hasattr(version, '__iter__'):
+                version = '.'.join(map(str, version))
+            else:
+                version = '%s' % version
+
+        return version
+
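+# Illustrative setup.cfg values accepted by ConfigMetadataHandler._parse_version
+# above; "mypkg" is a placeholder, not a real package:
+#
+#   [metadata]
+#   version = 1.2.3                    # literal value, used as-is
+#   version = file: VERSION            # contents read from file, PEP 440-checked
+#   version = attr: mypkg.__version__  # resolved via _parse_attr()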
+
+class ConfigOptionsHandler(ConfigHandler):
+
+    section_prefix = 'options'
+
+    @property
+    def parsers(self):
+        """Metadata item name to parser function mapping."""
+        parse_list = self._parse_list
+        parse_list_semicolon = partial(self._parse_list, separator=';')
+        parse_bool = self._parse_bool
+        parse_dict = self._parse_dict
+
+        return {
+            'zip_safe': parse_bool,
+            'use_2to3': parse_bool,
+            'include_package_data': parse_bool,
+            'package_dir': parse_dict,
+            'use_2to3_fixers': parse_list,
+            'use_2to3_exclude_fixers': parse_list,
+            'convert_2to3_doctests': parse_list,
+            'scripts': parse_list,
+            'eager_resources': parse_list,
+            'dependency_links': parse_list,
+            'namespace_packages': parse_list,
+            'install_requires': parse_list_semicolon,
+            'setup_requires': parse_list_semicolon,
+            'tests_require': parse_list_semicolon,
+            'packages': self._parse_packages,
+            'entry_points': self._parse_file,
+            'py_modules': parse_list,
+        }
+
+    def _parse_packages(self, value):
+        """Parses `packages` option value.
+
+        :param value:
+        :rtype: list
+        """
+        find_directives = ['find:', 'find_namespace:']
+        trimmed_value = value.strip()
+
+        if trimmed_value not in find_directives:
+            return self._parse_list(value)
+
+        findns = trimmed_value == find_directives[1]
+        if findns and not PY3:
+            raise DistutilsOptionError(
+                'find_namespace: directive is unsupported on Python < 3.3')
+
+        # Read function arguments from a dedicated section.
+        find_kwargs = self.parse_section_packages__find(
+            self.sections.get('packages.find', {}))
+
+        if findns:
+            from setuptools import find_namespace_packages as find_packages
+        else:
+            from setuptools import find_packages
+
+        return find_packages(**find_kwargs)
+
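+    # Illustrative setup.cfg input handled by _parse_packages() above;
+    # the "src" directory and "tests*" pattern are placeholders:
+    #
+    #   [options]
+    #   packages = find:
+    #
+    #   [options.packages.find]
+    #   where = src
+    #   exclude = tests*
+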
+    def parse_section_packages__find(self, section_options):
+        """Parses `packages.find` configuration file section.
+
+        To be used in conjunction with _parse_packages().
+
+        :param dict section_options:
+        """
+        section_data = self._parse_section_to_dict(
+            section_options, self._parse_list)
+
+        valid_keys = ['where', 'include', 'exclude']
+
+        find_kwargs = dict(
+            [(k, v) for k, v in section_data.items() if k in valid_keys and v])
+
+        where = find_kwargs.get('where')
+        if where is not None:
+            find_kwargs['where'] = where[0]  # cast list to single val
+
+        return find_kwargs
+
+    def parse_section_entry_points(self, section_options):
+        """Parses `entry_points` configuration file section.
+
+        :param dict section_options:
+        """
+        parsed = self._parse_section_to_dict(section_options, self._parse_list)
+        self['entry_points'] = parsed
+
+    def _parse_package_data(self, section_options):
+        parsed = self._parse_section_to_dict(section_options, self._parse_list)
+
+        root = parsed.get('*')
+        if root:
+            parsed[''] = root
+            del parsed['*']
+
+        return parsed
+
+    def parse_section_package_data(self, section_options):
+        """Parses `package_data` configuration file section.
+
+        :param dict section_options:
+        """
+        self['package_data'] = self._parse_package_data(section_options)
+
+    def parse_section_exclude_package_data(self, section_options):
+        """Parses `exclude_package_data` configuration file section.
+
+        :param dict section_options:
+        """
+        self['exclude_package_data'] = self._parse_package_data(
+            section_options)
+
+    def parse_section_extras_require(self, section_options):
+        """Parses `extras_require` configuration file section.
+
+        :param dict section_options:
+        """
+        parse_list = partial(self._parse_list, separator=';')
+        self['extras_require'] = self._parse_section_to_dict(
+            section_options, parse_list)
+
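+    # Illustrative [options.extras_require] section for the method above;
+    # extra names and requirements are placeholders. Note the semicolon
+    # separator used by the parser:
+    #
+    #   [options.extras_require]
+    #   docs = sphinx; rst.linker
+    #   tests = pytest; mock
+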
+    def parse_section_data_files(self, section_options):
+        """Parses `data_files` configuration file section.
+
+        :param dict section_options:
+        """
+        parsed = self._parse_section_to_dict(section_options, self._parse_list)
+        self['data_files'] = [(k, v) for k, v in parsed.items()]
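+
+    # Illustrative [options.data_files] section for the method above;
+    # all paths are placeholders:
+    #
+    #   [options.data_files]
+    #   share/my_pkg = data/cfg1.dat, data/cfg2.dat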
diff --git a/lib/python3.7/site-packages/setuptools/dep_util.py b/lib/python3.7/site-packages/setuptools/dep_util.py
new file mode 100644
index 0000000000000000000000000000000000000000..2931c13ec35aa60b742ac4c46ceabd4ed32a5511
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/dep_util.py
@@ -0,0 +1,23 @@
+from distutils.dep_util import newer_group
+
+# Yes, this was almost entirely copy-pasted from
+# 'newer_pairwise()'; it is just another convenience
+# function.
+def newer_pairwise_group(sources_groups, targets):
+    """Walk both arguments in parallel, testing if each source group is newer
+    than its corresponding target. Returns a pair of lists (sources_groups,
+    targets) where sources is newer than target, according to the semantics
+    of 'newer_group()'.
+    """
+    if len(sources_groups) != len(targets):
+        raise ValueError("'sources_groups' and 'targets' must be the same length")
+
+    # build a pair of lists (sources_groups, targets) where source is newer
+    n_sources = []
+    n_targets = []
+    for i in range(len(sources_groups)):
+        if newer_group(sources_groups[i], targets[i]):
+            n_sources.append(sources_groups[i])
+            n_targets.append(targets[i])
+
+    return n_sources, n_targets
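+
+# Illustrative use, assuming the listed files exist on disk:
+#
+#   groups = [['a.c', 'a.h'], ['b.c']]
+#   targets = ['a.o', 'b.o']
+#   stale_sources, stale_targets = newer_pairwise_group(groups, targets)
+#   # -> only the (group, target) pairs needing a rebuild remain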
diff --git a/lib/python3.7/site-packages/setuptools/depends.py b/lib/python3.7/site-packages/setuptools/depends.py
new file mode 100644
index 0000000000000000000000000000000000000000..45e7052d8d8561a946f782a67dc6f83b6ad6ab99
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/depends.py
@@ -0,0 +1,186 @@
+import sys
+import imp
+import marshal
+from distutils.version import StrictVersion
+from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
+
+from .py33compat import Bytecode
+
+
+__all__ = [
+    'Require', 'find_module', 'get_module_constant', 'extract_constant'
+]
+
+
+class Require:
+    """A prerequisite to building or installing a distribution"""
+
+    def __init__(self, name, requested_version, module, homepage='',
+            attribute=None, format=None):
+
+        if format is None and requested_version is not None:
+            format = StrictVersion
+
+        if format is not None:
+            requested_version = format(requested_version)
+            if attribute is None:
+                attribute = '__version__'
+
+        self.__dict__.update(locals())
+        del self.self
+
+    def full_name(self):
+        """Return full package/distribution name, w/version"""
+        if self.requested_version is not None:
+            return '%s-%s' % (self.name, self.requested_version)
+        return self.name
+
+    def version_ok(self, version):
+        """Is 'version' sufficiently up-to-date?"""
+        return self.attribute is None or self.format is None or \
+            str(version) != "unknown" and version >= self.requested_version
+
+    def get_version(self, paths=None, default="unknown"):
+        """Get version number of installed module, 'None', or 'default'
+
+        Search 'paths' for module.  If not found, return 'None'.  If found,
+        return the extracted version attribute, or 'default' if no version
+        attribute was specified, or the value cannot be determined without
+        importing the module.  The version is formatted according to the
+        requirement's version format (if any), unless it is 'None' or the
+        supplied 'default'.
+        """
+
+        if self.attribute is None:
+            try:
+                f, p, i = find_module(self.module, paths)
+                if f:
+                    f.close()
+                return default
+            except ImportError:
+                return None
+
+        v = get_module_constant(self.module, self.attribute, default, paths)
+
+        if v is not None and v is not default and self.format is not None:
+            return self.format(v)
+
+        return v
+
+    def is_present(self, paths=None):
+        """Return true if dependency is present on 'paths'"""
+        return self.get_version(paths) is not None
+
+    def is_current(self, paths=None):
+        """Return true if dependency is present and up-to-date on 'paths'"""
+        version = self.get_version(paths)
+        if version is None:
+            return False
+        return self.version_ok(version)
+
+
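+# Illustrative use of Require; 'docutils' is only an example of a module
+# exposing __version__:
+#
+#   req = Require('Docutils', '0.3', 'docutils')
+#   req.full_name()   # 'Docutils-0.3'
+#   req.is_present()  # True if docutils is importable
+#   req.is_current()  # True if docutils.__version__ >= 0.3
+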
+def find_module(module, paths=None):
+    """Just like 'imp.find_module()', but with package support"""
+
+    parts = module.split('.')
+
+    while parts:
+        part = parts.pop(0)
+        f, path, (suffix, mode, kind) = info = imp.find_module(part, paths)
+
+        if kind == PKG_DIRECTORY:
+            parts = parts or ['__init__']
+            paths = [path]
+
+        elif parts:
+            raise ImportError("Can't find %r in %s" % (parts, module))
+
+    return info
+
+
+def get_module_constant(module, symbol, default=-1, paths=None):
+    """Find 'module' by searching 'paths', and extract 'symbol'
+
+    Return 'None' if 'module' does not exist on 'paths', or it does not define
+    'symbol'.  If the module defines 'symbol' as a constant, return the
+    constant.  Otherwise, return 'default'."""
+
+    try:
+        f, path, (suffix, mode, kind) = find_module(module, paths)
+    except ImportError:
+        # Module doesn't exist
+        return None
+
+    try:
+        if kind == PY_COMPILED:
+            f.read(8)  # skip magic & date
+            code = marshal.load(f)
+        elif kind == PY_FROZEN:
+            code = imp.get_frozen_object(module)
+        elif kind == PY_SOURCE:
+            code = compile(f.read(), path, 'exec')
+        else:
+            # Not something we can parse; we'll have to import it.  :(
+            if module not in sys.modules:
+                imp.load_module(module, f, path, (suffix, mode, kind))
+            return getattr(sys.modules[module], symbol, None)
+
+    finally:
+        if f:
+            f.close()
+
+    return extract_constant(code, symbol, default)
+
+
+def extract_constant(code, symbol, default=-1):
+    """Extract the constant value of 'symbol' from 'code'
+
+    If the name 'symbol' is bound to a constant value by the Python code
+    object 'code', return that value.  If 'symbol' is bound to an expression,
+    return 'default'.  Otherwise, return 'None'.
+
+    Return value is based on the first assignment to 'symbol'.  'symbol' must
+    be a global, or at least a non-"fast" local in the code block.  That is,
+    only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
+    must be present in 'code.co_names'.
+    """
+    if symbol not in code.co_names:
+        # name's not there, can't possibly be an assignment
+        return None
+
+    name_idx = list(code.co_names).index(symbol)
+
+    STORE_NAME = 90
+    STORE_GLOBAL = 97
+    LOAD_CONST = 100
+
+    const = default
+
+    for byte_code in Bytecode(code):
+        op = byte_code.opcode
+        arg = byte_code.arg
+
+        if op == LOAD_CONST:
+            const = code.co_consts[arg]
+        elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
+            return const
+        else:
+            const = default
+
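+# Illustrative use with a code object compiled on the fly (not a real module):
+#
+#   code = compile("__version__ = '1.0'", '<example>', 'exec')
+#   extract_constant(code, '__version__')  # -> '1.0'
+#   extract_constant(code, 'missing')      # -> None (name never bound)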
+
+def _update_globals():
+    """
+    Patch the globals to remove the objects not available on some platforms.
+
+    XXX it'd be better to test assertions about bytecode instead.
+    """
+
+    if not sys.platform.startswith('java') and sys.platform != 'cli':
+        return
+    incompatible = 'extract_constant', 'get_module_constant'
+    for name in incompatible:
+        del globals()[name]
+        __all__.remove(name)
+
+
+_update_globals()
diff --git a/lib/python3.7/site-packages/setuptools/dist.py b/lib/python3.7/site-packages/setuptools/dist.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a165de0dd6b5ee9c216ce37e8895802de6ac6ca
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/dist.py
@@ -0,0 +1,1278 @@
+# -*- coding: utf-8 -*-
+__all__ = ['Distribution']
+
+import io
+import sys
+import re
+import os
+import warnings
+import numbers
+import distutils.log
+import distutils.core
+import distutils.cmd
+import distutils.dist
+from distutils.util import strtobool
+from distutils.debug import DEBUG
+from distutils.fancy_getopt import translate_longopt
+import itertools
+
+from collections import defaultdict
+from email import message_from_file
+
+from distutils.errors import (
+    DistutilsOptionError, DistutilsPlatformError, DistutilsSetupError,
+)
+from distutils.util import rfc822_escape
+from distutils.version import StrictVersion
+
+from setuptools.extern import six
+from setuptools.extern import packaging
+from setuptools.extern.six.moves import map, filter, filterfalse
+
+from . import SetuptoolsDeprecationWarning
+
+from setuptools.depends import Require
+from setuptools import windows_support
+from setuptools.monkey import get_unpatched
+from setuptools.config import parse_configuration
+import pkg_resources
+
+__import__('setuptools.extern.packaging.specifiers')
+__import__('setuptools.extern.packaging.version')
+
+
+def _get_unpatched(cls):
+    warnings.warn("Do not call this function", DistDeprecationWarning)
+    return get_unpatched(cls)
+
+
+def get_metadata_version(self):
+    mv = getattr(self, 'metadata_version', None)
+
+    if mv is None:
+        if self.long_description_content_type or self.provides_extras:
+            mv = StrictVersion('2.1')
+        elif (self.maintainer is not None or
+              self.maintainer_email is not None or
+              getattr(self, 'python_requires', None) is not None):
+            mv = StrictVersion('1.2')
+        elif (self.provides or self.requires or self.obsoletes or
+                self.classifiers or self.download_url):
+            mv = StrictVersion('1.1')
+        else:
+            mv = StrictVersion('1.0')
+
+        self.metadata_version = mv
+
+    return mv
+
+
+def read_pkg_file(self, file):
+    """Reads the metadata values from a file object."""
+    msg = message_from_file(file)
+
+    def _read_field(name):
+        value = msg[name]
+        if value == 'UNKNOWN':
+            return None
+        return value
+
+    def _read_list(name):
+        values = msg.get_all(name, None)
+        if values == []:
+            return None
+        return values
+
+    self.metadata_version = StrictVersion(msg['metadata-version'])
+    self.name = _read_field('name')
+    self.version = _read_field('version')
+    self.description = _read_field('summary')
+    # PKG-INFO carries only author fields; maintainer is left unset.
+    self.author = _read_field('author')
+    self.maintainer = None
+    self.author_email = _read_field('author-email')
+    self.maintainer_email = None
+    self.url = _read_field('home-page')
+    self.license = _read_field('license')
+
+    if 'download-url' in msg:
+        self.download_url = _read_field('download-url')
+    else:
+        self.download_url = None
+
+    self.long_description = _read_field('description')
+    self.description = _read_field('summary')
+
+    if 'keywords' in msg:
+        self.keywords = _read_field('keywords').split(',')
+
+    self.platforms = _read_list('platform')
+    self.classifiers = _read_list('classifier')
+
+    # PEP 314 - these fields only exist in 1.1
+    if self.metadata_version == StrictVersion('1.1'):
+        self.requires = _read_list('requires')
+        self.provides = _read_list('provides')
+        self.obsoletes = _read_list('obsoletes')
+    else:
+        self.requires = None
+        self.provides = None
+        self.obsoletes = None
+
+
+# Based on Python 3.5 version
+def write_pkg_file(self, file):
+    """Write the PKG-INFO format data to a file object.
+    """
+    version = self.get_metadata_version()
+
+    if six.PY2:
+        def write_field(key, value):
+            file.write("%s: %s\n" % (key, self._encode_field(value)))
+    else:
+        def write_field(key, value):
+            file.write("%s: %s\n" % (key, value))
+
+    write_field('Metadata-Version', str(version))
+    write_field('Name', self.get_name())
+    write_field('Version', self.get_version())
+    write_field('Summary', self.get_description())
+    write_field('Home-page', self.get_url())
+
+    if version < StrictVersion('1.2'):
+        write_field('Author', self.get_contact())
+        write_field('Author-email', self.get_contact_email())
+    else:
+        optional_fields = (
+            ('Author', 'author'),
+            ('Author-email', 'author_email'),
+            ('Maintainer', 'maintainer'),
+            ('Maintainer-email', 'maintainer_email'),
+        )
+
+        for field, attr in optional_fields:
+            attr_val = getattr(self, attr)
+
+            if attr_val is not None:
+                write_field(field, attr_val)
+
+    write_field('License', self.get_license())
+    if self.download_url:
+        write_field('Download-URL', self.download_url)
+    for project_url in self.project_urls.items():
+        write_field('Project-URL', '%s, %s' % project_url)
+
+    long_desc = rfc822_escape(self.get_long_description())
+    write_field('Description', long_desc)
+
+    keywords = ','.join(self.get_keywords())
+    if keywords:
+        write_field('Keywords', keywords)
+
+    if version >= StrictVersion('1.2'):
+        for platform in self.get_platforms():
+            write_field('Platform', platform)
+    else:
+        self._write_list(file, 'Platform', self.get_platforms())
+
+    self._write_list(file, 'Classifier', self.get_classifiers())
+
+    # PEP 314
+    self._write_list(file, 'Requires', self.get_requires())
+    self._write_list(file, 'Provides', self.get_provides())
+    self._write_list(file, 'Obsoletes', self.get_obsoletes())
+
+    # Setuptools specific for PEP 345
+    if hasattr(self, 'python_requires'):
+        write_field('Requires-Python', self.python_requires)
+
+    # PEP 566
+    if self.long_description_content_type:
+        write_field(
+            'Description-Content-Type',
+            self.long_description_content_type
+        )
+    if self.provides_extras:
+        for extra in self.provides_extras:
+            write_field('Provides-Extra', extra)
+
+
+sequence = tuple, list
+
+
+def check_importable(dist, attr, value):
+    try:
+        ep = pkg_resources.EntryPoint.parse('x=' + value)
+        assert not ep.extras
+    except (TypeError, ValueError, AttributeError, AssertionError):
+        raise DistutilsSetupError(
+            "%r must be importable 'module:attrs' string (got %r)"
+            % (attr, value)
+        )
+
+
+def assert_string_list(dist, attr, value):
+    """Verify that value is a string list or None"""
+    try:
+        assert ''.join(value) != value
+    except (TypeError, ValueError, AttributeError, AssertionError):
+        raise DistutilsSetupError(
+            "%r must be a list of strings (got %r)" % (attr, value)
+        )
+
+
+def check_nsp(dist, attr, value):
+    """Verify that namespace packages are valid"""
+    ns_packages = value
+    assert_string_list(dist, attr, ns_packages)
+    for nsp in ns_packages:
+        if not dist.has_contents_for(nsp):
+            raise DistutilsSetupError(
+                "Distribution contains no modules or packages for " +
+                "namespace package %r" % nsp
+            )
+        parent, sep, child = nsp.rpartition('.')
+        if parent and parent not in ns_packages:
+            distutils.log.warn(
+                "WARNING: %r is declared as a package namespace, but %r"
+                " is not: please correct this in setup.py", nsp, parent
+            )
+
+
+def check_extras(dist, attr, value):
+    """Verify that extras_require mapping is valid"""
+    try:
+        list(itertools.starmap(_check_extra, value.items()))
+    except (TypeError, ValueError, AttributeError):
+        raise DistutilsSetupError(
+            "'extras_require' must be a dictionary whose values are "
+            "strings or lists of strings containing valid project/version "
+            "requirement specifiers."
+        )
+
+
+def _check_extra(extra, reqs):
+    name, sep, marker = extra.partition(':')
+    if marker and pkg_resources.invalid_marker(marker):
+        raise DistutilsSetupError("Invalid environment marker: " + marker)
+    list(pkg_resources.parse_requirements(reqs))
+
+
+def assert_bool(dist, attr, value):
+    """Verify that value is True, False, 0, or 1"""
+    if bool(value) != value:
+        tmpl = "{attr!r} must be a boolean value (got {value!r})"
+        raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
+
+
+def check_requirements(dist, attr, value):
+    """Verify that install_requires is a valid requirements list"""
+    try:
+        list(pkg_resources.parse_requirements(value))
+        if isinstance(value, (dict, set)):
+            raise TypeError("Unordered types are not allowed")
+    except (TypeError, ValueError) as error:
+        tmpl = (
+            "{attr!r} must be a string or list of strings "
+            "containing valid project/version requirement specifiers; {error}"
+        )
+        raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
+
+
+def check_specifier(dist, attr, value):
+    """Verify that value is a valid version specifier"""
+    try:
+        packaging.specifiers.SpecifierSet(value)
+    except packaging.specifiers.InvalidSpecifier as error:
+        tmpl = (
+            "{attr!r} must be a string "
+            "containing valid version specifiers; {error}"
+        )
+        raise DistutilsSetupError(tmpl.format(attr=attr, error=error))
+
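+# Illustrative behavior of check_specifier(); the first argument is unused
+# by the check, so None suffices here:
+#
+#   check_specifier(None, 'python_requires', '>=3.5')     # passes silently
+#   check_specifier(None, 'python_requires', 'nonsense')  # DistutilsSetupError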
+
+def check_entry_points(dist, attr, value):
+    """Verify that entry_points map is parseable"""
+    try:
+        pkg_resources.EntryPoint.parse_map(value)
+    except ValueError as e:
+        raise DistutilsSetupError(e)
+
+
+def check_test_suite(dist, attr, value):
+    if not isinstance(value, six.string_types):
+        raise DistutilsSetupError("test_suite must be a string")
+
+
+def check_package_data(dist, attr, value):
+    """Verify that value is a dictionary of package names to glob lists"""
+    if isinstance(value, dict):
+        for k, v in value.items():
+            if not isinstance(k, str):
+                break
+            try:
+                iter(v)
+            except TypeError:
+                break
+        else:
+            return
+    raise DistutilsSetupError(
+        attr + " must be a dictionary mapping package names to lists of "
+        "wildcard patterns"
+    )
+
+
+def check_packages(dist, attr, value):
+    for pkgname in value:
+        if not re.match(r'\w+(\.\w+)*', pkgname):
+            distutils.log.warn(
+                "WARNING: %r not a valid package name; please use only "
+                ".-separated package names in setup.py", pkgname
+            )
+
+
+_Distribution = get_unpatched(distutils.core.Distribution)
+
+
+class Distribution(_Distribution):
+    """Distribution with support for features, tests, and package data
+
+    This is an enhanced version of 'distutils.dist.Distribution' that
+    effectively adds the following new optional keyword arguments to 'setup()':
+
+     'install_requires' -- a string or sequence of strings specifying project
+        versions that the distribution requires when installed, in the format
+        used by 'pkg_resources.require()'.  They will be installed
+        automatically when the package is installed.  If you wish to use
+        packages that are not available in PyPI, or want to give your users an
+        alternate download location, you can add a 'find_links' option to the
+        '[easy_install]' section of your project's 'setup.cfg' file, and then
+        setuptools will scan the listed web pages for links that satisfy the
+        requirements.
+
+     'extras_require' -- a dictionary mapping names of optional "extras" to the
+        additional requirement(s) that using those extras incurs. For example,
+        this::
+
+            extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
+
+        indicates that the distribution can optionally provide an extra
+        capability called "reST", but it can only be used if docutils and
+        reSTedit are installed.  If the user installs your package using
+        EasyInstall and requests one of your extras, the corresponding
+        additional requirements will be installed if needed.
+
+     'features' **deprecated** -- a dictionary mapping option names to
+        'setuptools.Feature'
+        objects.  Features are a portion of the distribution that can be
+        included or excluded based on user options, inter-feature dependencies,
+        and availability on the current system.  Excluded features are omitted
+        from all setup commands, including source and binary distributions, so
+        you can create multiple distributions from the same source tree.
+        Feature names should be valid Python identifiers, except that they may
+        contain the '-' (minus) sign.  Features can be included or excluded
+        via the command line options '--with-X' and '--without-X', where 'X' is
+        the name of the feature.  Whether a feature is included by default, and
+        whether you are allowed to control this from the command line, is
+        determined by the Feature object.  See the 'Feature' class for more
+        information.
+
+     'test_suite' -- the name of a test suite to run for the 'test' command.
+        If the user runs 'python setup.py test', the package will be installed,
+        and the named test suite will be run.  The format is the same as
+        would be used on a 'unittest.py' command line.  That is, it is the
+        dotted name of an object to import and call to generate a test suite.
+
+     'package_data' -- a dictionary mapping package names to lists of filenames
+        or globs to use to find data files contained in the named packages.
+        If the dictionary has filenames or globs listed under '""' (the empty
+        string), those names will be searched for in every package, in addition
+        to any names for the specific package.  Data files found using these
+        names/globs will be installed along with the package, in the same
+        location as the package.  Note that globs are allowed to reference
+        the contents of non-package subdirectories, as long as you use '/' as
+        a path separator.  (Globs are automatically converted to
+        platform-specific paths at runtime.)
+
+    In addition to these new keywords, this class also has several new methods
+    for manipulating the distribution's contents.  For example, the 'include()'
+    and 'exclude()' methods can be thought of as in-place add and subtract
+    commands that add or remove packages, modules, extensions, and so on from
+    the distribution.  They are used by the feature subsystem to configure the
+    distribution for the included and excluded features.
+    """
+
+    _DISTUTILS_UNSUPPORTED_METADATA = {
+        'long_description_content_type': None,
+        'project_urls': dict,
+        'provides_extras': set,
+    }
+
+    _patched_dist = None
+
+    def patch_missing_pkg_info(self, attrs):
+        # Fake up a replacement for the data that would normally come from
+        # PKG-INFO, but which might not yet be built if this is a fresh
+        # checkout.
+        #
+        if not attrs or 'name' not in attrs or 'version' not in attrs:
+            return
+        key = pkg_resources.safe_name(str(attrs['name'])).lower()
+        dist = pkg_resources.working_set.by_key.get(key)
+        if dist is not None and not dist.has_metadata('PKG-INFO'):
+            dist._version = pkg_resources.safe_version(str(attrs['version']))
+            self._patched_dist = dist
+
+    def __init__(self, attrs=None):
+        have_package_data = hasattr(self, "package_data")
+        if not have_package_data:
+            self.package_data = {}
+        attrs = attrs or {}
+        if 'features' in attrs or 'require_features' in attrs:
+            Feature.warn_deprecated()
+        self.require_features = []
+        self.features = {}
+        self.dist_files = []
+        # Filter out setuptools-specific options.
+        self.src_root = attrs.pop("src_root", None)
+        self.patch_missing_pkg_info(attrs)
+        self.dependency_links = attrs.pop('dependency_links', [])
+        self.setup_requires = attrs.pop('setup_requires', [])
+        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
+            vars(self).setdefault(ep.name, None)
+        _Distribution.__init__(self, {
+            k: v for k, v in attrs.items()
+            if k not in self._DISTUTILS_UNSUPPORTED_METADATA
+        })
+
+        # Fill in missing metadata fields not supported by distutils.
+        # Note some fields may have been set by other tools (e.g. pbr)
+        # above; they are taken in preference to setup() arguments.
+        for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items():
+            for source in self.metadata.__dict__, attrs:
+                if option in source:
+                    value = source[option]
+                    break
+            else:
+                value = default() if default else None
+            setattr(self.metadata, option, value)
+
+        if isinstance(self.metadata.version, numbers.Number):
+            # Some people apparently take "version number" too literally :)
+            self.metadata.version = str(self.metadata.version)
+
+        if self.metadata.version is not None:
+            try:
+                ver = packaging.version.Version(self.metadata.version)
+                normalized_version = str(ver)
+                if self.metadata.version != normalized_version:
+                    warnings.warn(
+                        "Normalizing '%s' to '%s'" % (
+                            self.metadata.version,
+                            normalized_version,
+                        )
+                    )
+                    self.metadata.version = normalized_version
+            except (packaging.version.InvalidVersion, TypeError):
+                warnings.warn(
+                    "The version specified (%r) is an invalid version, this "
+                    "may not work as expected with newer versions of "
+                    "setuptools, pip, and PyPI. Please see PEP 440 for more "
+                    "details." % self.metadata.version
+                )
+        self._finalize_requires()
+
+    def _finalize_requires(self):
+        """
+        Set `metadata.python_requires` and fix environment markers
+        in `install_requires` and `extras_require`.
+        """
+        if getattr(self, 'python_requires', None):
+            self.metadata.python_requires = self.python_requires
+
+        if getattr(self, 'extras_require', None):
+            for extra in self.extras_require.keys():
+                # Since this gets called multiple times at points where the
+                # keys have become 'converted' extras, ensure that we are only
+                # truly adding extras we haven't seen before here.
+                extra = extra.split(':')[0]
+                if extra:
+                    self.metadata.provides_extras.add(extra)
+
+        self._convert_extras_requirements()
+        self._move_install_requirements_markers()
+
+    def _convert_extras_requirements(self):
+        """
+        Convert requirements in `extras_require` of the form
+        `"extra": ["barbazquux; {marker}"]` to
+        `"extra:{marker}": ["barbazquux"]`.
+        """
+        spec_ext_reqs = getattr(self, 'extras_require', None) or {}
+        self._tmp_extras_require = defaultdict(list)
+        for section, v in spec_ext_reqs.items():
+            # Do not strip empty sections.
+            self._tmp_extras_require[section]
+            for r in pkg_resources.parse_requirements(v):
+                suffix = self._suffix_for(r)
+                self._tmp_extras_require[section + suffix].append(r)
+
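+    # Illustrative transformation performed above; the extra name "tests"
+    # and the pytest requirement are placeholders:
+    #
+    #   {"tests": ["pytest; python_version<'3'"]}
+    # roughly becomes
+    #   {'tests:python_version < "3"': [<Requirement: pytest>]}
+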
+    @staticmethod
+    def _suffix_for(req):
+        """
+        For a requirement, return the 'extras_require' suffix for
+        that requirement.
+        """
+        return ':' + str(req.marker) if req.marker else ''
+
+    def _move_install_requirements_markers(self):
+        """
+        Move requirements in `install_requires` that are using environment
+        markers `extras_require`.
+        """
+
+        # divide the install_requires into two sets, simple ones still
+        # handled by install_requires and more complex ones handled
+        # by extras_require.
+
+        def is_simple_req(req):
+            return not req.marker
+
+        spec_inst_reqs = getattr(self, 'install_requires', None) or ()
+        inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs))
+        simple_reqs = filter(is_simple_req, inst_reqs)
+        complex_reqs = filterfalse(is_simple_req, inst_reqs)
+        self.install_requires = list(map(str, simple_reqs))
+
+        for r in complex_reqs:
+            self._tmp_extras_require[':' + str(r.marker)].append(r)
+        self.extras_require = dict(
+            (k, [str(r) for r in map(self._clean_req, v)])
+            for k, v in self._tmp_extras_require.items()
+        )
+
+    def _clean_req(self, req):
+        """
+        Given a Requirement, remove environment markers and return it.
+        """
+        req.marker = None
+        return req
+
+    def _parse_config_files(self, filenames=None):
+        """
+        Adapted from distutils.dist.Distribution.parse_config_files,
+        this method provides the same functionality in subtly-improved
+        ways.
+        """
+        from setuptools.extern.six.moves.configparser import ConfigParser
+
+        # Ignore install directory options if we have a venv
+        if six.PY3 and sys.prefix != sys.base_prefix:
+            ignore_options = [
+                'install-base', 'install-platbase', 'install-lib',
+                'install-platlib', 'install-purelib', 'install-headers',
+                'install-scripts', 'install-data', 'prefix', 'exec-prefix',
+                'home', 'user', 'root']
+        else:
+            ignore_options = []
+
+        ignore_options = frozenset(ignore_options)
+
+        if filenames is None:
+            filenames = self.find_config_files()
+
+        if DEBUG:
+            self.announce("Distribution.parse_config_files():")
+
+        parser = ConfigParser()
+        for filename in filenames:
+            with io.open(filename, encoding='utf-8') as reader:
+                if DEBUG:
+                    self.announce("  reading {filename}".format(**locals()))
+                (parser.read_file if six.PY3 else parser.readfp)(reader)
+            for section in parser.sections():
+                options = parser.options(section)
+                opt_dict = self.get_option_dict(section)
+
+                for opt in options:
+                    if opt != '__name__' and opt not in ignore_options:
+                        val = self._try_str(parser.get(section, opt))
+                        opt = opt.replace('-', '_')
+                        opt_dict[opt] = (filename, val)
+
+            # Make the ConfigParser forget everything (so we retain
+            # the original filenames that options come from)
+            parser.__init__()
+
+        # If there was a "global" section in the config file, use it
+        # to set Distribution options.
+
+        if 'global' in self.command_options:
+            for (opt, (src, val)) in self.command_options['global'].items():
+                alias = self.negative_opt.get(opt)
+                try:
+                    if alias:
+                        setattr(self, alias, not strtobool(val))
+                    elif opt in ('verbose', 'dry_run'):  # ugh!
+                        setattr(self, opt, strtobool(val))
+                    else:
+                        setattr(self, opt, val)
+                except ValueError as msg:
+                    raise DistutilsOptionError(msg)
+
+    @staticmethod
+    def _try_str(val):
+        """
+        On Python 2, much of distutils relies on string values being of
+        type 'str' (bytes) and not unicode text. If the value can be safely
+        encoded to bytes using the default encoding, prefer that.
+
+        Why the default encoding? Because that value can be implicitly
+        decoded back to text if needed.
+
+        Ref #1653
+        """
+        if six.PY3:
+            return val
+        try:
+            return val.encode()
+        except UnicodeEncodeError:
+            pass
+        return val
+
+    def _set_command_options(self, command_obj, option_dict=None):
+        """
+        Set the options for 'command_obj' from 'option_dict'.  Basically
+        this means copying elements of a dictionary ('option_dict') to
+        attributes of an instance ('command').
+
+        'command_obj' must be a Command instance.  If 'option_dict' is not
+        supplied, uses the standard option dictionary for this command
+        (from 'self.command_options').
+
+        (Adopted from distutils.dist.Distribution._set_command_options)
+        """
+        command_name = command_obj.get_command_name()
+        if option_dict is None:
+            option_dict = self.get_option_dict(command_name)
+
+        if DEBUG:
+            self.announce("  setting options for '%s' command:" % command_name)
+        for (option, (source, value)) in option_dict.items():
+            if DEBUG:
+                self.announce("    %s = %s (from %s)" % (option, value,
+                                                         source))
+            try:
+                bool_opts = [translate_longopt(o)
+                             for o in command_obj.boolean_options]
+            except AttributeError:
+                bool_opts = []
+            try:
+                neg_opt = command_obj.negative_opt
+            except AttributeError:
+                neg_opt = {}
+
+            try:
+                is_string = isinstance(value, six.string_types)
+                if option in neg_opt and is_string:
+                    setattr(command_obj, neg_opt[option], not strtobool(value))
+                elif option in bool_opts and is_string:
+                    setattr(command_obj, option, strtobool(value))
+                elif hasattr(command_obj, option):
+                    setattr(command_obj, option, value)
+                else:
+                    raise DistutilsOptionError(
+                        "error in %s: command '%s' has no such option '%s'"
+                        % (source, command_name, option))
+            except ValueError as msg:
+                raise DistutilsOptionError(msg)
+
+    def parse_config_files(self, filenames=None, ignore_option_errors=False):
+        """Parses configuration files from various levels
+        and loads configuration.
+
+        """
+        self._parse_config_files(filenames=filenames)
+
+        parse_configuration(self, self.command_options,
+                            ignore_option_errors=ignore_option_errors)
+        self._finalize_requires()
+
+    def parse_command_line(self):
+        """Process features after parsing command line options"""
+        result = _Distribution.parse_command_line(self)
+        if self.features:
+            self._finalize_features()
+        return result
+
+    def _feature_attrname(self, name):
+        """Convert feature name to corresponding option attribute name"""
+        return 'with_' + name.replace('-', '_')
+
+    def fetch_build_eggs(self, requires):
+        """Resolve pre-setup requirements"""
+        resolved_dists = pkg_resources.working_set.resolve(
+            pkg_resources.parse_requirements(requires),
+            installer=self.fetch_build_egg,
+            replace_conflicting=True,
+        )
+        for dist in resolved_dists:
+            pkg_resources.working_set.add(dist, replace=True)
+        return resolved_dists
+
+    def finalize_options(self):
+        _Distribution.finalize_options(self)
+        if self.features:
+            self._set_global_opts_from_features()
+
+        for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
+            value = getattr(self, ep.name, None)
+            if value is not None:
+                ep.require(installer=self.fetch_build_egg)
+                ep.load()(self, ep.name, value)
+        if getattr(self, 'convert_2to3_doctests', None):
+            # XXX may convert to set here when we can rely on set being builtin
+            self.convert_2to3_doctests = [
+                os.path.abspath(p)
+                for p in self.convert_2to3_doctests
+            ]
+        else:
+            self.convert_2to3_doctests = []
+
+    def get_egg_cache_dir(self):
+        egg_cache_dir = os.path.join(os.curdir, '.eggs')
+        if not os.path.exists(egg_cache_dir):
+            os.mkdir(egg_cache_dir)
+            windows_support.hide_file(egg_cache_dir)
+            readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
+            with open(readme_txt_filename, 'w') as f:
+                f.write('This directory contains eggs that were downloaded '
+                        'by setuptools to build, test, and run plug-ins.\n\n')
+                f.write('This directory caches those eggs to prevent '
+                        'repeated downloads.\n\n')
+                f.write('However, it is safe to delete this directory.\n\n')
+
+        return egg_cache_dir
+
+    def fetch_build_egg(self, req):
+        """Fetch an egg needed for building"""
+        from setuptools.command.easy_install import easy_install
+        dist = self.__class__({'script_args': ['easy_install']})
+        opts = dist.get_option_dict('easy_install')
+        opts.clear()
+        opts.update(
+            (k, v)
+            for k, v in self.get_option_dict('easy_install').items()
+            if k in (
+                # don't use any other settings
+                'find_links', 'site_dirs', 'index_url',
+                'optimize', 'site_dirs', 'allow_hosts',
+            ))
+        if self.dependency_links:
+            links = self.dependency_links[:]
+            if 'find_links' in opts:
+                links = opts['find_links'][1] + links
+            opts['find_links'] = ('setup', links)
+        install_dir = self.get_egg_cache_dir()
+        cmd = easy_install(
+            dist, args=["x"], install_dir=install_dir,
+            exclude_scripts=True,
+            always_copy=False, build_directory=None, editable=False,
+            upgrade=False, multi_version=True, no_report=True, user=False
+        )
+        cmd.ensure_finalized()
+        return cmd.easy_install(req)
+
+    def _set_global_opts_from_features(self):
+        """Add --with-X/--without-X options based on optional features"""
+
+        go = []
+        no = self.negative_opt.copy()
+
+        for name, feature in self.features.items():
+            self._set_feature(name, None)
+            feature.validate(self)
+
+            if feature.optional:
+                descr = feature.description
+                incdef = ' (default)'
+                excdef = ''
+                if not feature.include_by_default():
+                    excdef, incdef = incdef, excdef
+
+                new = (
+                    ('with-' + name, None, 'include ' + descr + incdef),
+                    ('without-' + name, None, 'exclude ' + descr + excdef),
+                )
+                go.extend(new)
+                no['without-' + name] = 'with-' + name
+
+        self.global_options = self.feature_options = go + self.global_options
+        self.negative_opt = self.feature_negopt = no
+
+    def _finalize_features(self):
+        """Add/remove features and resolve dependencies between them"""
+
+        # First, flag all the enabled items (and thus their dependencies)
+        for name, feature in self.features.items():
+            enabled = self.feature_is_included(name)
+            if enabled or (enabled is None and feature.include_by_default()):
+                feature.include_in(self)
+                self._set_feature(name, 1)
+
+        # Then disable the rest, so that off-by-default features don't
+        # get flagged as errors when they're required by an enabled feature
+        for name, feature in self.features.items():
+            if not self.feature_is_included(name):
+                feature.exclude_from(self)
+                self._set_feature(name, 0)
+
+    def get_command_class(self, command):
+        """Pluggable version of get_command_class()"""
+        if command in self.cmdclass:
+            return self.cmdclass[command]
+
+        eps = pkg_resources.iter_entry_points('distutils.commands', command)
+        for ep in eps:
+            ep.require(installer=self.fetch_build_egg)
+            self.cmdclass[command] = cmdclass = ep.load()
+            return cmdclass
+        else:
+            return _Distribution.get_command_class(self, command)
+
+    def print_commands(self):
+        for ep in pkg_resources.iter_entry_points('distutils.commands'):
+            if ep.name not in self.cmdclass:
+                # don't require extras as the commands won't be invoked
+                cmdclass = ep.resolve()
+                self.cmdclass[ep.name] = cmdclass
+        return _Distribution.print_commands(self)
+
+    def get_command_list(self):
+        for ep in pkg_resources.iter_entry_points('distutils.commands'):
+            if ep.name not in self.cmdclass:
+                # don't require extras as the commands won't be invoked
+                cmdclass = ep.resolve()
+                self.cmdclass[ep.name] = cmdclass
+        return _Distribution.get_command_list(self)
+
+    def _set_feature(self, name, status):
+        """Set feature's inclusion status"""
+        setattr(self, self._feature_attrname(name), status)
+
+    def feature_is_included(self, name):
+        """Return 1 if feature is included, 0 if excluded, 'None' if unknown"""
+        return getattr(self, self._feature_attrname(name))
+
+    def include_feature(self, name):
+        """Request inclusion of feature named 'name'"""
+
+        if self.feature_is_included(name) == 0:
+            descr = self.features[name].description
+            raise DistutilsOptionError(
+                descr + " is required, but was excluded or is not available"
+            )
+        self.features[name].include_in(self)
+        self._set_feature(name, 1)
+
+    def include(self, **attrs):
+        """Add items to distribution that are named in keyword arguments
+
+        For example, 'dist.include(py_modules=["x"])' would add 'x' to
+        the distribution's 'py_modules' attribute, if it was not already
+        there.
+
+        Currently, this method only supports inclusion for attributes that are
+        lists or tuples.  If you need to add support for adding to other
+        attributes in this or a subclass, you can add an '_include_X' method,
+        where 'X' is the name of the attribute.  The method will be called with
+        the value passed to 'include()'.  So, 'dist.include(foo={"bar":"baz"})'
+        will try to call 'dist._include_foo({"bar":"baz"})', which can then
+        handle whatever special inclusion logic is needed.
+        """
+        for k, v in attrs.items():
+            include = getattr(self, '_include_' + k, None)
+            if include:
+                include(v)
+            else:
+                self._include_misc(k, v)
+
+    def exclude_package(self, package):
+        """Remove packages, modules, and extensions in named package"""
+
+        pfx = package + '.'
+        if self.packages:
+            self.packages = [
+                p for p in self.packages
+                if p != package and not p.startswith(pfx)
+            ]
+
+        if self.py_modules:
+            self.py_modules = [
+                p for p in self.py_modules
+                if p != package and not p.startswith(pfx)
+            ]
+
+        if self.ext_modules:
+            self.ext_modules = [
+                p for p in self.ext_modules
+                if p.name != package and not p.name.startswith(pfx)
+            ]
+
+    def has_contents_for(self, package):
+        """Return true if 'exclude_package(package)' would do something"""
+
+        pfx = package + '.'
+
+        for p in self.iter_distribution_names():
+            if p == package or p.startswith(pfx):
+                return True
+
+    def _exclude_misc(self, name, value):
+        """Handle 'exclude()' for list/tuple attrs without a special handler"""
+        if not isinstance(value, sequence):
+            raise DistutilsSetupError(
+                "%s: setting must be a list or tuple (%r)" % (name, value)
+            )
+        try:
+            old = getattr(self, name)
+        except AttributeError:
+            raise DistutilsSetupError(
+                "%s: No such distribution setting" % name
+            )
+        if old is not None and not isinstance(old, sequence):
+            raise DistutilsSetupError(
+                name + ": this setting cannot be changed via include/exclude"
+            )
+        elif old:
+            setattr(self, name, [item for item in old if item not in value])
+
+    def _include_misc(self, name, value):
+        """Handle 'include()' for list/tuple attrs without a special handler"""
+
+        if not isinstance(value, sequence):
+            raise DistutilsSetupError(
+                "%s: setting must be a list (%r)" % (name, value)
+            )
+        try:
+            old = getattr(self, name)
+        except AttributeError:
+            raise DistutilsSetupError(
+                "%s: No such distribution setting" % name
+            )
+        if old is None:
+            setattr(self, name, value)
+        elif not isinstance(old, sequence):
+            raise DistutilsSetupError(
+                name + ": this setting cannot be changed via include/exclude"
+            )
+        else:
+            new = [item for item in value if item not in old]
+            setattr(self, name, old + new)
+
+    def exclude(self, **attrs):
+        """Remove items from distribution that are named in keyword arguments
+
+        For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
+        the distribution's 'py_modules' attribute.  Excluding packages uses
+        the 'exclude_package()' method, so all of the package's contained
+        packages, modules, and extensions are also excluded.
+
+        Currently, this method only supports exclusion from attributes that are
+        lists or tuples.  If you need to add support for excluding from other
+        attributes in this or a subclass, you can add an '_exclude_X' method,
+        where 'X' is the name of the attribute.  The method will be called with
+        the value passed to 'exclude()'.  So, 'dist.exclude(foo={"bar":"baz"})'
+        will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
+        handle whatever special exclusion logic is needed.
+        """
+        for k, v in attrs.items():
+            exclude = getattr(self, '_exclude_' + k, None)
+            if exclude:
+                exclude(v)
+            else:
+                self._exclude_misc(k, v)
+
+    def _exclude_packages(self, packages):
+        if not isinstance(packages, sequence):
+            raise DistutilsSetupError(
+                "packages: setting must be a list or tuple (%r)" % (packages,)
+            )
+        list(map(self.exclude_package, packages))
+
+    def _parse_command_opts(self, parser, args):
+        # Remove --with-X/--without-X options when processing command args
+        self.global_options = self.__class__.global_options
+        self.negative_opt = self.__class__.negative_opt
+
+        # First, expand any aliases
+        command = args[0]
+        aliases = self.get_option_dict('aliases')
+        while command in aliases:
+            src, alias = aliases[command]
+            del aliases[command]  # ensure each alias can expand only once!
+            import shlex
+            args[:1] = shlex.split(alias, True)
+            command = args[0]
+
+        nargs = _Distribution._parse_command_opts(self, parser, args)
+
+        # Handle commands that want to consume all remaining arguments
+        cmd_class = self.get_command_class(command)
+        if getattr(cmd_class, 'command_consumes_arguments', None):
+            self.get_option_dict(command)['args'] = ("command line", nargs)
+            if nargs is not None:
+                return []
+
+        return nargs
+
+    def get_cmdline_options(self):
+        """Return a '{cmd: {opt:val}}' map of all command-line options
+
+        Option names are all long, but do not include the leading '--', and
+        contain dashes rather than underscores.  If the option doesn't take
+        an argument (e.g. '--quiet'), the 'val' is 'None'.
+
+        Note that options provided by config files are intentionally excluded.
+        """
+
+        d = {}
+
+        for cmd, opts in self.command_options.items():
+
+            for opt, (src, val) in opts.items():
+
+                if src != "command line":
+                    continue
+
+                opt = opt.replace('_', '-')
+
+                if val == 0:
+                    cmdobj = self.get_command_obj(cmd)
+                    neg_opt = self.negative_opt.copy()
+                    neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
+                    for neg, pos in neg_opt.items():
+                        if pos == opt:
+                            opt = neg
+                            val = None
+                            break
+                    else:
+                        raise AssertionError("Shouldn't be able to get here")
+
+                elif val == 1:
+                    val = None
+
+                d.setdefault(cmd, {})[opt] = val
+
+        return d
+
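+    # Illustrative shape of the returned mapping; command and option names
+    # are placeholders:
+    #
+    #   {'install': {'prefix': '/usr/local', 'force': None}}
+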
+    def iter_distribution_names(self):
+        """Yield all packages, modules, and extension names in distribution"""
+
+        for pkg in self.packages or ():
+            yield pkg
+
+        for module in self.py_modules or ():
+            yield module
+
+        for ext in self.ext_modules or ():
+            if isinstance(ext, tuple):
+                name, buildinfo = ext
+            else:
+                name = ext.name
+            if name.endswith('module'):
+                name = name[:-6]
+            yield name
+
+    def handle_display_options(self, option_order):
+        """If there were any non-global "display-only" options
+        (--help-commands or the metadata display options) on the command
+        line, display the requested info and return true; else return
+        false.
+        """
+        import sys
+
+        if six.PY2 or self.help_commands:
+            return _Distribution.handle_display_options(self, option_order)
+
+        # Stdout may be StringIO (e.g. in tests)
+        if not isinstance(sys.stdout, io.TextIOWrapper):
+            return _Distribution.handle_display_options(self, option_order)
+
+        # Don't wrap stdout if utf-8 is already the encoding. Provides
+        #  workaround for #334.
+        if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
+            return _Distribution.handle_display_options(self, option_order)
+
+        # Print metadata in UTF-8 no matter the platform
+        encoding = sys.stdout.encoding
+        errors = sys.stdout.errors
+        newline = sys.platform != 'win32' and '\n' or None
+        line_buffering = sys.stdout.line_buffering
+
+        sys.stdout = io.TextIOWrapper(
+            sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
+        try:
+            return _Distribution.handle_display_options(self, option_order)
+        finally:
+            sys.stdout = io.TextIOWrapper(
+                sys.stdout.detach(), encoding, errors, newline, line_buffering)
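+
+    # Illustrative sketch of the re-wrapping idiom above: detaching the
+    # buffer lets the same underlying stream be re-wrapped with a different
+    # encoding, e.g.
+    #     sys.stdout = io.TextIOWrapper(sys.stdout.detach(), 'utf-8')
+    # and wrapped back with the original parameters afterwards.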
+
+
+class Feature:
+    """
+    **deprecated** -- The `Feature` facility was never completely implemented
+    or supported, `has reported issues
+    <https://github.com/pypa/setuptools/issues/58>`_, and will be removed in
+    a future version.
+
+    A subset of the distribution that can be excluded if unneeded/wanted
+
+    Features are created using these keyword arguments:
+
+      'description' -- a short, human readable description of the feature, to
+         be used in error messages, and option help messages.
+
+      'standard' -- if true, the feature is included by default if it is
+         available on the current system.  Otherwise, the feature is only
+         included if requested via a command line '--with-X' option, or if
+         another included feature requires it.  The default setting is 'False'.
+
+      'available' -- if true, the feature is available for installation on the
+         current system.  The default setting is 'True'.
+
+      'optional' -- if true, the feature's inclusion can be controlled from the
+         command line, using the '--with-X' or '--without-X' options.  If
+         false, the feature's inclusion status is determined automatically,
+         based on 'available', 'standard', and whether any other feature
+         requires it.  The default setting is 'True'.
+
+      'require_features' -- a string or sequence of strings naming features
+         that should also be included if this feature is included.  Defaults to
+         empty list.  May also contain 'Require' objects that should be
+         added/removed from the distribution.
+
+      'remove' -- a string or list of strings naming packages to be removed
+         from the distribution if this feature is *not* included.  If the
+         feature *is* included, this argument is ignored.  This argument exists
+         to support removing features that "crosscut" a distribution, such as
+         defining a 'tests' feature that removes all the 'tests' subpackages
+         provided by other features.  The default for this argument is an empty
+         list.  (Note: the named package(s) or modules must exist in the base
+         distribution when the 'setup()' function is initially called.)
+
+      other keywords -- any other keyword arguments are saved, and passed to
+         the distribution's 'include()' and 'exclude()' methods when the
+         feature is included or excluded, respectively.  So, for example, you
+         could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be
+         added or removed from the distribution as appropriate.
+
+    A feature must include at least one 'require_features', 'remove', or
+    other keyword argument.  Otherwise, it can't affect the distribution in
+    any way.
+    Note also that you can subclass 'Feature' to create your own specialized
+    feature types that modify the distribution in other ways when included or
+    excluded.  See the docstrings for the various methods here for more detail.
+    Aside from the methods, the only feature attributes that distributions look
+    at are 'description' and 'optional'.
+    """
+
+    @staticmethod
+    def warn_deprecated():
+        msg = (
+            "Features are deprecated and will be removed in a future "
+            "version. See https://github.com/pypa/setuptools/issues/65."
+        )
+        warnings.warn(msg, DistDeprecationWarning, stacklevel=3)
+
+    def __init__(
+            self, description, standard=False, available=True,
+            optional=True, require_features=(), remove=(), **extras):
+        self.warn_deprecated()
+
+        self.description = description
+        self.standard = standard
+        self.available = available
+        self.optional = optional
+        if isinstance(require_features, (str, Require)):
+            require_features = require_features,
+
+        self.require_features = [
+            r for r in require_features if isinstance(r, str)
+        ]
+        er = [r for r in require_features if not isinstance(r, str)]
+        if er:
+            extras['require_features'] = er
+
+        if isinstance(remove, str):
+            remove = remove,
+        self.remove = remove
+        self.extras = extras
+
+        if not remove and not require_features and not extras:
+            raise DistutilsSetupError(
+                "Feature %s: must define 'require_features', 'remove', or "
+                "at least one of 'packages', 'py_modules', etc."
+            )
+
+    def include_by_default(self):
+        """Should this feature be included by default?"""
+        return self.available and self.standard
+
+    def include_in(self, dist):
+        """Ensure feature and its requirements are included in distribution
+
+        You may override this in a subclass to perform additional operations on
+        the distribution.  Note that this method may be called more than once
+        per feature, and so should be idempotent.
+
+        """
+
+        if not self.available:
+            raise DistutilsPlatformError(
+                self.description + " is required, "
+                "but is not available on this platform"
+            )
+
+        dist.include(**self.extras)
+
+        for f in self.require_features:
+            dist.include_feature(f)
+
+    def exclude_from(self, dist):
+        """Ensure feature is excluded from distribution
+
+        You may override this in a subclass to perform additional operations on
+        the distribution.  This method will be called at most once per
+        feature, and only after all included features have been asked to
+        include themselves.
+        """
+
+        dist.exclude(**self.extras)
+
+        if self.remove:
+            for item in self.remove:
+                dist.exclude_package(item)
+
+    def validate(self, dist):
+        """Verify that feature makes sense in context of distribution
+
+        This method is called by the distribution just before it parses its
+        command line.  It checks to ensure that the 'remove' attribute, if any,
+        contains only valid package/module names that are present in the base
+        distribution when 'setup()' is called.  You may override it in a
+        subclass to perform any other required validation of the feature
+        against a target distribution.
+        """
+
+        for item in self.remove:
+            if not dist.has_contents_for(item):
+                raise DistutilsSetupError(
+                    "%s wants to be able to remove %s, but the distribution"
+                    " doesn't contain any packages or modules under %s"
+                    % (self.description, item, item)
+                )
+
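+# Illustrative sketch of the (deprecated) Feature API, assuming a project
+# with an optional C extension named '_speedups':
+#
+#     setup(
+#         ...,
+#         features={
+#             'speedups': Feature(
+#                 "optional C speedups", standard=True,
+#                 ext_modules=[Extension('_speedups', ['_speedups.c'])],
+#             ),
+#         },
+#     )
+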
+
+class DistDeprecationWarning(SetuptoolsDeprecationWarning):
+    """Class for warning about deprecations in dist in
+    setuptools. Not ignored by default, unlike DeprecationWarning."""
diff --git a/lib/python3.7/site-packages/setuptools/extension.py b/lib/python3.7/site-packages/setuptools/extension.py
new file mode 100644
index 0000000000000000000000000000000000000000..29468894f828128f4c36660167dd1f9e68e584be
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/extension.py
@@ -0,0 +1,57 @@
+import re
+import functools
+import distutils.core
+import distutils.errors
+import distutils.extension
+
+from setuptools.extern.six.moves import map
+
+from .monkey import get_unpatched
+
+
+def _have_cython():
+    """
+    Return True if Cython can be imported.
+    """
+    cython_impl = 'Cython.Distutils.build_ext'
+    try:
+        # from (cython_impl) import build_ext
+        __import__(cython_impl, fromlist=['build_ext']).build_ext
+        return True
+    except Exception:
+        pass
+    return False
+
+
+# for compatibility
+have_pyrex = _have_cython
+
+_Extension = get_unpatched(distutils.core.Extension)
+
+
+class Extension(_Extension):
+    """Extension that uses '.c' files in place of '.pyx' files"""
+
+    def __init__(self, name, sources, *args, **kw):
+        # The *args is needed for compatibility as calls may use positional
+        # arguments. py_limited_api may be set only via keyword.
+        self.py_limited_api = kw.pop("py_limited_api", False)
+        _Extension.__init__(self, name, sources, *args, **kw)
+
+    def _convert_pyx_sources_to_lang(self):
+        """
+        Replace sources with .pyx extensions to sources with the target
+        language extension. This mechanism allows language authors to supply
+        pre-converted sources but to prefer the .pyx sources.
+        """
+        if _have_cython():
+            # the build has Cython, so allow it to compile the .pyx files
+            return
+        lang = self.language or ''
+        target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
+        sub = functools.partial(re.sub, r'\.pyx$', target_ext)
+        self.sources = list(map(sub, self.sources))
+
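+# Illustrative sketch: without Cython installed,
+#     Extension('pkg.fast', ['pkg/fast.pyx'], language='c++')
+# has its sources rewritten by _convert_pyx_sources_to_lang() to
+# ['pkg/fast.cpp'], so pre-generated C++ sources are picked up.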
+
+class Library(Extension):
+    """Just like a regular Extension, but built as a library instead"""
diff --git a/lib/python3.7/site-packages/setuptools/extern/__init__.py b/lib/python3.7/site-packages/setuptools/extern/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb2fa329272be7eb62a4a1eaf01e0e4288a7287c
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/extern/__init__.py
@@ -0,0 +1,73 @@
+import sys
+
+
+class VendorImporter:
+    """
+    A PEP 302 meta path importer for finding optionally-vendored
+    or otherwise naturally-installed packages from root_name.
+    """
+
+    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
+        self.root_name = root_name
+        self.vendored_names = set(vendored_names)
+        self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
+
+    @property
+    def search_path(self):
+        """
+        Search first the vendor package then as a natural package.
+        """
+        yield self.vendor_pkg + '.'
+        yield ''
+
+    def find_module(self, fullname, path=None):
+        """
+        Return self when fullname starts with root_name and the
+        target module is one vendored through this importer.
+        """
+        root, base, target = fullname.partition(self.root_name + '.')
+        if root:
+            return
+        if not any(map(target.startswith, self.vendored_names)):
+            return
+        return self
+
+    def load_module(self, fullname):
+        """
+        Iterate over the search path to locate and load fullname.
+        """
+        root, base, target = fullname.partition(self.root_name + '.')
+        for prefix in self.search_path:
+            try:
+                extant = prefix + target
+                __import__(extant)
+                mod = sys.modules[extant]
+                sys.modules[fullname] = mod
+                # mysterious hack:
+                # Remove the reference to the extant package/module
+                # on later Python versions to cause relative imports
+                # in the vendor package to resolve the same modules
+                # as those going through this importer.
+                if sys.version_info >= (3, ):
+                    del sys.modules[extant]
+                return mod
+            except ImportError:
+                pass
+        else:
+            raise ImportError(
+                "The '{target}' package is required; "
+                "normally this is bundled with this package so if you get "
+                "this warning, consult the packager of your "
+                "distribution.".format(**locals())
+            )
+
+    def install(self):
+        """
+        Install this importer into sys.meta_path if not already present.
+        """
+        if self not in sys.meta_path:
+            sys.meta_path.append(self)
+
+
+names = 'six', 'packaging', 'pyparsing',
+VendorImporter(__name__, names, 'setuptools._vendor').install()
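+
+# Illustrative sketch: after install(), an import such as
+#     from setuptools.extern import six
+# resolves to the bundled copy ('setuptools._vendor.six') first and falls
+# back to a site-wide 'six' if no vendored copy is present.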
diff --git a/lib/python3.7/site-packages/setuptools/extern/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/setuptools/extern/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..75366322ecc7bcc14bc4e5299c7fccf3343a1bc5
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/extern/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/setuptools/glibc.py b/lib/python3.7/site-packages/setuptools/glibc.py
new file mode 100644
index 0000000000000000000000000000000000000000..a134591c30547300e0e55c6898a3a1590b3f0171
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/glibc.py
@@ -0,0 +1,86 @@
+# This file originally from pip:
+# https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/utils/glibc.py
+from __future__ import absolute_import
+
+import ctypes
+import re
+import warnings
+
+
+def glibc_version_string():
+    "Returns glibc version string, or None if not using glibc."
+
+    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+    # manpage says, "If filename is NULL, then the returned handle is for the
+    # main program". This way we can let the linker do the work to figure out
+    # which libc our process is actually using.
+    process_namespace = ctypes.CDLL(None)
+    try:
+        gnu_get_libc_version = process_namespace.gnu_get_libc_version
+    except AttributeError:
+        # Symbol doesn't exist -> therefore, we are not linked to
+        # glibc.
+        return None
+
+    # Call gnu_get_libc_version, which returns a string like "2.5"
+    gnu_get_libc_version.restype = ctypes.c_char_p
+    version_str = gnu_get_libc_version()
+    # py2 / py3 compatibility:
+    if not isinstance(version_str, str):
+        version_str = version_str.decode("ascii")
+
+    return version_str
+
+
+# Separated out from have_compatible_glibc for easier unit testing
+def check_glibc_version(version_str, required_major, minimum_minor):
+    # Parse string and check against requested version.
+    #
+    # We use a regexp instead of str.split because we want to discard any
+    # random junk that might come after the minor version -- this might happen
+    # in patched/forked versions of glibc (e.g. Linaro's version of glibc
+    # uses version strings like "2.20-2014.11"). See gh-3588.
+    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+    if not m:
+        warnings.warn("Expected glibc version with 2 components major.minor,"
+                      " got: %s" % version_str, RuntimeWarning)
+        return False
+    return (int(m.group("major")) == required_major and
+            int(m.group("minor")) >= minimum_minor)
+
+
+def have_compatible_glibc(required_major, minimum_minor):
+    version_str = glibc_version_string()
+    if version_str is None:
+        return False
+    return check_glibc_version(version_str, required_major, minimum_minor)
+
+
+# platform.libc_ver regularly returns completely nonsensical glibc
+# versions. E.g. on my computer, platform says:
+#
+#   ~$ python2.7 -c 'import platform; print(platform.libc_ver())'
+#   ('glibc', '2.7')
+#   ~$ python3.5 -c 'import platform; print(platform.libc_ver())'
+#   ('glibc', '2.9')
+#
+# But the truth is:
+#
+#   ~$ ldd --version
+#   ldd (Debian GLIBC 2.22-11) 2.22
+#
+# This is unfortunate, because it means that the linehaul data on libc
+# versions that was generated by pip 8.1.2 and earlier is useless and
+# misleading. Solution: instead of using platform, use our code that actually
+# works.
+def libc_ver():
+    """Try to determine the glibc version
+
+    Returns a tuple of strings (lib, version) which default to empty strings
+    in case the lookup fails.
+    """
+    glibc_version = glibc_version_string()
+    if glibc_version is None:
+        return ("", "")
+    else:
+        return ("glibc", glibc_version)
diff --git a/lib/python3.7/site-packages/setuptools/glob.py b/lib/python3.7/site-packages/setuptools/glob.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d7cbc5da68da8605d271b9314befb206b87bca6
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/glob.py
@@ -0,0 +1,174 @@
+"""
+Filename globbing utility. Mostly a copy of `glob` from Python 3.5.
+
+Changes include:
+ * `yield from` and PEP3102 `*` removed.
+ * Hidden files are not ignored.
+"""
+
+import os
+import re
+import fnmatch
+
+__all__ = ["glob", "iglob", "escape"]
+
+
+def glob(pathname, recursive=False):
+    """Return a list of paths matching a pathname pattern.
+
+    The pattern may contain simple shell-style wildcards a la
+    fnmatch. Unlike the stdlib glob, filenames starting with a
+    dot are not special-cased here: they are matched by '*' and
+    '?' patterns.
+
+    If recursive is true, the pattern '**' will match any files and
+    zero or more directories and subdirectories.
+    """
+    return list(iglob(pathname, recursive=recursive))
+
+
+def iglob(pathname, recursive=False):
+    """Return an iterator which yields the paths matching a pathname pattern.
+
+    The pattern may contain simple shell-style wildcards a la
+    fnmatch. Unlike the stdlib glob, filenames starting with a
+    dot are not special-cased here: they are matched by '*' and
+    '?' patterns.
+
+    If recursive is true, the pattern '**' will match any files and
+    zero or more directories and subdirectories.
+    """
+    it = _iglob(pathname, recursive)
+    if recursive and _isrecursive(pathname):
+        s = next(it)  # skip empty string
+        assert not s
+    return it
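+
+# Illustrative sketch: glob('src/**/*.py', recursive=True) matches '.py'
+# files directly in 'src' and in all of its subdirectories; unlike the
+# stdlib glob, hidden files are matched as well.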
+
+
+def _iglob(pathname, recursive):
+    dirname, basename = os.path.split(pathname)
+    if not has_magic(pathname):
+        if basename:
+            if os.path.lexists(pathname):
+                yield pathname
+        else:
+            # Patterns ending with a slash should match only directories
+            if os.path.isdir(dirname):
+                yield pathname
+        return
+    if not dirname:
+        if recursive and _isrecursive(basename):
+            for x in glob2(dirname, basename):
+                yield x
+        else:
+            for x in glob1(dirname, basename):
+                yield x
+        return
+    # `os.path.split()` returns the argument itself as a dirname if it is a
+    # drive or UNC path.  Prevent an infinite recursion if a drive or UNC path
+    # contains magic characters (i.e. r'\\?\C:').
+    if dirname != pathname and has_magic(dirname):
+        dirs = _iglob(dirname, recursive)
+    else:
+        dirs = [dirname]
+    if has_magic(basename):
+        if recursive and _isrecursive(basename):
+            glob_in_dir = glob2
+        else:
+            glob_in_dir = glob1
+    else:
+        glob_in_dir = glob0
+    for dirname in dirs:
+        for name in glob_in_dir(dirname, basename):
+            yield os.path.join(dirname, name)
+
+
+# These 2 helper functions non-recursively glob inside a literal directory.
+# They return a list of basenames. `glob1` accepts a pattern while `glob0`
+# takes a literal basename (so it only has to check for its existence).
+
+
+def glob1(dirname, pattern):
+    if not dirname:
+        if isinstance(pattern, bytes):
+            dirname = os.curdir.encode('ASCII')
+        else:
+            dirname = os.curdir
+    try:
+        names = os.listdir(dirname)
+    except OSError:
+        return []
+    return fnmatch.filter(names, pattern)
+
+
+def glob0(dirname, basename):
+    if not basename:
+        # `os.path.split()` returns an empty basename for paths ending with a
+        # directory separator.  'q*x/' should match only directories.
+        if os.path.isdir(dirname):
+            return [basename]
+    else:
+        if os.path.lexists(os.path.join(dirname, basename)):
+            return [basename]
+    return []
+
+
+# This helper function recursively yields relative pathnames inside a literal
+# directory.
+
+
+def glob2(dirname, pattern):
+    assert _isrecursive(pattern)
+    yield pattern[:0]
+    for x in _rlistdir(dirname):
+        yield x
+
+
+# Recursively yields relative pathnames inside a literal directory.
+def _rlistdir(dirname):
+    if not dirname:
+        if isinstance(dirname, bytes):
+            dirname = os.curdir.encode('ASCII')
+        else:
+            dirname = os.curdir
+    try:
+        names = os.listdir(dirname)
+    except OSError:
+        return
+    for x in names:
+        yield x
+        path = os.path.join(dirname, x) if dirname else x
+        for y in _rlistdir(path):
+            yield os.path.join(x, y)
+
+
+magic_check = re.compile('([*?[])')
+magic_check_bytes = re.compile(b'([*?[])')
+
+
+def has_magic(s):
+    if isinstance(s, bytes):
+        match = magic_check_bytes.search(s)
+    else:
+        match = magic_check.search(s)
+    return match is not None
+
+
+def _isrecursive(pattern):
+    if isinstance(pattern, bytes):
+        return pattern == b'**'
+    else:
+        return pattern == '**'
+
+
+def escape(pathname):
+    """Escape all special characters.
+    """
+    # Escaping is done by wrapping any of "*?[" between square brackets.
+    # Metacharacters do not work in the drive part and shouldn't be escaped.
+    drive, pathname = os.path.splitdrive(pathname)
+    if isinstance(pathname, bytes):
+        pathname = magic_check_bytes.sub(br'[\1]', pathname)
+    else:
+        pathname = magic_check.sub(r'[\1]', pathname)
+    return drive + pathname
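+
+# Illustrative doctest-style sketch:
+#     >>> escape('backup[2020]*.txt')
+#     'backup[[]2020][*].txt'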
diff --git a/lib/python3.7/site-packages/setuptools/gui-32.exe b/lib/python3.7/site-packages/setuptools/gui-32.exe
new file mode 100644
index 0000000000000000000000000000000000000000..f8d3509653ba8f80ca7f3aa7f95616142ba83a94
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/gui-32.exe differ
diff --git a/lib/python3.7/site-packages/setuptools/gui-64.exe b/lib/python3.7/site-packages/setuptools/gui-64.exe
new file mode 100644
index 0000000000000000000000000000000000000000..330c51a5dde15a0bb610a48cd0ca11770c914dae
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/gui-64.exe differ
diff --git a/lib/python3.7/site-packages/setuptools/gui.exe b/lib/python3.7/site-packages/setuptools/gui.exe
new file mode 100644
index 0000000000000000000000000000000000000000..f8d3509653ba8f80ca7f3aa7f95616142ba83a94
Binary files /dev/null and b/lib/python3.7/site-packages/setuptools/gui.exe differ
diff --git a/lib/python3.7/site-packages/setuptools/launch.py b/lib/python3.7/site-packages/setuptools/launch.py
new file mode 100644
index 0000000000000000000000000000000000000000..308283ea939ed9bced7b099eb8a1879aa9c203d4
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/launch.py
@@ -0,0 +1,35 @@
+"""
+Launch the Python script on the command line after
+setuptools is bootstrapped via import.
+"""
+
+# Note that setuptools gets imported implicitly by the
+# invocation of this script using python -m setuptools.launch
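+#
+# Illustrative usage:
+#     python -m setuptools.launch some_script.py --any --args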
+
+import tokenize
+import sys
+
+
+def run():
+    """
+    Run the script in sys.argv[1] as if it had
+    been invoked naturally.
+    """
+    __builtins__
+    script_name = sys.argv[1]
+    namespace = dict(
+        __file__=script_name,
+        __name__='__main__',
+        __doc__=None,
+    )
+    sys.argv[:] = sys.argv[1:]
+
+    open_ = getattr(tokenize, 'open', open)
+    script = open_(script_name).read()
+    norm_script = script.replace('\\r\\n', '\\n')
+    code = compile(norm_script, script_name, 'exec')
+    exec(code, namespace)
+
+
+if __name__ == '__main__':
+    run()
diff --git a/lib/python3.7/site-packages/setuptools/lib2to3_ex.py b/lib/python3.7/site-packages/setuptools/lib2to3_ex.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b1a73feb26fdad65bafdeb21f5ce6abfb905fc0
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/lib2to3_ex.py
@@ -0,0 +1,62 @@
+"""
+Customized Mixin2to3 support:
+
+ - adds support for converting doctests
+
+
+This module raises an ImportError on Python 2.
+"""
+
+from distutils.util import Mixin2to3 as _Mixin2to3
+from distutils import log
+from lib2to3.refactor import RefactoringTool, get_fixers_from_package
+
+import setuptools
+
+
+class DistutilsRefactoringTool(RefactoringTool):
+    def log_error(self, msg, *args, **kw):
+        log.error(msg, *args)
+
+    def log_message(self, msg, *args):
+        log.info(msg, *args)
+
+    def log_debug(self, msg, *args):
+        log.debug(msg, *args)
+
+
+class Mixin2to3(_Mixin2to3):
+    def run_2to3(self, files, doctests=False):
+        # See if the distribution option has been set; otherwise check the
+        # setuptools default.
+        if self.distribution.use_2to3 is not True:
+            return
+        if not files:
+            return
+        log.info("Fixing " + " ".join(files))
+        self.__build_fixer_names()
+        self.__exclude_fixers()
+        if doctests:
+            if setuptools.run_2to3_on_doctests:
+                r = DistutilsRefactoringTool(self.fixer_names)
+                r.refactor(files, write=True, doctests_only=True)
+        else:
+            _Mixin2to3.run_2to3(self, files)
+
+    def __build_fixer_names(self):
+        if self.fixer_names:
+            return
+        self.fixer_names = []
+        for p in setuptools.lib2to3_fixer_packages:
+            self.fixer_names.extend(get_fixers_from_package(p))
+        if self.distribution.use_2to3_fixers is not None:
+            for p in self.distribution.use_2to3_fixers:
+                self.fixer_names.extend(get_fixers_from_package(p))
+
+    def __exclude_fixers(self):
+        excluded_fixers = getattr(self, 'exclude_fixers', [])
+        if self.distribution.use_2to3_exclude_fixers is not None:
+            excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
+        for fixer_name in excluded_fixers:
+            if fixer_name in self.fixer_names:
+                self.fixer_names.remove(fixer_name)
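+
+
+# Illustrative sketch: these hooks are driven by the (long-deprecated)
+# setup() keywords, e.g.
+#     setup(..., use_2to3=True,
+#           use_2to3_fixers=['myproject.custom_fixers'],
+#           use_2to3_exclude_fixers=['lib2to3.fixes.fix_print'])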
diff --git a/lib/python3.7/site-packages/setuptools/monkey.py b/lib/python3.7/site-packages/setuptools/monkey.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c77f8cf27f0ab1e71d64cfc114ef9d1bf72295c
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/monkey.py
@@ -0,0 +1,179 @@
+"""
+Monkey patching of distutils.
+"""
+
+import sys
+import distutils.filelist
+import platform
+import types
+import functools
+from importlib import import_module
+import inspect
+
+from setuptools.extern import six
+
+import setuptools
+
+__all__ = []
+"""
+Everything is private. Contact the project team
+if you think you need this functionality.
+"""
+
+
+def _get_mro(cls):
+    """
+    Returns the base classes for cls sorted by the MRO.
+
+    Works around an issue on Jython where inspect.getmro will not return all
+    base classes if multiple classes share the same name. Instead, this
+    function will return a tuple containing the class itself, and the contents
+    of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
+    """
+    if platform.python_implementation() == "Jython":
+        return (cls,) + cls.__bases__
+    return inspect.getmro(cls)
+
+
+def get_unpatched(item):
+    lookup = (
+        get_unpatched_class if isinstance(item, six.class_types) else
+        get_unpatched_function if isinstance(item, types.FunctionType) else
+        lambda item: None
+    )
+    return lookup(item)
+
+
+def get_unpatched_class(cls):
+    """Protect against re-patching the distutils if reloaded
+
+    Also ensures that no other distutils extension monkeypatched the distutils
+    first.
+    """
+    external_bases = (
+        cls
+        for cls in _get_mro(cls)
+        if not cls.__module__.startswith('setuptools')
+    )
+    base = next(external_bases)
+    if not base.__module__.startswith('distutils'):
+        msg = "distutils has already been patched by %r" % cls
+        raise AssertionError(msg)
+    return base
+
+
+def patch_all():
+    # we can't patch distutils.cmd, alas
+    distutils.core.Command = setuptools.Command
+
+    has_issue_12885 = sys.version_info <= (3, 5, 3)
+
+    if has_issue_12885:
+        # fix findall bug in distutils (http://bugs.python.org/issue12885)
+        distutils.filelist.findall = setuptools.findall
+
+    needs_warehouse = (
+        sys.version_info < (2, 7, 13)
+        or
+        (3, 4) < sys.version_info < (3, 4, 6)
+        or
+        (3, 5) < sys.version_info <= (3, 5, 3)
+    )
+
+    if needs_warehouse:
+        warehouse = 'https://upload.pypi.org/legacy/'
+        distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
+
+    _patch_distribution_metadata()
+
+    # Install Distribution throughout the distutils
+    for module in distutils.dist, distutils.core, distutils.cmd:
+        module.Distribution = setuptools.dist.Distribution
+
+    # Install the patched Extension
+    distutils.core.Extension = setuptools.extension.Extension
+    distutils.extension.Extension = setuptools.extension.Extension
+    if 'distutils.command.build_ext' in sys.modules:
+        sys.modules['distutils.command.build_ext'].Extension = (
+            setuptools.extension.Extension
+        )
+
+    patch_for_msvc_specialized_compiler()
+
+
+def _patch_distribution_metadata():
+    """Patch write_pkg_file and read_pkg_file for higher metadata standards"""
+    for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'):
+        new_val = getattr(setuptools.dist, attr)
+        setattr(distutils.dist.DistributionMetadata, attr, new_val)
+
+
+def patch_func(replacement, target_mod, func_name):
+    """
+    Patch func_name in target_mod with replacement
+
+    Important - original must be resolved by name to avoid
+    patching an already patched function.
+    """
+    original = getattr(target_mod, func_name)
+
+    # set the 'unpatched' attribute on the replacement to
+    # point to the original.
+    vars(replacement).setdefault('unpatched', original)
+
+    # replace the function in the original module
+    setattr(target_mod, func_name, replacement)
+
+
+def get_unpatched_function(candidate):
+    return getattr(candidate, 'unpatched')
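+
+# Illustrative sketch: given a module 'mod' defining 'func',
+#     patch_func(replacement, mod, 'func')
+# swaps in 'replacement' while recording the original, so that
+#     get_unpatched_function(mod.func)
+# returns the pre-patch implementation.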
+
+
+def patch_for_msvc_specialized_compiler():
+    """
+    Patch functions in distutils to use standalone Microsoft Visual C++
+    compilers.
+    """
+    # import late to avoid circular imports on Python < 3.5
+    msvc = import_module('setuptools.msvc')
+
+    if platform.system() != 'Windows':
+        # These compilers are only available on Microsoft Windows
+        return
+
+    def patch_params(mod_name, func_name):
+        """
+        Prepare the parameters for patch_func to patch indicated function.
+        """
+        repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
+        repl_name = repl_prefix + func_name.lstrip('_')
+        repl = getattr(msvc, repl_name)
+        mod = import_module(mod_name)
+        if not hasattr(mod, func_name):
+            raise ImportError(func_name)
+        return repl, mod, func_name
+
+    # Python 2.7 to 3.4
+    msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
+
+    # Python 3.5+
+    msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
+
+    try:
+        # Patch distutils.msvc9compiler
+        patch_func(*msvc9('find_vcvarsall'))
+        patch_func(*msvc9('query_vcvarsall'))
+    except ImportError:
+        pass
+
+    try:
+        # Patch distutils._msvccompiler._get_vc_env
+        patch_func(*msvc14('_get_vc_env'))
+    except ImportError:
+        pass
+
+    try:
+        # Patch distutils._msvccompiler.gen_lib_options for Numpy
+        patch_func(*msvc14('gen_lib_options'))
+    except ImportError:
+        pass
diff --git a/lib/python3.7/site-packages/setuptools/msvc.py b/lib/python3.7/site-packages/setuptools/msvc.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9c472f14f876ecef6ca2541047d9ce9ec3ca0b8
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/msvc.py
@@ -0,0 +1,1301 @@
+"""
+Improved support for Microsoft Visual C++ compilers.
+
+Known supported compilers:
+--------------------------
+Microsoft Visual C++ 9.0:
+    Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
+    Microsoft Windows SDK 6.1 (x86, x64, ia64)
+    Microsoft Windows SDK 7.0 (x86, x64, ia64)
+
+Microsoft Visual C++ 10.0:
+    Microsoft Windows SDK 7.1 (x86, x64, ia64)
+
+Microsoft Visual C++ 14.0:
+    Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
+    Microsoft Visual Studio 2017 (x86, x64, arm, arm64)
+    Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64)
+"""
+
+import os
+import sys
+import platform
+import itertools
+import distutils.errors
+from setuptools.extern.packaging.version import LegacyVersion
+
+from setuptools.extern.six.moves import filterfalse
+
+from .monkey import get_unpatched
+
+if platform.system() == 'Windows':
+    from setuptools.extern.six.moves import winreg
+    safe_env = os.environ
+else:
+    """
+    Mock winreg and environ so the module can be imported
+    on this platform.
+    """
+
+    class winreg:
+        HKEY_USERS = None
+        HKEY_CURRENT_USER = None
+        HKEY_LOCAL_MACHINE = None
+        HKEY_CLASSES_ROOT = None
+
+    safe_env = dict()
+
+_msvc9_suppress_errors = (
+    # msvc9compiler isn't available on some platforms
+    ImportError,
+
+    # msvc9compiler raises DistutilsPlatformError in some
+    # environments. See #1118.
+    distutils.errors.DistutilsPlatformError,
+)
+
+try:
+    from distutils.msvc9compiler import Reg
+except _msvc9_suppress_errors:
+    pass
+
+
+def msvc9_find_vcvarsall(version):
+    """
+    Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone
+    compiler build for Python (VCForPython). Fall back to original behavior
+    when the standalone compiler is not available.
+
+    Redirect the path of "vcvarsall.bat".
+
+    Known supported compilers
+    -------------------------
+    Microsoft Visual C++ 9.0:
+        Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
+
+    Parameters
+    ----------
+    version: float
+        Required Microsoft Visual C++ version.
+
+    Return
+    ------
+    vcvarsall.bat path: str
+    """
+    VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
+    key = VC_BASE % ('', version)
+    try:
+        # Per-user installs register the compiler path here
+        productdir = Reg.get_value(key, "installdir")
+    except KeyError:
+        try:
+            # All-user installs on a 64-bit system register here
+            key = VC_BASE % ('Wow6432Node\\', version)
+            productdir = Reg.get_value(key, "installdir")
+        except KeyError:
+            productdir = None
+
+    if productdir:
+        vcvarsall = os.path.join(productdir, "vcvarsall.bat")
+        if os.path.isfile(vcvarsall):
+            return vcvarsall
+
+    return get_unpatched(msvc9_find_vcvarsall)(version)
+
+
+def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):
+    """
+    Patched "distutils.msvc9compiler.query_vcvarsall" for support extra
+    compilers.
+
+    Set environment without use of "vcvarsall.bat".
+
+    Known supported compilers
+    -------------------------
+    Microsoft Visual C++ 9.0:
+        Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
+        Microsoft Windows SDK 6.1 (x86, x64, ia64)
+        Microsoft Windows SDK 7.0 (x86, x64, ia64)
+
+    Microsoft Visual C++ 10.0:
+        Microsoft Windows SDK 7.1 (x86, x64, ia64)
+
+    Parameters
+    ----------
+    ver: float
+        Required Microsoft Visual C++ version.
+    arch: str
+        Target architecture.
+
+    Return
+    ------
+    environment: dict
+    """
+    # Try to get the environment from vcvarsall.bat (classical way)
+    try:
+        orig = get_unpatched(msvc9_query_vcvarsall)
+        return orig(ver, arch, *args, **kwargs)
+    except distutils.errors.DistutilsPlatformError:
+        # Pass error if Vcvarsall.bat is missing
+        pass
+    except ValueError:
+        # Pass error if environment not set after executing vcvarsall.bat
+        pass
+
+    # If error, try to set environment directly
+    try:
+        return EnvironmentInfo(arch, ver).return_env()
+    except distutils.errors.DistutilsPlatformError as exc:
+        _augment_exception(exc, ver, arch)
+        raise
+
+
+def msvc14_get_vc_env(plat_spec):
+    """
+    Patched "distutils._msvccompiler._get_vc_env" for support extra
+    compilers.
+
+    Set environment without use of "vcvarsall.bat".
+
+    Known supported compilers
+    -------------------------
+    Microsoft Visual C++ 14.0:
+        Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
+        Microsoft Visual Studio 2017 (x86, x64, arm, arm64)
+        Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64)
+
+    Parameters
+    ----------
+    plat_spec: str
+        Target architecture.
+
+    Return
+    ------
+    environment: dict
+    """
+    # Try to get the environment from vcvarsall.bat (classical way)
+    try:
+        return get_unpatched(msvc14_get_vc_env)(plat_spec)
+    except distutils.errors.DistutilsPlatformError:
+        # Pass error if vcvarsall.bat is missing
+        pass
+
+    # If error, try to set environment directly
+    try:
+        return EnvironmentInfo(plat_spec, vc_min_ver=14.0).return_env()
+    except distutils.errors.DistutilsPlatformError as exc:
+        _augment_exception(exc, 14.0)
+        raise
+
+
+def msvc14_gen_lib_options(*args, **kwargs):
+    """
+    Patched "distutils._msvccompiler.gen_lib_options" for fix
+    compatibility between "numpy.distutils" and "distutils._msvccompiler"
+    (for Numpy < 1.11.2)
+    """
+    if "numpy.distutils" in sys.modules:
+        import numpy as np
+        if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'):
+            return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)
+    return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
+
+
+def _augment_exception(exc, version, arch=''):
+    """
+    Add details to the exception message to help guide the user
+    as to what action will resolve it.
+    """
+    # Error if MSVC++ directory not found or environment not set
+    message = exc.args[0]
+
+    if "vcvarsall" in message.lower() or "visual c" in message.lower():
+        # Special error message if MSVC++ not installed
+        tmpl = 'Microsoft Visual C++ {version:0.1f} is required.'
+        message = tmpl.format(**locals())
+        msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
+        if version == 9.0:
+            if arch.lower().find('ia64') > -1:
+                # For VC++ 9.0, if IA64 support is needed, redirect user
+                # to Windows SDK 7.0
+                message += ' Get it with "Microsoft Windows SDK 7.0": '
+                message += msdownload % 3138
+            else:
+                # For VC++ 9.0 redirect user to Vc++ for Python 2.7 :
+                # This redirection link is maintained by Microsoft.
+                # Contact vspython@microsoft.com if it needs updating.
+                message += ' Get it from http://aka.ms/vcpython27'
+        elif version == 10.0:
+            # For VC++ 10.0 Redirect user to Windows SDK 7.1
+            message += ' Get it with "Microsoft Windows SDK 7.1": '
+            message += msdownload % 8279
+        elif version >= 14.0:
+            # For VC++ 14.0 Redirect user to Visual C++ Build Tools
+            message += (' Get it with "Microsoft Visual C++ Build Tools": '
+                        r'https://visualstudio.microsoft.com/downloads/')
+
+    exc.args = (message, )
+
+
+class PlatformInfo:
+    """
+    Current and target architecture information.
+
+    Parameters
+    ----------
+    arch: str
+        Target architecture.
+    """
+    current_cpu = safe_env.get('processor_architecture', '').lower()
+
+    def __init__(self, arch):
+        self.arch = arch.lower().replace('x64', 'amd64')
+
+    @property
+    def target_cpu(self):
+        return self.arch[self.arch.find('_') + 1:]
+
+    def target_is_x86(self):
+        return self.target_cpu == 'x86'
+
+    def current_is_x86(self):
+        return self.current_cpu == 'x86'
+
+    def current_dir(self, hidex86=False, x64=False):
+        """
+        Current platform specific subfolder.
+
+        Parameters
+        ----------
+        hidex86: bool
+            return '' and not '\x86' if architecture is x86.
+        x64: bool
+            return '\x64' and not '\amd64' if architecture is amd64.
+
+        Return
+        ------
+        subfolder: str
+            '\target', or '' (see hidex86 parameter)
+        """
+        return (
+            '' if (self.current_cpu == 'x86' and hidex86) else
+            r'\x64' if (self.current_cpu == 'amd64' and x64) else
+            r'\%s' % self.current_cpu
+        )
+
+    def target_dir(self, hidex86=False, x64=False):
+        r"""
+        Target platform specific subfolder.
+
+        Parameters
+        ----------
+        hidex86: bool
+            return '' and not '\x86' if architecture is x86.
+        x64: bool
+            return '\x64' and not '\amd64' if architecture is amd64.
+
+        Return
+        ------
+        subfolder: str
+            '\target', or '' (see hidex86 parameter)
+        """
+        return (
+            '' if (self.target_cpu == 'x86' and hidex86) else
+            r'\x64' if (self.target_cpu == 'amd64' and x64) else
+            r'\%s' % self.target_cpu
+        )
+
+    def cross_dir(self, forcex86=False):
+        r"""
+        Cross platform specific subfolder.
+
+        Parameters
+        ----------
+        forcex86: bool
+            Use 'x86' as current architecture even if the current
+            architecture is not x86.
+
+        Return
+        ------
+        subfolder: str
+            '' if target architecture is current architecture,
+            '\current_target' if not.
+        """
+        current = 'x86' if forcex86 else self.current_cpu
+        return (
+            '' if self.target_cpu == current else
+            self.target_dir().replace('\\', '\\%s_' % current)
+        )
+
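+# Illustrative sketch: for a cross-build from a 32-bit host,
+#     pi = PlatformInfo('x86_amd64')
+#     pi.target_dir()              # '\\amd64'
+#     pi.target_dir(x64=True)      # '\\x64'
+#     pi.cross_dir(forcex86=True)  # '\\x86_amd64'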
+
+class RegistryInfo:
+    """
+    Microsoft Visual Studio related registry information.
+
+    Parameters
+    ----------
+    platform_info: PlatformInfo
+        "PlatformInfo" instance.
+    """
+    HKEYS = (winreg.HKEY_USERS,
+             winreg.HKEY_CURRENT_USER,
+             winreg.HKEY_LOCAL_MACHINE,
+             winreg.HKEY_CLASSES_ROOT)
+
+    def __init__(self, platform_info):
+        self.pi = platform_info
+
+    @property
+    def visualstudio(self):
+        """
+        Microsoft Visual Studio root registry key.
+        """
+        return 'VisualStudio'
+
+    @property
+    def sxs(self):
+        """
+        Microsoft Visual Studio SxS registry key.
+        """
+        return os.path.join(self.visualstudio, 'SxS')
+
+    @property
+    def vc(self):
+        """
+        Microsoft Visual C++ VC7 registry key.
+        """
+        return os.path.join(self.sxs, 'VC7')
+
+    @property
+    def vs(self):
+        """
+        Microsoft Visual Studio VS7 registry key.
+        """
+        return os.path.join(self.sxs, 'VS7')
+
+    @property
+    def vc_for_python(self):
+        """
+        Microsoft Visual C++ for Python registry key.
+        """
+        return r'DevDiv\VCForPython'
+
+    @property
+    def microsoft_sdk(self):
+        """
+        Microsoft SDK registry key.
+        """
+        return 'Microsoft SDKs'
+
+    @property
+    def windows_sdk(self):
+        """
+        Microsoft Windows/Platform SDK registry key.
+        """
+        return os.path.join(self.microsoft_sdk, 'Windows')
+
+    @property
+    def netfx_sdk(self):
+        """
+        Microsoft .NET Framework SDK registry key.
+        """
+        return os.path.join(self.microsoft_sdk, 'NETFXSDK')
+
+    @property
+    def windows_kits_roots(self):
+        """
+        Microsoft Windows Kits Roots registry key.
+        """
+        return r'Windows Kits\Installed Roots'
+
+    def microsoft(self, key, x86=False):
+        """
+        Return key in Microsoft software registry.
+
+        Parameters
+        ----------
+        key: str
+            Registry key path to look in.
+        x86: bool
+            Force the x86 software registry.
+
+        Return
+        ------
+        str: value
+        """
+        node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
+        return os.path.join('Software', node64, 'Microsoft', key)
+
+    def lookup(self, key, name):
+        """
+        Look up a value in the Microsoft software registry.
+
+        Parameters
+        ----------
+        key: str
+            Registry key path to look in.
+        name: str
+            Value name to find.
+
+        Return
+        ------
+        str: value
+        """
+        KEY_READ = winreg.KEY_READ
+        openkey = winreg.OpenKey
+        ms = self.microsoft
+        for hkey in self.HKEYS:
+            try:
+                bkey = openkey(hkey, ms(key), 0, KEY_READ)
+            except (OSError, IOError):
+                if not self.pi.current_is_x86():
+                    try:
+                        bkey = openkey(hkey, ms(key, True), 0, KEY_READ)
+                    except (OSError, IOError):
+                        continue
+                else:
+                    continue
+            try:
+                return winreg.QueryValueEx(bkey, name)[0]
+            except (OSError, IOError):
+                pass
+
+
+class SystemInfo:
+    """
+    Microsoft Windows and Visual Studio related system information.
+
+    Parameters
+    ----------
+    registry_info: RegistryInfo
+        "RegistryInfo" instance.
+    vc_ver: float
+        Required Microsoft Visual C++ version.
+    """
+
+    # Variables and properties in this class use the original CamelCase
+    # variable names from Microsoft source files for easier comparison.
+    WinDir = safe_env.get('WinDir', '')
+    ProgramFiles = safe_env.get('ProgramFiles', '')
+    ProgramFilesx86 = safe_env.get('ProgramFiles(x86)', ProgramFiles)
+
+    def __init__(self, registry_info, vc_ver=None):
+        self.ri = registry_info
+        self.pi = self.ri.pi
+        self.vc_ver = vc_ver or self._find_latest_available_vc_ver()
+
+    def _find_latest_available_vc_ver(self):
+        try:
+            return self.find_available_vc_vers()[-1]
+        except IndexError:
+            err = 'No Microsoft Visual C++ version found'
+            raise distutils.errors.DistutilsPlatformError(err)
+
+    def find_available_vc_vers(self):
+        """
+        Find all available Microsoft Visual C++ versions.
+        """
+        ms = self.ri.microsoft
+        vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs)
+        vc_vers = []
+        for hkey in self.ri.HKEYS:
+            for key in vckeys:
+                try:
+                    bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ)
+                except (OSError, IOError):
+                    continue
+                subkeys, values, _ = winreg.QueryInfoKey(bkey)
+                for i in range(values):
+                    try:
+                        ver = float(winreg.EnumValue(bkey, i)[0])
+                        if ver not in vc_vers:
+                            vc_vers.append(ver)
+                    except ValueError:
+                        pass
+                for i in range(subkeys):
+                    try:
+                        ver = float(winreg.EnumKey(bkey, i))
+                        if ver not in vc_vers:
+                            vc_vers.append(ver)
+                    except ValueError:
+                        pass
+        return sorted(vc_vers)
+
+    @property
+    def VSInstallDir(self):
+        """
+        Microsoft Visual Studio directory.
+        """
+        # Default path
+        name = 'Microsoft Visual Studio %0.1f' % self.vc_ver
+        default = os.path.join(self.ProgramFilesx86, name)
+
+        # Try to get path from registry; if that fails, use default path
+        return self.ri.lookup(self.ri.vs, '%0.1f' % self.vc_ver) or default
+
+    @property
+    def VCInstallDir(self):
+        """
+        Microsoft Visual C++ directory.
+        """
+        self.VSInstallDir
+
+        guess_vc = self._guess_vc() or self._guess_vc_legacy()
+
+        # Try to get "VC++ for Python" path from registry as default path
+        reg_path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
+        python_vc = self.ri.lookup(reg_path, 'installdir')
+        default_vc = os.path.join(python_vc, 'VC') if python_vc else guess_vc
+
+        # Try to get path from registry; if that fails, use default path
+        path = self.ri.lookup(self.ri.vc, '%0.1f' % self.vc_ver) or default_vc
+
+        if not os.path.isdir(path):
+            msg = 'Microsoft Visual C++ directory not found'
+            raise distutils.errors.DistutilsPlatformError(msg)
+
+        return path
+
+    def _guess_vc(self):
+        """
+        Locate Visual C++ for 2017.
+        """
+        if self.vc_ver <= 14.0:
+            return
+
+        default = r'VC\Tools\MSVC'
+        guess_vc = os.path.join(self.VSInstallDir, default)
+        # Subdir with VC exact version as name
+        try:
+            vc_exact_ver = os.listdir(guess_vc)[-1]
+            return os.path.join(guess_vc, vc_exact_ver)
+        except (OSError, IOError, IndexError):
+            pass
+
+    def _guess_vc_legacy(self):
+        """
+        Locate Visual C++ for versions prior to 2017.
+        """
+        default = r'Microsoft Visual Studio %0.1f\VC' % self.vc_ver
+        return os.path.join(self.ProgramFilesx86, default)
+
+    @property
+    def WindowsSdkVersion(self):
+        """
+        Microsoft Windows SDK versions for specified MSVC++ version.
+        """
+        if self.vc_ver <= 9.0:
+            return ('7.0', '6.1', '6.0a')
+        elif self.vc_ver == 10.0:
+            return ('7.1', '7.0a')
+        elif self.vc_ver == 11.0:
+            return ('8.0', '8.0a')
+        elif self.vc_ver == 12.0:
+            return ('8.1', '8.1a')
+        elif self.vc_ver >= 14.0:
+            return ('10.0', '8.1')
+
+    @property
+    def WindowsSdkLastVersion(self):
+        """
+        Microsoft Windows SDK last version
+        """
+        return self._use_last_dir_name(os.path.join(
+            self.WindowsSdkDir, 'lib'))
+
+    @property
+    def WindowsSdkDir(self):
+        """
+        Microsoft Windows SDK directory.
+        """
+        sdkdir = ''
+        for ver in self.WindowsSdkVersion:
+            # Try to get it from registry
+            loc = os.path.join(self.ri.windows_sdk, 'v%s' % ver)
+            sdkdir = self.ri.lookup(loc, 'installationfolder')
+            if sdkdir:
+                break
+        if not sdkdir or not os.path.isdir(sdkdir):
+            # Try to get "VC++ for Python" version from registry
+            path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
+            install_base = self.ri.lookup(path, 'installdir')
+            if install_base:
+                sdkdir = os.path.join(install_base, 'WinSDK')
+        if not sdkdir or not os.path.isdir(sdkdir):
+            # If that fails, use the default new-style path
+            for ver in self.WindowsSdkVersion:
+                intver = ver[:ver.rfind('.')]
+                path = r'Microsoft SDKs\Windows Kits\%s' % (intver)
+                d = os.path.join(self.ProgramFiles, path)
+                if os.path.isdir(d):
+                    sdkdir = d
+        if not sdkdir or not os.path.isdir(sdkdir):
+            # If that fails, use the default old-style path
+            for ver in self.WindowsSdkVersion:
+                path = r'Microsoft SDKs\Windows\v%s' % ver
+                d = os.path.join(self.ProgramFiles, path)
+                if os.path.isdir(d):
+                    sdkdir = d
+        if not sdkdir:
+            # If that fails, fall back to the Platform SDK
+            sdkdir = os.path.join(self.VCInstallDir, 'PlatformSDK')
+        return sdkdir
+
+    @property
+    def WindowsSDKExecutablePath(self):
+        """
+        Microsoft Windows SDK executable directory.
+        """
+        # Find WinSDK NetFx Tools registry dir name
+        if self.vc_ver <= 11.0:
+            netfxver = 35
+            arch = ''
+        else:
+            netfxver = 40
+            hidex86 = self.vc_ver <= 12.0
+            arch = self.pi.current_dir(x64=True, hidex86=hidex86)
+        fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-'))
+
+        # List all possible registry paths
+        regpaths = []
+        if self.vc_ver >= 14.0:
+            for ver in self.NetFxSdkVersion:
+                regpaths += [os.path.join(self.ri.netfx_sdk, ver, fx)]
+
+        for ver in self.WindowsSdkVersion:
+            regpaths += [os.path.join(self.ri.windows_sdk, 'v%sA' % ver, fx)]
+
+        # Return the installation folder from the most recent path
+        for path in regpaths:
+            execpath = self.ri.lookup(path, 'installationfolder')
+            if execpath:
+                break
+        return execpath
+
+    @property
+    def FSharpInstallDir(self):
+        """
+        Microsoft Visual F# directory.
+        """
+        path = r'%0.1f\Setup\F#' % self.vc_ver
+        path = os.path.join(self.ri.visualstudio, path)
+        return self.ri.lookup(path, 'productdir') or ''
+
+    @property
+    def UniversalCRTSdkDir(self):
+        """
+        Microsoft Universal CRT SDK directory.
+        """
+        # Set Kit Roots versions for specified MSVC++ version
+        if self.vc_ver >= 14.0:
+            vers = ('10', '81')
+        else:
+            vers = ()
+
+        # Find the path of the most recent Kit
+        for ver in vers:
+            sdkdir = self.ri.lookup(self.ri.windows_kits_roots,
+                                    'kitsroot%s' % ver)
+            if sdkdir:
+                break
+        return sdkdir or ''
+
+    @property
+    def UniversalCRTSdkLastVersion(self):
+        """
+        Microsoft Universal C Runtime SDK last version
+        """
+        return self._use_last_dir_name(os.path.join(
+            self.UniversalCRTSdkDir, 'lib'))
+
+    @property
+    def NetFxSdkVersion(self):
+        """
+        Microsoft .NET Framework SDK versions.
+        """
+        # Set FxSdk versions for specified MSVC++ version
+        if self.vc_ver >= 14.0:
+            return ('4.6.1', '4.6')
+        else:
+            return ()
+
+    @property
+    def NetFxSdkDir(self):
+        """
+        Microsoft .NET Framework SDK directory.
+        """
+        for ver in self.NetFxSdkVersion:
+            loc = os.path.join(self.ri.netfx_sdk, ver)
+            sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
+            if sdkdir:
+                break
+        return sdkdir or ''
+
+    @property
+    def FrameworkDir32(self):
+        """
+        Microsoft .NET Framework 32bit directory.
+        """
+        # Default path
+        guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework')
+
+        # Try to get path from registry; if that fails, use default path
+        return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
+
+    @property
+    def FrameworkDir64(self):
+        """
+        Microsoft .NET Framework 64bit directory.
+        """
+        # Default path
+        guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework64')
+
+        # Try to get path from registry, if fail use default path
+        return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw
+
+    @property
+    def FrameworkVersion32(self):
+        """
+        Microsoft .NET Framework 32bit versions.
+        """
+        return self._find_dot_net_versions(32)
+
+    @property
+    def FrameworkVersion64(self):
+        """
+        Microsoft .NET Framework 64bit versions.
+        """
+        return self._find_dot_net_versions(64)
+
+    def _find_dot_net_versions(self, bits):
+        """
+        Find Microsoft .NET Framework versions.
+
+        Parameters
+        ----------
+        bits: int
+            Platform number of bits: 32 or 64.
+        """
+        # Find actual .NET version in registry
+        reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits)
+        dot_net_dir = getattr(self, 'FrameworkDir%d' % bits)
+        ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or ''
+
+        # Set .NET versions for specified MSVC++ version
+        if self.vc_ver >= 12.0:
+            frameworkver = (ver, 'v4.0')
+        elif self.vc_ver >= 10.0:
+            frameworkver = ('v4.0.30319' if ver.lower()[:2] != 'v4' else ver,
+                            'v3.5')
+        elif self.vc_ver == 9.0:
+            frameworkver = ('v3.5', 'v2.0.50727')
+        elif self.vc_ver == 8.0:
+            frameworkver = ('v3.0', 'v2.0.50727')
+        return frameworkver
+
+    def _use_last_dir_name(self, path, prefix=''):
+        """
+        Return name of the last dir in path or '' if no dir found.
+
+        Parameters
+        ----------
+        path: str
+            Use dirs in this path
+        prefix: str
+            Use only dirs starting with this prefix
+        """
+        matching_dirs = (
+            dir_name
+            for dir_name in reversed(os.listdir(path))
+            if os.path.isdir(os.path.join(path, dir_name)) and
+            dir_name.startswith(prefix)
+        )
+        return next(matching_dirs, None) or ''
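+    # Illustrative example (not upstream code): given subdirs '10.0.10240.0'
+    # and '10.0.17763.0', and assuming os.listdir returns sorted names (as
+    # NTFS typically does), _use_last_dir_name(path) --> '10.0.17763.0'.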
+
+
+class EnvironmentInfo:
+    """
+    Return environment variables for the specified Microsoft Visual C++
+    version and platform: Lib, Include, Path and libpath.
+
+    This class is compatible with Microsoft Visual C++ 9.0 to 14.0.
+
+    It was created by analysing Microsoft environment configuration files
+    such as "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ...
+
+    Parameters
+    ----------
+    arch: str
+        Target architecture.
+    vc_ver: float
+        Required Microsoft Visual C++ version. If not set, autodetect the last
+        version.
+    vc_min_ver: float
+        Minimum Microsoft Visual C++ version.
+    """
+
+    # Variables and properties in this class use the original CamelCase
+    # names from the Microsoft source files to make comparison easier.
+
+    def __init__(self, arch, vc_ver=None, vc_min_ver=0):
+        self.pi = PlatformInfo(arch)
+        self.ri = RegistryInfo(self.pi)
+        self.si = SystemInfo(self.ri, vc_ver)
+
+        if self.vc_ver < vc_min_ver:
+            err = 'No suitable Microsoft Visual C++ version found'
+            raise distutils.errors.DistutilsPlatformError(err)
+
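+    # Usage sketch (illustrative, not upstream code): 'x86_amd64' is one of
+    # the arch strings PlatformInfo accepts.
+    # EnvironmentInfo('x86_amd64', vc_min_ver=14.0).return_env() -->
+    #     dict with 'include', 'lib', 'libpath' and 'path' keys (plus
+    #     'py_vcruntime_redist' when the runtime DLL is found)
+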
+    @property
+    def vc_ver(self):
+        """
+        Microsoft Visual C++ version.
+        """
+        return self.si.vc_ver
+
+    @property
+    def VSTools(self):
+        """
+        Microsoft Visual Studio Tools
+        """
+        paths = [r'Common7\IDE', r'Common7\Tools']
+
+        if self.vc_ver >= 14.0:
+            arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
+            paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow']
+            paths += [r'Team Tools\Performance Tools']
+            paths += [r'Team Tools\Performance Tools%s' % arch_subdir]
+
+        return [os.path.join(self.si.VSInstallDir, path) for path in paths]
+
+    @property
+    def VCIncludes(self):
+        """
+        Microsoft Visual C++ & Microsoft Foundation Class Includes
+        """
+        return [os.path.join(self.si.VCInstallDir, 'Include'),
+                os.path.join(self.si.VCInstallDir, r'ATLMFC\Include')]
+
+    @property
+    def VCLibraries(self):
+        """
+        Microsoft Visual C++ & Microsoft Foundation Class Libraries
+        """
+        if self.vc_ver >= 15.0:
+            arch_subdir = self.pi.target_dir(x64=True)
+        else:
+            arch_subdir = self.pi.target_dir(hidex86=True)
+        paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir]
+
+        if self.vc_ver >= 14.0:
+            paths += [r'Lib\store%s' % arch_subdir]
+
+        return [os.path.join(self.si.VCInstallDir, path) for path in paths]
+
+    @property
+    def VCStoreRefs(self):
+        """
+        Microsoft Visual C++ store references Libraries
+        """
+        if self.vc_ver < 14.0:
+            return []
+        return [os.path.join(self.si.VCInstallDir, r'Lib\store\references')]
+
+    @property
+    def VCTools(self):
+        """
+        Microsoft Visual C++ Tools
+        """
+        si = self.si
+        tools = [os.path.join(si.VCInstallDir, 'VCPackages')]
+
+        forcex86 = self.vc_ver <= 10.0
+        arch_subdir = self.pi.cross_dir(forcex86)
+        if arch_subdir:
+            tools += [os.path.join(si.VCInstallDir, 'Bin%s' % arch_subdir)]
+
+        if self.vc_ver == 14.0:
+            path = 'Bin%s' % self.pi.current_dir(hidex86=True)
+            tools += [os.path.join(si.VCInstallDir, path)]
+
+        elif self.vc_ver >= 15.0:
+            host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else
+                        r'bin\HostX64%s')
+            tools += [os.path.join(
+                si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))]
+
+            if self.pi.current_cpu != self.pi.target_cpu:
+                tools += [os.path.join(
+                    si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))]
+
+        else:
+            tools += [os.path.join(si.VCInstallDir, 'Bin')]
+
+        return tools
+
+    @property
+    def OSLibraries(self):
+        """
+        Microsoft Windows SDK Libraries
+        """
+        if self.vc_ver <= 10.0:
+            arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
+            return [os.path.join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]
+
+        else:
+            arch_subdir = self.pi.target_dir(x64=True)
+            lib = os.path.join(self.si.WindowsSdkDir, 'lib')
+            libver = self._sdk_subdir
+            return [os.path.join(lib, '%sum%s' % (libver, arch_subdir))]
+
+    @property
+    def OSIncludes(self):
+        """
+        Microsoft Windows SDK Include
+        """
+        include = os.path.join(self.si.WindowsSdkDir, 'include')
+
+        if self.vc_ver <= 10.0:
+            return [include, os.path.join(include, 'gl')]
+
+        else:
+            if self.vc_ver >= 14.0:
+                sdkver = self._sdk_subdir
+            else:
+                sdkver = ''
+            return [os.path.join(include, '%sshared' % sdkver),
+                    os.path.join(include, '%sum' % sdkver),
+                    os.path.join(include, '%swinrt' % sdkver)]
+
+    @property
+    def OSLibpath(self):
+        """
+        Microsoft Windows SDK Libraries Paths
+        """
+        ref = os.path.join(self.si.WindowsSdkDir, 'References')
+        libpath = []
+
+        if self.vc_ver <= 9.0:
+            libpath += self.OSLibraries
+
+        if self.vc_ver >= 11.0:
+            libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')]
+
+        if self.vc_ver >= 14.0:
+            libpath += [
+                ref,
+                os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'),
+                os.path.join(
+                    ref,
+                    'Windows.Foundation.UniversalApiContract',
+                    '1.0.0.0',
+                ),
+                os.path.join(
+                    ref,
+                    'Windows.Foundation.FoundationContract',
+                    '1.0.0.0',
+                ),
+                os.path.join(
+                    ref,
+                    'Windows.Networking.Connectivity.WwanContract',
+                    '1.0.0.0',
+                ),
+                os.path.join(
+                    self.si.WindowsSdkDir,
+                    'ExtensionSDKs',
+                    'Microsoft.VCLibs',
+                    '%0.1f' % self.vc_ver,
+                    'References',
+                    'CommonConfiguration',
+                    'neutral',
+                ),
+            ]
+        return libpath
+
+    @property
+    def SdkTools(self):
+        """
+        Microsoft Windows SDK Tools
+        """
+        return list(self._sdk_tools())
+
+    def _sdk_tools(self):
+        """
+        Microsoft Windows SDK Tools paths generator
+        """
+        if self.vc_ver < 15.0:
+            bin_dir = 'Bin' if self.vc_ver <= 11.0 else r'Bin\x86'
+            yield os.path.join(self.si.WindowsSdkDir, bin_dir)
+
+        if not self.pi.current_is_x86():
+            arch_subdir = self.pi.current_dir(x64=True)
+            path = 'Bin%s' % arch_subdir
+            yield os.path.join(self.si.WindowsSdkDir, path)
+
+        if self.vc_ver in (10.0, 11.0):
+            if self.pi.target_is_x86():
+                arch_subdir = ''
+            else:
+                arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
+            path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
+            yield os.path.join(self.si.WindowsSdkDir, path)
+
+        elif self.vc_ver >= 15.0:
+            path = os.path.join(self.si.WindowsSdkDir, 'Bin')
+            arch_subdir = self.pi.current_dir(x64=True)
+            sdkver = self.si.WindowsSdkLastVersion
+            yield os.path.join(path, '%s%s' % (sdkver, arch_subdir))
+
+        if self.si.WindowsSDKExecutablePath:
+            yield self.si.WindowsSDKExecutablePath
+
+    @property
+    def _sdk_subdir(self):
+        """
+        Microsoft Windows SDK version subdir
+        """
+        ucrtver = self.si.WindowsSdkLastVersion
+        return ('%s\\' % ucrtver) if ucrtver else ''
+
+    @property
+    def SdkSetup(self):
+        """
+        Microsoft Windows SDK Setup
+        """
+        if self.vc_ver > 9.0:
+            return []
+
+        return [os.path.join(self.si.WindowsSdkDir, 'Setup')]
+
+    @property
+    def FxTools(self):
+        """
+        Microsoft .NET Framework Tools
+        """
+        pi = self.pi
+        si = self.si
+
+        if self.vc_ver <= 10.0:
+            include32 = True
+            include64 = not pi.target_is_x86() and not pi.current_is_x86()
+        else:
+            include32 = pi.target_is_x86() or pi.current_is_x86()
+            include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
+
+        tools = []
+        if include32:
+            tools += [os.path.join(si.FrameworkDir32, ver)
+                      for ver in si.FrameworkVersion32]
+        if include64:
+            tools += [os.path.join(si.FrameworkDir64, ver)
+                      for ver in si.FrameworkVersion64]
+        return tools
+
+    @property
+    def NetFxSDKLibraries(self):
+        """
+        Microsoft .Net Framework SDK Libraries
+        """
+        if self.vc_ver < 14.0 or not self.si.NetFxSdkDir:
+            return []
+
+        arch_subdir = self.pi.target_dir(x64=True)
+        return [os.path.join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)]
+
+    @property
+    def NetFxSDKIncludes(self):
+        """
+        Microsoft .Net Framework SDK Includes
+        """
+        if self.vc_ver < 14.0 or not self.si.NetFxSdkDir:
+            return []
+
+        return [os.path.join(self.si.NetFxSdkDir, r'include\um')]
+
+    @property
+    def VsTDb(self):
+        """
+        Microsoft Visual Studio Team System Database
+        """
+        return [os.path.join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
+
+    @property
+    def MSBuild(self):
+        """
+        Microsoft Build Engine
+        """
+        if self.vc_ver < 12.0:
+            return []
+        elif self.vc_ver < 15.0:
+            base_path = self.si.ProgramFilesx86
+            arch_subdir = self.pi.current_dir(hidex86=True)
+        else:
+            base_path = self.si.VSInstallDir
+            arch_subdir = ''
+
+        path = r'MSBuild\%0.1f\bin%s' % (self.vc_ver, arch_subdir)
+        build = [os.path.join(base_path, path)]
+
+        if self.vc_ver >= 15.0:
+            # Add Roslyn C# & Visual Basic Compiler
+            build += [os.path.join(base_path, path, 'Roslyn')]
+
+        return build
+
+    @property
+    def HTMLHelpWorkshop(self):
+        """
+        Microsoft HTML Help Workshop
+        """
+        if self.vc_ver < 11.0:
+            return []
+
+        return [os.path.join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
+
+    @property
+    def UCRTLibraries(self):
+        """
+        Microsoft Universal C Runtime SDK Libraries
+        """
+        if self.vc_ver < 14.0:
+            return []
+
+        arch_subdir = self.pi.target_dir(x64=True)
+        lib = os.path.join(self.si.UniversalCRTSdkDir, 'lib')
+        ucrtver = self._ucrt_subdir
+        return [os.path.join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
+
+    @property
+    def UCRTIncludes(self):
+        """
+        Microsoft Universal C Runtime SDK Include
+        """
+        if self.vc_ver < 14.0:
+            return []
+
+        include = os.path.join(self.si.UniversalCRTSdkDir, 'include')
+        return [os.path.join(include, '%sucrt' % self._ucrt_subdir)]
+
+    @property
+    def _ucrt_subdir(self):
+        """
+        Microsoft Universal C Runtime SDK version subdir
+        """
+        ucrtver = self.si.UniversalCRTSdkLastVersion
+        return ('%s\\' % ucrtver) if ucrtver else ''
+
+    @property
+    def FSharp(self):
+        """
+        Microsoft Visual F#
+        """
+        # The F# registry key is only checked for VC 11.0 and 12.0
+        if self.vc_ver < 11.0 or self.vc_ver > 12.0:
+            return []
+
+        # _build_paths expects a list of paths, not a bare string
+        return [self.si.FSharpInstallDir]
+
+    @property
+    def VCRuntimeRedist(self):
+        """
+        Microsoft Visual C++ runtime redistributable DLL
+        """
+        arch_subdir = self.pi.target_dir(x64=True)
+        if self.vc_ver < 15:
+            redist_path = self.si.VCInstallDir
+            vcruntime = 'redist%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll'
+        else:
+            redist_path = self.si.VCInstallDir.replace('\\Tools', '\\Redist')
+            vcruntime = 'onecore%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll'
+
+        # Visual Studio 2017 is still Visual C++ 14.0
+        dll_ver = 14.0 if self.vc_ver == 15 else self.vc_ver
+
+        vcruntime = vcruntime % (arch_subdir, self.vc_ver, dll_ver)
+        return os.path.join(redist_path, vcruntime)
+
+    def return_env(self, exists=True):
+        """
+        Return environment dict.
+
+        Parameters
+        ----------
+        exists: bool
+            If True, only return existing paths.
+        """
+        env = dict(
+            include=self._build_paths('include',
+                                      [self.VCIncludes,
+                                       self.OSIncludes,
+                                       self.UCRTIncludes,
+                                       self.NetFxSDKIncludes],
+                                      exists),
+            lib=self._build_paths('lib',
+                                  [self.VCLibraries,
+                                   self.OSLibraries,
+                                   self.FxTools,
+                                   self.UCRTLibraries,
+                                   self.NetFxSDKLibraries],
+                                  exists),
+            libpath=self._build_paths('libpath',
+                                      [self.VCLibraries,
+                                       self.FxTools,
+                                       self.VCStoreRefs,
+                                       self.OSLibpath],
+                                      exists),
+            path=self._build_paths('path',
+                                   [self.VCTools,
+                                    self.VSTools,
+                                    self.VsTDb,
+                                    self.SdkTools,
+                                    self.SdkSetup,
+                                    self.FxTools,
+                                    self.MSBuild,
+                                    self.HTMLHelpWorkshop,
+                                    self.FSharp],
+                                   exists),
+        )
+        if self.vc_ver >= 14 and os.path.isfile(self.VCRuntimeRedist):
+            env['py_vcruntime_redist'] = self.VCRuntimeRedist
+        return env
+
+    def _build_paths(self, name, spec_path_lists, exists):
+        """
+        Given an environment variable name and specified paths,
+        return a pathsep-separated string of paths containing
+        unique, extant, directories from those paths and from
+        the environment variable. Raise an error if no paths
+        are resolved.
+        """
+        # flatten spec_path_lists
+        spec_paths = itertools.chain.from_iterable(spec_path_lists)
+        env_paths = safe_env.get(name, '').split(os.pathsep)
+        paths = itertools.chain(spec_paths, env_paths)
+        extant_paths = list(filter(os.path.isdir, paths)) if exists else paths
+        if not extant_paths:
+            msg = "%s environment variable is empty" % name.upper()
+            raise distutils.errors.DistutilsPlatformError(msg)
+        unique_paths = self._unique_everseen(extant_paths)
+        return os.pathsep.join(unique_paths)
+
+    # from Python docs
+    def _unique_everseen(self, iterable, key=None):
+        """
+        List unique elements, preserving order.
+        Remember all elements ever seen.
+
+        _unique_everseen('AAAABBBCCDAABBB') --> A B C D
+
+        _unique_everseen('ABBCcAD', str.lower) --> A B C D
+        """
+        seen = set()
+        seen_add = seen.add
+        if key is None:
+            for element in filterfalse(seen.__contains__, iterable):
+                seen_add(element)
+                yield element
+        else:
+            for element in iterable:
+                k = key(element)
+                if k not in seen:
+                    seen_add(k)
+                    yield element
diff --git a/lib/python3.7/site-packages/setuptools/namespaces.py b/lib/python3.7/site-packages/setuptools/namespaces.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc16106d3dc7048a160129745756bbc9b1fb51d9
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/namespaces.py
@@ -0,0 +1,107 @@
+import os
+from distutils import log
+import itertools
+
+from setuptools.extern.six.moves import map
+
+
+flatten = itertools.chain.from_iterable
+
+
+class Installer:
+
+    nspkg_ext = '-nspkg.pth'
+
+    def install_namespaces(self):
+        nsp = self._get_all_ns_packages()
+        if not nsp:
+            return
+        filename, ext = os.path.splitext(self._get_target())
+        filename += self.nspkg_ext
+        self.outputs.append(filename)
+        log.info("Installing %s", filename)
+        lines = map(self._gen_nspkg_line, nsp)
+
+        if self.dry_run:
+            # always generate the lines, even in dry run
+            list(lines)
+            return
+
+        with open(filename, 'wt') as f:
+            f.writelines(lines)
+
+    def uninstall_namespaces(self):
+        filename, ext = os.path.splitext(self._get_target())
+        filename += self.nspkg_ext
+        if not os.path.exists(filename):
+            return
+        log.info("Removing %s", filename)
+        os.remove(filename)
+
+    def _get_target(self):
+        return self.target
+
+    _nspkg_tmpl = (
+        "import sys, types, os",
+        "has_mfs = sys.version_info > (3, 5)",
+        "p = os.path.join(%(root)s, *%(pth)r)",
+        "importlib = has_mfs and __import__('importlib.util')",
+        "has_mfs and __import__('importlib.machinery')",
+        "m = has_mfs and "
+            "sys.modules.setdefault(%(pkg)r, "
+                "importlib.util.module_from_spec("
+                    "importlib.machinery.PathFinder.find_spec(%(pkg)r, "
+                        "[os.path.dirname(p)])))",
+        "m = m or "
+            "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
+        "mp = (m or []) and m.__dict__.setdefault('__path__',[])",
+        "(p not in mp) and mp.append(p)",
+    )
+    "lines for the namespace installer"
+
+    _nspkg_tmpl_multi = (
+        'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
+    )
+    "additional line(s) when a parent package is indicated"
+
+    def _get_root(self):
+        return "sys._getframe(1).f_locals['sitedir']"
+
+    def _gen_nspkg_line(self, pkg):
+        # ensure pkg is not a unicode string under Python 2.7
+        pkg = str(pkg)
+        pth = tuple(pkg.split('.'))
+        root = self._get_root()
+        tmpl_lines = self._nspkg_tmpl
+        parent, sep, child = pkg.rpartition('.')
+        if parent:
+            tmpl_lines += self._nspkg_tmpl_multi
+        return ';'.join(tmpl_lines) % locals() + '\n'
+
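+    # Illustrative: _gen_nspkg_line('zope.interface') returns a single
+    # ';'-joined line built from _nspkg_tmpl plus _nspkg_tmpl_multi (since
+    # the package has a parent); the generated -nspkg.pth file executes it
+    # at interpreter startup.
+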
+    def _get_all_ns_packages(self):
+        """Return sorted list of all package namespaces"""
+        pkgs = self.distribution.namespace_packages or []
+        return sorted(flatten(map(self._pkg_names, pkgs)))
+
+    @staticmethod
+    def _pkg_names(pkg):
+        """
+        Given a namespace package, yield the components of that
+        package.
+
+        >>> names = Installer._pkg_names('a.b.c')
+        >>> set(names) == set(['a', 'a.b', 'a.b.c'])
+        True
+        """
+        parts = pkg.split('.')
+        while parts:
+            yield '.'.join(parts)
+            parts.pop()
+
+
+class DevelopInstaller(Installer):
+    def _get_root(self):
+        return repr(str(self.egg_path))
+
+    def _get_target(self):
+        return self.egg_link
diff --git a/lib/python3.7/site-packages/setuptools/package_index.py b/lib/python3.7/site-packages/setuptools/package_index.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b06f2ca281296c989fa55c6c77c9d8b7356b552
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/package_index.py
@@ -0,0 +1,1136 @@
+"""PyPI and direct package downloading"""
+import sys
+import os
+import re
+import shutil
+import socket
+import base64
+import hashlib
+import itertools
+import warnings
+from functools import wraps
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import urllib, http_client, configparser, map
+
+import setuptools
+from pkg_resources import (
+    CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
+    Environment, find_distributions, safe_name, safe_version,
+    to_filename, Requirement, DEVELOP_DIST, EGG_DIST,
+)
+from setuptools import ssl_support
+from distutils import log
+from distutils.errors import DistutilsError
+from fnmatch import translate
+from setuptools.py27compat import get_all_headers
+from setuptools.py33compat import unescape
+from setuptools.wheel import Wheel
+
+__metaclass__ = type
+
+EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
+HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)
+PYPI_MD5 = re.compile(
+    r'<a href="([^"#]+)">([^<]+)</a>\n\s+\(<a (?:title="MD5 hash"\n\s+)'
+    r'href="[^?]+\?:action=show_md5&amp;digest=([0-9a-f]{32})">md5</a>\)'
+)
+URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
+EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
+
+__all__ = [
+    'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
+    'interpret_distro_name',
+]
+
+_SOCKET_TIMEOUT = 15
+
+_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
+user_agent = _tmpl.format(py_major=sys.version[:3], setuptools=setuptools)
+
+
+def parse_requirement_arg(spec):
+    try:
+        return Requirement.parse(spec)
+    except ValueError:
+        raise DistutilsError(
+            "Not a URL, existing file, or requirement spec: %r" % (spec,)
+        )
+
+
+def parse_bdist_wininst(name):
+    """Return (base,pyversion) or (None,None) for possible .exe name"""
+
+    lower = name.lower()
+    base, py_ver, plat = None, None, None
+
+    if lower.endswith('.exe'):
+        if lower.endswith('.win32.exe'):
+            base = name[:-10]
+            plat = 'win32'
+        elif lower.startswith('.win32-py', -16):
+            py_ver = name[-7:-4]
+            base = name[:-16]
+            plat = 'win32'
+        elif lower.endswith('.win-amd64.exe'):
+            base = name[:-14]
+            plat = 'win-amd64'
+        elif lower.startswith('.win-amd64-py', -20):
+            py_ver = name[-7:-4]
+            base = name[:-20]
+            plat = 'win-amd64'
+    return base, py_ver, plat
+
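+# Illustrative examples (not upstream comments):
+# parse_bdist_wininst('foo-1.0.win32.exe') --> ('foo-1.0', None, 'win32')
+# parse_bdist_wininst('foo-1.0.win32-py2.7.exe')
+#     --> ('foo-1.0', '2.7', 'win32')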
+
+def egg_info_for_url(url):
+    parts = urllib.parse.urlparse(url)
+    scheme, server, path, parameters, query, fragment = parts
+    base = urllib.parse.unquote(path.split('/')[-1])
+    if server == 'sourceforge.net' and base == 'download':  # XXX Yuck
+        base = urllib.parse.unquote(path.split('/')[-2])
+    if '#' in base:
+        base, fragment = base.split('#', 1)
+    return base, fragment
+
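+# Illustrative: egg_info_for_url('http://host/pkg-1.0.tar.gz#md5=abc')
+#     --> ('pkg-1.0.tar.gz', 'md5=abc')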
+
+def distros_for_url(url, metadata=None):
+    """Yield egg or source distribution objects that might be found at a URL"""
+    base, fragment = egg_info_for_url(url)
+    for dist in distros_for_location(url, base, metadata):
+        yield dist
+    if fragment:
+        match = EGG_FRAGMENT.match(fragment)
+        if match:
+            for dist in interpret_distro_name(
+                url, match.group(1), metadata, precedence=CHECKOUT_DIST
+            ):
+                yield dist
+
+
+def distros_for_location(location, basename, metadata=None):
+    """Yield egg or source distribution objects based on basename"""
+    if basename.endswith('.egg.zip'):
+        basename = basename[:-4]  # strip the .zip
+    if basename.endswith('.egg') and '-' in basename:
+        # only one, unambiguous interpretation
+        return [Distribution.from_location(location, basename, metadata)]
+    if basename.endswith('.whl') and '-' in basename:
+        wheel = Wheel(basename)
+        if not wheel.is_compatible():
+            return []
+        return [Distribution(
+            location=location,
+            project_name=wheel.project_name,
+            version=wheel.version,
+            # Increase priority over eggs.
+            precedence=EGG_DIST + 1,
+        )]
+    if basename.endswith('.exe'):
+        win_base, py_ver, platform = parse_bdist_wininst(basename)
+        if win_base is not None:
+            return interpret_distro_name(
+                location, win_base, metadata, py_ver, BINARY_DIST, platform
+            )
+    # Try source distro extensions (.zip, .tgz, etc.)
+    #
+    for ext in EXTENSIONS:
+        if basename.endswith(ext):
+            basename = basename[:-len(ext)]
+            return interpret_distro_name(location, basename, metadata)
+    return []  # no extension matched
+
+
+def distros_for_filename(filename, metadata=None):
+    """Yield possible egg or source distribution objects based on a filename"""
+    return distros_for_location(
+        normalize_path(filename), os.path.basename(filename), metadata
+    )
+
+
+def interpret_distro_name(
+        location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
+        platform=None
+):
+    """Generate alternative interpretations of a source distro name
+
+    Note: if `location` is a filesystem filename, you should call
+    ``pkg_resources.normalize_path()`` on it before passing it to this
+    routine!
+    """
+    # Generate alternative interpretations of a source distro name
+    # Because some packages are ambiguous as to name/versions split
+    # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
+    # So, we generate each possible interpretation (e.g. "adns, python-1.1.0"
+    # "adns-python, 1.1.0", and "adns-python-1.1.0, no version").  In practice,
+    # the spurious interpretations should be ignored, because in the event
+    # there's also an "adns" package, the spurious "python-1.1.0" version will
+    # compare lower than any numeric version number, and is therefore unlikely
+    # to match a request for it.  It's still a potential problem, though, and
+    # in the long run PyPI and the distutils should go for "safe" names and
+    # versions in distribution archive names (sdist and bdist).
+
+    parts = basename.split('-')
+    if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]):
+        # it is a bdist_dumb, not an sdist -- bail out
+        return
+
+    for p in range(1, len(parts) + 1):
+        yield Distribution(
+            location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
+            py_version=py_version, precedence=precedence,
+            platform=platform
+        )
+
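+# Illustrative ('loc' stands for any location):
+# interpret_distro_name(loc, 'adns-python-1.1.0', None) yields the candidate
+# (project, version) splits ('adns', 'python-1.1.0'),
+# ('adns-python', '1.1.0') and ('adns-python-1.1.0', '').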
+
+# From Python 2.7 docs
+def unique_everseen(iterable, key=None):
+    "List unique elements, preserving order. Remember all elements ever seen."
+    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+    # unique_everseen('ABBCcAD', str.lower) --> A B C D
+    seen = set()
+    seen_add = seen.add
+    if key is None:
+        for element in six.moves.filterfalse(seen.__contains__, iterable):
+            seen_add(element)
+            yield element
+    else:
+        for element in iterable:
+            k = key(element)
+            if k not in seen:
+                seen_add(k)
+                yield element
+
+
+def unique_values(func):
+    """
+    Wrap a function returning an iterable such that the resulting iterable
+    only ever yields unique items.
+    """
+
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        return unique_everseen(func(*args, **kwargs))
+
+    return wrapper
+
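+# unique_values decorates the link scraper below (find_external_links) so
+# repeated hrefs on a page are only yielded once.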
+
+REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
+# this line is here to fix emacs' cruddy broken syntax highlighting
+
+
+@unique_values
+def find_external_links(url, page):
+    """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
+
+    for match in REL.finditer(page):
+        tag, rel = match.groups()
+        rels = set(map(str.strip, rel.lower().split(',')))
+        if 'homepage' in rels or 'download' in rels:
+            for match in HREF.finditer(tag):
+                yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
+
+    for tag in ("<th>Home Page", "<th>Download URL"):
+        pos = page.find(tag)
+        if pos != -1:
+            match = HREF.search(page, pos)
+            if match:
+                yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
+
+
+class ContentChecker:
+    """
+    A null content checker that defines the interface for checking content
+    """
+
+    def feed(self, block):
+        """
+        Feed a block of data to the hash.
+        """
+        return
+
+    def is_valid(self):
+        """
+        Check the hash. Return False if validation fails.
+        """
+        return True
+
+    def report(self, reporter, template):
+        """
+        Call reporter with information about the checker (hash name)
+        substituted into the template.
+        """
+        return
+
+
+class HashChecker(ContentChecker):
+    pattern = re.compile(
+        r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
+        r'(?P<expected>[a-f0-9]+)'
+    )
+
+    def __init__(self, hash_name, expected):
+        self.hash_name = hash_name
+        self.hash = hashlib.new(hash_name)
+        self.expected = expected
+
+    @classmethod
+    def from_url(cls, url):
+        "Construct a (possibly null) ContentChecker from a URL"
+        fragment = urllib.parse.urlparse(url)[-1]
+        if not fragment:
+            return ContentChecker()
+        match = cls.pattern.search(fragment)
+        if not match:
+            return ContentChecker()
+        return cls(**match.groupdict())
+
+    def feed(self, block):
+        self.hash.update(block)
+
+    def is_valid(self):
+        return self.hash.hexdigest() == self.expected
+
+    def report(self, reporter, template):
+        msg = template % self.hash_name
+        return reporter(msg)
+
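+# Intended flow (summary, not upstream comments): HashChecker.from_url() on
+# a URL ending in e.g. '#sha256=<hexdigest>' returns a HashChecker; callers
+# feed() it each downloaded block, then is_valid() compares digests. URLs
+# without a recognized hash fragment get the null ContentChecker instead.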
+
+class PackageIndex(Environment):
+    """A distribution index that scans web pages for download URLs"""
+
+    def __init__(
+            self, index_url="https://pypi.org/simple/", hosts=('*',),
+            ca_bundle=None, verify_ssl=True, *args, **kw
+    ):
+        Environment.__init__(self, *args, **kw)
+        if not index_url.endswith('/'):
+            index_url += '/'
+        self.index_url = index_url
+        self.scanned_urls = {}
+        self.fetched_urls = {}
+        self.package_pages = {}
+        self.allows = re.compile('|'.join(map(translate, hosts))).match
+        self.to_scan = []
+        use_ssl = (
+            verify_ssl
+            and ssl_support.is_available
+            and (ca_bundle or ssl_support.find_ca_bundle())
+        )
+        if use_ssl:
+            self.opener = ssl_support.opener_for(ca_bundle)
+        else:
+            self.opener = urllib.request.urlopen
+
+    def process_url(self, url, retrieve=False):
+        """Evaluate a URL as a possible download, and maybe retrieve it"""
+        if url in self.scanned_urls and not retrieve:
+            return
+        self.scanned_urls[url] = True
+        if not URL_SCHEME(url):
+            self.process_filename(url)
+            return
+        else:
+            dists = list(distros_for_url(url))
+            if dists:
+                if not self.url_ok(url):
+                    return
+                self.debug("Found link: %s", url)
+
+        if dists or not retrieve or url in self.fetched_urls:
+            list(map(self.add, dists))
+            return  # don't need the actual page
+
+        if not self.url_ok(url):
+            self.fetched_urls[url] = True
+            return
+
+        self.info("Reading %s", url)
+        self.fetched_urls[url] = True  # prevent multiple fetch attempts
+        tmpl = "Download error on %s: %%s -- Some packages may not be found!"
+        f = self.open_url(url, tmpl % url)
+        if f is None:
+            return
+        self.fetched_urls[f.url] = True
+        if 'html' not in f.headers.get('content-type', '').lower():
+            f.close()  # not html, we can't process it
+            return
+
+        base = f.url  # handle redirects
+        page = f.read()
+        if not isinstance(page, str):
+            # Under Python 3 we got bytes but want str
+            if isinstance(f, urllib.error.HTTPError):
+                # Errors have no charset, assume latin1:
+                charset = 'latin-1'
+            else:
+                charset = f.headers.get_param('charset') or 'latin-1'
+            page = page.decode(charset, "ignore")
+        f.close()
+        for match in HREF.finditer(page):
+            link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
+            self.process_url(link)
+        if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
+            page = self.process_index(url, page)
+
+    def process_filename(self, fn, nested=False):
+        # process filenames or directories
+        if not os.path.exists(fn):
+            self.warn("Not found: %s", fn)
+            return
+
+        if os.path.isdir(fn) and not nested:
+            path = os.path.realpath(fn)
+            for item in os.listdir(path):
+                self.process_filename(os.path.join(path, item), True)
+
+        dists = distros_for_filename(fn)
+        if dists:
+            self.debug("Found: %s", fn)
+            list(map(self.add, dists))
+
+    def url_ok(self, url, fatal=False):
+        s = URL_SCHEME(url)
+        is_file = s and s.group(1).lower() == 'file'
+        if is_file or self.allows(urllib.parse.urlparse(url)[1]):
+            return True
+        msg = (
+            "\nNote: Bypassing %s (disallowed host; see "
+            "http://bit.ly/2hrImnY for details).\n")
+        if fatal:
+            raise DistutilsError(msg % url)
+        else:
+            self.warn(msg, url)
+
+    def scan_egg_links(self, search_path):
+        dirs = filter(os.path.isdir, search_path)
+        egg_links = (
+            (path, entry)
+            for path in dirs
+            for entry in os.listdir(path)
+            if entry.endswith('.egg-link')
+        )
+        list(itertools.starmap(self.scan_egg_link, egg_links))
+
+    def scan_egg_link(self, path, entry):
+        with open(os.path.join(path, entry)) as raw_lines:
+            # filter non-empty lines
+            lines = list(filter(None, map(str.strip, raw_lines)))
+
+        if len(lines) != 2:
+            # format is not recognized; punt
+            return
+
+        egg_path, setup_path = lines
+
+        for dist in find_distributions(os.path.join(path, egg_path)):
+            dist.location = os.path.join(path, *lines)
+            dist.precedence = SOURCE_DIST
+            self.add(dist)
+
+    def process_index(self, url, page):
+        """Process the contents of a PyPI page"""
+
+        def scan(link):
+            # Process a URL to see if it's for a package page
+            if link.startswith(self.index_url):
+                parts = list(map(
+                    urllib.parse.unquote, link[len(self.index_url):].split('/')
+                ))
+                if len(parts) == 2 and '#' not in parts[1]:
+                    # it's a package page, sanitize and index it
+                    pkg = safe_name(parts[0])
+                    ver = safe_version(parts[1])
+                    self.package_pages.setdefault(pkg.lower(), {})[link] = True
+                    return to_filename(pkg), to_filename(ver)
+            return None, None
+
+        # process an index page into the package-page index
+        for match in HREF.finditer(page):
+            try:
+                scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
+            except ValueError:
+                pass
+
+        pkg, ver = scan(url)  # ensure this page is in the page index
+        if pkg:
+            # process individual package page
+            for new_url in find_external_links(url, page):
+                # Process the found URL
+                base, frag = egg_info_for_url(new_url)
+                if base.endswith('.py') and not frag:
+                    if ver:
+                        new_url += '#egg=%s-%s' % (pkg, ver)
+                    else:
+                        self.need_version_info(url)
+                self.scan_url(new_url)
+
+            return PYPI_MD5.sub(
+                lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
+            )
+        else:
+            return ""  # no sense double-scanning non-package pages
+
+    def need_version_info(self, url):
+        self.scan_all(
+            "Page at %s links to .py file(s) without version info; an index "
+            "scan is required.", url
+        )
+
+    def scan_all(self, msg=None, *args):
+        if self.index_url not in self.fetched_urls:
+            if msg:
+                self.warn(msg, *args)
+            self.info(
+                "Scanning index of all packages (this may take a while)"
+            )
+        self.scan_url(self.index_url)
+
+    def find_packages(self, requirement):
+        self.scan_url(self.index_url + requirement.unsafe_name + '/')
+
+        if not self.package_pages.get(requirement.key):
+            # Fall back to safe version of the name
+            self.scan_url(self.index_url + requirement.project_name + '/')
+
+        if not self.package_pages.get(requirement.key):
+            # We couldn't find the target package, so search the index page too
+            self.not_found_in_index(requirement)
+
+        for url in list(self.package_pages.get(requirement.key, ())):
+            # scan each page that might be related to the desired package
+            self.scan_url(url)
+
+    def obtain(self, requirement, installer=None):
+        self.prescan()
+        self.find_packages(requirement)
+        for dist in self[requirement.key]:
+            if dist in requirement:
+                return dist
+            self.debug("%s does not match %s", requirement, dist)
+        return super(PackageIndex, self).obtain(requirement, installer)
+
+    def check_hash(self, checker, filename, tfp):
+        """
+        checker is a ContentChecker
+        """
+        checker.report(
+            self.debug,
+            "Validating %%s checksum for %s" % filename)
+        if not checker.is_valid():
+            tfp.close()
+            os.unlink(filename)
+            raise DistutilsError(
+                "%s validation failed for %s; "
+                "possible download problem?"
+                % (checker.hash.name, os.path.basename(filename))
+            )
+
+    def add_find_links(self, urls):
+        """Add `urls` to the list that will be prescanned for searches"""
+        for url in urls:
+            if (
+                self.to_scan is None  # if we have already "gone online"
+                or not URL_SCHEME(url)  # or it's a local file/directory
+                or url.startswith('file:')
+                or list(distros_for_url(url))  # or a direct package link
+            ):
+                # then go ahead and process it now
+                self.scan_url(url)
+            else:
+                # otherwise, defer retrieval till later
+                self.to_scan.append(url)
+
+    def prescan(self):
+        """Scan urls scheduled for prescanning (e.g. --find-links)"""
+        if self.to_scan:
+            list(map(self.scan_url, self.to_scan))
+        self.to_scan = None  # from now on, go ahead and process immediately
+
+    def not_found_in_index(self, requirement):
+        if self[requirement.key]:  # we've seen at least one distro
+            meth, msg = self.info, "Couldn't retrieve index page for %r"
+        else:  # no distros seen for this name, might be misspelled
+            meth, msg = (
+                self.warn,
+                "Couldn't find index page for %r (maybe misspelled?)")
+        meth(msg, requirement.unsafe_name)
+        self.scan_all()
+
+    def download(self, spec, tmpdir):
+        """Locate and/or download `spec` to `tmpdir`, returning a local path
+
+        `spec` may be a ``Requirement`` object, or a string containing a URL,
+        an existing local filename, or a project/version requirement spec
+        (i.e. the string form of a ``Requirement`` object).  If it is the URL
+        of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
+        that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
+        automatically created alongside the downloaded file.
+
+        If `spec` is a ``Requirement`` object or a string containing a
+        project/version requirement spec, this method returns the location of
+        a matching distribution (possibly after downloading it to `tmpdir`).
+        If `spec` is a locally existing file or directory name, it is simply
+        returned unchanged.  If `spec` is a URL, it is downloaded to a subpath
+        of `tmpdir`, and the local filename is returned.  Various errors may be
+        raised if a problem occurs during downloading.
+        """
+        if not isinstance(spec, Requirement):
+            scheme = URL_SCHEME(spec)
+            if scheme:
+                # It's a url, download it to tmpdir
+                found = self._download_url(scheme.group(1), spec, tmpdir)
+                base, fragment = egg_info_for_url(spec)
+                if base.endswith('.py'):
+                    found = self.gen_setup(found, fragment, tmpdir)
+                return found
+            elif os.path.exists(spec):
+                # Existing file or directory, just return it
+                return spec
+            else:
+                spec = parse_requirement_arg(spec)
+        return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
+
+    def fetch_distribution(
+            self, requirement, tmpdir, force_scan=False, source=False,
+            develop_ok=False, local_index=None):
+        """Obtain a distribution suitable for fulfilling `requirement`
+
+        `requirement` must be a ``pkg_resources.Requirement`` instance.
+        If necessary, or if the `force_scan` flag is set, the requirement is
+        searched for in the (online) package index as well as the locally
+        installed packages.  If a distribution matching `requirement` is found,
+        the returned distribution's ``location`` is the value you would have
+        gotten from calling the ``download()`` method with the matching
+        distribution's URL or filename.  If no matching distribution is found,
+        ``None`` is returned.
+
+        If the `source` flag is set, only source distributions and source
+        checkout links will be considered.  Unless the `develop_ok` flag is
+        set, development and system eggs (i.e., those using the ``.egg-info``
+        format) will be ignored.
+        """
+        # process a Requirement
+        self.info("Searching for %s", requirement)
+        skipped = {}
+        dist = None
+
+        def find(req, env=None):
+            if env is None:
+                env = self
+            # Find a matching distribution; may be called more than once
+
+            for dist in env[req.key]:
+
+                if dist.precedence == DEVELOP_DIST and not develop_ok:
+                    if dist not in skipped:
+                        self.warn(
+                            "Skipping development or system egg: %s", dist,
+                        )
+                        skipped[dist] = 1
+                    continue
+
+                test = (
+                    dist in req
+                    and (dist.precedence <= SOURCE_DIST or not source)
+                )
+                if test:
+                    loc = self.download(dist.location, tmpdir)
+                    dist.download_location = loc
+                    if os.path.exists(dist.download_location):
+                        return dist
+
+        if force_scan:
+            self.prescan()
+            self.find_packages(requirement)
+            dist = find(requirement)
+
+        if not dist and local_index is not None:
+            dist = find(requirement, local_index)
+
+        if dist is None:
+            if self.to_scan is not None:
+                self.prescan()
+            dist = find(requirement)
+
+        if dist is None and not force_scan:
+            self.find_packages(requirement)
+            dist = find(requirement)
+
+        if dist is None:
+            self.warn(
+                "No local packages or working download links found for %s%s",
+                (source and "a source distribution of " or ""),
+                requirement,
+            )
+        else:
+            self.info("Best match: %s", dist)
+            return dist.clone(location=dist.download_location)
+
+    def fetch(self, requirement, tmpdir, force_scan=False, source=False):
+        """Obtain a file suitable for fulfilling `requirement`
+
+        DEPRECATED; use the ``fetch_distribution()`` method now instead.  For
+        backward compatibility, this routine is identical but returns the
+        ``location`` of the downloaded distribution instead of a distribution
+        object.
+        """
+        dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
+        if dist is not None:
+            return dist.location
+        return None
+
+    def gen_setup(self, filename, fragment, tmpdir):
+        match = EGG_FRAGMENT.match(fragment)
+        dists = match and [
+            d for d in
+            interpret_distro_name(filename, match.group(1), None) if d.version
+        ] or []
+
+        if len(dists) == 1:  # unambiguous ``#egg`` fragment
+            basename = os.path.basename(filename)
+
+            # Make sure the file has been downloaded to the temp dir.
+            if os.path.dirname(filename) != tmpdir:
+                dst = os.path.join(tmpdir, basename)
+                from setuptools.command.easy_install import samefile
+                if not samefile(filename, dst):
+                    shutil.copy2(filename, dst)
+                    filename = dst
+
+            with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
+                file.write(
+                    "from setuptools import setup\n"
+                    "setup(name=%r, version=%r, py_modules=[%r])\n"
+                    % (
+                        dists[0].project_name, dists[0].version,
+                        os.path.splitext(basename)[0]
+                    )
+                )
+            return filename
+
+        elif match:
+            raise DistutilsError(
+                "Can't unambiguously interpret project/version identifier %r; "
+                "any dashes in the name or version should be escaped using "
+                "underscores. %r" % (fragment, dists)
+            )
+        else:
+            raise DistutilsError(
+                "Can't process plain .py files without an '#egg=name-version'"
+                " suffix to enable automatic setup script generation."
+            )
+
+    dl_blocksize = 8192
+
+    def _download_to(self, url, filename):
+        self.info("Downloading %s", url)
+        # Download the file
+        fp = None
+        try:
+            checker = HashChecker.from_url(url)
+            fp = self.open_url(url)
+            if isinstance(fp, urllib.error.HTTPError):
+                raise DistutilsError(
+                    "Can't download %s: %s %s" % (url, fp.code, fp.msg)
+                )
+            headers = fp.info()
+            blocknum = 0
+            bs = self.dl_blocksize
+            size = -1
+            if "content-length" in headers:
+                # Some servers return multiple Content-Length headers :(
+                sizes = get_all_headers(headers, 'Content-Length')
+                size = max(map(int, sizes))
+                self.reporthook(url, filename, blocknum, bs, size)
+            with open(filename, 'wb') as tfp:
+                while True:
+                    block = fp.read(bs)
+                    if block:
+                        checker.feed(block)
+                        tfp.write(block)
+                        blocknum += 1
+                        self.reporthook(url, filename, blocknum, bs, size)
+                    else:
+                        break
+                self.check_hash(checker, filename, tfp)
+            return headers
+        finally:
+            if fp:
+                fp.close()
+
+    def reporthook(self, url, filename, blocknum, blksize, size):
+        pass  # no-op
+
+    def open_url(self, url, warning=None):
+        if url.startswith('file:'):
+            return local_open(url)
+        try:
+            return open_with_auth(url, self.opener)
+        except (ValueError, http_client.InvalidURL) as v:
+            msg = ' '.join([str(arg) for arg in v.args])
+            if warning:
+                self.warn(warning, msg)
+            else:
+                raise DistutilsError('%s %s' % (url, msg))
+        except urllib.error.HTTPError as v:
+            return v
+        except urllib.error.URLError as v:
+            if warning:
+                self.warn(warning, v.reason)
+            else:
+                raise DistutilsError("Download error for %s: %s"
+                                     % (url, v.reason))
+        except http_client.BadStatusLine as v:
+            if warning:
+                self.warn(warning, v.line)
+            else:
+                raise DistutilsError(
+                    '%s returned a bad status line. The server might be '
+                    'down, %s' %
+                    (url, v.line)
+                )
+        except (http_client.HTTPException, socket.error) as v:
+            if warning:
+                self.warn(warning, v)
+            else:
+                raise DistutilsError("Download error for %s: %s"
+                                     % (url, v))
+
+    def _download_url(self, scheme, url, tmpdir):
+        # Determine download filename
+        #
+        name, fragment = egg_info_for_url(url)
+        if name:
+            while '..' in name:
+                name = name.replace('..', '.').replace('\\', '_')
+        else:
+            name = "__downloaded__"  # default if URL has no path contents
+
+        if name.endswith('.egg.zip'):
+            name = name[:-4]  # strip the extra .zip before download
+
+        filename = os.path.join(tmpdir, name)
+
+        # Download the file
+        #
+        if scheme == 'svn' or scheme.startswith('svn+'):
+            return self._download_svn(url, filename)
+        elif scheme == 'git' or scheme.startswith('git+'):
+            return self._download_git(url, filename)
+        elif scheme.startswith('hg+'):
+            return self._download_hg(url, filename)
+        elif scheme == 'file':
+            return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
+        else:
+            self.url_ok(url, True)  # raises error if not allowed
+            return self._attempt_download(url, filename)
+
+    def scan_url(self, url):
+        self.process_url(url, True)
+
+    def _attempt_download(self, url, filename):
+        headers = self._download_to(url, filename)
+        if 'html' in headers.get('content-type', '').lower():
+            return self._download_html(url, headers, filename)
+        else:
+            return filename
+
+    def _download_html(self, url, headers, filename):
+        file = open(filename)
+        for line in file:
+            if line.strip():
+                # Check for a subversion index page
+                if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
+                    # it's a subversion index page:
+                    file.close()
+                    os.unlink(filename)
+                    return self._download_svn(url, filename)
+                break  # not an index page
+        file.close()
+        os.unlink(filename)
+        raise DistutilsError("Unexpected HTML page found at " + url)
+
+    def _download_svn(self, url, filename):
+        warnings.warn("SVN download support is deprecated", UserWarning)
+        url = url.split('#', 1)[0]  # remove any fragment for svn's sake
+        creds = ''
+        if url.lower().startswith('svn:') and '@' in url:
+            scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
+            if not netloc and path.startswith('//') and '/' in path[2:]:
+                netloc, path = path[2:].split('/', 1)
+                auth, host = _splituser(netloc)
+                if auth:
+                    if ':' in auth:
+                        user, pw = auth.split(':', 1)
+                        creds = " --username=%s --password=%s" % (user, pw)
+                    else:
+                        creds = " --username=" + auth
+                    netloc = host
+                    parts = scheme, netloc, url, p, q, f
+                    url = urllib.parse.urlunparse(parts)
+        self.info("Doing subversion checkout from %s to %s", url, filename)
+        os.system("svn checkout%s -q %s %s" % (creds, url, filename))
+        return filename
+
+    @staticmethod
+    def _vcs_split_rev_from_url(url, pop_prefix=False):
+        scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
+
+        scheme = scheme.split('+', 1)[-1]
+
+        # Discard any fragment that ended up in the path
+        path = path.split('#', 1)[0]
+
+        rev = None
+        if '@' in path:
+            path, rev = path.rsplit('@', 1)
+
+        # Also, discard fragment
+        url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
+
+        return url, rev
+
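+    # Illustrative: _vcs_split_rev_from_url('git+https://host/repo@v1.2')
+    #     --> ('https://host/repo', 'v1.2')
+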
+    def _download_git(self, url, filename):
+        filename = filename.split('#', 1)[0]
+        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
+
+        self.info("Doing git clone from %s to %s", url, filename)
+        os.system("git clone --quiet %s %s" % (url, filename))
+
+        if rev is not None:
+            self.info("Checking out %s", rev)
+            os.system("git -C %s checkout --quiet %s" % (
+                filename,
+                rev,
+            ))
+
+        return filename
+
+    def _download_hg(self, url, filename):
+        filename = filename.split('#', 1)[0]
+        url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
+
+        self.info("Doing hg clone from %s to %s", url, filename)
+        os.system("hg clone --quiet %s %s" % (url, filename))
+
+        if rev is not None:
+            self.info("Updating to %s", rev)
+            os.system("hg --cwd %s up -C -r %s -q" % (
+                filename,
+                rev,
+            ))
+
+        return filename
+
+    def debug(self, msg, *args):
+        log.debug(msg, *args)
+
+    def info(self, msg, *args):
+        log.info(msg, *args)
+
+    def warn(self, msg, *args):
+        log.warn(msg, *args)
+
+
+# This pattern matches a character entity reference (a decimal numeric
+# reference, a hexadecimal numeric reference, or a named reference).
+entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
+
+
+def decode_entity(match):
+    what = match.group(0)
+    return unescape(what)
+
+
+def htmldecode(text):
+    """
+    Decode HTML entities in the given text.
+
+    >>> htmldecode(
+    ...     'https://../package_name-0.1.2.tar.gz'
+    ...     '?tokena=A&amp;tokenb=B">package_name-0.1.2.tar.gz')
+    'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz'
+    """
+    return entity_sub(decode_entity, text)
+
+
+def socket_timeout(timeout=15):
+    def _socket_timeout(func):
+        def _socket_timeout(*args, **kwargs):
+            old_timeout = socket.getdefaulttimeout()
+            socket.setdefaulttimeout(timeout)
+            try:
+                return func(*args, **kwargs)
+            finally:
+                socket.setdefaulttimeout(old_timeout)
+
+        return _socket_timeout
+
+    return _socket_timeout
+
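+
+# Illustrative usage sketch (not part of upstream setuptools): applying the
+# decorator above so a download helper cannot hang forever. `fetch_index`
+# is a hypothetical function for demonstration only.
+def _demo_socket_timeout():
+    @socket_timeout(5)
+    def fetch_index(url):
+        # the global socket default timeout is 5s only while this runs;
+        # the previous value is restored afterwards
+        return urllib.request.urlopen(url).read()
+
+    return fetch_index
+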
+
+def _encode_auth(auth):
+    """
+    A function compatible with Python 2.3-3.3 that will encode
+    auth from a URL suitable for an HTTP header.
+    >>> str(_encode_auth('username%3Apassword'))
+    'dXNlcm5hbWU6cGFzc3dvcmQ='
+
+    Long auth strings should not cause a newline to be inserted.
+    >>> long_auth = 'username:' + 'password'*10
+    >>> chr(10) in str(_encode_auth(long_auth))
+    False
+    """
+    auth_s = urllib.parse.unquote(auth)
+    # convert to bytes
+    auth_bytes = auth_s.encode()
+    encoded_bytes = base64.b64encode(auth_bytes)
+    # convert back to a string
+    encoded = encoded_bytes.decode()
+    # strip any newline that base64 encoding may have inserted
+    return encoded.replace('\n', '')
+
+
+class Credential:
+    """
+    A username/password pair. Use like a namedtuple.
+    """
+
+    def __init__(self, username, password):
+        self.username = username
+        self.password = password
+
+    def __iter__(self):
+        yield self.username
+        yield self.password
+
+    def __str__(self):
+        return '%(username)s:%(password)s' % vars(self)
+
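+
+# Illustrative usage sketch (not part of upstream setuptools): Credential
+# unpacks like a 2-tuple and renders as "username:password". Values here
+# are hypothetical.
+def _demo_credential():
+    cred = Credential('alice', 'secret')
+    username, password = cred          # iterable, namedtuple-style
+    assert str(cred) == 'alice:secret'
+    return username, password
+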
+
+class PyPIConfig(configparser.RawConfigParser):
+    def __init__(self):
+        """
+        Load from ~/.pypirc
+        """
+        defaults = dict.fromkeys(['username', 'password', 'repository'], '')
+        configparser.RawConfigParser.__init__(self, defaults)
+
+        rc = os.path.join(os.path.expanduser('~'), '.pypirc')
+        if os.path.exists(rc):
+            self.read(rc)
+
+    @property
+    def creds_by_repository(self):
+        sections_with_repositories = [
+            section for section in self.sections()
+            if self.get(section, 'repository').strip()
+        ]
+
+        return dict(map(self._get_repo_cred, sections_with_repositories))
+
+    def _get_repo_cred(self, section):
+        repo = self.get(section, 'repository').strip()
+        return repo, Credential(
+            self.get(section, 'username').strip(),
+            self.get(section, 'password').strip(),
+        )
+
+    def find_credential(self, url):
+        """
+        If the URL indicated appears to be a repository defined in this
+        config, return the credential for that repository.
+        """
+        for repository, cred in self.creds_by_repository.items():
+            if url.startswith(repository):
+                return cred
+
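+
+# Illustrative usage sketch (not part of upstream setuptools). Given a
+# ~/.pypirc section such as:
+#
+#     [internal]
+#     repository = https://pypi.example.com/simple
+#     username = alice
+#     password = secret
+#
+# find_credential() returns the Credential whose repository URL prefixes
+# the requested URL. Section name, URL, and values are hypothetical.
+def _demo_find_credential():
+    cred = PyPIConfig().find_credential(
+        'https://pypi.example.com/simple/some-package/')
+    return str(cred) if cred else None
+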
+
+def open_with_auth(url, opener=urllib.request.urlopen):
+    """Open a urllib2 request, handling HTTP authentication"""
+
+    parsed = urllib.parse.urlparse(url)
+    scheme, netloc, path, params, query, frag = parsed
+
+    # Double scheme does not raise on Mac OS X as revealed by a
+    # failing test. We would expect "nonnumeric port". Refs #20.
+    if netloc.endswith(':'):
+        raise http_client.InvalidURL("nonnumeric port: ''")
+
+    if scheme in ('http', 'https'):
+        auth, address = _splituser(netloc)
+    else:
+        auth = None
+
+    if not auth:
+        cred = PyPIConfig().find_credential(url)
+        if cred:
+            auth = str(cred)
+            info = cred.username, url
+            log.info('Authenticating as %s for %s (from .pypirc)', *info)
+
+    if auth:
+        auth = "Basic " + _encode_auth(auth)
+        parts = scheme, address, path, params, query, frag
+        new_url = urllib.parse.urlunparse(parts)
+        request = urllib.request.Request(new_url)
+        request.add_header("Authorization", auth)
+    else:
+        request = urllib.request.Request(url)
+
+    request.add_header('User-Agent', user_agent)
+    fp = opener(request)
+
+    if auth:
+        # Put authentication info back into request URL if same host,
+        # so that links found on the page will work
+        s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
+        if s2 == scheme and h2 == address:
+            parts = s2, netloc, path2, param2, query2, frag2
+            fp.url = urllib.parse.urlunparse(parts)
+
+    return fp
+
+
+# copy of urllib.parse._splituser from Python 3.8
+def _splituser(host):
+    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
+    user, delim, host = host.rpartition('@')
+    return (user if delim else None), host
+
+
+# adding a timeout to avoid freezing package_index
+open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
+
+
+def fix_sf_url(url):
+    return url  # backward compatibility
+
+
+def local_open(url):
+    """Read a local path, with special support for directories"""
+    scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
+    filename = urllib.request.url2pathname(path)
+    if os.path.isfile(filename):
+        return urllib.request.urlopen(url)
+    elif path.endswith('/') and os.path.isdir(filename):
+        files = []
+        for f in os.listdir(filename):
+            filepath = os.path.join(filename, f)
+            if f == 'index.html':
+                with open(filepath, 'r') as fp:
+                    body = fp.read()
+                break
+            elif os.path.isdir(filepath):
+                f += '/'
+            files.append('<a href="{name}">{name}</a>'.format(name=f))
+        else:
+            tmpl = (
+                "<html><head><title>{url}</title>"
+                "</head><body>{files}</body></html>")
+            body = tmpl.format(url=url, files='\n'.join(files))
+        status, message = 200, "OK"
+    else:
+        status, message, body = 404, "Path not found", "Not found"
+
+    headers = {'content-type': 'text/html'}
+    body_stream = six.StringIO(body)
+    return urllib.error.HTTPError(url, status, message, headers, body_stream)
diff --git a/lib/python3.7/site-packages/setuptools/pep425tags.py b/lib/python3.7/site-packages/setuptools/pep425tags.py
new file mode 100644
index 0000000000000000000000000000000000000000..48745a29027400f1c921b52b7d492a26d4e5d5a2
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/pep425tags.py
@@ -0,0 +1,319 @@
+# This file originally from pip:
+# https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/pep425tags.py
+"""Generate and work with PEP 425 Compatibility Tags."""
+from __future__ import absolute_import
+
+import distutils.util
+from distutils import log
+import platform
+import re
+import sys
+import sysconfig
+import warnings
+from collections import OrderedDict
+
+from .extern import six
+
+from . import glibc
+
+_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)')
+
+
+def get_config_var(var):
+    try:
+        return sysconfig.get_config_var(var)
+    except IOError as e:  # Issue #1074
+        warnings.warn("{}".format(e), RuntimeWarning)
+        return None
+
+
+def get_abbr_impl():
+    """Return abbreviated implementation name."""
+    if hasattr(sys, 'pypy_version_info'):
+        pyimpl = 'pp'
+    elif sys.platform.startswith('java'):
+        pyimpl = 'jy'
+    elif sys.platform == 'cli':
+        pyimpl = 'ip'
+    else:
+        pyimpl = 'cp'
+    return pyimpl
+
+
+def get_impl_ver():
+    """Return implementation version."""
+    impl_ver = get_config_var("py_version_nodot")
+    if not impl_ver or get_abbr_impl() == 'pp':
+        impl_ver = ''.join(map(str, get_impl_version_info()))
+    return impl_ver
+
+
+def get_impl_version_info():
+    """Return sys.version_info-like tuple for use in decrementing the minor
+    version."""
+    if get_abbr_impl() == 'pp':
+        # as per https://github.com/pypa/pip/issues/2882
+        return (sys.version_info[0], sys.pypy_version_info.major,
+                sys.pypy_version_info.minor)
+    else:
+        return sys.version_info[0], sys.version_info[1]
+
+
+def get_impl_tag():
+    """
+    Returns the Tag for this specific implementation.
+    """
+    return "{}{}".format(get_abbr_impl(), get_impl_ver())
+
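+
+# Illustrative sketch (not part of upstream code): on CPython 3.7 this
+# yields 'cp37' ('cp' from get_abbr_impl(), '37' from get_impl_ver()).
+def _demo_impl_tag():
+    tag = get_impl_tag()
+    assert tag.startswith(('cp', 'pp', 'jy', 'ip'))
+    return tag
+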
+
+def get_flag(var, fallback, expected=True, warn=True):
+    """Use a fallback method for determining SOABI flags if the needed config
+    var is unset or unavailable."""
+    val = get_config_var(var)
+    if val is None:
+        if warn:
+            log.debug("Config variable '%s' is unset, Python ABI tag may "
+                      "be incorrect", var)
+        return fallback()
+    return val == expected
+
+
+def get_abi_tag():
+    """Return the ABI tag based on SOABI (if available) or emulate SOABI
+    (CPython 2, PyPy)."""
+    soabi = get_config_var('SOABI')
+    impl = get_abbr_impl()
+    if not soabi and impl in {'cp', 'pp'} and hasattr(sys, 'maxunicode'):
+        d = ''
+        m = ''
+        u = ''
+        if get_flag('Py_DEBUG',
+                    lambda: hasattr(sys, 'gettotalrefcount'),
+                    warn=(impl == 'cp')):
+            d = 'd'
+        if get_flag('WITH_PYMALLOC',
+                    lambda: impl == 'cp',
+                    warn=(impl == 'cp')):
+            m = 'm'
+        if get_flag('Py_UNICODE_SIZE',
+                    lambda: sys.maxunicode == 0x10ffff,
+                    expected=4,
+                    warn=(impl == 'cp' and
+                          six.PY2)) \
+                and six.PY2:
+            u = 'u'
+        abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
+    elif soabi and soabi.startswith('cpython-'):
+        abi = 'cp' + soabi.split('-')[1]
+    elif soabi:
+        abi = soabi.replace('.', '_').replace('-', '_')
+    else:
+        abi = None
+    return abi
+
+
+def _is_running_32bit():
+    return sys.maxsize == 2147483647
+
+
+def get_platform():
+    """Return our platform name 'win32', 'linux_x86_64'"""
+    if sys.platform == 'darwin':
+        # distutils.util.get_platform() returns the release based on the value
+        # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may
+        # be significantly older than the user's current machine.
+        release, _, machine = platform.mac_ver()
+        split_ver = release.split('.')
+
+        if machine == "x86_64" and _is_running_32bit():
+            machine = "i386"
+        elif machine == "ppc64" and _is_running_32bit():
+            machine = "ppc"
+
+        return 'macosx_{}_{}_{}'.format(split_ver[0], split_ver[1], machine)
+
+    # XXX remove distutils dependency
+    result = distutils.util.get_platform().replace('.', '_').replace('-', '_')
+    if result == "linux_x86_64" and _is_running_32bit():
+        # 32 bit Python program (running on a 64 bit Linux): pip should only
+        # install and run 32 bit compiled extensions in that case.
+        result = "linux_i686"
+
+    return result
+
+
+def is_manylinux1_compatible():
+    # Only Linux, and only x86-64 / i686
+    if get_platform() not in {"linux_x86_64", "linux_i686"}:
+        return False
+
+    # Check for presence of _manylinux module
+    try:
+        import _manylinux
+        return bool(_manylinux.manylinux1_compatible)
+    except (ImportError, AttributeError):
+        # Fall through to heuristic check below
+        pass
+
+    # Check glibc version. CentOS 5 uses glibc 2.5.
+    return glibc.have_compatible_glibc(2, 5)
+
+
+def get_darwin_arches(major, minor, machine):
+    """Return a list of supported arches (including group arches) for
+    the given major, minor and machine architecture of a macOS machine.
+    """
+    arches = []
+
+    def _supports_arch(major, minor, arch):
+        # Looking at the application support for macOS versions in the chart
+        # provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears
+        # our timeline looks roughly like:
+        #
+        # 10.0 - Introduces ppc support.
+        # 10.4 - Introduces ppc64, i386, and x86_64 support, however the ppc64
+        #        and x86_64 support is CLI only, and cannot be used for GUI
+        #        applications.
+        # 10.5 - Extends ppc64 and x86_64 support to cover GUI applications.
+        # 10.6 - Drops support for ppc64
+        # 10.7 - Drops support for ppc
+        #
+        # Given that we do not know if we're installing a CLI or a GUI
+        # application, we must be conservative and assume it might be a GUI
+        # application and behave as if ppc64 and x86_64 support did not occur
+        # until 10.5.
+        #
+        # Note: The above information is taken from the "Application support"
+        #       column in the chart not the "Processor support" since I believe
+        #       that we care about what instruction sets an application can use
+        #       not which processors the OS supports.
+        if arch == 'ppc':
+            return (major, minor) <= (10, 5)
+        if arch == 'ppc64':
+            return (major, minor) == (10, 5)
+        if arch == 'i386':
+            return (major, minor) >= (10, 4)
+        if arch == 'x86_64':
+            return (major, minor) >= (10, 5)
+        if arch in groups:
+            for garch in groups[arch]:
+                if _supports_arch(major, minor, garch):
+                    return True
+        return False
+
+    groups = OrderedDict([
+        ("fat", ("i386", "ppc")),
+        ("intel", ("x86_64", "i386")),
+        ("fat64", ("x86_64", "ppc64")),
+        ("fat32", ("x86_64", "i386", "ppc")),
+    ])
+
+    if _supports_arch(major, minor, machine):
+        arches.append(machine)
+
+    for garch in groups:
+        if machine in groups[garch] and _supports_arch(major, minor, garch):
+            arches.append(garch)
+
+    arches.append('universal')
+
+    return arches
+
+
+def get_supported(versions=None, noarch=False, platform=None,
+                  impl=None, abi=None):
+    """Return a list of supported tags for each version specified in
+    `versions`.
+
+    :param versions: a list of string versions, of the form ["33", "32"],
+        or None. The first version will be assumed to support our ABI.
+    :param platform: specify the exact platform you want valid
+        tags for, or None. If None, use the local system platform.
+    :param impl: specify the exact implementation you want valid
+        tags for, or None. If None, use the local interpreter impl.
+    :param abi: specify the exact abi you want valid
+        tags for, or None. If None, use the local interpreter abi.
+    """
+    supported = []
+
+    # Versions must be given with respect to the preference
+    if versions is None:
+        versions = []
+        version_info = get_impl_version_info()
+        major = version_info[:-1]
+        # Support all previous minor Python versions.
+        for minor in range(version_info[-1], -1, -1):
+            versions.append(''.join(map(str, major + (minor,))))
+
+    impl = impl or get_abbr_impl()
+
+    abis = []
+
+    abi = abi or get_abi_tag()
+    if abi:
+        abis[0:0] = [abi]
+
+    abi3s = set()
+    import imp
+    for suffix in imp.get_suffixes():
+        if suffix[0].startswith('.abi'):
+            abi3s.add(suffix[0].split('.', 2)[1])
+
+    abis.extend(sorted(list(abi3s)))
+
+    abis.append('none')
+
+    if not noarch:
+        arch = platform or get_platform()
+        if arch.startswith('macosx'):
+            # support macosx-10.6-intel on macosx-10.9-x86_64
+            match = _osx_arch_pat.match(arch)
+            if match:
+                name, major, minor, actual_arch = match.groups()
+                tpl = '{}_{}_%i_%s'.format(name, major)
+                arches = []
+                for m in reversed(range(int(minor) + 1)):
+                    for a in get_darwin_arches(int(major), m, actual_arch):
+                        arches.append(tpl % (m, a))
+            else:
+                # arch pattern didn't match (?!)
+                arches = [arch]
+        elif platform is None and is_manylinux1_compatible():
+            arches = [arch.replace('linux', 'manylinux1'), arch]
+        else:
+            arches = [arch]
+
+        # Current version, current API (built specifically for our Python):
+        for abi in abis:
+            for arch in arches:
+                supported.append(('%s%s' % (impl, versions[0]), abi, arch))
+
+        # abi3 modules compatible with older version of Python
+        for version in versions[1:]:
+            # abi3 was introduced in Python 3.2
+            if version in {'31', '30'}:
+                break
+            for abi in abi3s:   # empty set if not Python 3
+                for arch in arches:
+                    supported.append(("%s%s" % (impl, version), abi, arch))
+
+        # Has binaries, does not use the Python API:
+        for arch in arches:
+            supported.append(('py%s' % (versions[0][0]), 'none', arch))
+
+    # No abi / arch, but requires our implementation:
+    supported.append(('%s%s' % (impl, versions[0]), 'none', 'any'))
+    # Tagged specifically as being cross-version compatible
+    # (with just the major version specified)
+    supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
+
+    # No abi / arch, generic Python
+    for i, version in enumerate(versions):
+        supported.append(('py%s' % (version,), 'none', 'any'))
+        if i == 0:
+            supported.append(('py%s' % (version[0]), 'none', 'any'))
+
+    return supported
+
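+
+# Illustrative sketch (not part of upstream code): the first tag returned
+# is the most specific one. On CPython 3.7 on 64-bit Linux it is typically
+# ('cp37', 'cp37m', 'manylinux1_x86_64'); exact values vary per build.
+def _demo_get_supported():
+    tags = get_supported()
+    return tags[0]
+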
+
+implementation_tag = get_impl_tag()
diff --git a/lib/python3.7/site-packages/setuptools/py27compat.py b/lib/python3.7/site-packages/setuptools/py27compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..2985011b92f7449e784f5f056dd24ffd991d24bd
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/py27compat.py
@@ -0,0 +1,28 @@
+"""
+Compatibility Support for Python 2.7 and earlier
+"""
+
+import platform
+
+from setuptools.extern import six
+
+
+def get_all_headers(message, key):
+    """
+    Given an HTTPMessage, return all headers matching a given key.
+    """
+    return message.get_all(key)
+
+
+if six.PY2:
+    def get_all_headers(message, key):
+        return message.getheaders(key)
+
+
+linux_py2_ascii = (
+    platform.system() == 'Linux' and
+    six.PY2
+)
+
+rmtree_safe = str if linux_py2_ascii else lambda x: x
+"""Workaround for http://bugs.python.org/issue24672"""
diff --git a/lib/python3.7/site-packages/setuptools/py31compat.py b/lib/python3.7/site-packages/setuptools/py31compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1da7ee2a2c56e46e09665d98ba1bc5bfedd2c3e
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/py31compat.py
@@ -0,0 +1,32 @@
+__all__ = []
+
+__metaclass__ = type
+
+
+try:
+    # Python >=3.2
+    from tempfile import TemporaryDirectory
+except ImportError:
+    import shutil
+    import tempfile
+
+    class TemporaryDirectory:
+        """
+        Very simple temporary directory context manager.
+        Will try to delete afterward, but will also ignore OS and similar
+        errors on deletion.
+        """
+
+        def __init__(self, **kwargs):
+            self.name = None  # Handle mkdtemp raising an exception
+            self.name = tempfile.mkdtemp(**kwargs)
+
+        def __enter__(self):
+            return self.name
+
+        def __exit__(self, exctype, excvalue, exctrace):
+            try:
+                shutil.rmtree(self.name, True)
+            except OSError:  # removal errors are not the only possible
+                pass
+            self.name = None
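+
+
+# Illustrative usage sketch (not part of upstream code): the stdlib class
+# and the fallback above share the same context-manager protocol.
+def _demo_temporary_directory():
+    import os
+    with TemporaryDirectory() as tmp:
+        scratch = os.path.join(tmp, 'scratch.txt')
+        open(scratch, 'w').close()
+    # the directory has been removed (best-effort) on exit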
diff --git a/lib/python3.7/site-packages/setuptools/py33compat.py b/lib/python3.7/site-packages/setuptools/py33compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..87cf53983c3bf1a39c5186ce1c67f6ac7f3b64a0
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/py33compat.py
@@ -0,0 +1,55 @@
+import dis
+import array
+import collections
+
+try:
+    import html
+except ImportError:
+    html = None
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import html_parser
+
+__metaclass__ = type
+
+OpArg = collections.namedtuple('OpArg', 'opcode arg')
+
+
+class Bytecode_compat:
+    def __init__(self, code):
+        self.code = code
+
+    def __iter__(self):
+        """Yield '(op,arg)' pair for each operation in code object 'code'"""
+
+        bytes = array.array('b', self.code.co_code)
+        eof = len(self.code.co_code)
+
+        ptr = 0
+        extended_arg = 0
+
+        while ptr < eof:
+
+            op = bytes[ptr]
+
+            if op >= dis.HAVE_ARGUMENT:
+
+                arg = bytes[ptr + 1] + bytes[ptr + 2] * 256 + extended_arg
+                ptr += 3
+
+                if op == dis.EXTENDED_ARG:
+                    long_type = six.integer_types[-1]
+                    extended_arg = arg * long_type(65536)
+                    continue
+
+            else:
+                arg = None
+                ptr += 1
+
+            yield OpArg(op, arg)
+
+
+Bytecode = getattr(dis, 'Bytecode', Bytecode_compat)
+
+
+unescape = getattr(html, 'unescape', html_parser.HTMLParser().unescape)
diff --git a/lib/python3.7/site-packages/setuptools/sandbox.py b/lib/python3.7/site-packages/setuptools/sandbox.py
new file mode 100644
index 0000000000000000000000000000000000000000..685f3f72e3611a5fa99c999e233ffd179c431a6d
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/sandbox.py
@@ -0,0 +1,491 @@
+import os
+import sys
+import tempfile
+import operator
+import functools
+import itertools
+import re
+import contextlib
+import pickle
+import textwrap
+
+from setuptools.extern import six
+from setuptools.extern.six.moves import builtins, map
+
+import pkg_resources.py31compat
+
+if sys.platform.startswith('java'):
+    import org.python.modules.posix.PosixModule as _os
+else:
+    _os = sys.modules[os.name]
+try:
+    _file = file
+except NameError:
+    _file = None
+_open = open
+from distutils.errors import DistutilsError
+from pkg_resources import working_set
+
+
+__all__ = [
+    "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
+]
+
+
+def _execfile(filename, globals, locals=None):
+    """
+    Python 3 implementation of execfile.
+    """
+    mode = 'rb'
+    with open(filename, mode) as stream:
+        script = stream.read()
+    if locals is None:
+        locals = globals
+    code = compile(script, filename, 'exec')
+    exec(code, globals, locals)
+
+
+@contextlib.contextmanager
+def save_argv(repl=None):
+    saved = sys.argv[:]
+    if repl is not None:
+        sys.argv[:] = repl
+    try:
+        yield saved
+    finally:
+        sys.argv[:] = saved
+
+
+@contextlib.contextmanager
+def save_path():
+    saved = sys.path[:]
+    try:
+        yield saved
+    finally:
+        sys.path[:] = saved
+
+
+@contextlib.contextmanager
+def override_temp(replacement):
+    """
+    Monkey-patch tempfile.tempdir with replacement, ensuring it exists
+    """
+    pkg_resources.py31compat.makedirs(replacement, exist_ok=True)
+
+    saved = tempfile.tempdir
+
+    tempfile.tempdir = replacement
+
+    try:
+        yield
+    finally:
+        tempfile.tempdir = saved
+
+
+@contextlib.contextmanager
+def pushd(target):
+    saved = os.getcwd()
+    os.chdir(target)
+    try:
+        yield saved
+    finally:
+        os.chdir(saved)
+
+
+class UnpickleableException(Exception):
+    """
+    An exception representing another Exception that could not be pickled.
+    """
+
+    @staticmethod
+    def dump(type, exc):
+        """
+        Always return a dumped (pickled) type and exc. If exc can't be pickled,
+        wrap it in UnpickleableException first.
+        """
+        try:
+            return pickle.dumps(type), pickle.dumps(exc)
+        except Exception:
+            # get UnpickleableException inside the sandbox
+            from setuptools.sandbox import UnpickleableException as cls
+            return cls.dump(cls, cls(repr(exc)))
+
+
+class ExceptionSaver:
+    """
+    A Context Manager that will save an exception, serialized, and restore it
+    later.
+    """
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, exc, tb):
+        if not exc:
+            return
+
+        # dump the exception
+        self._saved = UnpickleableException.dump(type, exc)
+        self._tb = tb
+
+        # suppress the exception
+        return True
+
+    def resume(self):
+        "restore and re-raise any exception"
+
+        if '_saved' not in vars(self):
+            return
+
+        type, exc = map(pickle.loads, self._saved)
+        six.reraise(type, exc, self._tb)
+
+
+@contextlib.contextmanager
+def save_modules():
+    """
+    Context in which imported modules are saved.
+
+    Translates exceptions internal to the context into the equivalent exception
+    outside the context.
+    """
+    saved = sys.modules.copy()
+    with ExceptionSaver() as saved_exc:
+        yield saved
+
+    sys.modules.update(saved)
+    # remove any modules imported since
+    del_modules = (
+        mod_name for mod_name in sys.modules
+        if mod_name not in saved
+        # exclude any encodings modules. See #285
+        and not mod_name.startswith('encodings.')
+    )
+    _clear_modules(del_modules)
+
+    saved_exc.resume()
+
+
+def _clear_modules(module_names):
+    for mod_name in list(module_names):
+        del sys.modules[mod_name]
+
+
+@contextlib.contextmanager
+def save_pkg_resources_state():
+    saved = pkg_resources.__getstate__()
+    try:
+        yield saved
+    finally:
+        pkg_resources.__setstate__(saved)
+
+
+@contextlib.contextmanager
+def setup_context(setup_dir):
+    temp_dir = os.path.join(setup_dir, 'temp')
+    with save_pkg_resources_state():
+        with save_modules():
+            hide_setuptools()
+            with save_path():
+                with save_argv():
+                    with override_temp(temp_dir):
+                        with pushd(setup_dir):
+                            # ensure setuptools commands are available
+                            __import__('setuptools')
+                            yield
+
+
+def _needs_hiding(mod_name):
+    """
+    >>> _needs_hiding('setuptools')
+    True
+    >>> _needs_hiding('pkg_resources')
+    True
+    >>> _needs_hiding('setuptools_plugin')
+    False
+    >>> _needs_hiding('setuptools.__init__')
+    True
+    >>> _needs_hiding('distutils')
+    True
+    >>> _needs_hiding('os')
+    False
+    >>> _needs_hiding('Cython')
+    True
+    """
+    pattern = re.compile(r'(setuptools|pkg_resources|distutils|Cython)(\.|$)')
+    return bool(pattern.match(mod_name))
+
+
+def hide_setuptools():
+    """
+    Remove references to setuptools' modules from sys.modules to allow the
+    invocation to import the most appropriate setuptools. This technique is
+    necessary to avoid issues such as #315 where setuptools upgrading itself
+    would fail to find a function declared in the metadata.
+    """
+    modules = filter(_needs_hiding, sys.modules)
+    _clear_modules(modules)
+
+
+def run_setup(setup_script, args):
+    """Run a distutils setup script, sandboxed in its directory"""
+    setup_dir = os.path.abspath(os.path.dirname(setup_script))
+    with setup_context(setup_dir):
+        try:
+            sys.argv[:] = [setup_script] + list(args)
+            sys.path.insert(0, setup_dir)
+            # reset to include setup dir, w/clean callback list
+            working_set.__init__()
+            working_set.callbacks.append(lambda dist: dist.activate())
+
+            # __file__ should be a byte string on Python 2 (#712)
+            dunder_file = (
+                setup_script
+                if isinstance(setup_script, str) else
+                setup_script.encode(sys.getfilesystemencoding())
+            )
+
+            with DirectorySandbox(setup_dir):
+                ns = dict(__file__=dunder_file, __name__='__main__')
+                _execfile(setup_script, ns)
+        except SystemExit as v:
+            if v.args and v.args[0]:
+                raise
+            # Normal exit, just return
+
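+
+# Illustrative usage sketch (not part of upstream setuptools): running a
+# hypothetical setup.py with its arguments, sandboxed in its directory.
+# The path is made up for demonstration only.
+def _demo_run_setup():
+    run_setup('/tmp/example-pkg/setup.py', ['--name'])
+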
+
+class AbstractSandbox:
+    """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
+
+    _active = False
+
+    def __init__(self):
+        self._attrs = [
+            name for name in dir(_os)
+            if not name.startswith('_') and hasattr(self, name)
+        ]
+
+    def _copy(self, source):
+        for name in self._attrs:
+            setattr(os, name, getattr(source, name))
+
+    def __enter__(self):
+        self._copy(self)
+        if _file:
+            builtins.file = self._file
+        builtins.open = self._open
+        self._active = True
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self._active = False
+        if _file:
+            builtins.file = _file
+        builtins.open = _open
+        self._copy(_os)
+
+    def run(self, func):
+        """Run 'func' under os sandboxing"""
+        with self:
+            return func()
+
+    def _mk_dual_path_wrapper(name):
+        original = getattr(_os, name)
+
+        def wrap(self, src, dst, *args, **kw):
+            if self._active:
+                src, dst = self._remap_pair(name, src, dst, *args, **kw)
+            return original(src, dst, *args, **kw)
+
+        return wrap
+
+    for name in ["rename", "link", "symlink"]:
+        if hasattr(_os, name):
+            locals()[name] = _mk_dual_path_wrapper(name)
+
+    def _mk_single_path_wrapper(name, original=None):
+        original = original or getattr(_os, name)
+
+        def wrap(self, path, *args, **kw):
+            if self._active:
+                path = self._remap_input(name, path, *args, **kw)
+            return original(path, *args, **kw)
+
+        return wrap
+
+    if _file:
+        _file = _mk_single_path_wrapper('file', _file)
+    _open = _mk_single_path_wrapper('open', _open)
+    for name in [
+        "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
+        "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
+        "startfile", "mkfifo", "mknod", "pathconf", "access"
+    ]:
+        if hasattr(_os, name):
+            locals()[name] = _mk_single_path_wrapper(name)
+
+    def _mk_single_with_return(name):
+        original = getattr(_os, name)
+
+        def wrap(self, path, *args, **kw):
+            if self._active:
+                path = self._remap_input(name, path, *args, **kw)
+                return self._remap_output(name, original(path, *args, **kw))
+            return original(path, *args, **kw)
+
+        return wrap
+
+    for name in ['readlink', 'tempnam']:
+        if hasattr(_os, name):
+            locals()[name] = _mk_single_with_return(name)
+
+    def _mk_query(name):
+        original = getattr(_os, name)
+
+        def wrap(self, *args, **kw):
+            retval = original(*args, **kw)
+            if self._active:
+                return self._remap_output(name, retval)
+            return retval
+
+        return wrap
+
+    for name in ['getcwd', 'tmpnam']:
+        if hasattr(_os, name):
+            locals()[name] = _mk_query(name)
+
+    def _validate_path(self, path):
+        """Called to remap or validate any path, whether input or output"""
+        return path
+
+    def _remap_input(self, operation, path, *args, **kw):
+        """Called for path inputs"""
+        return self._validate_path(path)
+
+    def _remap_output(self, operation, path):
+        """Called for path outputs"""
+        return self._validate_path(path)
+
+    def _remap_pair(self, operation, src, dst, *args, **kw):
+        """Called for path pairs like rename, link, and symlink operations"""
+        return (
+            self._remap_input(operation + '-from', src, *args, **kw),
+            self._remap_input(operation + '-to', dst, *args, **kw)
+        )
+
+
+if hasattr(os, 'devnull'):
+    _EXCEPTIONS = [os.devnull,]
+else:
+    _EXCEPTIONS = []
+
+
+class DirectorySandbox(AbstractSandbox):
+    """Restrict operations to a single subdirectory - pseudo-chroot"""
+
+    write_ops = dict.fromkeys([
+        "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
+        "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
+    ])
+
+    _exception_patterns = [
+        # Allow lib2to3 to attempt to save a pickled grammar object (#121)
+        r'.*lib2to3.*\.pickle$',
+    ]
+    "exempt writing to paths that match the pattern"
+
+    def __init__(self, sandbox, exceptions=_EXCEPTIONS):
+        self._sandbox = os.path.normcase(os.path.realpath(sandbox))
+        self._prefix = os.path.join(self._sandbox, '')
+        self._exceptions = [
+            os.path.normcase(os.path.realpath(path))
+            for path in exceptions
+        ]
+        AbstractSandbox.__init__(self)
+
+    def _violation(self, operation, *args, **kw):
+        from setuptools.sandbox import SandboxViolation
+        raise SandboxViolation(operation, args, kw)
+
+    if _file:
+
+        def _file(self, path, mode='r', *args, **kw):
+            if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
+                self._violation("file", path, mode, *args, **kw)
+            return _file(path, mode, *args, **kw)
+
+    def _open(self, path, mode='r', *args, **kw):
+        if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
+            self._violation("open", path, mode, *args, **kw)
+        return _open(path, mode, *args, **kw)
+
+    def tmpnam(self):
+        self._violation("tmpnam")
+
+    def _ok(self, path):
+        active = self._active
+        try:
+            self._active = False
+            realpath = os.path.normcase(os.path.realpath(path))
+            return (
+                self._exempted(realpath)
+                or realpath == self._sandbox
+                or realpath.startswith(self._prefix)
+            )
+        finally:
+            self._active = active
+
+    def _exempted(self, filepath):
+        start_matches = (
+            filepath.startswith(exception)
+            for exception in self._exceptions
+        )
+        pattern_matches = (
+            re.match(pattern, filepath)
+            for pattern in self._exception_patterns
+        )
+        candidates = itertools.chain(start_matches, pattern_matches)
+        return any(candidates)
+
+    def _remap_input(self, operation, path, *args, **kw):
+        """Called for path inputs"""
+        if operation in self.write_ops and not self._ok(path):
+            self._violation(operation, os.path.realpath(path), *args, **kw)
+        return path
+
+    def _remap_pair(self, operation, src, dst, *args, **kw):
+        """Called for path pairs like rename, link, and symlink operations"""
+        if not self._ok(src) or not self._ok(dst):
+            self._violation(operation, src, dst, *args, **kw)
+        return (src, dst)
+
+    def open(self, file, flags, mode=0o777, *args, **kw):
+        """Called for low-level os.open()"""
+        if flags & WRITE_FLAGS and not self._ok(file):
+            self._violation("os.open", file, flags, mode, *args, **kw)
+        return _os.open(file, flags, mode, *args, **kw)
+
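+
+# Illustrative usage sketch (not part of upstream setuptools): inside the
+# sandbox, reads are allowed anywhere but writes outside the directory
+# raise SandboxViolation. Paths are hypothetical.
+def _demo_directory_sandbox():
+    with DirectorySandbox('/tmp/build-area'):
+        open('/tmp/build-area/ok.txt', 'w').close()    # allowed
+        try:
+            open('/etc/forbidden', 'w')                # blocked
+        except SandboxViolation:
+            pass
+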
+
+WRITE_FLAGS = functools.reduce(
+    operator.or_, [getattr(_os, a, 0) for a in
+        "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
+)
+
+
+class SandboxViolation(DistutilsError):
+    """A setup script attempted to modify the filesystem outside the sandbox"""
+
+    tmpl = textwrap.dedent("""
+        SandboxViolation: {cmd}{args!r} {kwargs}
+
+        The package setup script has attempted to modify files on your system
+        that are not within the EasyInstall build area, and has been aborted.
+
+        This package cannot be safely installed by EasyInstall, and may not
+        support alternate installation locations even if you run its setup
+        script by hand.  Please inform the package's author and the EasyInstall
+        maintainers to find out if a fix or workaround is available.
+        """).lstrip()
+
+    def __str__(self):
+        cmd, args, kwargs = self.args
+        return self.tmpl.format(**locals())
diff --git a/lib/python3.7/site-packages/setuptools/script (dev).tmpl b/lib/python3.7/site-packages/setuptools/script (dev).tmpl
new file mode 100644
index 0000000000000000000000000000000000000000..39a24b04888e79df51e2237577b303a2f901be63
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/script (dev).tmpl	
@@ -0,0 +1,6 @@
+# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r
+__requires__ = %(spec)r
+__import__('pkg_resources').require(%(spec)r)
+__file__ = %(dev_path)r
+with open(__file__) as f:
+    exec(compile(f.read(), __file__, 'exec'))
diff --git a/lib/python3.7/site-packages/setuptools/script.tmpl b/lib/python3.7/site-packages/setuptools/script.tmpl
new file mode 100644
index 0000000000000000000000000000000000000000..ff5efbcab3b58063dd84787181c26a95fb663d94
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/script.tmpl
@@ -0,0 +1,3 @@
+# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r
+__requires__ = %(spec)r
+__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)
diff --git a/lib/python3.7/site-packages/setuptools/site-patch.py b/lib/python3.7/site-packages/setuptools/site-patch.py
new file mode 100644
index 0000000000000000000000000000000000000000..40b00de0a799686485b266fd92abb9fb100ed718
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/site-patch.py
@@ -0,0 +1,74 @@
+def __boot():
+    import sys
+    import os
+    PYTHONPATH = os.environ.get('PYTHONPATH')
+    if PYTHONPATH is None or (sys.platform == 'win32' and not PYTHONPATH):
+        PYTHONPATH = []
+    else:
+        PYTHONPATH = PYTHONPATH.split(os.pathsep)
+
+    pic = getattr(sys, 'path_importer_cache', {})
+    stdpath = sys.path[len(PYTHONPATH):]
+    mydir = os.path.dirname(__file__)
+
+    for item in stdpath:
+        if item == mydir or not item:
+            continue  # skip the current dir ('' on Windows) and my own directory
+        importer = pic.get(item)
+        if importer is not None:
+            loader = importer.find_module('site')
+            if loader is not None:
+                # This should actually reload the current module
+                loader.load_module('site')
+                break
+        else:
+            try:
+                import imp  # Avoid import loop in Python 3
+                stream, path, descr = imp.find_module('site', [item])
+            except ImportError:
+                continue
+            if stream is None:
+                continue
+            try:
+                # This should actually reload the current module
+                imp.load_module('site', stream, path, descr)
+            finally:
+                stream.close()
+            break
+    else:
+        raise ImportError("Couldn't find the real 'site' module")
+
+    known_paths = dict([(makepath(item)[1], 1) for item in sys.path])  # 2.2 comp
+
+    oldpos = getattr(sys, '__egginsert', 0)  # save old insertion position
+    sys.__egginsert = 0  # and reset the current one
+
+    for item in PYTHONPATH:
+        addsitedir(item)
+
+    sys.__egginsert += oldpos  # restore effective old position
+
+    d, nd = makepath(stdpath[0])
+    insert_at = None
+    new_path = []
+
+    for item in sys.path:
+        p, np = makepath(item)
+
+        if np == nd and insert_at is None:
+            # We've hit the first 'system' path entry, so added entries go here
+            insert_at = len(new_path)
+
+        if np in known_paths or insert_at is None:
+            new_path.append(item)
+        else:
+            # new path after the insert point, back-insert it
+            new_path.insert(insert_at, item)
+            insert_at += 1
+
+    sys.path[:] = new_path
+
+
+if __name__ == 'site':
+    __boot()
+    del __boot
diff --git a/lib/python3.7/site-packages/setuptools/ssl_support.py b/lib/python3.7/site-packages/setuptools/ssl_support.py
new file mode 100644
index 0000000000000000000000000000000000000000..226db694bb38791147c6bf2881c4b86025dd2f8f
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/ssl_support.py
@@ -0,0 +1,260 @@
+import os
+import socket
+import atexit
+import re
+import functools
+
+from setuptools.extern.six.moves import urllib, http_client, map, filter
+
+from pkg_resources import ResolutionError, ExtractionError
+
+try:
+    import ssl
+except ImportError:
+    ssl = None
+
+__all__ = [
+    'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
+    'opener_for'
+]
+
+cert_paths = """
+/etc/pki/tls/certs/ca-bundle.crt
+/etc/ssl/certs/ca-certificates.crt
+/usr/share/ssl/certs/ca-bundle.crt
+/usr/local/share/certs/ca-root.crt
+/etc/ssl/cert.pem
+/System/Library/OpenSSL/certs/cert.pem
+/usr/local/share/certs/ca-root-nss.crt
+/etc/ssl/ca-bundle.pem
+""".strip().split()
+
+try:
+    HTTPSHandler = urllib.request.HTTPSHandler
+    HTTPSConnection = http_client.HTTPSConnection
+except AttributeError:
+    HTTPSHandler = HTTPSConnection = object
+
+is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
+
+
+try:
+    from ssl import CertificateError, match_hostname
+except ImportError:
+    try:
+        from backports.ssl_match_hostname import CertificateError
+        from backports.ssl_match_hostname import match_hostname
+    except ImportError:
+        CertificateError = None
+        match_hostname = None
+
+if not CertificateError:
+
+    class CertificateError(ValueError):
+        pass
+
+
+if not match_hostname:
+
+    def _dnsname_match(dn, hostname, max_wildcards=1):
+        """Matching according to RFC 6125, section 6.4.3
+
+        https://tools.ietf.org/html/rfc6125#section-6.4.3
+        """
+        pats = []
+        if not dn:
+            return False
+
+        # Ported from python3-syntax:
+        # leftmost, *remainder = dn.split(r'.')
+        parts = dn.split(r'.')
+        leftmost = parts[0]
+        remainder = parts[1:]
+
+        wildcards = leftmost.count('*')
+        if wildcards > max_wildcards:
+            # Issue #17980: avoid denials of service by refusing more
+            # than one wildcard per fragment.  A survey of established
+            # policy among SSL implementations showed it to be a
+            # reasonable choice.
+            raise CertificateError(
+                "too many wildcards in certificate DNS name: " + repr(dn))
+
+        # speed up common case w/o wildcards
+        if not wildcards:
+            return dn.lower() == hostname.lower()
+
+        # RFC 6125, section 6.4.3, subitem 1.
+        # The client SHOULD NOT attempt to match a presented identifier in which
+        # the wildcard character comprises a label other than the left-most label.
+        if leftmost == '*':
+            # When '*' is a fragment by itself, it matches a non-empty dotless
+            # fragment.
+            pats.append('[^.]+')
+        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+            # RFC 6125, section 6.4.3, subitem 3.
+            # The client SHOULD NOT attempt to match a presented identifier
+            # where the wildcard character is embedded within an A-label or
+            # U-label of an internationalized domain name.
+            pats.append(re.escape(leftmost))
+        else:
+            # Otherwise, '*' matches any dotless string, e.g. www*
+            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+        # add the remaining fragments, ignore any wildcards
+        for frag in remainder:
+            pats.append(re.escape(frag))
+
+        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+        return pat.match(hostname)
+
+    def match_hostname(cert, hostname):
+        """Verify that *cert* (in decoded format as returned by
+        SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
+        rules are followed, but IP addresses are not accepted for *hostname*.
+
+        CertificateError is raised on failure. On success, the function
+        returns nothing.
+        """
+        if not cert:
+            raise ValueError("empty or no certificate")
+        dnsnames = []
+        san = cert.get('subjectAltName', ())
+        for key, value in san:
+            if key == 'DNS':
+                if _dnsname_match(value, hostname):
+                    return
+                dnsnames.append(value)
+        if not dnsnames:
+            # The subject is only checked when there is no dNSName entry
+            # in subjectAltName
+            for sub in cert.get('subject', ()):
+                for key, value in sub:
+                    # XXX according to RFC 2818, the most specific Common Name
+                    # must be used.
+                    if key == 'commonName':
+                        if _dnsname_match(value, hostname):
+                            return
+                        dnsnames.append(value)
+        if len(dnsnames) > 1:
+            raise CertificateError("hostname %r "
+                "doesn't match either of %s"
+                % (hostname, ', '.join(map(repr, dnsnames))))
+        elif len(dnsnames) == 1:
+            raise CertificateError("hostname %r "
+                "doesn't match %r"
+                % (hostname, dnsnames[0]))
+        else:
+            raise CertificateError("no appropriate commonName or "
+                "subjectAltName fields were found")
+
+
+class VerifyingHTTPSHandler(HTTPSHandler):
+    """Simple verifying handler: no auth, subclasses, timeouts, etc."""
+
+    def __init__(self, ca_bundle):
+        self.ca_bundle = ca_bundle
+        HTTPSHandler.__init__(self)
+
+    def https_open(self, req):
+        return self.do_open(
+            lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
+        )
+
+
+class VerifyingHTTPSConn(HTTPSConnection):
+    """Simple verifying connection: no auth, subclasses, timeouts, etc."""
+
+    def __init__(self, host, ca_bundle, **kw):
+        HTTPSConnection.__init__(self, host, **kw)
+        self.ca_bundle = ca_bundle
+
+    def connect(self):
+        sock = socket.create_connection(
+            (self.host, self.port), getattr(self, 'source_address', None)
+        )
+
+        # Handle the socket if a (proxy) tunnel is present
+        if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
+            self.sock = sock
+            self._tunnel()
+            # http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
+            # change self.host to mean the proxy server host when tunneling is
+            # being used. Adapt, since we are interested in the destination
+            # host for the match_hostname() comparison.
+            actual_host = self._tunnel_host
+        else:
+            actual_host = self.host
+
+        if hasattr(ssl, 'create_default_context'):
+            ctx = ssl.create_default_context(cafile=self.ca_bundle)
+            self.sock = ctx.wrap_socket(sock, server_hostname=actual_host)
+        else:
+            # Fallback for Python < 2.7.9 / < 3.4, which lack create_default_context
+            self.sock = ssl.wrap_socket(
+                sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
+            )
+        try:
+            match_hostname(self.sock.getpeercert(), actual_host)
+        except CertificateError:
+            self.sock.shutdown(socket.SHUT_RDWR)
+            self.sock.close()
+            raise
+
+
+def opener_for(ca_bundle=None):
+    """Get a urlopen() replacement that uses ca_bundle for verification"""
+    return urllib.request.build_opener(
+        VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
+    ).open
+
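+
+# Illustrative usage sketch (not part of upstream setuptools): a urlopen()
+# replacement that verifies certificates against the discovered CA bundle.
+def _demo_opener_for():
+    urlopen = opener_for()
+    return urlopen('https://pypi.org/simple/').read()
+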
+
+# from jaraco.functools
+def once(func):
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        if not hasattr(func, 'always_returns'):
+            func.always_returns = func(*args, **kwargs)
+        return func.always_returns
+    return wrapper
+
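+
+# Illustrative sketch (not part of upstream code): @once caches the first
+# return value, so the wrapped function body runs at most one time.
+def _demo_once():
+    @once
+    def answer():
+        return object()
+
+    assert answer() is answer()
+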
+
+@once
+def get_win_certfile():
+    try:
+        import wincertstore
+    except ImportError:
+        return None
+
+    class CertFile(wincertstore.CertFile):
+        def __init__(self):
+            super(CertFile, self).__init__()
+            atexit.register(self.close)
+
+        def close(self):
+            try:
+                super(CertFile, self).close()
+            except OSError:
+                pass
+
+    _wincerts = CertFile()
+    _wincerts.addstore('CA')
+    _wincerts.addstore('ROOT')
+    return _wincerts.name
+
+
+def find_ca_bundle():
+    """Return an existing CA bundle path, or None"""
+    extant_cert_paths = filter(os.path.isfile, cert_paths)
+    return (
+        get_win_certfile()
+        or next(extant_cert_paths, None)
+        or _certifi_where()
+    )
+
+
+def _certifi_where():
+    try:
+        return __import__('certifi').where()
+    except (ImportError, ResolutionError, ExtractionError):
+        pass
diff --git a/lib/python3.7/site-packages/setuptools/unicode_utils.py b/lib/python3.7/site-packages/setuptools/unicode_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c63efd20b350358ab25c079166dbb00ef49f8d2
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/unicode_utils.py
@@ -0,0 +1,44 @@
+import unicodedata
+import sys
+
+from setuptools.extern import six
+
+
+# HFS Plus uses decomposed UTF-8
+def decompose(path):
+    if isinstance(path, six.text_type):
+        return unicodedata.normalize('NFD', path)
+    try:
+        path = path.decode('utf-8')
+        path = unicodedata.normalize('NFD', path)
+        path = path.encode('utf-8')
+    except UnicodeError:
+        pass  # Not UTF-8
+    return path
+
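+
+# Illustrative sketch (not part of upstream code): composed (NFC) input
+# is rewritten to the decomposed (NFD) form that HFS Plus stores.
+def _demo_decompose():
+    composed = u'caf\u00e9'             # 'e' with acute as one code point
+    decomposed = decompose(composed)    # 'e' followed by combining acute
+    assert decomposed == u'cafe\u0301'
+    return decomposed
+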
+
+def filesys_decode(path):
+    """
+    Ensure that the given path is decoded;
+    return None when no expected encoding works.
+    """
+
+    if isinstance(path, six.text_type):
+        return path
+
+    fs_enc = sys.getfilesystemencoding() or 'utf-8'
+    candidates = fs_enc, 'utf-8'
+
+    for enc in candidates:
+        try:
+            return path.decode(enc)
+        except UnicodeDecodeError:
+            continue
+
+
+def try_encode(string, enc):
+    "turn unicode encoding into a functional routine"
+    try:
+        return string.encode(enc)
+    except UnicodeEncodeError:
+        return None
diff --git a/lib/python3.7/site-packages/setuptools/version.py b/lib/python3.7/site-packages/setuptools/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..95e1869658566aac3060562d8cd5a6b647887d1e
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/version.py
@@ -0,0 +1,6 @@
+import pkg_resources
+
+try:
+    __version__ = pkg_resources.get_distribution('setuptools').version
+except Exception:
+    __version__ = 'unknown'
diff --git a/lib/python3.7/site-packages/setuptools/wheel.py b/lib/python3.7/site-packages/setuptools/wheel.py
new file mode 100644
index 0000000000000000000000000000000000000000..e11f0a1d912860b0da241dee65f6f76d9c8d45e9
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/wheel.py
@@ -0,0 +1,211 @@
+"""Wheels support."""
+
+from distutils.util import get_platform
+import email
+import itertools
+import os
+import posixpath
+import re
+import zipfile
+
+import pkg_resources
+import setuptools
+from pkg_resources import parse_version
+from setuptools.extern.packaging.utils import canonicalize_name
+from setuptools.extern.six import PY3
+from setuptools import pep425tags
+from setuptools.command.egg_info import write_requirements
+
+
+__metaclass__ = type
+
+
+WHEEL_NAME = re.compile(
+    r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
+    ((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
+    )\.whl$""",
+    re.VERBOSE).match
+
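+
+# Illustrative sketch (not part of upstream code): how a conventional
+# wheel filename parses against the pattern above.
+def _demo_wheel_name():
+    match = WHEEL_NAME('pip-19.0-py2.py3-none-any.whl')
+    assert match.group('project_name') == 'pip'
+    assert match.group('version') == '19.0'
+    assert match.group('py_version') == 'py2.py3'
+    assert match.group('abi') == 'none'
+    assert match.group('platform') == 'any'
+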
+NAMESPACE_PACKAGE_INIT = '''\
+try:
+    __import__('pkg_resources').declare_namespace(__name__)
+except ImportError:
+    __path__ = __import__('pkgutil').extend_path(__path__, __name__)
+'''
+
+
+def unpack(src_dir, dst_dir):
+    '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
+    for dirpath, dirnames, filenames in os.walk(src_dir):
+        subdir = os.path.relpath(dirpath, src_dir)
+        for f in filenames:
+            src = os.path.join(dirpath, f)
+            dst = os.path.join(dst_dir, subdir, f)
+            os.renames(src, dst)
+        for n, d in reversed(list(enumerate(dirnames))):
+            src = os.path.join(dirpath, d)
+            dst = os.path.join(dst_dir, subdir, d)
+            if not os.path.exists(dst):
+                # Directory does not exist in destination,
+                # rename it and prune it from os.walk list.
+                os.renames(src, dst)
+                del dirnames[n]
+    # Cleanup.
+    for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
+        assert not filenames
+        os.rmdir(dirpath)
+
+
+class Wheel:
+
+    def __init__(self, filename):
+        match = WHEEL_NAME(os.path.basename(filename))
+        if match is None:
+            raise ValueError('invalid wheel name: %r' % filename)
+        self.filename = filename
+        for k, v in match.groupdict().items():
+            setattr(self, k, v)
+
+    def tags(self):
+        '''List tags (py_version, abi, platform) supported by this wheel.'''
+        return itertools.product(
+            self.py_version.split('.'),
+            self.abi.split('.'),
+            self.platform.split('.'),
+        )
+
+    def is_compatible(self):
+        '''Is the wheel compatible with the current platform?'''
+        supported_tags = pep425tags.get_supported()
+        return next((True for t in self.tags() if t in supported_tags), False)
+
+    def egg_name(self):
+        return pkg_resources.Distribution(
+            project_name=self.project_name, version=self.version,
+            platform=(None if self.platform == 'any' else get_platform()),
+        ).egg_name() + '.egg'
+
+    def get_dist_info(self, zf):
+        # find the correct name of the .dist-info dir in the wheel file
+        for member in zf.namelist():
+            dirname = posixpath.dirname(member)
+            if (dirname.endswith('.dist-info') and
+                    canonicalize_name(dirname).startswith(
+                        canonicalize_name(self.project_name))):
+                return dirname
+        raise ValueError("unsupported wheel format. .dist-info not found")
+
+    def install_as_egg(self, destination_eggdir):
+        '''Install wheel as an egg directory.'''
+        with zipfile.ZipFile(self.filename) as zf:
+            self._install_as_egg(destination_eggdir, zf)
+
+    def _install_as_egg(self, destination_eggdir, zf):
+        dist_basename = '%s-%s' % (self.project_name, self.version)
+        dist_info = self.get_dist_info(zf)
+        dist_data = '%s.data' % dist_basename
+        egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
+
+        self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)
+        self._move_data_entries(destination_eggdir, dist_data)
+        self._fix_namespace_packages(egg_info, destination_eggdir)
+
+    @staticmethod
+    def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):
+        def get_metadata(name):
+            with zf.open(posixpath.join(dist_info, name)) as fp:
+                value = fp.read().decode('utf-8') if PY3 else fp.read()
+                return email.parser.Parser().parsestr(value)
+
+        wheel_metadata = get_metadata('WHEEL')
+        # Check wheel format version is supported.
+        wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
+        wheel_v1 = (
+            parse_version('1.0') <= wheel_version < parse_version('2.0dev0')
+        )
+        if not wheel_v1:
+            raise ValueError(
+                'unsupported wheel format version: %s' % wheel_version)
+        # Extract to target directory.
+        os.mkdir(destination_eggdir)
+        zf.extractall(destination_eggdir)
+        # Convert metadata.
+        dist_info = os.path.join(destination_eggdir, dist_info)
+        dist = pkg_resources.Distribution.from_location(
+            destination_eggdir, dist_info,
+            metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info),
+        )
+
+        # Note: Evaluate and strip markers now,
+        # as it's difficult to convert back from the syntax:
+        # foobar; "linux" in sys_platform and extra == 'test'
+        def raw_req(req):
+            req.marker = None
+            return str(req)
+        install_requires = list(sorted(map(raw_req, dist.requires())))
+        extras_require = {
+            extra: sorted(
+                req
+                for req in map(raw_req, dist.requires((extra,)))
+                if req not in install_requires
+            )
+            for extra in dist.extras
+        }
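+        # For a hypothetical wheel this might yield, e.g.,
+        # install_requires == ['six'] and
+        # extras_require == {'test': ['pytest']}.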
+        os.rename(dist_info, egg_info)
+        os.rename(
+            os.path.join(egg_info, 'METADATA'),
+            os.path.join(egg_info, 'PKG-INFO'),
+        )
+        setup_dist = setuptools.Distribution(
+            attrs=dict(
+                install_requires=install_requires,
+                extras_require=extras_require,
+            ),
+        )
+        write_requirements(
+            setup_dist.get_command_obj('egg_info'),
+            None,
+            os.path.join(egg_info, 'requires.txt'),
+        )
+
+    @staticmethod
+    def _move_data_entries(destination_eggdir, dist_data):
+        """Move data entries to their correct location."""
+        dist_data = os.path.join(destination_eggdir, dist_data)
+        dist_data_scripts = os.path.join(dist_data, 'scripts')
+        if os.path.exists(dist_data_scripts):
+            egg_info_scripts = os.path.join(
+                destination_eggdir, 'EGG-INFO', 'scripts')
+            os.mkdir(egg_info_scripts)
+            for entry in os.listdir(dist_data_scripts):
+                # Remove bytecode, as it's not properly handled
+                # during easy_install's script-install phase.
+                if entry.endswith('.pyc'):
+                    os.unlink(os.path.join(dist_data_scripts, entry))
+                else:
+                    os.rename(
+                        os.path.join(dist_data_scripts, entry),
+                        os.path.join(egg_info_scripts, entry),
+                    )
+            os.rmdir(dist_data_scripts)
+        for subdir in filter(os.path.exists, (
+            os.path.join(dist_data, d)
+            for d in ('data', 'headers', 'purelib', 'platlib')
+        )):
+            unpack(subdir, destination_eggdir)
+        if os.path.exists(dist_data):
+            os.rmdir(dist_data)
+
+    @staticmethod
+    def _fix_namespace_packages(egg_info, destination_eggdir):
+        namespace_packages = os.path.join(
+            egg_info, 'namespace_packages.txt')
+        if os.path.exists(namespace_packages):
+            with open(namespace_packages) as fp:
+                namespace_packages = fp.read().split()
+            for mod in namespace_packages:
+                mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
+                mod_init = os.path.join(mod_dir, '__init__.py')
+                if os.path.exists(mod_dir) and not os.path.exists(mod_init):
+                    with open(mod_init, 'w') as fp:
+                        fp.write(NAMESPACE_PACKAGE_INIT)
diff --git a/lib/python3.7/site-packages/setuptools/windows_support.py b/lib/python3.7/site-packages/setuptools/windows_support.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb977cff9545ef5d48ad7cf13f2cbe1ebc3e7cd0
--- /dev/null
+++ b/lib/python3.7/site-packages/setuptools/windows_support.py
@@ -0,0 +1,29 @@
+import platform
+import ctypes
+
+
+def windows_only(func):
+    if platform.system() != 'Windows':
+        return lambda *args, **kwargs: None
+    return func
+
+
+@windows_only
+def hide_file(path):
+    """
+    Set the hidden attribute on a file or directory.
+
+    From http://stackoverflow.com/questions/19622133/
+
+    `path` must be text.
+    """
+    __import__('ctypes.wintypes')
+    SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
+    SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
+    SetFileAttributes.restype = ctypes.wintypes.BOOL
+
+    FILE_ATTRIBUTE_HIDDEN = 0x02
+
+    ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
+    if not ret:
+        raise ctypes.WinError()
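+
+
+# Off Windows the decorator above turns hide_file into a no-op returning
+# None, so call sites never need a platform check of their own, e.g.
+# (hypothetical path): hide_file(u'C:\\project\\.cache')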
diff --git a/lib/python3.7/site-packages/werkzeug/__init__.py b/lib/python3.7/site-packages/werkzeug/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9bc34a1e2c2ee43bccc0f6f7e91b77e0480f930
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/__init__.py
@@ -0,0 +1,233 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug
+    ~~~~~~~~
+
+    Werkzeug is the Swiss Army knife of Python web development.
+
+    It provides useful classes and functions for any WSGI application, making
+    the life of a Python web developer much easier.  All of the provided
+    classes are independent of each other, so you can mix them with any other
+    library.
+
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import sys
+from types import ModuleType
+
+__version__ = "0.15.2"
+
+# This import magic raises concerns quite often, which is why the
+# implementation and motivation are explained here in detail.
+#
+# The majority of the functions and classes provided by Werkzeug work on the
+# HTTP and WSGI layer.  There is no useful grouping for those, which is why
+# they are all importable from "werkzeug" instead of the modules where they
+# are implemented.  The downside is that everything would then be loaded at
+# once, even if unused.
+#
+# The implementation of a lazy-loading module in this file replaces the
+# werkzeug package when imported from within.  Attribute access to the werkzeug
+# module will then lazily import from the modules that implement the objects.
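+#
+# A minimal sketch of the effect (hypothetical interactive session):
+#
+#     >>> import werkzeug
+#     >>> werkzeug.secure_filename   # first access lazily imports
+#     <function secure_filename at 0x...>   # werkzeug.utils behind the scenes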
+
+# import mapping to objects in other modules
+all_by_module = {
+    "werkzeug.debug": ["DebuggedApplication"],
+    "werkzeug.local": [
+        "Local",
+        "LocalManager",
+        "LocalProxy",
+        "LocalStack",
+        "release_local",
+    ],
+    "werkzeug.serving": ["run_simple"],
+    "werkzeug.test": ["Client", "EnvironBuilder", "create_environ", "run_wsgi_app"],
+    "werkzeug.testapp": ["test_app"],
+    "werkzeug.exceptions": ["abort", "Aborter"],
+    "werkzeug.urls": [
+        "url_decode",
+        "url_encode",
+        "url_quote",
+        "url_quote_plus",
+        "url_unquote",
+        "url_unquote_plus",
+        "url_fix",
+        "Href",
+        "iri_to_uri",
+        "uri_to_iri",
+    ],
+    "werkzeug.formparser": ["parse_form_data"],
+    "werkzeug.utils": [
+        "escape",
+        "environ_property",
+        "append_slash_redirect",
+        "redirect",
+        "cached_property",
+        "import_string",
+        "dump_cookie",
+        "parse_cookie",
+        "unescape",
+        "format_string",
+        "find_modules",
+        "header_property",
+        "html",
+        "xhtml",
+        "HTMLBuilder",
+        "validate_arguments",
+        "ArgumentValidationError",
+        "bind_arguments",
+        "secure_filename",
+    ],
+    "werkzeug.wsgi": [
+        "get_current_url",
+        "get_host",
+        "pop_path_info",
+        "peek_path_info",
+        "ClosingIterator",
+        "FileWrapper",
+        "make_line_iter",
+        "LimitedStream",
+        "responder",
+        "wrap_file",
+        "extract_path_info",
+    ],
+    "werkzeug.datastructures": [
+        "MultiDict",
+        "CombinedMultiDict",
+        "Headers",
+        "EnvironHeaders",
+        "ImmutableList",
+        "ImmutableDict",
+        "ImmutableMultiDict",
+        "TypeConversionDict",
+        "ImmutableTypeConversionDict",
+        "Accept",
+        "MIMEAccept",
+        "CharsetAccept",
+        "LanguageAccept",
+        "RequestCacheControl",
+        "ResponseCacheControl",
+        "ETags",
+        "HeaderSet",
+        "WWWAuthenticate",
+        "Authorization",
+        "FileMultiDict",
+        "CallbackDict",
+        "FileStorage",
+        "OrderedMultiDict",
+        "ImmutableOrderedMultiDict",
+    ],
+    "werkzeug.useragents": ["UserAgent"],
+    "werkzeug.http": [
+        "parse_etags",
+        "parse_date",
+        "http_date",
+        "cookie_date",
+        "parse_cache_control_header",
+        "is_resource_modified",
+        "parse_accept_header",
+        "parse_set_header",
+        "quote_etag",
+        "unquote_etag",
+        "generate_etag",
+        "dump_header",
+        "parse_list_header",
+        "parse_dict_header",
+        "parse_authorization_header",
+        "parse_www_authenticate_header",
+        "remove_entity_headers",
+        "is_entity_header",
+        "remove_hop_by_hop_headers",
+        "parse_options_header",
+        "dump_options_header",
+        "is_hop_by_hop_header",
+        "unquote_header_value",
+        "quote_header_value",
+        "HTTP_STATUS_CODES",
+    ],
+    "werkzeug.wrappers": [
+        "BaseResponse",
+        "BaseRequest",
+        "Request",
+        "Response",
+        "AcceptMixin",
+        "ETagRequestMixin",
+        "ETagResponseMixin",
+        "ResponseStreamMixin",
+        "CommonResponseDescriptorsMixin",
+        "UserAgentMixin",
+        "AuthorizationMixin",
+        "WWWAuthenticateMixin",
+        "CommonRequestDescriptorsMixin",
+    ],
+    "werkzeug.middleware.dispatcher": ["DispatcherMiddleware"],
+    "werkzeug.middleware.shared_data": ["SharedDataMiddleware"],
+    "werkzeug.security": ["generate_password_hash", "check_password_hash"],
+    # the undocumented easteregg ;-)
+    "werkzeug._internal": ["_easteregg"],
+}
+
+# modules that should be imported when accessed as attributes of werkzeug
+attribute_modules = frozenset(["exceptions", "routing"])
+
+object_origins = {}
+for module, items in all_by_module.items():
+    for item in items:
+        object_origins[item] = module
+
+
+class module(ModuleType):
+    """Automatically import objects from the modules."""
+
+    def __getattr__(self, name):
+        if name in object_origins:
+            module = __import__(object_origins[name], None, None, [name])
+            for extra_name in all_by_module[module.__name__]:
+                setattr(self, extra_name, getattr(module, extra_name))
+            return getattr(module, name)
+        elif name in attribute_modules:
+            __import__("werkzeug." + name)
+        return ModuleType.__getattribute__(self, name)
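+        # Note: the setattr loop above copies every name that all_by_module
+        # lists for the freshly imported module onto this module object, so
+        # subsequent access to those names bypasses __getattr__ entirely.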
+
+    def __dir__(self):
+        """Just show what we want to show."""
+        result = list(new_module.__all__)
+        result.extend(
+            (
+                "__file__",
+                "__doc__",
+                "__all__",
+                "__docformat__",
+                "__name__",
+                "__path__",
+                "__package__",
+                "__version__",
+            )
+        )
+        return result
+
+
+# keep a reference to this module so that it's not garbage collected
+old_module = sys.modules["werkzeug"]
+
+
+# setup the new module and patch it into the dict of loaded modules
+new_module = sys.modules["werkzeug"] = module("werkzeug")
+new_module.__dict__.update(
+    {
+        "__file__": __file__,
+        "__package__": "werkzeug",
+        "__path__": __path__,
+        "__doc__": __doc__,
+        "__version__": __version__,
+        "__all__": tuple(object_origins) + tuple(attribute_modules),
+        "__docformat__": "restructuredtext en",
+    }
+)
+
+
+# Due to bootstrapping issues we need to import exceptions here.
+# Don't ask :-(
+__import__("werkzeug.exceptions")
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3e0003c421df6b6680e9570f2c6d36eec331482d
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/_compat.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/_compat.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..24bc160381cd4b4f5f7780eae744b7d381ef01f3
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/_compat.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/_internal.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/_internal.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..140e454752e8dec84cb120f6c6f8d999c71d8381
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/_internal.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/_reloader.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/_reloader.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..438313043f51469d62447a7f246df79c839a391b
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/_reloader.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/datastructures.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/datastructures.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3bb5ddd45a66d627177e99512233570f7d0947de
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/datastructures.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/exceptions.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/exceptions.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dd5f52e0afa29deeeb74c770205d6edfd0aee240
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/exceptions.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/filesystem.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/filesystem.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3102d2c4841c81955488847d512fbf5f30cf7112
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/filesystem.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/formparser.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/formparser.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb87b4c732bf5abac6251a08d936c41d81a65a84
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/formparser.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/http.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/http.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9552e00f61eb1a2a5c9bb546875a4d797dd2e0c7
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/http.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/local.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/local.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d8b88217550b97b1973bb8de763808ec741c71a4
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/local.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/posixemulation.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/posixemulation.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..991470ee39af3b6ce5efe80d1e30a2b1b9716048
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/posixemulation.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/routing.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/routing.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..514a3b64cb9d69faaaeeaad0358901a4fc647a68
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/routing.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/security.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/security.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fa8a8d997e23cf2bedcfb8c6ee6c7b9b03c48fe4
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/security.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/serving.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/serving.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c251fa63f97576e4c1532792f38b4ea7e4ec8008
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/serving.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/test.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/test.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3664cfe2be46735a729f9ebdc7a38ed30ce150af
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/test.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/testapp.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/testapp.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25ff5db27d7495c92d2eac5e28c2234d0f512d6d
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/testapp.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/urls.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/urls.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9d04b086df61883f2310d810d5163d4e41eb4ca2
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/urls.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/useragents.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/useragents.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bdc8b6fc7edd50995fee59b8c2055c23470e74dc
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/useragents.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/utils.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/utils.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..71a76777f8b32cd7398313c5366e02e2b7d55773
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/utils.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/__pycache__/wsgi.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/__pycache__/wsgi.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6ebab384a64936944621188bf71b08b1bb2048bd
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/__pycache__/wsgi.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/_compat.py b/lib/python3.7/site-packages/werkzeug/_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..1097983e0e25dd9cc298908dbae94124ddf22e4f
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/_compat.py
@@ -0,0 +1,219 @@
+# flake8: noqa
+# This whole file is full of lint errors
+import functools
+import operator
+import sys
+
+try:
+    import builtins
+except ImportError:
+    import __builtin__ as builtins
+
+
+PY2 = sys.version_info[0] == 2
+WIN = sys.platform.startswith("win")
+
+_identity = lambda x: x
+
+if PY2:
+    unichr = unichr
+    text_type = unicode
+    string_types = (str, unicode)
+    integer_types = (int, long)
+
+    iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
+    itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
+    iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)
+
+    iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
+    iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)
+
+    int_to_byte = chr
+    iter_bytes = iter
+
+    import collections as collections_abc
+
+    exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
+
+    def fix_tuple_repr(obj):
+        def __repr__(self):
+            cls = self.__class__
+            return "%s(%s)" % (
+                cls.__name__,
+                ", ".join(
+                    "%s=%r" % (field, self[index])
+                    for index, field in enumerate(cls._fields)
+                ),
+            )
+
+        obj.__repr__ = __repr__
+        return obj
+
+    def implements_iterator(cls):
+        cls.next = cls.__next__
+        del cls.__next__
+        return cls
+
+    def implements_to_string(cls):
+        cls.__unicode__ = cls.__str__
+        cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
+        return cls
+
+    def native_string_result(func):
+        def wrapper(*args, **kwargs):
+            return func(*args, **kwargs).encode("utf-8")
+
+        return functools.update_wrapper(wrapper, func)
+
+    def implements_bool(cls):
+        cls.__nonzero__ = cls.__bool__
+        del cls.__bool__
+        return cls
+
+    from itertools import imap, izip, ifilter
+
+    range_type = xrange
+
+    from StringIO import StringIO
+    from cStringIO import StringIO as BytesIO
+
+    NativeStringIO = BytesIO
+
+    def make_literal_wrapper(reference):
+        return _identity
+
+    def normalize_string_tuple(tup):
+        """Normalizes a string tuple to a common type. Following Python 2
+        rules, upgrades to unicode are implicit.
+        """
+        if any(isinstance(x, text_type) for x in tup):
+            return tuple(to_unicode(x) for x in tup)
+        return tup
+
+    def try_coerce_native(s):
+        """Try to coerce a unicode string to native if possible. Otherwise,
+        leave it as unicode.
+        """
+        try:
+            return to_native(s)
+        except UnicodeError:
+            return s
+
+    wsgi_get_bytes = _identity
+
+    def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
+        return s.decode(charset, errors)
+
+    def wsgi_encoding_dance(s, charset="utf-8", errors="replace"):
+        if isinstance(s, bytes):
+            return s
+        return s.encode(charset, errors)
+
+    def to_bytes(x, charset=sys.getdefaultencoding(), errors="strict"):
+        if x is None:
+            return None
+        if isinstance(x, (bytes, bytearray, buffer)):
+            return bytes(x)
+        if isinstance(x, unicode):
+            return x.encode(charset, errors)
+        raise TypeError("Expected bytes")
+
+    def to_native(x, charset=sys.getdefaultencoding(), errors="strict"):
+        if x is None or isinstance(x, str):
+            return x
+        return x.encode(charset, errors)
+
+
+else:
+    unichr = chr
+    text_type = str
+    string_types = (str,)
+    integer_types = (int,)
+
+    iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
+    itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
+    iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))
+
+    iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
+    iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))
+
+    int_to_byte = operator.methodcaller("to_bytes", 1, "big")
+    iter_bytes = functools.partial(map, int_to_byte)
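+    # e.g. int_to_byte(65) == b"A", and iter_bytes(b"AB") yields b"A", b"B"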
+
+    import collections.abc as collections_abc
+
+    def reraise(tp, value, tb=None):
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+
+    fix_tuple_repr = _identity
+    implements_iterator = _identity
+    implements_to_string = _identity
+    implements_bool = _identity
+    native_string_result = _identity
+    imap = map
+    izip = zip
+    ifilter = filter
+    range_type = range
+
+    from io import StringIO, BytesIO
+
+    NativeStringIO = StringIO
+
+    _latin1_encode = operator.methodcaller("encode", "latin1")
+
+    def make_literal_wrapper(reference):
+        if isinstance(reference, text_type):
+            return _identity
+        return _latin1_encode
+
+    def normalize_string_tuple(tup):
+        """Ensures that all types in the tuple are either strings
+        or bytes.
+        """
+        tupiter = iter(tup)
+        is_text = isinstance(next(tupiter, None), text_type)
+        for arg in tupiter:
+            if isinstance(arg, text_type) != is_text:
+                raise TypeError(
+                    "Cannot mix str and bytes arguments (got %s)" % repr(tup)
+                )
+        return tup
+
+    try_coerce_native = _identity
+    wsgi_get_bytes = _latin1_encode
+
+    def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
+        return s.encode("latin1").decode(charset, errors)
+
+    def wsgi_encoding_dance(s, charset="utf-8", errors="replace"):
+        if isinstance(s, text_type):
+            s = s.encode(charset)
+        return s.decode("latin1", errors)
+
+    def to_bytes(x, charset=sys.getdefaultencoding(), errors="strict"):
+        if x is None:
+            return None
+        if isinstance(x, (bytes, bytearray, memoryview)):  # noqa
+            return bytes(x)
+        if isinstance(x, str):
+            return x.encode(charset, errors)
+        raise TypeError("Expected bytes")
+
+    def to_native(x, charset=sys.getdefaultencoding(), errors="strict"):
+        if x is None or isinstance(x, str):
+            return x
+        return x.decode(charset, errors)
+
+
+def to_unicode(
+    x, charset=sys.getdefaultencoding(), errors="strict", allow_none_charset=False
+):
+    if x is None:
+        return None
+    if not isinstance(x, bytes):
+        return text_type(x)
+    if charset is None and allow_none_charset:
+        return x
+    return x.decode(charset, errors)
diff --git a/lib/python3.7/site-packages/werkzeug/_internal.py b/lib/python3.7/site-packages/werkzeug/_internal.py
new file mode 100644
index 0000000000000000000000000000000000000000..90e3dd9307b5d03afab52ed235c665c6ee9ebc57
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/_internal.py
@@ -0,0 +1,484 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug._internal
+    ~~~~~~~~~~~~~~~~~~
+
+    This module provides internally used helpers and constants.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import inspect
+import logging
+import re
+import string
+from datetime import date
+from datetime import datetime
+from itertools import chain
+from weakref import WeakKeyDictionary
+
+from ._compat import int_to_byte
+from ._compat import integer_types
+from ._compat import iter_bytes
+from ._compat import range_type
+from ._compat import text_type
+
+
+_logger = None
+_signature_cache = WeakKeyDictionary()
+_epoch_ord = date(1970, 1, 1).toordinal()
+_cookie_params = {
+    b"expires",
+    b"path",
+    b"comment",
+    b"max-age",
+    b"secure",
+    b"httponly",
+    b"version",
+}
+_legal_cookie_chars = (
+    string.ascii_letters + string.digits + u"/=!#$%&'*+-.^_`|~:"
+).encode("ascii")
+
+_cookie_quoting_map = {b",": b"\\054", b";": b"\\073", b'"': b'\\"', b"\\": b"\\\\"}
+for _i in chain(range_type(32), range_type(127, 256)):
+    _cookie_quoting_map[int_to_byte(_i)] = ("\\%03o" % _i).encode("latin1")
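+# e.g. after this loop, _cookie_quoting_map[b"\n"] == b"\\012"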
+
+_octal_re = re.compile(br"\\[0-3][0-7][0-7]")
+_quote_re = re.compile(br"[\\].")
+_legal_cookie_chars_re = br"[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
+_cookie_re = re.compile(
+    br"""
+    (?P<key>[^=;]+)
+    (?:\s*=\s*
+        (?P<val>
+            "(?:[^\\"]|\\.)*" |
+             (?:.*?)
+        )
+    )?
+    \s*;
+""",
+    flags=re.VERBOSE,
+)
+
+
+class _Missing(object):
+    def __repr__(self):
+        return "no value"
+
+    def __reduce__(self):
+        return "_missing"
+
+
+_missing = _Missing()
+
+
+def _get_environ(obj):
+    env = getattr(obj, "environ", obj)
+    assert isinstance(env, dict), (
+        "%r is not a WSGI environment (has to be a dict)" % type(obj).__name__
+    )
+    return env
+
+
+def _has_level_handler(logger):
+    """Check if there is a handler in the logging chain that will handle
+    the given logger's effective level.
+    """
+    level = logger.getEffectiveLevel()
+    current = logger
+
+    while current:
+        if any(handler.level <= level for handler in current.handlers):
+            return True
+
+        if not current.propagate:
+            break
+
+        current = current.parent
+
+    return False
+
+
+def _log(type, message, *args, **kwargs):
+    """Log a message to the 'werkzeug' logger.
+
+    The logger is created the first time it is needed. If there is no
+    level set, it is set to :data:`logging.INFO`. If there is no handler
+    for the logger's effective level, a :class:`logging.StreamHandler`
+    is added.
+    """
+    global _logger
+
+    if _logger is None:
+        _logger = logging.getLogger("werkzeug")
+
+        if _logger.level == logging.NOTSET:
+            _logger.setLevel(logging.INFO)
+
+        if not _has_level_handler(_logger):
+            _logger.addHandler(logging.StreamHandler())
+
+    getattr(_logger, type)(message.rstrip(), *args, **kwargs)
+
+
+def _parse_signature(func):
+    """Return a signature object for the function."""
+    if hasattr(func, "im_func"):
+        func = func.im_func
+
+    # if we have a cached validator for this function, return it
+    parse = _signature_cache.get(func)
+    if parse is not None:
+        return parse
+
+    # inspect the function signature and collect all the information
+    if hasattr(inspect, "getfullargspec"):
+        tup = inspect.getfullargspec(func)
+    else:
+        tup = inspect.getargspec(func)
+    positional, vararg_var, kwarg_var, defaults = tup[:4]
+    defaults = defaults or ()
+    arg_count = len(positional)
+    arguments = []
+    for idx, name in enumerate(positional):
+        if isinstance(name, list):
+            raise TypeError(
+                "cannot parse functions that unpack tuples in the function signature"
+            )
+        try:
+            default = defaults[idx - arg_count]
+        except IndexError:
+            param = (name, False, None)
+        else:
+            param = (name, True, default)
+        arguments.append(param)
+    arguments = tuple(arguments)
+
+    def parse(args, kwargs):
+        new_args = []
+        missing = []
+        extra = {}
+
+        # consume as many arguments as positional as possible
+        for idx, (name, has_default, default) in enumerate(arguments):
+            try:
+                new_args.append(args[idx])
+            except IndexError:
+                try:
+                    new_args.append(kwargs.pop(name))
+                except KeyError:
+                    if has_default:
+                        new_args.append(default)
+                    else:
+                        missing.append(name)
+            else:
+                if name in kwargs:
+                    extra[name] = kwargs.pop(name)
+
+        # handle extra arguments
+        extra_positional = args[arg_count:]
+        if vararg_var is not None:
+            new_args.extend(extra_positional)
+            extra_positional = ()
+        if kwargs and kwarg_var is None:
+            extra.update(kwargs)
+            kwargs = {}
+
+        return (
+            new_args,
+            kwargs,
+            missing,
+            extra,
+            extra_positional,
+            arguments,
+            vararg_var,
+            kwarg_var,
+        )
+
+    _signature_cache[func] = parse
+    return parse
+
+
+def _date_to_unix(arg):
+    """Converts a timetuple, integer or datetime object into the seconds from
+    epoch in utc.
+    """
+    if isinstance(arg, datetime):
+        arg = arg.utctimetuple()
+    elif isinstance(arg, integer_types + (float,)):
+        return int(arg)
+    year, month, day, hour, minute, second = arg[:6]
+    days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
+    hours = days * 24 + hour
+    minutes = hours * 60 + minute
+    seconds = minutes * 60 + second
+    return seconds
+
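+# For example, _date_to_unix(datetime(1970, 1, 1, 0, 1)) == 60: one minute
+# after the epoch in UTC.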
+
+class _DictAccessorProperty(object):
+    """Baseclass for `environ_property` and `header_property`."""
+
+    read_only = False
+
+    def __init__(
+        self,
+        name,
+        default=None,
+        load_func=None,
+        dump_func=None,
+        read_only=None,
+        doc=None,
+    ):
+        self.name = name
+        self.default = default
+        self.load_func = load_func
+        self.dump_func = dump_func
+        if read_only is not None:
+            self.read_only = read_only
+        self.__doc__ = doc
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        storage = self.lookup(obj)
+        if self.name not in storage:
+            return self.default
+        rv = storage[self.name]
+        if self.load_func is not None:
+            try:
+                rv = self.load_func(rv)
+            except (ValueError, TypeError):
+                rv = self.default
+        return rv
+
+    def __set__(self, obj, value):
+        if self.read_only:
+            raise AttributeError("read only property")
+        if self.dump_func is not None:
+            value = self.dump_func(value)
+        self.lookup(obj)[self.name] = value
+
+    def __delete__(self, obj):
+        if self.read_only:
+            raise AttributeError("read only property")
+        self.lookup(obj).pop(self.name, None)
+
+    def __repr__(self):
+        return "<%s %s>" % (self.__class__.__name__, self.name)
+
+
+def _cookie_quote(b):
+    buf = bytearray()
+    all_legal = True
+    _lookup = _cookie_quoting_map.get
+    _push = buf.extend
+
+    for char in iter_bytes(b):
+        if char not in _legal_cookie_chars:
+            all_legal = False
+            char = _lookup(char, char)
+        _push(char)
+
+    if all_legal:
+        return bytes(buf)
+    return bytes(b'"' + buf + b'"')
+
+
+def _cookie_unquote(b):
+    if len(b) < 2:
+        return b
+    if b[:1] != b'"' or b[-1:] != b'"':
+        return b
+
+    b = b[1:-1]
+
+    i = 0
+    n = len(b)
+    rv = bytearray()
+    _push = rv.extend
+
+    while 0 <= i < n:
+        o_match = _octal_re.search(b, i)
+        q_match = _quote_re.search(b, i)
+        if not o_match and not q_match:
+            rv.extend(b[i:])
+            break
+        j = k = -1
+        if o_match:
+            j = o_match.start(0)
+        if q_match:
+            k = q_match.start(0)
+        if q_match and (not o_match or k < j):
+            _push(b[i:k])
+            _push(b[k + 1 : k + 2])
+            i = k + 2
+        else:
+            _push(b[i:j])
+            rv.append(int(b[j + 1 : j + 4], 8))
+            i = j + 4
+
+    return bytes(rv)
+
+
+def _cookie_parse_impl(b):
+    """Lowlevel cookie parsing facility that operates on bytes."""
+    i = 0
+    n = len(b)
+
+    while i < n:
+        match = _cookie_re.search(b + b";", i)
+        if not match:
+            break
+
+        key = match.group("key").strip()
+        value = match.group("val") or b""
+        i = match.end(0)
+
+        # Ignore parameters.  We have no interest in them.
+        if key.lower() not in _cookie_params:
+            yield _cookie_unquote(key), _cookie_unquote(value)
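+
+# e.g. parsing b'a=b; Path=/; c="d"' yields (b'a', b'b') and (b'c', b'd');
+# the "path" parameter is filtered out via _cookie_params.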
+
+
+def _encode_idna(domain):
+    # If we're given bytes, make sure they fit into ASCII
+    if not isinstance(domain, text_type):
+        domain.decode("ascii")
+        return domain
+
+    # Otherwise check if it's already ascii, then return
+    try:
+        return domain.encode("ascii")
+    except UnicodeError:
+        pass
+
+    # Otherwise encode each part separately
+    parts = domain.split(".")
+    for idx, part in enumerate(parts):
+        parts[idx] = part.encode("idna")
+    return b".".join(parts)
+
+
+def _decode_idna(domain):
+    # If the input is a string, try to encode it to ascii to do the
+    # idna decoding.  If that fails because of a unicode error, then
+    # we already have a decoded idna domain.
+    if isinstance(domain, text_type):
+        try:
+            domain = domain.encode("ascii")
+        except UnicodeError:
+            return domain
+
+    # Decode each part separately.  If a part fails, try to decode it
+    # with ascii and silently ignore errors.  This makes the most sense,
+    # because the idna codec does not have error handling.
+    parts = domain.split(b".")
+    for idx, part in enumerate(parts):
+        try:
+            parts[idx] = part.decode("idna")
+        except UnicodeError:
+            parts[idx] = part.decode("ascii", "ignore")
+
+    return ".".join(parts)
+
+
+def _make_cookie_domain(domain):
+    if domain is None:
+        return None
+    domain = _encode_idna(domain)
+    if b":" in domain:
+        domain = domain.split(b":", 1)[0]
+    if b"." in domain:
+        return domain
+    raise ValueError(
+        "Setting 'domain' for a cookie on a server running locally (ex: "
+        "localhost) is not supported by complying browsers. You should "
+        "have something like: '127.0.0.1 localhost dev.localhost' on "
+        "your hosts file and then point your server to run on "
+        "'dev.localhost' and also set 'domain' for 'dev.localhost'"
+    )
+
+
+def _easteregg(app=None):
+    """Like the name says.  But who knows how it works?"""
+
+    def bzzzzzzz(gyver):
+        import base64
+        import zlib
+
+        return zlib.decompress(base64.b64decode(gyver)).decode("ascii")
+
+    gyver = u"\n".join(
+        [
+            x + (77 - len(x)) * u" "
+            for x in bzzzzzzz(
+                b"""
+eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
+9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
+4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
+jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
+q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
+jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
+8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
+v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
+XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
+LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
+iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
+tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
+1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
+GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
+Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
+QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
+8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
+jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
+DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
+MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
+GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
+RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
+Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
+NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
+pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
+sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
+p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
+krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
+nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
+mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
+7f2zLkGNv8b191cD/3vs9Q833z8t"""
+            ).splitlines()
+        ]
+    )
+
+    def easteregged(environ, start_response):
+        def injecting_start_response(status, headers, exc_info=None):
+            headers.append(("X-Powered-By", "Werkzeug"))
+            return start_response(status, headers, exc_info)
+
+        if app is not None and environ.get("QUERY_STRING") != "macgybarchakku":
+            return app(environ, injecting_start_response)
+        injecting_start_response("200 OK", [("Content-Type", "text/html")])
+        return [
+            (
+                u"""
+<!DOCTYPE html>
+<html>
+<head>
+<title>About Werkzeug</title>
+<style type="text/css">
+  body { font: 15px Georgia, serif; text-align: center; }
+  a { color: #333; text-decoration: none; }
+  h1 { font-size: 30px; margin: 20px 0 10px 0; }
+  p { margin: 0 0 30px 0; }
+  pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }
+</style>
+</head>
+<body>
+<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
+<p>the Swiss Army knife of Python web development.</p>
+<pre>%s\n\n\n</pre>
+</body>
+</html>"""
+                % gyver
+            ).encode("latin1")
+        ]
+
+    return easteregged
diff --git a/lib/python3.7/site-packages/werkzeug/_reloader.py b/lib/python3.7/site-packages/werkzeug/_reloader.py
new file mode 100644
index 0000000000000000000000000000000000000000..f06a63d5fca9824c33be710ae36f4f9058b5815f
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/_reloader.py
@@ -0,0 +1,334 @@
+import os
+import subprocess
+import sys
+import threading
+import time
+from itertools import chain
+
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import text_type
+from ._internal import _log
+
+
+def _iter_module_files():
+    """This iterates over all relevant Python files.  It goes through all
+    loaded files from modules, all files in folders of already loaded modules
+    as well as all files reachable through a package.
+    """
+    # The list call is necessary on Python 3 in case the module
+    # dictionary is modified during iteration.
+    for module in list(sys.modules.values()):
+        if module is None:
+            continue
+        filename = getattr(module, "__file__", None)
+        if filename:
+            if os.path.isdir(filename) and os.path.exists(
+                os.path.join(filename, "__init__.py")
+            ):
+                filename = os.path.join(filename, "__init__.py")
+
+            old = None
+            while not os.path.isfile(filename):
+                old = filename
+                filename = os.path.dirname(filename)
+                if filename == old:
+                    break
+            else:
+                if filename[-4:] in (".pyc", ".pyo"):
+                    filename = filename[:-1]
+                yield filename
+
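+# Note: the while/else above walks upward from a module __file__ that is not
+# a regular file (e.g. an entry inside a zip archive) until an existing file
+# is found; the else branch only runs when the loop was not broken out of.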
+
+def _find_observable_paths(extra_files=None):
+    """Finds all paths that should be observed."""
+    rv = set(
+        os.path.dirname(os.path.abspath(x)) if os.path.isfile(x) else os.path.abspath(x)
+        for x in sys.path
+    )
+
+    for filename in extra_files or ():
+        rv.add(os.path.dirname(os.path.abspath(filename)))
+
+    for module in list(sys.modules.values()):
+        fn = getattr(module, "__file__", None)
+        if fn is None:
+            continue
+        fn = os.path.abspath(fn)
+        rv.add(os.path.dirname(fn))
+
+    return _find_common_roots(rv)
+
+
+def _get_args_for_reloading():
+    """Returns the executable. This contains a workaround for windows
+    if the executable is incorrectly reported to not have the .exe
+    extension which can cause bugs on reloading.  This also contains
+    a workaround for linux where the file is executable (possibly with
+    a program other than python)
+    """
+    rv = [sys.executable]
+    py_script = os.path.abspath(sys.argv[0])
+    args = sys.argv[1:]
+    # Need to look at main module to determine how it was executed.
+    __main__ = sys.modules["__main__"]
+
+    if __main__.__package__ is None:
+        # Executed a file, like "python app.py".
+        if os.name == "nt":
+            # Windows entry points have ".exe" extension and should be
+            # called directly.
+            if not os.path.exists(py_script) and os.path.exists(py_script + ".exe"):
+                py_script += ".exe"
+
+            if (
+                os.path.splitext(rv[0])[1] == ".exe"
+                and os.path.splitext(py_script)[1] == ".exe"
+            ):
+                rv.pop(0)
+
+        elif os.path.isfile(py_script) and os.access(py_script, os.X_OK):
+            # The file is marked as executable. Nix adds a wrapper that
+            # shouldn't be called with the Python executable.
+            rv.pop(0)
+
+        rv.append(py_script)
+    else:
+        # Executed a module, like "python -m werkzeug.serving".
+        if sys.argv[0] == "-m":
+            # Flask works around previous behavior by putting
+            # "-m flask" in sys.argv.
+            # TODO remove this once Flask no longer misbehaves
+            args = sys.argv
+        else:
+            py_module = __main__.__package__
+            name = os.path.splitext(os.path.basename(py_script))[0]
+
+            if name != "__main__":
+                py_module += "." + name
+
+            rv.extend(("-m", py_module.lstrip(".")))
+
+    rv.extend(args)
+    return rv
+
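+# For "python app.py" this returns, e.g., [sys.executable, "/abs/app.py"]
+# plus the original arguments; for "python -m pkg.mod" it returns
+# [sys.executable, "-m", "pkg.mod", ...] (hypothetical invocations).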
+
+def _find_common_roots(paths):
+    """Out of some paths it finds the common roots that need monitoring."""
+    paths = [x.split(os.path.sep) for x in paths]
+    root = {}
+    for chunks in sorted(paths, key=len, reverse=True):
+        node = root
+        for chunk in chunks:
+            node = node.setdefault(chunk, {})
+        node.clear()
+
+    rv = set()
+
+    def _walk(node, path):
+        for prefix, child in iteritems(node):
+            _walk(child, path + (prefix,))
+        if not node:
+            rv.add("/".join(path))
+
+    _walk(root, ())
+    return rv
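+
+# For example, _find_common_roots({"/a/b", "/a/b/c", "/d"}) == {"/a/b", "/d"}
+# on POSIX: "/a/b/c" is dropped because its ancestor "/a/b" is already covered.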
+
+
+class ReloaderLoop(object):
+    name = None
+
+    # Monkeypatched by the testsuite.  Wrapping with `staticmethod` is
+    # required in case time.sleep has been replaced by a non-C function
+    # (e.g. by `eventlet.monkey_patch`) before we get here.
+    _sleep = staticmethod(time.sleep)
+
+    def __init__(self, extra_files=None, interval=1):
+        self.extra_files = set(os.path.abspath(x) for x in extra_files or ())
+        self.interval = interval
+
+    def run(self):
+        pass
+
+    def restart_with_reloader(self):
+        """Spawn a new Python interpreter with the same arguments as this one,
+        but running the reloader thread.
+        """
+        while 1:
+            _log("info", " * Restarting with %s" % self.name)
+            args = _get_args_for_reloading()
+
+            # Work around a quirk on Windows: sometimes unicode strings end
+            # up in the environment and subprocess.call does not like this;
+            # encode them to latin1 and continue.
+            if os.name == "nt" and PY2:
+                new_environ = {}
+                for key, value in iteritems(os.environ):
+                    if isinstance(key, text_type):
+                        key = key.encode("iso-8859-1")
+                    if isinstance(value, text_type):
+                        value = value.encode("iso-8859-1")
+                    new_environ[key] = value
+            else:
+                new_environ = os.environ.copy()
+
+            new_environ["WERKZEUG_RUN_MAIN"] = "true"
+            exit_code = subprocess.call(args, env=new_environ, close_fds=False)
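+            # By convention the child exits with code 3 (sys.exit(3) in
+            # trigger_reload) to request a restart; any other exit code is
+            # propagated to the caller.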
+            if exit_code != 3:
+                return exit_code
+
+    def trigger_reload(self, filename):
+        self.log_reload(filename)
+        sys.exit(3)
+
+    def log_reload(self, filename):
+        filename = os.path.abspath(filename)
+        _log("info", " * Detected change in %r, reloading" % filename)
+
+
+class StatReloaderLoop(ReloaderLoop):
+    name = "stat"
+
+    def run(self):
+        mtimes = {}
+        while 1:
+            for filename in chain(_iter_module_files(), self.extra_files):
+                try:
+                    mtime = os.stat(filename).st_mtime
+                except OSError:
+                    continue
+
+                old_time = mtimes.get(filename)
+                if old_time is None:
+                    mtimes[filename] = mtime
+                    continue
+                elif mtime > old_time:
+                    self.trigger_reload(filename)
+            self._sleep(self.interval)
+
+
+class WatchdogReloaderLoop(ReloaderLoop):
+    def __init__(self, *args, **kwargs):
+        ReloaderLoop.__init__(self, *args, **kwargs)
+        from watchdog.observers import Observer
+        from watchdog.events import FileSystemEventHandler
+
+        self.observable_paths = set()
+
+        def _check_modification(filename):
+            if filename in self.extra_files:
+                self.trigger_reload(filename)
+            dirname = os.path.dirname(filename)
+            if dirname.startswith(tuple(self.observable_paths)):
+                if filename.endswith((".pyc", ".pyo", ".py")):
+                    self.trigger_reload(filename)
+
+        class _CustomHandler(FileSystemEventHandler):
+            def on_created(self, event):
+                _check_modification(event.src_path)
+
+            def on_modified(self, event):
+                _check_modification(event.src_path)
+
+            def on_moved(self, event):
+                _check_modification(event.src_path)
+                _check_modification(event.dest_path)
+
+            def on_deleted(self, event):
+                _check_modification(event.src_path)
+
+        reloader_name = Observer.__name__.lower()
+        if reloader_name.endswith("observer"):
+            reloader_name = reloader_name[:-8]
+        reloader_name += " reloader"
+
+        self.name = reloader_name
+
+        self.observer_class = Observer
+        self.event_handler = _CustomHandler()
+        self.should_reload = False
+
+    def trigger_reload(self, filename):
+        # This is called inside an event handler, which means throwing
+        # SystemExit has no effect.
+        # https://github.com/gorakhargosh/watchdog/issues/294
+        self.should_reload = True
+        self.log_reload(filename)
+
+    def run(self):
+        watches = {}
+        observer = self.observer_class()
+        observer.start()
+
+        try:
+            while not self.should_reload:
+                to_delete = set(watches)
+                paths = _find_observable_paths(self.extra_files)
+                for path in paths:
+                    if path not in watches:
+                        try:
+                            watches[path] = observer.schedule(
+                                self.event_handler, path, recursive=True
+                            )
+                        except OSError:
+                            # Clear this path from the list of watches.  We
+                            # don't want the same error message showing again
+                            # in the next iteration.
+                            watches[path] = None
+                    to_delete.discard(path)
+                for path in to_delete:
+                    watch = watches.pop(path, None)
+                    if watch is not None:
+                        observer.unschedule(watch)
+                self.observable_paths = paths
+                self._sleep(self.interval)
+        finally:
+            observer.stop()
+            observer.join()
+
+        sys.exit(3)
+
+
+reloader_loops = {"stat": StatReloaderLoop, "watchdog": WatchdogReloaderLoop}
+
+try:
+    __import__("watchdog.observers")
+except ImportError:
+    reloader_loops["auto"] = reloader_loops["stat"]
+else:
+    reloader_loops["auto"] = reloader_loops["watchdog"]
+
+
+def ensure_echo_on():
+    """Ensure that echo mode is enabled. Some tools such as PDB disable
+    it which causes usability issues after reload."""
+    # tcgetattr will fail if stdin isn't a tty
+    if not sys.stdin.isatty():
+        return
+    try:
+        import termios
+    except ImportError:
+        return
+    attributes = termios.tcgetattr(sys.stdin)
+    if not attributes[3] & termios.ECHO:
+        attributes[3] |= termios.ECHO
+        termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)
+
+
+def run_with_reloader(main_func, extra_files=None, interval=1, reloader_type="auto"):
+    """Run the given function in an independent python interpreter."""
+    import signal
+
+    reloader = reloader_loops[reloader_type](extra_files, interval)
+    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
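+    # The parent process (WERKZEUG_RUN_MAIN unset) respawns itself in a loop
+    # via restart_with_reloader(); the child runs main_func on a daemon
+    # thread while the reloader loop watches for file changes.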
+    try:
+        if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
+            ensure_echo_on()
+            t = threading.Thread(target=main_func, args=())
+            t.setDaemon(True)
+            t.start()
+            reloader.run()
+        else:
+            sys.exit(reloader.restart_with_reloader())
+    except KeyboardInterrupt:
+        pass
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/__init__.py b/lib/python3.7/site-packages/werkzeug/contrib/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e741f07fedb53c4b9bc843154757fc3c1ba2f68
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/contrib/__init__.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib
+    ~~~~~~~~~~~~~~~~
+
+    Contains user-submitted code that other users may find useful, but which
+    is not part of the Werkzeug core.  Anyone can write code for inclusion in
+    the `contrib` package.  All modules in this package are distributed as an
+    add-on library and thus are not part of Werkzeug itself.
+
+    This file itself is mostly for informational purposes and to tell the
+    Python interpreter that `contrib` is a package.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ea26423ff0adc89f12f53877b4495388776f25f7
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/atom.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/atom.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce342b13adab748d8027505389dd4ec0d971de61
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/atom.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/cache.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/cache.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d6e82eed30f2d152f053e5d01c6d60104e89313a
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/cache.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/fixers.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/fixers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72d1e62b2291c3f589622a157ee2b77a80b71851
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/fixers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/iterio.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/iterio.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11b86c42268dcb5750d605733de6f5c74d0e0e26
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/iterio.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/lint.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/lint.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9ec0805cca57a1c627cc0f9273d4f805d09fe5de
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/lint.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/profiler.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/profiler.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b13b54b932dd16f1b6bff0c9ad51e423da589277
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/profiler.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/securecookie.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/securecookie.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ec67c52b2f515b9e0b44cbaaec473a3ee1ebbf3
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/securecookie.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/sessions.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/sessions.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..527f40992d25ee2bc121075e2ed303f04ccff31a
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/sessions.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/wrappers.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/wrappers.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a60e091067009a18d8bbafba7ee8ac438aff95ab
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/contrib/__pycache__/wrappers.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/atom.py b/lib/python3.7/site-packages/werkzeug/contrib/atom.py
new file mode 100644
index 0000000000000000000000000000000000000000..d079d2bf26a542eef5f892ba17ada4b946962c96
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/contrib/atom.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.atom
+    ~~~~~~~~~~~~~~~~~~~~~
+
+    This module provides a class called :class:`AtomFeed` which can be
+    used to generate feeds in the Atom syndication format (see :rfc:`4287`).
+
+    Example::
+
+        def atom_feed(request):
+            feed = AtomFeed("My Blog", feed_url=request.url,
+                            url=request.host_url,
+                            subtitle="My example blog for a feed test.")
+            for post in Post.query.limit(10).all():
+                feed.add(post.title, post.body, content_type='html',
+                         author=post.author, url=post.url, id=post.uid,
+                         updated=post.last_update, published=post.pub_date)
+            return feed.get_response()
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import warnings
+from datetime import datetime
+
+from .._compat import implements_to_string
+from .._compat import string_types
+from ..utils import escape
+from ..wrappers import BaseResponse
+
+warnings.warn(
+    "'werkzeug.contrib.atom' is deprecated as of version 0.15 and will"
+    " be removed in version 1.0.",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
+
+
+def _make_text_block(name, content, content_type=None):
+    """Helper function for the builder that creates an XML text block."""
+    if content_type == "xhtml":
+        return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % (
+            name,
+            XHTML_NAMESPACE,
+            content,
+            name,
+        )
+    if not content_type:
+        return u"<%s>%s</%s>\n" % (name, escape(content), name)
+    return u'<%s type="%s">%s</%s>\n' % (name, content_type, escape(content), name)
+
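+# Illustrative sketch (not part of the original module) of what
+# _make_text_block produces for each content type; tag and content are
+# made up:
+#
+#     >>> _make_text_block("title", "Hello & Bye")
+#     '<title>Hello &amp; Bye</title>\n'
+#     >>> _make_text_block("title", "<b>Hi</b>", content_type="html")
+#     '<title type="html">&lt;b&gt;Hi&lt;/b&gt;</title>\n'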
+
+def format_iso8601(obj):
+    """Format a datetime object for iso8601"""
+    iso8601 = obj.isoformat()
+    if obj.tzinfo:
+        return iso8601
+    return iso8601 + "Z"
+
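+# Behaviour sketch (illustrative, not from the original source; Python 3
+# shown): naive datetimes are assumed to be UTC and get a "Z" suffix, while
+# aware datetimes keep their own offset.
+#
+#     >>> from datetime import datetime, timezone
+#     >>> format_iso8601(datetime(2019, 1, 1, 12, 30))
+#     '2019-01-01T12:30:00Z'
+#     >>> format_iso8601(datetime(2019, 1, 1, 12, 30, tzinfo=timezone.utc))
+#     '2019-01-01T12:30:00+00:00'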
+
+@implements_to_string
+class AtomFeed(object):
+
+    """A helper class that creates Atom feeds.
+
+    :param title: the title of the feed. Required.
+    :param title_type: the type attribute for the title element.  One of
+                       ``'html'``, ``'text'`` or ``'xhtml'``.
+    :param url: the url for the website the feed describes (not the url
+                *of* the feed itself; see `feed_url`)
+    :param id: a globally unique id for the feed.  Must be a URI.  If
+               not present the `feed_url` is used, but at least one of
+               the two is required.
+    :param updated: the time the feed was last modified.  Must be a
+                    :class:`datetime.datetime` object.  If not present
+                    the latest entry's `updated` is used.  Treated as
+                    UTC if the datetime is naive.
+    :param feed_url: the URL to the feed.  Should be the URL that was
+                     requested.
+    :param author: the author of the feed.  Must be either a string (the
+                   name) or a dict with a name (required) and uri or
+                   email (both optional).  Can also be a list of
+                   (possibly mixed) strings and dicts if there are
+                   multiple authors.  Required if not every entry has an
+                   author element.
+    :param icon: an icon for the feed.
+    :param logo: a logo for the feed.
+    :param rights: copyright information for the feed.
+    :param rights_type: the type attribute for the rights element.  One of
+                        ``'html'``, ``'text'`` or ``'xhtml'``.  Default is
+                        ``'text'``.
+    :param subtitle: a short description of the feed.
+    :param subtitle_type: the type attribute for the subtitle element.
+                          One of ``'html'``, ``'text'`` or ``'xhtml'``.
+                          Default is ``'text'``.
+    :param links: additional links.  Must be a list of dictionaries with
+                  href (required) and rel, type, hreflang, title, length
+                  (all optional)
+    :param generator: the software that generated this feed.  This must be
+                      a tuple in the form ``(name, url, version)``.  If
+                      you don't want to specify one of them, set the item
+                      to `None`.
+    :param entries: a list with the entries for the feed. Entries can also
+                    be added later with :meth:`add`.
+
+    For more information on the elements see
+    http://www.atomenabled.org/developers/syndication/
+
+    Wherever a list is expected, any iterable can be used.
+    """
+
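+    # Illustrative usage sketch (values and URLs made up, not from the
+    # original docs):
+    #
+    #     feed = AtomFeed("Example Feed", feed_url="http://example.com/feed.atom",
+    #                     url="http://example.com/",
+    #                     author={"name": "Jane Doe", "email": "jane@example.com"})
+    #     feed.add("First post", "<p>Hello!</p>", content_type="html",
+    #              url="http://example.com/posts/1", updated=datetime.utcnow())
+    #     xml = feed.to_string()
+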
+    default_generator = ("Werkzeug", None, None)
+
+    def __init__(self, title=None, entries=None, **kwargs):
+        self.title = title
+        self.title_type = kwargs.get("title_type", "text")
+        self.url = kwargs.get("url")
+        self.feed_url = kwargs.get("feed_url", self.url)
+        self.id = kwargs.get("id", self.feed_url)
+        self.updated = kwargs.get("updated")
+        self.author = kwargs.get("author", ())
+        self.icon = kwargs.get("icon")
+        self.logo = kwargs.get("logo")
+        self.rights = kwargs.get("rights")
+        self.rights_type = kwargs.get("rights_type")
+        self.subtitle = kwargs.get("subtitle")
+        self.subtitle_type = kwargs.get("subtitle_type", "text")
+        self.generator = kwargs.get("generator")
+        if self.generator is None:
+            self.generator = self.default_generator
+        self.links = kwargs.get("links", [])
+        self.entries = list(entries) if entries else []
+
+        if not hasattr(self.author, "__iter__") or isinstance(
+            self.author, string_types + (dict,)
+        ):
+            self.author = [self.author]
+        for i, author in enumerate(self.author):
+            if not isinstance(author, dict):
+                self.author[i] = {"name": author}
+
+        if not self.title:
+            raise ValueError("title is required")
+        if not self.id:
+            raise ValueError("id is required")
+        for author in self.author:
+            if "name" not in author:
+                raise TypeError("author must contain at least a name")
+
+    def add(self, *args, **kwargs):
+        """Add a new entry to the feed.  This function can either be called
+        with a :class:`FeedEntry` or some keyword and positional arguments
+        that are forwarded to the :class:`FeedEntry` constructor.
+        """
+        if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
+            self.entries.append(args[0])
+        else:
+            kwargs["feed_url"] = self.feed_url
+            self.entries.append(FeedEntry(*args, **kwargs))
+
+    def __repr__(self):
+        return "<%s %r (%d entries)>" % (
+            self.__class__.__name__,
+            self.title,
+            len(self.entries),
+        )
+
+    def generate(self):
+        """Return a generator that yields pieces of XML."""
+        # atom demands either an author element in every entry or a global one
+        if not self.author:
+            if any(not e.author for e in self.entries):
+                self.author = ({"name": "Unknown author"},)
+
+        if not self.updated:
+            dates = sorted([entry.updated for entry in self.entries])
+            self.updated = dates[-1] if dates else datetime.utcnow()
+
+        yield u'<?xml version="1.0" encoding="utf-8"?>\n'
+        yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
+        yield "  " + _make_text_block("title", self.title, self.title_type)
+        yield u"  <id>%s</id>\n" % escape(self.id)
+        yield u"  <updated>%s</updated>\n" % format_iso8601(self.updated)
+        if self.url:
+            yield u'  <link href="%s" />\n' % escape(self.url)
+        if self.feed_url:
+            yield u'  <link href="%s" rel="self" />\n' % escape(self.feed_url)
+        for link in self.links:
+            yield u"  <link %s/>\n" % "".join(
+                '%s="%s" ' % (k, escape(link[k])) for k in link
+            )
+        for author in self.author:
+            yield u"  <author>\n"
+            yield u"    <name>%s</name>\n" % escape(author["name"])
+            if "uri" in author:
+                yield u"    <uri>%s</uri>\n" % escape(author["uri"])
+            if "email" in author:
+                yield "    <email>%s</email>\n" % escape(author["email"])
+            yield "  </author>\n"
+        if self.subtitle:
+            yield "  " + _make_text_block("subtitle", self.subtitle, self.subtitle_type)
+        if self.icon:
+            yield u"  <icon>%s</icon>\n" % escape(self.icon)
+        if self.logo:
+            yield u"  <logo>%s</logo>\n" % escape(self.logo)
+        if self.rights:
+            yield "  " + _make_text_block("rights", self.rights, self.rights_type)
+        generator_name, generator_url, generator_version = self.generator
+        if generator_name or generator_url or generator_version:
+            tmp = [u"  <generator"]
+            if generator_url:
+                tmp.append(u' uri="%s"' % escape(generator_url))
+            if generator_version:
+                tmp.append(u' version="%s"' % escape(generator_version))
+            tmp.append(u">%s</generator>\n" % escape(generator_name))
+            yield u"".join(tmp)
+        for entry in self.entries:
+            for line in entry.generate():
+                yield u"  " + line
+        yield u"</feed>\n"
+
+    def to_string(self):
+        """Convert the feed into a string."""
+        return u"".join(self.generate())
+
+    def get_response(self):
+        """Return a response object for the feed."""
+        return BaseResponse(self.to_string(), mimetype="application/atom+xml")
+
+    def __call__(self, environ, start_response):
+        """Use the class as WSGI response object."""
+        return self.get_response()(environ, start_response)
+
+    def __str__(self):
+        return self.to_string()
+
+
+@implements_to_string
+class FeedEntry(object):
+
+    """Represents a single entry in a feed.
+
+    :param title: the title of the entry. Required.
+    :param title_type: the type attribute for the title element.  One of
+                       ``'html'``, ``'text'`` or ``'xhtml'``.
+    :param content: the content of the entry.
+    :param content_type: the type attribute for the content element.  One
+                         of ``'html'``, ``'text'`` or ``'xhtml'``.
+    :param summary: a summary of the entry's content.
+    :param summary_type: the type attribute for the summary element.  One
+                         of ``'html'``, ``'text'`` or ``'xhtml'``.
+    :param url: the url for the entry.
+    :param id: a globally unique id for the entry.  Must be a URI.  If
+               not present the URL is used, but at least one of the two
+               is required.
+    :param updated: the time the entry was last modified.  Must be a
+                    :class:`datetime.datetime` object.  Treated as UTC
+                    if the datetime is naive.  Required.
+    :param author: the author of the entry.  Must be either a string (the
+                   name) or a dict with a name (required) and uri or
+                   email (both optional).  Can also be a list of
+                   (possibly mixed) strings and dicts if there are
+                   multiple authors.  Required if the feed does not have
+                   an author element.
+    :param published: the time the entry was initially published.  Must
+                      be a :class:`datetime.datetime` object.  Treated
+                      as UTC if the datetime is naive.
+    :param rights: copyright information for the entry.
+    :param rights_type: the type attribute for the rights element.  One of
+                        ``'html'``, ``'text'`` or ``'xhtml'``.  Default is
+                        ``'text'``.
+    :param links: additional links.  Must be a list of dictionaries with
+                  href (required) and rel, type, hreflang, title, length
+                  (all optional)
+    :param categories: categories for the entry. Must be a list of dictionaries
+                       with term (required), scheme and label (all optional)
+    :param xml_base: The xml base (url) for this feed item.  If not provided
+                     it will default to the item url.
+
+    For more information on the elements see
+    http://www.atomenabled.org/developers/syndication/
+
+    Wherever a list is expected, any iterable can be used.
+    """
+
+    def __init__(self, title=None, content=None, feed_url=None, **kwargs):
+        self.title = title
+        self.title_type = kwargs.get("title_type", "text")
+        self.content = content
+        self.content_type = kwargs.get("content_type", "html")
+        self.url = kwargs.get("url")
+        self.id = kwargs.get("id", self.url)
+        self.updated = kwargs.get("updated")
+        self.summary = kwargs.get("summary")
+        self.summary_type = kwargs.get("summary_type", "html")
+        self.author = kwargs.get("author", ())
+        self.published = kwargs.get("published")
+        self.rights = kwargs.get("rights")
+        self.links = kwargs.get("links", [])
+        self.categories = kwargs.get("categories", [])
+        self.xml_base = kwargs.get("xml_base", feed_url)
+
+        if not hasattr(self.author, "__iter__") or isinstance(
+            self.author, string_types + (dict,)
+        ):
+            self.author = [self.author]
+        for i, author in enumerate(self.author):
+            if not isinstance(author, dict):
+                self.author[i] = {"name": author}
+
+        if not self.title:
+            raise ValueError("title is required")
+        if not self.id:
+            raise ValueError("id is required")
+        if not self.updated:
+            raise ValueError("updated is required")
+
+    def __repr__(self):
+        return "<%s %r>" % (self.__class__.__name__, self.title)
+
+    def generate(self):
+        """Yields pieces of ATOM XML."""
+        base = ""
+        if self.xml_base:
+            base = ' xml:base="%s"' % escape(self.xml_base)
+        yield u"<entry%s>\n" % base
+        yield u"  " + _make_text_block("title", self.title, self.title_type)
+        yield u"  <id>%s</id>\n" % escape(self.id)
+        yield u"  <updated>%s</updated>\n" % format_iso8601(self.updated)
+        if self.published:
+            yield u"  <published>%s</published>\n" % format_iso8601(self.published)
+        if self.url:
+            yield u'  <link href="%s" />\n' % escape(self.url)
+        for author in self.author:
+            yield u"  <author>\n"
+            yield u"    <name>%s</name>\n" % escape(author["name"])
+            if "uri" in author:
+                yield u"    <uri>%s</uri>\n" % escape(author["uri"])
+            if "email" in author:
+                yield u"    <email>%s</email>\n" % escape(author["email"])
+            yield u"  </author>\n"
+        for link in self.links:
+            yield u"  <link %s/>\n" % "".join(
+                '%s="%s" ' % (k, escape(link[k])) for k in link
+            )
+        for category in self.categories:
+            yield u"  <category %s/>\n" % "".join(
+                '%s="%s" ' % (k, escape(category[k])) for k in category
+            )
+        if self.summary:
+            yield u"  " + _make_text_block("summary", self.summary, self.summary_type)
+        if self.content:
+            yield u"  " + _make_text_block("content", self.content, self.content_type)
+        yield u"</entry>\n"
+
+    def to_string(self):
+        """Convert the feed item into a unicode object."""
+        return u"".join(self.generate())
+
+    def __str__(self):
+        return self.to_string()
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/cache.py b/lib/python3.7/site-packages/werkzeug/contrib/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..79c749b5e6c342b0726741a888c17e8b4b67b4d5
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/contrib/cache.py
@@ -0,0 +1,933 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.cache
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    The main problem with dynamic Web sites is, well, they're dynamic.  Each
+    time a user requests a page, the webserver executes a lot of code, queries
+    the database and renders templates until the visitor gets the page they
+    see.
+
+    This is a lot more expensive than just loading a file from the file system
+    and sending it to the visitor.
+
+    For most Web applications this overhead isn't a big deal, but once it
+    becomes one, you will be glad to have a caching system in place.
+
+    How Caching Works
+    =================
+
+    Caching is pretty simple.  Basically you have a cache object lurking around
+    somewhere that is connected to a remote cache or the file system or
+    something else.  When the request comes in you check if the current page
+    is already in the cache and if so, you return it from the cache.
+    Otherwise you generate the page and put it into the cache.  (Or a fragment
+    of the page; you don't have to cache the full thing.)
+
+    Here is a simple example of how to cache a sidebar for 5 minutes::
+
+        def get_sidebar(user):
+            identifier = 'sidebar_for/user%d' % user.id
+            value = cache.get(identifier)
+            if value is not None:
+                return value
+            value = generate_sidebar_for(user=user)
+            cache.set(identifier, value, timeout=60 * 5)
+            return value
+
+    Creating a Cache Object
+    =======================
+
+    To create a cache object you just import the cache system of your choice
+    from the cache module and instantiate it.  Then you can start working
+    with that object:
+
+    >>> from werkzeug.contrib.cache import SimpleCache
+    >>> c = SimpleCache()
+    >>> c.set("foo", "value")
+    >>> c.get("foo")
+    'value'
+    >>> c.get("missing") is None
+    True
+
+    Please keep in mind that you have to create the cache object and put it
+    somewhere you can access it (either as a module global you can import or
+    as an attribute of your WSGI application).
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import errno
+import os
+import platform
+import re
+import tempfile
+import warnings
+from hashlib import md5
+from time import time
+
+from .._compat import integer_types
+from .._compat import iteritems
+from .._compat import string_types
+from .._compat import text_type
+from .._compat import to_native
+from ..posixemulation import rename
+
+try:
+    import cPickle as pickle
+except ImportError:  # pragma: no cover
+    import pickle
+
+warnings.warn(
+    "'werkzeug.contrib.cache' is deprecated as of version 0.15 and will"
+    " be removed in version 1.0. It has moved to https://github.com"
+    "/pallets/cachelib.",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+
+def _items(mappingorseq):
+    """Wrapper for efficient iteration over mappings represented by dicts
+    or sequences::
+
+        >>> for k, v in _items((i, i*i) for i in xrange(5)):
+        ...    assert k*k == v
+
+        >>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
+        ...    assert k*k == v
+
+    """
+    if hasattr(mappingorseq, "items"):
+        return iteritems(mappingorseq)
+    return mappingorseq
+
+
+class BaseCache(object):
+    """Baseclass for the cache systems.  All the cache systems implement this
+    API or a superset of it.
+
+    :param default_timeout: the default timeout (in seconds) that is used if
+                            no timeout is specified on :meth:`set`. A timeout
+                            of 0 indicates that the cache never expires.
+    """
+
+    def __init__(self, default_timeout=300):
+        self.default_timeout = default_timeout
+
+    def _normalize_timeout(self, timeout):
+        if timeout is None:
+            timeout = self.default_timeout
+        return timeout
+
+    def get(self, key):
+        """Look up key in the cache and return the value for it.
+
+        :param key: the key to be looked up.
+        :returns: The value if it exists and is readable, else ``None``.
+        """
+        return None
+
+    def delete(self, key):
+        """Delete `key` from the cache.
+
+        :param key: the key to delete.
+        :returns: Whether the key existed and has been deleted.
+        :rtype: boolean
+        """
+        return True
+
+    def get_many(self, *keys):
+        """Returns a list of values for the given keys.
+        For each key an item in the list is created::
+
+            foo, bar = cache.get_many("foo", "bar")
+
+        Has the same error handling as :meth:`get`.
+
+        :param keys: The function accepts multiple keys as positional
+                     arguments.
+        """
+        return [self.get(k) for k in keys]
+
+    def get_dict(self, *keys):
+        """Like :meth:`get_many` but return a dict::
+
+            d = cache.get_dict("foo", "bar")
+            foo = d["foo"]
+            bar = d["bar"]
+
+        :param keys: The function accepts multiple keys as positional
+                     arguments.
+        """
+        return dict(zip(keys, self.get_many(*keys)))
+
+    def set(self, key, value, timeout=None):
+        """Add a new key/value to the cache (overwrites value, if key already
+        exists in the cache).
+
+        :param key: the key to set
+        :param value: the value for the key
+        :param timeout: the cache timeout for the key in seconds (if not
+                        specified, it uses the default timeout). A timeout of
+                        0 indicates that the cache never expires.
+        :returns: ``True`` if key has been updated, ``False`` for backend
+                  errors. Pickling errors, however, will raise a subclass of
+                  ``pickle.PickleError``.
+        :rtype: boolean
+        """
+        return True
+
+    def add(self, key, value, timeout=None):
+        """Works like :meth:`set` but does not overwrite the values of already
+        existing keys.
+
+        :param key: the key to set
+        :param value: the value for the key
+        :param timeout: the cache timeout for the key in seconds (if not
+                        specified, it uses the default timeout). A timeout of
+                        0 indicates that the cache never expires.
+        :returns: Same as :meth:`set`, but also ``False`` for already
+                  existing keys.
+        :rtype: boolean
+        """
+        return True
+
+    def set_many(self, mapping, timeout=None):
+        """Sets multiple keys and values from a mapping.
+
+        :param mapping: a mapping with the keys/values to set.
+        :param timeout: the cache timeout for the key in seconds (if not
+                        specified, it uses the default timeout). A timeout of
+                        0 indicates that the cache never expires.
+        :returns: Whether all given keys have been set.
+        :rtype: boolean
+        """
+        rv = True
+        for key, value in _items(mapping):
+            if not self.set(key, value, timeout):
+                rv = False
+        return rv
+
+    def delete_many(self, *keys):
+        """Deletes multiple keys at once.
+
+        :param keys: The function accepts multiple keys as positional
+                     arguments.
+        :returns: Whether all given keys have been deleted.
+        :rtype: boolean
+        """
+        return all(self.delete(key) for key in keys)
+
+    def has(self, key):
+        """Checks if a key exists in the cache without returning it. This is a
+        cheap operation that bypasses loading the actual data on the backend.
+
+        This method is optional and may not be implemented on all caches.
+
+        :param key: the key to check
+        """
+        raise NotImplementedError(
+            "%s doesn't have an efficient implementation of `has`. That "
+            "means it is impossible to check whether a key exists without "
+            "fully loading the key's data. Consider using `self.get` "
+            "explicitly if you don't care about performance."
+        )
+
+    def clear(self):
+        """Clears the cache.  Keep in mind that not all caches support
+        completely clearing the cache.
+
+        :returns: Whether the cache has been cleared.
+        :rtype: boolean
+        """
+        return True
+
+    def inc(self, key, delta=1):
+        """Increments the value of a key by `delta`.  If the key does
+        not yet exist it is initialized with `delta`.
+
+        For supporting caches this is an atomic operation.
+
+        :param key: the key to increment.
+        :param delta: the delta to add.
+        :returns: The new value or ``None`` for backend errors.
+        """
+        value = (self.get(key) or 0) + delta
+        return value if self.set(key, value) else None
+
+    def dec(self, key, delta=1):
+        """Decrements the value of a key by `delta`.  If the key does
+        not yet exist it is initialized with `-delta`.
+
+        For supporting caches this is an atomic operation.
+
+        :param key: the key to decrement.
+        :param delta: the delta to subtract.
+        :returns: The new value or `None` for backend errors.
+        """
+        value = (self.get(key) or 0) - delta
+        return value if self.set(key, value) else None
+
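+    # Semantics sketch for inc/dec (illustrative, assuming a working backend;
+    # key name made up):
+    #
+    #     cache.inc("hits")       # missing key -> initialized to 1
+    #     cache.inc("hits", 5)    # -> 6
+    #     cache.dec("hits", 2)    # -> 4
+    #
+    # This default implementation is a get/set pair, so it is only truly
+    # atomic on backends that override it (e.g. Redis or memcached).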
+
+class NullCache(BaseCache):
+    """A cache that doesn't cache.  This can be useful for unit testing.
+
+    :param default_timeout: a dummy parameter that is ignored but exists
+                            for API compatibility with other caches.
+    """
+
+    def has(self, key):
+        return False
+
+
+class SimpleCache(BaseCache):
+    """Simple memory cache for single process environments.  This class exists
+    mainly for the development server and is not 100% thread safe.  It tries
+    to use as many atomic operations as possible and no locks for simplicity
+    but it could happen under heavy load that keys are added multiple times.
+
+    :param threshold: the maximum number of items the cache stores before
+                      it starts deleting some.
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`~BaseCache.set`. A timeout of
+                            0 indicates that the cache never expires.
+    """
+
+    def __init__(self, threshold=500, default_timeout=300):
+        BaseCache.__init__(self, default_timeout)
+        self._cache = {}
+        self.clear = self._cache.clear
+        self._threshold = threshold
+
+    def _prune(self):
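+        # Eviction sketch: once the item count exceeds the threshold, drop
+        # every expired entry and, as a crude eviction policy, every third
+        # entry regardless of age.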
+        if len(self._cache) > self._threshold:
+            now = time()
+            toremove = []
+            for idx, (key, (expires, _)) in enumerate(self._cache.items()):
+                if (expires != 0 and expires <= now) or idx % 3 == 0:
+                    toremove.append(key)
+            for key in toremove:
+                self._cache.pop(key, None)
+
+    def _normalize_timeout(self, timeout):
+        timeout = BaseCache._normalize_timeout(self, timeout)
+        if timeout > 0:
+            timeout = time() + timeout
+        return timeout
+
+    def get(self, key):
+        try:
+            expires, value = self._cache[key]
+            if expires == 0 or expires > time():
+                return pickle.loads(value)
+        except (KeyError, pickle.PickleError):
+            return None
+
+    def set(self, key, value, timeout=None):
+        expires = self._normalize_timeout(timeout)
+        self._prune()
+        self._cache[key] = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
+        return True
+
+    def add(self, key, value, timeout=None):
+        expires = self._normalize_timeout(timeout)
+        self._prune()
+        item = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
+        if key in self._cache:
+            return False
+        self._cache.setdefault(key, item)
+        return True
+
+    def delete(self, key):
+        return self._cache.pop(key, None) is not None
+
+    def has(self, key):
+        try:
+            expires, value = self._cache[key]
+            return expires == 0 or expires > time()
+        except KeyError:
+            return False
+
+
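+# A valid memcached key, per the regex below, is 1-250 characters and
+# contains no bytes in the range \x00-\x21 (control characters and
+# whitespace) and no \xff.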
+_test_memcached_key = re.compile(r"[^\x00-\x21\xff]{1,250}$").match
+
+
+class MemcachedCache(BaseCache):
+    """A cache that uses memcached as backend.
+
+    The first argument can either be an object that resembles the API of a
+    :class:`memcache.Client` or a tuple/list of server addresses. In the
+    event that a tuple/list is passed, Werkzeug tries to import the best
+    available memcache library.
+
+    This cache looks into the following packages/modules to find bindings for
+    memcached:
+
+        - ``pylibmc``
+        - ``google.appengine.api.memcache``
+        - ``memcache``
+        - ``libmc``
+
+    Implementation notes:  This cache backend works around some limitations in
+    memcached to simplify the interface.  For example unicode keys are encoded
+    to utf-8 on the fly.  Methods such as :meth:`~BaseCache.get_dict` return
+    the keys in the same format as passed.  Furthermore all get methods
+    silently ignore key errors, so that untrusted user data can be passed to
+    them safely, as is often the case in web applications.
+
+    :param servers: a list or tuple of server addresses or alternatively
+                    a :class:`memcache.Client` or a compatible client.
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`~BaseCache.set`. A timeout of
+                            0 indicates that the cache never expires.
+    :param key_prefix: a prefix that is added before all keys.  This makes it
+                       possible to use the same memcached server for different
+                       applications.  Keep in mind that
+                       :meth:`~BaseCache.clear` will also clear keys with a
+                       different prefix.
+    """
+
+    def __init__(self, servers=None, default_timeout=300, key_prefix=None):
+        BaseCache.__init__(self, default_timeout)
+        if servers is None or isinstance(servers, (list, tuple)):
+            if servers is None:
+                servers = ["127.0.0.1:11211"]
+            self._client = self.import_preferred_memcache_lib(servers)
+            if self._client is None:
+                raise RuntimeError("no memcache module found")
+        else:
+            # NOTE: servers is actually an already initialized memcache
+            # client.
+            self._client = servers
+
+        self.key_prefix = to_native(key_prefix)
+
+    def _normalize_key(self, key):
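+        # Encode unicode keys to a native utf-8 string and prepend the
+        # optional key_prefix, so e.g. "user:1" with prefix "app1_" is stored
+        # as "app1_user:1" (values made up for illustration).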
+        key = to_native(key, "utf-8")
+        if self.key_prefix:
+            key = self.key_prefix + key
+        return key
+
+    def _normalize_timeout(self, timeout):
+        timeout = BaseCache._normalize_timeout(self, timeout)
+        if timeout > 0:
+            timeout = int(time()) + timeout
+        return timeout
+
+    def get(self, key):
+        key = self._normalize_key(key)
+        # memcached doesn't support keys longer than 250 characters (see
+        # _test_memcached_key above).  Over-long keys often come from
+        # untrusted user-submitted data, so we fail silently when getting.
+        if _test_memcached_key(key):
+            return self._client.get(key)
+
+    def get_dict(self, *keys):
+        key_mapping = {}
+        have_encoded_keys = False
+        for key in keys:
+            encoded_key = self._normalize_key(key)
+            if not isinstance(key, str):
+                have_encoded_keys = True
+            if _test_memcached_key(key):
+                key_mapping[encoded_key] = key
+        _keys = list(key_mapping)
+        d = rv = self._client.get_multi(_keys)
+        if have_encoded_keys or self.key_prefix:
+            rv = {}
+            for key, value in iteritems(d):
+                rv[key_mapping[key]] = value
+        if len(rv) < len(keys):
+            for key in keys:
+                if key not in rv:
+                    rv[key] = None
+        return rv
+
+    def add(self, key, value, timeout=None):
+        key = self._normalize_key(key)
+        timeout = self._normalize_timeout(timeout)
+        return self._client.add(key, value, timeout)
+
+    def set(self, key, value, timeout=None):
+        key = self._normalize_key(key)
+        timeout = self._normalize_timeout(timeout)
+        return self._client.set(key, value, timeout)
+
+    def get_many(self, *keys):
+        d = self.get_dict(*keys)
+        return [d[key] for key in keys]
+
+    def set_many(self, mapping, timeout=None):
+        new_mapping = {}
+        for key, value in _items(mapping):
+            key = self._normalize_key(key)
+            new_mapping[key] = value
+
+        timeout = self._normalize_timeout(timeout)
+        failed_keys = self._client.set_multi(new_mapping, timeout)
+        return not failed_keys
+
+    def delete(self, key):
+        key = self._normalize_key(key)
+        if _test_memcached_key(key):
+            return self._client.delete(key)
+
+    def delete_many(self, *keys):
+        new_keys = []
+        for key in keys:
+            key = self._normalize_key(key)
+            if _test_memcached_key(key):
+                new_keys.append(key)
+        return self._client.delete_multi(new_keys)
+
+    def has(self, key):
+        key = self._normalize_key(key)
+        if _test_memcached_key(key):
+            return self._client.append(key, "")
+        return False
+
+    def clear(self):
+        return self._client.flush_all()
+
+    def inc(self, key, delta=1):
+        key = self._normalize_key(key)
+        return self._client.incr(key, delta)
+
+    def dec(self, key, delta=1):
+        key = self._normalize_key(key)
+        return self._client.decr(key, delta)
+
+    def import_preferred_memcache_lib(self, servers):
+        """Returns an initialized memcache client.  Used by the constructor."""
+        try:
+            import pylibmc
+        except ImportError:
+            pass
+        else:
+            return pylibmc.Client(servers)
+
+        try:
+            from google.appengine.api import memcache
+        except ImportError:
+            pass
+        else:
+            return memcache.Client()
+
+        try:
+            import memcache
+        except ImportError:
+            pass
+        else:
+            return memcache.Client(servers)
+
+        try:
+            import libmc
+        except ImportError:
+            pass
+        else:
+            return libmc.Client(servers)
+
+
+# backwards compatibility
+GAEMemcachedCache = MemcachedCache
+
+
+class RedisCache(BaseCache):
+    """Uses the Redis key-value store as a cache backend.
+
+    The first argument can be either a string denoting the address of the
+    Redis server or an object resembling an instance of a redis.Redis class.
+
+    Note: The Python Redis API already takes care of encoding unicode strings
+    on the fly.
+
+    .. versionadded:: 0.7
+
+    .. versionadded:: 0.8
+       `key_prefix` was added.
+
+    .. versionchanged:: 0.8
+       This cache backend now properly serializes objects.
+
+    .. versionchanged:: 0.8.3
+       This cache backend now supports password authentication.
+
+    .. versionchanged:: 0.10
+        ``**kwargs`` is now passed to the redis object.
+
+    :param host: address of the Redis server or an object whose API is
+                 compatible with the official Python Redis client (redis-py).
+    :param port: port number on which the Redis server listens for
+                 connections.
+    :param password: password authentication for the Redis server.
+    :param db: the database (zero-based numeric index) on the Redis server
+               to connect to.
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`~BaseCache.set`. A timeout of
+                            0 indicates that the cache never expires.
+    :param key_prefix: A prefix that should be added to all keys.
+
+    Any additional keyword arguments will be passed to ``redis.Redis``.
+    """
+
+    def __init__(
+        self,
+        host="localhost",
+        port=6379,
+        password=None,
+        db=0,
+        default_timeout=300,
+        key_prefix=None,
+        **kwargs
+    ):
+        BaseCache.__init__(self, default_timeout)
+        if host is None:
+            raise ValueError("RedisCache host parameter may not be None")
+        if isinstance(host, string_types):
+            try:
+                import redis
+            except ImportError:
+                raise RuntimeError("no redis module found")
+            if kwargs.get("decode_responses", None):
+                raise ValueError("decode_responses is not supported by RedisCache.")
+            self._client = redis.Redis(
+                host=host, port=port, password=password, db=db, **kwargs
+            )
+        else:
+            self._client = host
+        self.key_prefix = key_prefix or ""
+
+    def _normalize_timeout(self, timeout):
+        timeout = BaseCache._normalize_timeout(self, timeout)
+        if timeout == 0:
+            timeout = -1
+        return timeout
+
+    def dump_object(self, value):
+        """Dumps an object into a string for redis.  By default it serializes
+        integers as regular string and pickle dumps everything else.
+        """
+        t = type(value)
+        if t in integer_types:
+            return str(value).encode("ascii")
+        return b"!" + pickle.dumps(value)
+
+    def load_object(self, value):
+        """The reversal of :meth:`dump_object`.  This might be called with
+        None.
+        """
+        if value is None:
+            return None
+        if value.startswith(b"!"):
+            try:
+                return pickle.loads(value[1:])
+            except pickle.PickleError:
+                return None
+        try:
+            return int(value)
+        except ValueError:
+            # before 0.8 we did not have serialization.  Still support that.
+            return value
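+
+    # Roundtrip sketch (illustrative, not from the original source): integers
+    # stay human-readable in Redis, everything else is pickled behind a
+    # leading b"!" marker.
+    #
+    #     dump_object(42)        -> b"42"
+    #     dump_object({"a": 1})  -> b"!" + pickle.dumps({"a": 1})
+    #     load_object(b"42")     -> 42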
+
+    def get(self, key):
+        return self.load_object(self._client.get(self.key_prefix + key))
+
+    def get_many(self, *keys):
+        if self.key_prefix:
+            keys = [self.key_prefix + key for key in keys]
+        return [self.load_object(x) for x in self._client.mget(keys)]
+
+    def set(self, key, value, timeout=None):
+        timeout = self._normalize_timeout(timeout)
+        dump = self.dump_object(value)
+        if timeout == -1:
+            result = self._client.set(name=self.key_prefix + key, value=dump)
+        else:
+            result = self._client.setex(
+                name=self.key_prefix + key, value=dump, time=timeout
+            )
+        return result
+
+    def add(self, key, value, timeout=None):
+        timeout = self._normalize_timeout(timeout)
+        dump = self.dump_object(value)
+        return self._client.setnx(
+            name=self.key_prefix + key, value=dump
+        ) and self._client.expire(name=self.key_prefix + key, time=timeout)
+
+    def set_many(self, mapping, timeout=None):
+        timeout = self._normalize_timeout(timeout)
+        # Use transaction=False to batch without calling redis MULTI
+        # which is not supported by twemproxy
+        pipe = self._client.pipeline(transaction=False)
+
+        for key, value in _items(mapping):
+            dump = self.dump_object(value)
+            if timeout == -1:
+                pipe.set(name=self.key_prefix + key, value=dump)
+            else:
+                pipe.setex(name=self.key_prefix + key, value=dump, time=timeout)
+        return pipe.execute()
+
+    def delete(self, key):
+        return self._client.delete(self.key_prefix + key)
+
+    def delete_many(self, *keys):
+        if not keys:
+            return
+        if self.key_prefix:
+            keys = [self.key_prefix + key for key in keys]
+        return self._client.delete(*keys)
+
+    def has(self, key):
+        return self._client.exists(self.key_prefix + key)
+
+    def clear(self):
+        status = False
+        if self.key_prefix:
+            keys = self._client.keys(self.key_prefix + "*")
+            if keys:
+                status = self._client.delete(*keys)
+        else:
+            status = self._client.flushdb()
+        return status
+
+    def inc(self, key, delta=1):
+        return self._client.incr(name=self.key_prefix + key, amount=delta)
+
+    def dec(self, key, delta=1):
+        return self._client.decr(name=self.key_prefix + key, amount=delta)
+
+
+class FileSystemCache(BaseCache):
+    """A cache that stores the items on the file system.  This cache depends
+    on being the only user of the `cache_dir`.  Make absolutely sure that
+    nobody but this cache stores files there, or the cache will randomly
+    delete files therein.
+
+    :param cache_dir: the directory where cache files are stored.
+    :param threshold: the maximum number of items the cache stores before
+                      it starts deleting some. A threshold value of 0
+                      indicates no threshold.
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`~BaseCache.set`. A timeout of
+                            0 indicates that the cache never expires.
+    :param mode: the file mode wanted for the cache files, default 0600
+    """
+
+    #: used for temporary files by the FileSystemCache
+    _fs_transaction_suffix = ".__wz_cache"
+    #: keep amount of files in a cache element
+    _fs_count_file = "__wz_cache_count"
+
+    def __init__(self, cache_dir, threshold=500, default_timeout=300, mode=0o600):
+        BaseCache.__init__(self, default_timeout)
+        self._path = cache_dir
+        self._threshold = threshold
+        self._mode = mode
+
+        try:
+            os.makedirs(self._path)
+        except OSError as ex:
+            if ex.errno != errno.EEXIST:
+                raise
+
+        self._update_count(value=len(self._list_dir()))
+
+    @property
+    def _file_count(self):
+        return self.get(self._fs_count_file) or 0
+
+    def _update_count(self, delta=None, value=None):
+        # If we have no threshold, don't count files
+        if self._threshold == 0:
+            return
+
+        if delta:
+            new_count = self._file_count + delta
+        else:
+            new_count = value or 0
+        self.set(self._fs_count_file, new_count, mgmt_element=True)
+
+    def _normalize_timeout(self, timeout):
+        timeout = BaseCache._normalize_timeout(self, timeout)
+        if timeout != 0:
+            timeout = time() + timeout
+        return int(timeout)
+
+    def _list_dir(self):
+        """return a list of (fully qualified) cache filenames
+        """
+        mgmt_files = [
+            self._get_filename(name).split("/")[-1] for name in (self._fs_count_file,)
+        ]
+        return [
+            os.path.join(self._path, fn)
+            for fn in os.listdir(self._path)
+            if not fn.endswith(self._fs_transaction_suffix) and fn not in mgmt_files
+        ]
+
+    def _prune(self):
+        if self._threshold == 0 or not self._file_count > self._threshold:
+            return
+
+        entries = self._list_dir()
+        now = time()
+        for idx, fname in enumerate(entries):
+            try:
+                remove = False
+                with open(fname, "rb") as f:
+                    expires = pickle.load(f)
+                remove = (expires != 0 and expires <= now) or idx % 3 == 0
+
+                if remove:
+                    os.remove(fname)
+            except (IOError, OSError):
+                pass
+        self._update_count(value=len(self._list_dir()))
+
+    def clear(self):
+        for fname in self._list_dir():
+            try:
+                os.remove(fname)
+            except (IOError, OSError):
+                self._update_count(value=len(self._list_dir()))
+                return False
+        self._update_count(value=0)
+        return True
+
+    def _get_filename(self, key):
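+        # Hash the key with MD5 so any unicode key maps to a safe,
+        # fixed-length file name inside the cache directory.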
+        if isinstance(key, text_type):
+            key = key.encode("utf-8")  # XXX unicode review
+        hash = md5(key).hexdigest()
+        return os.path.join(self._path, hash)
+
+    def get(self, key):
+        filename = self._get_filename(key)
+        try:
+            with open(filename, "rb") as f:
+                pickle_time = pickle.load(f)
+                if pickle_time == 0 or pickle_time >= time():
+                    return pickle.load(f)
+                else:
+                    os.remove(filename)
+                    return None
+        except (IOError, OSError, pickle.PickleError):
+            return None
+
+    def add(self, key, value, timeout=None):
+        filename = self._get_filename(key)
+        if not os.path.exists(filename):
+            return self.set(key, value, timeout)
+        return False
+
+    def set(self, key, value, timeout=None, mgmt_element=False):
+        # Management elements have no timeout
+        if mgmt_element:
+            timeout = 0
+
+        # Don't prune on management element update, to avoid loop
+        else:
+            self._prune()
+
+        timeout = self._normalize_timeout(timeout)
+        filename = self._get_filename(key)
+        try:
+            fd, tmp = tempfile.mkstemp(
+                suffix=self._fs_transaction_suffix, dir=self._path
+            )
+            with os.fdopen(fd, "wb") as f:
+                pickle.dump(timeout, f, 1)
+                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
+            rename(tmp, filename)
+            os.chmod(filename, self._mode)
+        except (IOError, OSError):
+            return False
+        else:
+            # Management elements should not count towards threshold
+            if not mgmt_element:
+                self._update_count(delta=1)
+            return True
+
+    def delete(self, key, mgmt_element=False):
+        try:
+            os.remove(self._get_filename(key))
+        except (IOError, OSError):
+            return False
+        else:
+            # Management elements should not count towards threshold
+            if not mgmt_element:
+                self._update_count(delta=-1)
+            return True
+
+    def has(self, key):
+        filename = self._get_filename(key)
+        try:
+            with open(filename, "rb") as f:
+                pickle_time = pickle.load(f)
+                if pickle_time == 0 or pickle_time >= time():
+                    return True
+                else:
+                    os.remove(filename)
+                    return False
+        except (IOError, OSError, pickle.PickleError):
+            return False
+
+
+class UWSGICache(BaseCache):
+    """Implements the cache using uWSGI's caching framework.
+
+    .. note::
+        This class cannot be used when running under PyPy, because the uWSGI
+        API implementation for PyPy is lacking the needed functionality.
+
+    :param default_timeout: The default timeout in seconds.
+    :param cache: The name of the caching instance to connect to, for
+        example: ``mycache@localhost:3031``.  Defaults to an empty string,
+        which means uWSGI caches in the local instance.  If the cache is in
+        the same instance as the werkzeug app, you only have to provide the
+        name of the cache.
+    """
+
+    def __init__(self, default_timeout=300, cache=""):
+        BaseCache.__init__(self, default_timeout)
+
+        if platform.python_implementation() == "PyPy":
+            raise RuntimeError(
+                "uWSGI caching does not work under PyPy, see "
+                "the docs for more details."
+            )
+
+        try:
+            import uwsgi
+
+            self._uwsgi = uwsgi
+        except ImportError:
+            raise RuntimeError(
+                "uWSGI could not be imported, are you running under uWSGI?"
+            )
+
+        self.cache = cache
+
+    def get(self, key):
+        rv = self._uwsgi.cache_get(key, self.cache)
+        if rv is None:
+            return
+        return pickle.loads(rv)
+
+    def delete(self, key):
+        return self._uwsgi.cache_del(key, self.cache)
+
+    def set(self, key, value, timeout=None):
+        return self._uwsgi.cache_update(
+            key, pickle.dumps(value), self._normalize_timeout(timeout), self.cache
+        )
+
+    def add(self, key, value, timeout=None):
+        return self._uwsgi.cache_set(
+            key, pickle.dumps(value), self._normalize_timeout(timeout), self.cache
+        )
+
+    def clear(self):
+        return self._uwsgi.cache_clear(self.cache)
+
+    def has(self, key):
+        return self._uwsgi.cache_exists(key, self.cache) is not None
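+
+
+# Usage sketch (illustrative; only works inside a uWSGI worker with a cache
+# configured, e.g. started with ``--cache2 name=default,items=100``):
+#
+#     cache = UWSGICache(default_timeout=60, cache="default")
+#     cache.set("answer", 42)
+#     cache.get("answer")  # -> 42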
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/fixers.py b/lib/python3.7/site-packages/werkzeug/contrib/fixers.py
new file mode 100644
index 0000000000000000000000000000000000000000..8df0afdab1162d3a58c07a4adbfed3496df24857
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/contrib/fixers.py
@@ -0,0 +1,262 @@
+"""
+Fixers
+======
+
+.. warning::
+    .. deprecated:: 0.15
+        ``ProxyFix`` has moved to :mod:`werkzeug.middleware.proxy_fix`.
+        All other code in this module is deprecated and will be removed
+        in version 1.0.
+
+.. versionadded:: 0.5
+
+This module includes various helpers that fix web server behavior.
+
+.. autoclass:: ProxyFix
+   :members:
+
+.. autoclass:: CGIRootFix
+
+.. autoclass:: PathInfoFromRequestUriFix
+
+.. autoclass:: HeaderRewriterFix
+
+.. autoclass:: InternetExplorerFix
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+import warnings
+
+from ..datastructures import Headers
+from ..datastructures import ResponseCacheControl
+from ..http import parse_cache_control_header
+from ..http import parse_options_header
+from ..http import parse_set_header
+from ..middleware.proxy_fix import ProxyFix as _ProxyFix
+from ..useragents import UserAgent
+
+try:
+    from urllib.parse import unquote
+except ImportError:
+    from urllib import unquote
+
+
+class CGIRootFix(object):
+    """Wrap the application in this middleware if you are using FastCGI
+    or CGI and you have problems with your app root being set to the CGI
+    script's path instead of the path users are going to visit.
+
+    :param app: the WSGI application
+    :param app_root: Defaulting to ``'/'``, you can set this to
+        something else if your app is mounted somewhere else.
+
+    .. deprecated:: 0.15
+        This middleware will be removed in version 1.0.
+
+    .. versionchanged:: 0.9
+        Added `app_root` parameter and renamed from
+        ``LighttpdCGIRootFix``.
+    """
+
+    def __init__(self, app, app_root="/"):
+        warnings.warn(
+            "'CGIRootFix' is deprecated as of version 0.15 and will be"
+            " removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        self.app = app
+        self.app_root = app_root.strip("/")
+
+    def __call__(self, environ, start_response):
+        environ["SCRIPT_NAME"] = self.app_root
+        return self.app(environ, start_response)
+
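+# Usage sketch (illustrative): wrap a WSGI app so SCRIPT_NAME points at the
+# public mount point instead of the CGI script's path:
+#
+#     app = CGIRootFix(app, app_root="/myapp")
+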
+
+class LighttpdCGIRootFix(CGIRootFix):
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'LighttpdCGIRootFix' is renamed 'CGIRootFix'. Both will be"
+            " removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(LighttpdCGIRootFix, self).__init__(*args, **kwargs)
+
+
+class PathInfoFromRequestUriFix(object):
+    """On windows environment variables are limited to the system charset
+    which makes it impossible to store the `PATH_INFO` variable in the
+    environment without loss of information on some systems.
+
+    This is for example a problem for CGI scripts on a Windows Apache.
+
+    This fixer works by recreating `PATH_INFO` from `REQUEST_URI`,
+    `REQUEST_URL`, or `UNENCODED_URL` (whichever is available).  Thus the
+    fix can only be applied if the webserver supports at least one of
+    these variables.
+
+    :param app: the WSGI application
+
+    .. deprecated:: 0.15
+        This middleware will be removed in version 1.0.
+    """
+
+    def __init__(self, app):
+        warnings.warn(
+            "'PathInfoFromRequestUriFix' is deprecated as of version"
+            " 0.15 and will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        self.app = app
+
+    def __call__(self, environ, start_response):
+        for key in "REQUEST_URL", "REQUEST_URI", "UNENCODED_URL":
+            if key not in environ:
+                continue
+            request_uri = unquote(environ[key])
+            script_name = unquote(environ.get("SCRIPT_NAME", ""))
+            if request_uri.startswith(script_name):
+                environ["PATH_INFO"] = request_uri[len(script_name) :].split("?", 1)[0]
+                break
+        return self.app(environ, start_response)
+
+
+class ProxyFix(_ProxyFix):
+    """
+    .. deprecated:: 0.15
+        ``werkzeug.contrib.fixers.ProxyFix`` has moved to
+        :mod:`werkzeug.middleware.proxy_fix`. This import will be
+        removed in 1.0.
+    """
+
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'werkzeug.contrib.fixers.ProxyFix' has moved to 'werkzeug"
+            ".middleware.proxy_fix.ProxyFix'. This import is deprecated"
+            " as of version 0.15 and will be removed in 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(ProxyFix, self).__init__(*args, **kwargs)
+
+
+class HeaderRewriterFix(object):
+    """This middleware can remove response headers and add others.  This
+    is for example useful to remove the `Date` header from responses if you
+    are using a server that adds that header, no matter if it's present or
+    not or to add `X-Powered-By` headers::
+
+        app = HeaderRewriterFix(app, remove_headers=['Date'],
+                                add_headers=[('X-Powered-By', 'WSGI')])
+
+    :param app: the WSGI application
+    :param remove_headers: a sequence of header keys that should be
+                           removed.
+    :param add_headers: a sequence of ``(key, value)`` tuples that should
+                        be added.
+
+    .. deprecated:: 0.15
+        This middleware will be removed in 1.0.
+    """
+
+    def __init__(self, app, remove_headers=None, add_headers=None):
+        warnings.warn(
+            "'HeaderRewriterFix' is deprecated as of version 0.15 and"
+            " will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        self.app = app
+        self.remove_headers = set(x.lower() for x in (remove_headers or ()))
+        self.add_headers = list(add_headers or ())
+
+    def __call__(self, environ, start_response):
+        def rewriting_start_response(status, headers, exc_info=None):
+            new_headers = []
+            for key, value in headers:
+                if key.lower() not in self.remove_headers:
+                    new_headers.append((key, value))
+            new_headers += self.add_headers
+            return start_response(status, new_headers, exc_info)
+
+        return self.app(environ, rewriting_start_response)
+
+
+class InternetExplorerFix(object):
+    """This middleware fixes a couple of bugs with Microsoft Internet
+    Explorer.  Currently the following fixes are applied:
+
+    -   removal of `Vary` headers for unsupported mimetypes, which
+        causes trouble with caching.  Can be disabled by passing
+        ``fix_vary=False`` to the constructor.
+        See https://support.microsoft.com/en-us/help/824847
+
+    -   removal of offending headers to work around caching bugs in
+        Internet Explorer if `Content-Disposition` is set.  Can be
+        disabled by passing ``fix_attach=False`` to the constructor.
+
+    If no affected Internet Explorer version is detected, the request
+    and response are left untouched.
+
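+    Usage (illustrative)::
+
+        app = InternetExplorerFix(app, fix_vary=True, fix_attach=True)
+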
+    .. deprecated:: 0.15
+        This middleware will be removed in 1.0.
+    """
+
+    # This code was inspired by Django fixers for the same bugs.  The
+    # fix_vary and fix_attach fixers were originally implemented in Django
+    # by Michael Axiak and are available as part of the Django project:
+    #     https://code.djangoproject.com/ticket/4148
+
+    def __init__(self, app, fix_vary=True, fix_attach=True):
+        warnings.warn(
+            "'InternetExplorerFix' is deprecated as of version 0.15 and"
+            " will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        self.app = app
+        self.fix_vary = fix_vary
+        self.fix_attach = fix_attach
+
+    def fix_headers(self, environ, headers, status=None):
+        if self.fix_vary:
+            header = headers.get("content-type", "")
+            mimetype, options = parse_options_header(header)
+            if mimetype not in ("text/html", "text/plain", "text/sgml"):
+                headers.pop("vary", None)
+
+        if self.fix_attach and "content-disposition" in headers:
+            pragma = parse_set_header(headers.get("pragma", ""))
+            pragma.discard("no-cache")
+            header = pragma.to_header()
+            if not header:
+                headers.pop("pragma", "")
+            else:
+                headers["Pragma"] = header
+            header = headers.get("cache-control", "")
+            if header:
+                cc = parse_cache_control_header(header, cls=ResponseCacheControl)
+                cc.no_cache = None
+                cc.no_store = False
+                header = cc.to_header()
+                if not header:
+                    headers.pop("cache-control", "")
+                else:
+                    headers["Cache-Control"] = header
+
+    def run_fixed(self, environ, start_response):
+        def fixing_start_response(status, headers, exc_info=None):
+            headers = Headers(headers)
+            self.fix_headers(environ, headers, status)
+            return start_response(status, headers.to_wsgi_list(), exc_info)
+
+        return self.app(environ, fixing_start_response)
+
+    def __call__(self, environ, start_response):
+        ua = UserAgent(environ)
+        if ua.browser != "msie":
+            return self.app(environ, start_response)
+        return self.run_fixed(environ, start_response)
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/iterio.py b/lib/python3.7/site-packages/werkzeug/contrib/iterio.py
new file mode 100644
index 0000000000000000000000000000000000000000..b67245409595aaeb87913e1a349cba6f9b869962
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/contrib/iterio.py
@@ -0,0 +1,358 @@
+# -*- coding: utf-8 -*-
+r"""
+    werkzeug.contrib.iterio
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module implements an :class:`IterIO` that converts an iterator into
+    a stream object and the other way round.  Converting streams into
+    iterators requires the `greenlet`_ module.
+
+    To convert an iterator into a stream all you have to do is pass it
+    directly to the :class:`IterIO` constructor.  In this example we pass it
+    a newly created generator::
+
+        def foo():
+            yield "something\n"
+            yield "otherthings"
+        stream = IterIO(foo())
+        print(stream.read())        # read the whole iterator
+
+    The other way round works a bit differently because we have to ensure
+    that the code execution doesn't take place yet.  An :class:`IterIO` call
+    with a callable as its first argument does two things.  The function
+    itself is passed an :class:`IterIO` stream it can feed.  The object
+    returned by the :class:`IterIO` constructor on the other hand is not a
+    stream object but an iterator::
+
+        def foo(stream):
+            stream.write("some")
+            stream.write("thing")
+            stream.flush()
+            stream.write("otherthing")
+        iterator = IterIO(foo)
+        print(next(iterator))       # prints something
+        print(next(iterator))       # prints otherthing
+        next(iterator)              # raises StopIteration
+
+    .. _greenlet: https://github.com/python-greenlet/greenlet
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import warnings
+
+from .._compat import implements_iterator
+
+try:
+    import greenlet
+except ImportError:
+    greenlet = None
+
+warnings.warn(
+    "'werkzeug.contrib.iterio' is deprecated as of version 0.15 and"
+    " will be removed in version 1.0.",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+
+def _mixed_join(iterable, sentinel):
+    """concatenate any string type in an intelligent way."""
+    iterator = iter(iterable)
+    first_item = next(iterator, sentinel)
+    if isinstance(first_item, bytes):
+        return first_item + b"".join(iterator)
+    return first_item + u"".join(iterator)
+
+
+def _newline(reference_string):
+    if isinstance(reference_string, bytes):
+        return b"\n"
+    return u"\n"
+
+
+@implements_iterator
+class IterIO(object):
+    """Instances of this object implement an interface compatible with the
+    standard Python :class:`file` object.  Streams are either read-only or
+    write-only depending on how the object is created.
+
+    If the first argument is an iterable, a file-like object is returned
+    that yields the contents of the iterable.  If the iterable is empty,
+    read operations will return the sentinel value.
+
+    If the first argument is a callable then the stream object will be
+    created and passed to that function.  The caller itself, however, will
+    not receive a stream but an iterable.  The function will be executed
+    step by step as something iterates over the returned iterable.  Each
+    call to :meth:`flush` will create an item for the iterable.  If
+    :meth:`flush` is called without any writes in-between the sentinel
+    value will be yielded.
+
+    Note for Python 3: due to the incompatible interface of bytes and
+    streams you should set the sentinel value explicitly to an empty
+    bytestring (``b''``) if you are expecting to deal with bytes, as
+    otherwise the end of the stream would be marked with the wrong
+    sentinel value.
+
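+    For example, when wrapping a bytes iterator (an illustrative sketch)::
+
+        stream = IterIO(iter([b"ab", b"cd"]), sentinel=b"")
+        stream.read()   # b'abcd'
+        stream.read()   # b'' (the sentinel)
+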
+    .. versionadded:: 0.9
+       `sentinel` parameter was added.
+    """
+
+    def __new__(cls, obj, sentinel=""):
+        try:
+            iterator = iter(obj)
+        except TypeError:
+            return IterI(obj, sentinel)
+        return IterO(iterator, sentinel)
+
+    def __iter__(self):
+        return self
+
+    def tell(self):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        return self.pos
+
+    def isatty(self):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        return False
+
+    def seek(self, pos, mode=0):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        raise IOError(9, "Bad file descriptor")
+
+    def truncate(self, size=None):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        raise IOError(9, "Bad file descriptor")
+
+    def write(self, s):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        raise IOError(9, "Bad file descriptor")
+
+    def writelines(self, list):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        raise IOError(9, "Bad file descriptor")
+
+    def read(self, n=-1):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        raise IOError(9, "Bad file descriptor")
+
+    def readlines(self, sizehint=0):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        raise IOError(9, "Bad file descriptor")
+
+    def readline(self, length=None):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        raise IOError(9, "Bad file descriptor")
+
+    def flush(self):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        raise IOError(9, "Bad file descriptor")
+
+    def __next__(self):
+        if self.closed:
+            raise StopIteration()
+        line = self.readline()
+        if not line:
+            raise StopIteration()
+        return line
+
+
+class IterI(IterIO):
+    """Convert an stream into an iterator."""
+
+    def __new__(cls, func, sentinel=""):
+        if greenlet is None:
+            raise RuntimeError("IterI requires greenlet support")
+        stream = object.__new__(cls)
+        stream._parent = greenlet.getcurrent()
+        stream._buffer = []
+        stream.closed = False
+        stream.sentinel = sentinel
+        stream.pos = 0
+
+        def run():
+            func(stream)
+            stream.close()
+
+        g = greenlet.greenlet(run, stream._parent)
+        while 1:
+            rv = g.switch()
+            if not rv:
+                return
+            yield rv[0]
+
+    def close(self):
+        if not self.closed:
+            self.closed = True
+            self._flush_impl()
+
+    def write(self, s):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        if s:
+            self.pos += len(s)
+            self._buffer.append(s)
+
+    def writelines(self, list):
+        for item in list:
+            self.write(item)
+
+    def flush(self):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        self._flush_impl()
+
+    def _flush_impl(self):
+        data = _mixed_join(self._buffer, self.sentinel)
+        self._buffer = []
+        if not data and self.closed:
+            self._parent.switch()
+        else:
+            self._parent.switch((data,))
+
+
+class IterO(IterIO):
+    """Iter output.  Wrap an iterator and give it a stream like interface."""
+
+    def __new__(cls, gen, sentinel=""):
+        self = object.__new__(cls)
+        self._gen = gen
+        self._buf = None
+        self.sentinel = sentinel
+        self.closed = False
+        self.pos = 0
+        return self
+
+    def __iter__(self):
+        return self
+
+    def _buf_append(self, string):
+        """Replace string directly without appending to an empty string,
+        avoiding type issues."""
+        if not self._buf:
+            self._buf = string
+        else:
+            self._buf += string
+
+    def close(self):
+        if not self.closed:
+            self.closed = True
+            if hasattr(self._gen, "close"):
+                self._gen.close()
+
+    def seek(self, pos, mode=0):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        if mode == 1:
+            pos += self.pos
+        elif mode == 2:
+            self.read()
+            self.pos = min(self.pos, self.pos + pos)
+            return
+        elif mode != 0:
+            raise IOError("Invalid argument")
+        buf = []
+        try:
+            tmp_end_pos = len(self._buf or "")
+            while pos > tmp_end_pos:
+                item = next(self._gen)
+                tmp_end_pos += len(item)
+                buf.append(item)
+        except StopIteration:
+            pass
+        if buf:
+            self._buf_append(_mixed_join(buf, self.sentinel))
+        self.pos = max(0, pos)
+
+    def read(self, n=-1):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        if n < 0:
+            self._buf_append(_mixed_join(self._gen, self.sentinel))
+            result = self._buf[self.pos :]
+            self.pos += len(result)
+            return result
+        new_pos = self.pos + n
+        buf = []
+        try:
+            tmp_end_pos = 0 if self._buf is None else len(self._buf)
+            while new_pos > tmp_end_pos or (self._buf is None and not buf):
+                item = next(self._gen)
+                tmp_end_pos += len(item)
+                buf.append(item)
+        except StopIteration:
+            pass
+        if buf:
+            self._buf_append(_mixed_join(buf, self.sentinel))
+
+        if self._buf is None:
+            return self.sentinel
+
+        new_pos = max(0, new_pos)
+        try:
+            return self._buf[self.pos : new_pos]
+        finally:
+            self.pos = min(new_pos, len(self._buf))
+
+    def readline(self, length=None):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+
+        nl_pos = -1
+        if self._buf:
+            nl_pos = self._buf.find(_newline(self._buf), self.pos)
+        buf = []
+        try:
+            if self._buf is None:
+                pos = self.pos
+            else:
+                pos = len(self._buf)
+            while nl_pos < 0:
+                item = next(self._gen)
+                local_pos = item.find(_newline(item))
+                buf.append(item)
+                if local_pos >= 0:
+                    nl_pos = pos + local_pos
+                    break
+                pos += len(item)
+        except StopIteration:
+            pass
+        if buf:
+            self._buf_append(_mixed_join(buf, self.sentinel))
+
+        if self._buf is None:
+            return self.sentinel
+
+        if nl_pos < 0:
+            new_pos = len(self._buf)
+        else:
+            new_pos = nl_pos + 1
+        if length is not None and self.pos + length < new_pos:
+            new_pos = self.pos + length
+        try:
+            return self._buf[self.pos : new_pos]
+        finally:
+            self.pos = min(new_pos, len(self._buf))
+
+    def readlines(self, sizehint=0):
+        total = 0
+        lines = []
+        line = self.readline()
+        while line:
+            lines.append(line)
+            total += len(line)
+            if 0 < sizehint <= total:
+                break
+            line = self.readline()
+        return lines
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/lint.py b/lib/python3.7/site-packages/werkzeug/contrib/lint.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bd8b8ab2799749a89bb6344c28d7f8df6d7df16
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/contrib/lint.py
@@ -0,0 +1,11 @@
+import warnings
+
+from ..middleware.lint import *  # noqa: F401, F403
+
+warnings.warn(
+    "'werkzeug.contrib.lint' has moved to 'werkzeug.middleware.lint'."
+    " This import is deprecated as of version 0.15 and will be removed"
+    " in version 1.0.",
+    DeprecationWarning,
+    stacklevel=2,
+)
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/profiler.py b/lib/python3.7/site-packages/werkzeug/contrib/profiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..b79fe567f5125849cb3ba6b8c558200dd8bc0715
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/contrib/profiler.py
@@ -0,0 +1,42 @@
+import warnings
+
+from ..middleware.profiler import *  # noqa: F401, F403
+
+warnings.warn(
+    "'werkzeug.contrib.profiler' has moved to"
+    "'werkzeug.middleware.profiler'. This import is deprecated as of"
+    "version 0.15 and will be removed in version 1.0.",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+
+class MergeStream(object):
+    """An object that redirects ``write`` calls to multiple streams.
+    Use this to log to both ``sys.stdout`` and a file::
+
+        f = open('profiler.log', 'w')
+        stream = MergeStream(sys.stdout, f)
+        profiler = ProfilerMiddleware(app, stream)
+
+    .. deprecated:: 0.15
+        Use the ``tee`` command in your terminal instead. This class
+        will be removed in 1.0.
+    """
+
+    def __init__(self, *streams):
+        warnings.warn(
+            "'MergeStream' is deprecated as of version 0.15 and will be removed in"
+            " version 1.0. Use your terminal's 'tee' command instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+
+        if not streams:
+            raise TypeError("At least one stream must be given.")
+
+        self.streams = streams
+
+    def write(self, data):
+        for stream in self.streams:
+            stream.write(data)
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/securecookie.py b/lib/python3.7/site-packages/werkzeug/contrib/securecookie.py
new file mode 100644
index 0000000000000000000000000000000000000000..c4c9eee25ecf8854cce845bf6a1016fa2cc91bf1
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/contrib/securecookie.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+r"""
+    werkzeug.contrib.securecookie
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module implements a cookie that is not alterable from the client
+    because it adds a checksum the server checks for.  You can use it as a
+    session replacement if all you have is a user id or something to mark
+    a logged in user.
+
+    Keep in mind that the data is still readable from the client as a
+    normal cookie is.  However, you don't have to store and flush the
+    sessions on the server.
+
+    Example usage:
+
+    >>> from werkzeug.contrib.securecookie import SecureCookie
+    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
+
+    Dumping into a string so that one can store it in a cookie:
+
+    >>> value = x.serialize()
+
+    Loading from that string again:
+
+    >>> x = SecureCookie.unserialize(value, "deadbeef")
+    >>> x["baz"]
+    (1, 2, 3)
+
+    If someone modifies the cookie and the checksum is wrong the unserialize
+    method will fail silently and return a new empty `SecureCookie` object.
+
+    Keep in mind that the values will be visible in the cookie so do not
+    store data in a cookie you don't want the user to see.
+
+    Application Integration
+    =======================
+
+    If you are using the werkzeug request objects you could integrate the
+    secure cookie into your application like this::
+
+        from werkzeug.utils import cached_property
+        from werkzeug.wrappers import BaseRequest
+        from werkzeug.contrib.securecookie import SecureCookie
+
+        # don't use this key but a different one; you could just use
+        # os.urandom(20) to get something random
+        SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
+
+        class Request(BaseRequest):
+
+            @cached_property
+            def client_session(self):
+                data = self.cookies.get('session_data')
+                if not data:
+                    return SecureCookie(secret_key=SECRET_KEY)
+                return SecureCookie.unserialize(data, SECRET_KEY)
+
+        def application(environ, start_response):
+            request = Request(environ)
+
+            # get a response object here
+            response = ...
+
+            if request.client_session.should_save:
+                session_data = request.client_session.serialize()
+                response.set_cookie('session_data', session_data,
+                                    httponly=True)
+            return response(environ, start_response)
+
+    A less verbose integration can be achieved by using shorthand methods::
+
+        class Request(BaseRequest):
+
+            @cached_property
+            def client_session(self):
+                return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
+
+        def application(environ, start_response):
+            request = Request(environ)
+
+            # get a response object here
+            response = ...
+
+            request.client_session.save_cookie(response)
+            return response(environ, start_response)
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import base64
+import pickle
+import warnings
+from hashlib import sha1 as _default_hash
+from hmac import new as hmac
+from time import time
+
+from .._compat import iteritems
+from .._compat import text_type
+from .._compat import to_bytes
+from .._compat import to_native
+from .._internal import _date_to_unix
+from ..contrib.sessions import ModificationTrackingDict
+from ..security import safe_str_cmp
+from ..urls import url_quote_plus
+from ..urls import url_unquote_plus
+
+warnings.warn(
+    "'werkzeug.contrib.securecookie' is deprecated as of version 0.15"
+    " and will be removed in version 1.0. It has moved to"
+    " https://github.com/pallets/secure-cookie.",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+
+class UnquoteError(Exception):
+    """Internal exception used to signal failures on quoting."""
+
+
+class SecureCookie(ModificationTrackingDict):
+    """Represents a secure cookie.  You can subclass this class and provide
+    an alternative mac method.  The important thing is that the mac method
+    is a function with an interface similar to hashlib.  Required
+    methods are update() and digest().
+
+    Example usage:
+
+    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
+    >>> x["foo"]
+    42
+    >>> x["baz"]
+    (1, 2, 3)
+    >>> x["blafasel"] = 23
+    >>> x.should_save
+    True
+
+    :param data: the initial data.  Either a dict, list of tuples or `None`.
+    :param secret_key: the secret key.  If set to `None` or not specified,
+                       it has to be set before :meth:`serialize` is called.
+    :param new: The initial value of the `new` flag.
+    """
+
+    #: The hash method to use.  This has to be a module with a new function
+    #: or a function that creates a hashlib object, such as `hashlib.md5`.
+    #: Subclasses can override this attribute.  The default hash is sha1.
+    #: Make sure to wrap this in staticmethod() if you store an arbitrary
+    #: function there such as hashlib.sha1 which might be implemented
+    #: as a function.
+    hash_method = staticmethod(_default_hash)
+
+    #: The module used for serialization. Should have a ``dumps`` and a
+    #: ``loads`` method that takes bytes. The default is :mod:`pickle`.
+    #:
+    #: .. versionchanged:: 0.15
+    #:     The default of ``pickle`` will change to :mod:`json` in 1.0.
+    serialization_method = pickle
+
+    #: Whether the contents should be base64 quoted.  This can be disabled
+    #: if the serialization process returns cookie-safe strings only.
+    quote_base64 = True
+
+    def __init__(self, data=None, secret_key=None, new=True):
+        ModificationTrackingDict.__init__(self, data or ())
+        # explicitly convert it into a bytestring because python 2.6
+        # no longer performs an implicit string conversion on hmac
+        if secret_key is not None:
+            secret_key = to_bytes(secret_key, "utf-8")
+        self.secret_key = secret_key
+        self.new = new
+
+        if self.serialization_method is pickle:
+            warnings.warn(
+                "The default 'SecureCookie.serialization_method' will"
+                " change from pickle to json in version 1.0. To upgrade"
+                " existing tokens, override 'unquote' to try pickle if"
+                " json fails.",
+                stacklevel=2,
+            )
+
+    def __repr__(self):
+        return "<%s %s%s>" % (
+            self.__class__.__name__,
+            dict.__repr__(self),
+            "*" if self.should_save else "",
+        )
+
+    @property
+    def should_save(self):
+        """True if the session should be saved.  By default this is only true
+        for :attr:`modified` cookies, not :attr:`new`.
+        """
+        return self.modified
+
+    @classmethod
+    def quote(cls, value):
+        """Quote the value for the cookie.  This can be any object supported
+        by :attr:`serialization_method`.
+
+        :param value: the value to quote.
+        """
+        if cls.serialization_method is not None:
+            value = cls.serialization_method.dumps(value)
+        if cls.quote_base64:
+            value = b"".join(
+                base64.b64encode(to_bytes(value, "utf8")).splitlines()
+            ).strip()
+        return value
+
+    @classmethod
+    def unquote(cls, value):
+        """Unquote the value for the cookie.  If unquoting does not work a
+        :exc:`UnquoteError` is raised.
+
+        :param value: the value to unquote.
+        """
+        try:
+            if cls.quote_base64:
+                value = base64.b64decode(value)
+            if cls.serialization_method is not None:
+                value = cls.serialization_method.loads(value)
+            return value
+        except Exception:
+            # unfortunately pickle and other serialization modules can
+            # cause pretty much any error here.  if we get one we catch
+            # it and convert it into an UnquoteError
+            raise UnquoteError()
+
+    def serialize(self, expires=None):
+        """Serialize the secure cookie into a string.
+
+        If expires is provided, the session will be automatically invalidated
+        after expiration when you unserialize it.  This provides better
+        protection against session cookie theft.
+
+        :param expires: an optional expiration date for the cookie (a
+                        :class:`datetime.datetime` object)
+        """
+        if self.secret_key is None:
+            raise RuntimeError("no secret key defined")
+        if expires:
+            self["_expires"] = _date_to_unix(expires)
+        result = []
+        mac = hmac(self.secret_key, None, self.hash_method)
+        for key, value in sorted(self.items()):
+            result.append(
+                (
+                    "%s=%s" % (url_quote_plus(key), self.quote(value).decode("ascii"))
+                ).encode("ascii")
+            )
+            mac.update(b"|" + result[-1])
+        return b"?".join([base64.b64encode(mac.digest()).strip(), b"&".join(result)])
+
+    @classmethod
+    def unserialize(cls, string, secret_key):
+        """Load the secure cookie from a serialized string.
+
+        :param string: the cookie value to unserialize.
+        :param secret_key: the secret key used to serialize the cookie.
+        :return: a new :class:`SecureCookie`.
+        """
+        if isinstance(string, text_type):
+            string = string.encode("utf-8", "replace")
+        if isinstance(secret_key, text_type):
+            secret_key = secret_key.encode("utf-8", "replace")
+        try:
+            base64_hash, data = string.split(b"?", 1)
+        except (ValueError, IndexError):
+            items = ()
+        else:
+            items = {}
+            mac = hmac(secret_key, None, cls.hash_method)
+            for item in data.split(b"&"):
+                mac.update(b"|" + item)
+                if b"=" not in item:
+                    items = None
+                    break
+                key, value = item.split(b"=", 1)
+                # try to make the key a string
+                key = url_unquote_plus(key.decode("ascii"))
+                try:
+                    key = to_native(key)
+                except UnicodeError:
+                    pass
+                items[key] = value
+
+            # no parsing error and the mac looks okay, we can now
+            # securely unpickle our cookie.
+            try:
+                client_hash = base64.b64decode(base64_hash)
+            except TypeError:
+                items = client_hash = None
+            if items is not None and safe_str_cmp(client_hash, mac.digest()):
+                try:
+                    for key, value in iteritems(items):
+                        items[key] = cls.unquote(value)
+                except UnquoteError:
+                    items = ()
+                else:
+                    if "_expires" in items:
+                        if time() > items["_expires"]:
+                            items = ()
+                        else:
+                            del items["_expires"]
+            else:
+                items = ()
+        return cls(items, secret_key, False)
+
+    @classmethod
+    def load_cookie(cls, request, key="session", secret_key=None):
+        """Loads a :class:`SecureCookie` from a cookie in request.  If the
+        cookie is not set, a new :class:`SecureCookie` instanced is
+        returned.
+
+        :param request: a request object that has a `cookies` attribute
+                        which is a dict of all cookie values.
+        :param key: the name of the cookie.
+        :param secret_key: the secret key used to unquote the cookie.
+                           Always provide the value even though it has
+                           no default!
+        """
+        data = request.cookies.get(key)
+        if not data:
+            return cls(secret_key=secret_key)
+        return cls.unserialize(data, secret_key)
+
+    def save_cookie(
+        self,
+        response,
+        key="session",
+        expires=None,
+        session_expires=None,
+        max_age=None,
+        path="/",
+        domain=None,
+        secure=None,
+        httponly=False,
+        force=False,
+    ):
+        """Saves the SecureCookie in a cookie on response object.  All
+        parameters that are not described here are forwarded directly
+        to :meth:`~BaseResponse.set_cookie`.
+
+        :param response: a response object that has a
+                         :meth:`~BaseResponse.set_cookie` method.
+        :param key: the name of the cookie.
+        :param session_expires: the expiration date of the secure cookie
+                                stored information.  If this is not provided
+                                the cookie `expires` date is used instead.
+        """
+        if force or self.should_save:
+            data = self.serialize(session_expires or expires)
+            response.set_cookie(
+                key,
+                data,
+                expires=expires,
+                max_age=max_age,
+                path=path,
+                domain=domain,
+                secure=secure,
+                httponly=httponly,
+            )
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/sessions.py b/lib/python3.7/site-packages/werkzeug/contrib/sessions.py
new file mode 100644
index 0000000000000000000000000000000000000000..866e827c131cf8fdc7e906b9fe06b2ae4f0ad814
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/contrib/sessions.py
@@ -0,0 +1,389 @@
+# -*- coding: utf-8 -*-
+r"""
+    werkzeug.contrib.sessions
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module contains some helper classes to add session support to a
+    Python WSGI application.  For full client-side session storage see
+    :mod:`~werkzeug.contrib.securecookie`, which implements secure,
+    client-side session storage.
+
+
+    Application Integration
+    =======================
+
+    ::
+
+        from werkzeug.contrib.sessions import SessionMiddleware, \
+             FilesystemSessionStore
+
+        app = SessionMiddleware(app, FilesystemSessionStore())
+
+    The current session will then appear in the WSGI environment as
+    `werkzeug.session`.  It's recommended, however, to use the stores
+    directly in the application rather than the middleware; only for very
+    simple scripts might a session middleware be sufficient.
+
+    This module does not implement methods or ways to check if a session is
+    expired.  That should be done by a cronjob and is storage specific.  For
+    example, to prune unused filesystem sessions one could check the modified
+    time of the files.  If sessions are stored in a database, the new()
+    method should add an expiration timestamp for the session.
+
+    For better flexibility it's recommended to not use the middleware but the
+    store and session object directly in the application dispatching::
+
+        session_store = FilesystemSessionStore()
+
+        def application(environ, start_response):
+            request = Request(environ)
+            sid = request.cookies.get('cookie_name')
+            if sid is None:
+                request.session = session_store.new()
+            else:
+                request.session = session_store.get(sid)
+            response = get_the_response_object(request)
+            if request.session.should_save:
+                session_store.save(request.session)
+                response.set_cookie('cookie_name', request.session.sid)
+            return response(environ, start_response)
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import os
+import re
+import tempfile
+import warnings
+from hashlib import sha1
+from os import path
+from pickle import dump
+from pickle import HIGHEST_PROTOCOL
+from pickle import load
+from random import random
+from time import time
+
+from .._compat import PY2
+from .._compat import text_type
+from ..datastructures import CallbackDict
+from ..filesystem import get_filesystem_encoding
+from ..posixemulation import rename
+from ..utils import dump_cookie
+from ..utils import parse_cookie
+from ..wsgi import ClosingIterator
+
+warnings.warn(
+    "'werkzeug.contrib.sessions' is deprecated as of version 0.15 and"
+    " will be removed in version 1.0. It has moved to"
+    " https://github.com/pallets/secure-cookie.",
+    DeprecationWarning,
+    stacklevel=2,
+)
+
+_sha1_re = re.compile(r"^[a-f0-9]{40}$")
+
+
+def _urandom():
+    if hasattr(os, "urandom"):
+        return os.urandom(30)
+    return text_type(random()).encode("ascii")
+
+
+def generate_key(salt=None):
+    # ``salt`` is expected to be bytes; when omitted, ``repr(None)`` is
+    # encoded and used as a constant salt component.
+    if salt is None:
+        salt = repr(salt).encode("ascii")
+    return sha1(b"".join([salt, str(time()).encode("ascii"), _urandom()])).hexdigest()
+
+
+class ModificationTrackingDict(CallbackDict):
+    __slots__ = ("modified",)
+
+    def __init__(self, *args, **kwargs):
+        def on_update(self):
+            self.modified = True
+
+        self.modified = False
+        CallbackDict.__init__(self, on_update=on_update)
+        dict.update(self, *args, **kwargs)
+
+    def copy(self):
+        """Create a flat copy of the dict."""
+        missing = object()
+        result = object.__new__(self.__class__)
+        for name in self.__slots__:
+            val = getattr(self, name, missing)
+            if val is not missing:
+                setattr(result, name, val)
+        return result
+
+    def __copy__(self):
+        return self.copy()
+
+
+class Session(ModificationTrackingDict):
+    """Subclass of a dict that keeps track of direct object changes.  Changes
+    in mutable structures are not tracked; for those you have to set
+    `modified` to `True` by hand.
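+
+    For example (illustrative)::
+
+        session['tags'].append('x')   # mutation is not tracked
+        session.modified = True       # mark the change by hand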
+    """
+
+    __slots__ = ModificationTrackingDict.__slots__ + ("sid", "new")
+
+    def __init__(self, data, sid, new=False):
+        ModificationTrackingDict.__init__(self, data)
+        self.sid = sid
+        self.new = new
+
+    def __repr__(self):
+        return "<%s %s%s>" % (
+            self.__class__.__name__,
+            dict.__repr__(self),
+            "*" if self.should_save else "",
+        )
+
+    @property
+    def should_save(self):
+        """True if the session should be saved.
+
+        .. versionchanged:: 0.6
+           By default the session is now only saved if the session is
+           modified, not if it is new like it was before.
+        """
+        return self.modified
+
+
+class SessionStore(object):
+    """Baseclass for all session stores.  The Werkzeug contrib module does not
+    implement any useful stores besides the filesystem store, application
+    developers are encouraged to create their own stores.
+
+    :param session_class: The session class to use.  Defaults to
+                          :class:`Session`.
+    """
+
+    def __init__(self, session_class=None):
+        if session_class is None:
+            session_class = Session
+        self.session_class = session_class
+
+    def is_valid_key(self, key):
+        """Check if a key has the correct format."""
+        return _sha1_re.match(key) is not None
+
+    def generate_key(self, salt=None):
+        """Simple function that generates a new session key."""
+        return generate_key(salt)
+
+    def new(self):
+        """Generate a new session."""
+        return self.session_class({}, self.generate_key(), True)
+
+    def save(self, session):
+        """Save a session."""
+
+    def save_if_modified(self, session):
+        """Save if a session class wants an update."""
+        if session.should_save:
+            self.save(session)
+
+    def delete(self, session):
+        """Delete a session."""
+
+    def get(self, sid):
+        """Get a session for this sid or a new session object.  This method
+        has to check if the session key is valid and create a new session if
+        that wasn't the case.
+        """
+        return self.session_class({}, sid, True)
+
+
+#: used for temporary files by the filesystem session store
+_fs_transaction_suffix = ".__wz_sess"
+
+
+class FilesystemSessionStore(SessionStore):
+    """Simple example session store that saves sessions on the filesystem.
+    This store works best on POSIX systems and Windows Vista / Windows
+    Server 2008 and newer.
+
+    .. versionchanged:: 0.6
+       `renew_missing` was added.  Previously this behavior was
+       implicitly `True`; now it defaults to `False` and can be
+       enabled explicitly.
+
+    :param path: the path to the folder used for storing the sessions.
+                 If not provided the default temporary directory is used.
+    :param filename_template: a string template used to give the session
+                              a filename.  ``%s`` is replaced with the
+                              session id.
+    :param session_class: The session class to use.  Defaults to
+                          :class:`Session`.
+    :param renew_missing: set to `True` if you want the store to
+                          give the user a new sid if the session was
+                          not yet saved.
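+
+    Example (a minimal sketch; the path is hypothetical and must exist)::
+
+        store = FilesystemSessionStore('/tmp/myapp-sessions')
+        session = store.new()
+        session['user_id'] = 42
+        store.save(session)
+        session = store.get(session.sid)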
+    """
+
+    def __init__(
+        self,
+        path=None,
+        filename_template="werkzeug_%s.sess",
+        session_class=None,
+        renew_missing=False,
+        mode=0o644,
+    ):
+        SessionStore.__init__(self, session_class)
+        if path is None:
+            path = tempfile.gettempdir()
+        self.path = path
+        if isinstance(filename_template, text_type) and PY2:
+            filename_template = filename_template.encode(get_filesystem_encoding())
+        assert not filename_template.endswith(_fs_transaction_suffix), (
+            "filename templates may not end with %s" % _fs_transaction_suffix
+        )
+        self.filename_template = filename_template
+        self.renew_missing = renew_missing
+        self.mode = mode
+
+    def get_session_filename(self, sid):
+        # out of the box, this should be a strict ASCII subset but
+        # you might reconfigure the session object to have a more
+        # arbitrary string.
+        if isinstance(sid, text_type) and PY2:
+            sid = sid.encode(get_filesystem_encoding())
+        return path.join(self.path, self.filename_template % sid)
+
+    def save(self, session):
+        fn = self.get_session_filename(session.sid)
+        fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix, dir=self.path)
+        f = os.fdopen(fd, "wb")
+        try:
+            dump(dict(session), f, HIGHEST_PROTOCOL)
+        finally:
+            f.close()
+        try:
+            rename(tmp, fn)
+            os.chmod(fn, self.mode)
+        except (IOError, OSError):
+            pass
+
+    def delete(self, session):
+        fn = self.get_session_filename(session.sid)
+        try:
+            os.unlink(fn)
+        except OSError:
+            pass
+
+    def get(self, sid):
+        if not self.is_valid_key(sid):
+            return self.new()
+        try:
+            f = open(self.get_session_filename(sid), "rb")
+        except IOError:
+            if self.renew_missing:
+                return self.new()
+            data = {}
+        else:
+            try:
+                try:
+                    data = load(f)
+                except Exception:
+                    data = {}
+            finally:
+                f.close()
+        return self.session_class(data, sid, False)
+
+    def list(self):
+        """Lists all sessions in the store.
+
+        .. versionadded:: 0.6
+        """
+        before, after = self.filename_template.split("%s", 1)
+        filename_re = re.compile(
+            r"%s(.{5,})%s$" % (re.escape(before), re.escape(after))
+        )
+        result = []
+        for filename in os.listdir(self.path):
+            #: this is a session that is still being saved.
+            if filename.endswith(_fs_transaction_suffix):
+                continue
+            match = filename_re.match(filename)
+            if match is not None:
+                result.append(match.group(1))
+        return result
+
+
+class SessionMiddleware(object):
+    """A simple middleware that puts the session object of a store provided
+    into the WSGI environ.  It automatically sets cookies and restores
+    sessions.
+
+    However, a middleware is not the preferred solution because it won't be
+    as fast as sessions managed by the application itself, and it puts a key
+    into the WSGI environment that is only relevant for the application,
+    which is against the concept of WSGI.
+
+    The cookie parameters are the same as for the :func:`~dump_cookie`
+    function, just prefixed with ``cookie_``.  Additionally, `max_age` is
+    called `cookie_age` rather than `cookie_max_age` for backwards
+    compatibility.
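+
+    For example (illustrative)::
+
+        app = SessionMiddleware(app, FilesystemSessionStore(),
+                                cookie_name='session_id',
+                                cookie_httponly=True)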
+    """
+
+    def __init__(
+        self,
+        app,
+        store,
+        cookie_name="session_id",
+        cookie_age=None,
+        cookie_expires=None,
+        cookie_path="/",
+        cookie_domain=None,
+        cookie_secure=None,
+        cookie_httponly=False,
+        cookie_samesite="Lax",
+        environ_key="werkzeug.session",
+    ):
+        self.app = app
+        self.store = store
+        self.cookie_name = cookie_name
+        self.cookie_age = cookie_age
+        self.cookie_expires = cookie_expires
+        self.cookie_path = cookie_path
+        self.cookie_domain = cookie_domain
+        self.cookie_secure = cookie_secure
+        self.cookie_httponly = cookie_httponly
+        self.cookie_samesite = cookie_samesite
+        self.environ_key = environ_key
+
+    def __call__(self, environ, start_response):
+        cookie = parse_cookie(environ.get("HTTP_COOKIE", ""))
+        sid = cookie.get(self.cookie_name, None)
+        if sid is None:
+            session = self.store.new()
+        else:
+            session = self.store.get(sid)
+        environ[self.environ_key] = session
+
+        def injecting_start_response(status, headers, exc_info=None):
+            if session.should_save:
+                self.store.save(session)
+                headers.append(
+                    (
+                        "Set-Cookie",
+                        dump_cookie(
+                            self.cookie_name,
+                            session.sid,
+                            self.cookie_age,
+                            self.cookie_expires,
+                            self.cookie_path,
+                            self.cookie_domain,
+                            self.cookie_secure,
+                            self.cookie_httponly,
+                            samesite=self.cookie_samesite,
+                        ),
+                    )
+                )
+            return start_response(status, headers, exc_info)
+
+        return ClosingIterator(
+            self.app(environ, injecting_start_response),
+            lambda: self.store.save_if_modified(session),
+        )
diff --git a/lib/python3.7/site-packages/werkzeug/contrib/wrappers.py b/lib/python3.7/site-packages/werkzeug/contrib/wrappers.py
new file mode 100644
index 0000000000000000000000000000000000000000..49b82a71ef3125cdd8ba7d701e40e934971b30cc
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/contrib/wrappers.py
@@ -0,0 +1,385 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.wrappers
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Extra wrappers or mixins contributed by the community.  These wrappers
+    can be mixed into request objects to add extra functionality.
+
+    Example::
+
+        from werkzeug.wrappers import Request as RequestBase
+        from werkzeug.contrib.wrappers import JSONRequestMixin
+
+        class Request(RequestBase, JSONRequestMixin):
+            pass
+
+    Afterwards this request object provides the extra functionality of the
+    :class:`JSONRequestMixin`.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import codecs
+import warnings
+
+from .._compat import wsgi_decoding_dance
+from ..exceptions import BadRequest
+from ..http import dump_options_header
+from ..http import parse_options_header
+from ..utils import cached_property
+from ..wrappers.json import JSONMixin as _JSONMixin
+
+
+def is_known_charset(charset):
+    """Checks if the given charset is known to Python."""
+    try:
+        codecs.lookup(charset)
+    except LookupError:
+        return False
+    return True
+
+
+class JSONRequestMixin(_JSONMixin):
+    """
+    .. deprecated:: 0.15
+        Moved to :class:`werkzeug.wrappers.json.JSONMixin`. This old
+        import will be removed in version 1.0.
+    """
+
+    @property
+    def json(self):
+        warnings.warn(
+            "'werkzeug.contrib.wrappers.JSONRequestMixin' has moved to"
+            " 'werkzeug.wrappers.json.JSONMixin'. This old import will"
+            " be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return super(JSONRequestMixin, self).json
+
+
+class ProtobufRequestMixin(object):
+
+    """Add protobuf parsing method to a request object.  This will parse the
+    input data through `protobuf`_ if possible.
+
+    :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
+    is not protobuf or if the data itself cannot be parsed properly.
+
+    .. _protobuf: https://github.com/protocolbuffers/protobuf
+
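+    Usage (a sketch; ``MyMessage`` stands for a hypothetical generated
+    protobuf message class)::
+
+        class Request(ProtobufRequestMixin, BaseRequest):
+            pass
+
+        message = request.parse_protobuf(MyMessage)
+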
+    .. deprecated:: 0.15
+        This mixin will be removed in version 1.0.
+    """
+
+    #: by default the :class:`ProtobufRequestMixin` will raise a
+    #: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
+    #: initialized.  You can bypass that check by setting this
+    #: attribute to `False`.
+    protobuf_check_initialization = True
+
+    def parse_protobuf(self, proto_type):
+        """Parse the data into an instance of proto_type."""
+        warnings.warn(
+            "'werkzeug.contrib.wrappers.ProtobufRequestMixin' is"
+            " deprecated as of version 0.15 and will be removed in"
+            " version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        if "protobuf" not in self.environ.get("CONTENT_TYPE", ""):
+            raise BadRequest("Not a Protobuf request")
+
+        obj = proto_type()
+        try:
+            obj.ParseFromString(self.data)
+        except Exception:
+            raise BadRequest("Unable to parse Protobuf request")
+
+        # Fail if not all required fields are set
+        if self.protobuf_check_initialization and not obj.IsInitialized():
+            raise BadRequest("Partial Protobuf request")
+
+        return obj
+
+
+class RoutingArgsRequestMixin(object):
+
+    """This request mixin adds support for the wsgiorg routing args
+    `specification`_.
+
+    .. _specification: https://wsgi.readthedocs.io/en/latest/
+       specifications/routing_args.html
+
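+    For example (illustrative)::
+
+        request.environ['wsgiorg.routing_args'] = (('v1',), {'id': 42})
+        request.routing_args   # ('v1',)
+        request.routing_vars   # {'id': 42}
+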
+    .. deprecated:: 0.15
+        This mixin will be removed in version 1.0.
+    """
+
+    def _get_routing_args(self):
+        warnings.warn(
+            "'werkzeug.contrib.wrappers.RoutingArgsRequestMixin' is"
+            " deprecated as of version 0.15 and will be removed in"
+            " version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return self.environ.get("wsgiorg.routing_args", (()))[0]
+
+    def _set_routing_args(self, value):
+        warnings.warn(
+            "'werkzeug.contrib.wrappers.RoutingArgsRequestMixin' is"
+            " deprecated as of version 0.15 and will be removed in"
+            " version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        if self.shallow:
+            raise RuntimeError(
+                "A shallow request tried to modify the WSGI "
+                "environment.  If you really want to do that, "
+                "set `shallow` to False."
+            )
+        self.environ["wsgiorg.routing_args"] = (value, self.routing_vars)
+
+    routing_args = property(
+        _get_routing_args,
+        _set_routing_args,
+        doc="""
+        The positional URL arguments as `tuple`.""",
+    )
+    del _get_routing_args, _set_routing_args
+
+    def _get_routing_vars(self):
+        warnings.warn(
+            "'werkzeug.contrib.wrappers.RoutingArgsRequestMixin' is"
+            " deprecated as of version 0.15 and will be removed in"
+            " version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        rv = self.environ.get("wsgiorg.routing_args")
+        if rv is not None:
+            return rv[1]
+        rv = {}
+        if not self.shallow:
+            self.routing_vars = rv
+        return rv
+
+    def _set_routing_vars(self, value):
+        warnings.warn(
+            "'werkzeug.contrib.wrappers.RoutingArgsRequestMixin' is"
+            " deprecated as of version 0.15 and will be removed in"
+            " version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        if self.shallow:
+            raise RuntimeError(
+                "A shallow request tried to modify the WSGI "
+                "environment.  If you really want to do that, "
+                "set `shallow` to False."
+            )
+        self.environ["wsgiorg.routing_args"] = (self.routing_args, value)
+
+    routing_vars = property(
+        _get_routing_vars,
+        _set_routing_vars,
+        doc="""
+        The keyword URL arguments as `dict`.""",
+    )
+    del _get_routing_vars, _set_routing_vars
+
+
+class ReverseSlashBehaviorRequestMixin(object):
+
+    """This mixin reverses the trailing slash behavior of :attr:`script_root`
+    and :attr:`path`.  This makes it possible to use :func:`~urlparse.urljoin`
+    directly on the paths.
+
+    Because it changes the behavior of :class:`Request` this class has to be
+    mixed in *before* the actual request class::
+
+        class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
+            pass
+
+    This example shows the differences (for an application mounted on
+    `/application` and the request going to `/application/foo/bar`):
+
+        +---------------+-------------------+---------------------+
+        |               | normal behavior   | reverse behavior    |
+        +===============+===================+=====================+
+        | `script_root` | ``/application``  | ``/application/``   |
+        +---------------+-------------------+---------------------+
+        | `path`        | ``/foo/bar``      | ``foo/bar``         |
+        +---------------+-------------------+---------------------+
+
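+    This allows joins such as (illustrative; ``urljoin`` comes from
+    `urllib.parse` on Python 3)::
+
+        urljoin(request.script_root, request.path)
+        # '/application/foo/bar'
+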
+    .. deprecated:: 0.15
+        This mixin will be removed in version 1.0.
+    """
+
+    @cached_property
+    def path(self):
+        """Requested path as unicode.  This works a bit like the regular path
+        info in the WSGI environment but will not include a leading slash.
+        """
+        warnings.warn(
+            "'werkzeug.contrib.wrappers.ReverseSlashBehaviorRequestMixin'"
+            " is deprecated as of version 0.15 and will be removed in"
+            " version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        path = wsgi_decoding_dance(
+            self.environ.get("PATH_INFO") or "", self.charset, self.encoding_errors
+        )
+        return path.lstrip("/")
+
+    @cached_property
+    def script_root(self):
+        """The root path of the script includling a trailing slash."""
+        warnings.warn(
+            "'werkzeug.contrib.wrappers.ReverseSlashBehaviorRequestMixin'"
+            " is deprecated as of version 0.15 and will be removed in"
+            " version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        path = wsgi_decoding_dance(
+            self.environ.get("SCRIPT_NAME") or "", self.charset, self.encoding_errors
+        )
+        return path.rstrip("/") + "/"
+
+
+class DynamicCharsetRequestMixin(object):
+
+    """"If this mixin is mixed into a request class it will provide
+    a dynamic `charset` attribute.  This means that if the charset is
+    transmitted in the content type headers it's used from there.
+
+    Because it changes the behavior of :class:`Request` this class has
+    to be mixed in *before* the actual request class::
+
+        class MyRequest(DynamicCharsetRequestMixin, Request):
+            pass
+
+    By default the request object assumes that the URL charset is the
+    same as the data charset.  If the charset varies on each request
+    based on the transmitted data it's not a good idea to let the URLs
+    change based on that.  Most browsers assume either utf-8 or latin1
+    for the URLs if they have trouble figuring it out.  It's strongly
+    recommended to set the URL charset to utf-8::
+
+        class MyRequest(DynamicCharsetRequestMixin, Request):
+            url_charset = 'utf-8'
+
+    .. deprecated:: 0.15
+        This mixin will be removed in version 1.0.
+
+    .. versionadded:: 0.6
+    """
+
+    #: the default charset that is assumed if the content type header
+    #: is missing or does not contain a charset parameter.  The default
+    #: is latin1 which is what HTTP specifies as default charset.
+    #: You may however want to set this to utf-8 to better support
+    #: browsers that do not transmit a charset for incoming data.
+    default_charset = "latin1"
+
+    def unknown_charset(self, charset):
+        """Called if a charset was provided but is not supported by
+        the Python codecs module.  By default latin1 is then assumed
+        so as not to lose any information; you may override this method
+        to change the behavior.
+
+        :param charset: the charset that was not found.
+        :return: the replacement charset.
+        """
+        return "latin1"
+
+    @cached_property
+    def charset(self):
+        """The charset from the content type."""
+        warnings.warn(
+            "'werkzeug.contrib.wrappers.DynamicCharsetRequestMixin'"
+            " is deprecated as of version 0.15 and will be removed in"
+            " version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        header = self.environ.get("CONTENT_TYPE")
+        if header:
+            ct, options = parse_options_header(header)
+            charset = options.get("charset")
+            if charset:
+                if is_known_charset(charset):
+                    return charset
+                return self.unknown_charset(charset)
+        return self.default_charset
+
+
+class DynamicCharsetResponseMixin(object):
+
+    """If this mixin is mixed into a response class it will provide
+    a dynamic `charset` attribute.  This means that the charset is
+    looked up in and stored in the `Content-Type` header and updates
+    itself automatically.  This also means a small performance hit, but
+    it can be useful if you're working with different charsets on
+    responses.
+
+    Because the charset attribute is not a property at class-level, the
+    default value is stored in `default_charset`.
+
+    Because it changes the behavior of :class:`Response` this class has
+    to be mixed in *before* the actual response class::
+
+        class MyResponse(DynamicCharsetResponseMixin, Response):
+            pass
+
+    .. deprecated:: 0.15
+        This mixin will be removed in version 1.0.
+
+    .. versionadded:: 0.6
+    """
+
+    #: the default charset.
+    default_charset = "utf-8"
+
+    def _get_charset(self):
+        warnings.warn(
+            "'werkzeug.contrib.wrappers.DynamicCharsetResponseMixin'"
+            " is deprecated as of version 0.15 and will be removed in"
+            " version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        header = self.headers.get("content-type")
+        if header:
+            charset = parse_options_header(header)[1].get("charset")
+            if charset:
+                return charset
+        return self.default_charset
+
+    def _set_charset(self, charset):
+        warnings.warn(
+            "'werkzeug.contrib.wrappers.DynamicCharsetResponseMixin'"
+            " is deprecated as of version 0.15 and will be removed in"
+            " version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        header = self.headers.get("content-type")
+        ct, options = parse_options_header(header)
+        if not ct:
+            raise TypeError("Cannot set charset if Content-Type header is missing.")
+        options["charset"] = charset
+        self.headers["Content-Type"] = dump_options_header(ct, options)
+
+    charset = property(
+        _get_charset,
+        _set_charset,
+        doc="""
+        The charset for the response.  It's stored inside the
+        Content-Type header as a parameter.""",
+    )
+    del _get_charset, _set_charset
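+
+# Assignment sketch (illustrative, not part of the upstream module),
+# assuming ``Response`` from ``werkzeug.wrappers``: writing ``charset``
+# re-dumps the Content-Type header; reading parses it again on each access.
+#
+#     class MyResponse(DynamicCharsetResponseMixin, Response):
+#         pass
+#
+#     resp = MyResponse("hello", mimetype="text/plain")
+#     resp.charset = "latin1"   # rewrites the Content-Type header in place
+#     # assigning without a Content-Type header raises TypeError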
diff --git a/lib/python3.7/site-packages/werkzeug/datastructures.py b/lib/python3.7/site-packages/werkzeug/datastructures.py
new file mode 100644
index 0000000000000000000000000000000000000000..9643db96c0ad850f5e3d642ed153d1b95c7014d0
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/datastructures.py
@@ -0,0 +1,2852 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.datastructures
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module provides mixins and classes with an immutable interface.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import codecs
+import mimetypes
+import re
+from copy import deepcopy
+from itertools import repeat
+
+from ._compat import BytesIO
+from ._compat import collections_abc
+from ._compat import integer_types
+from ._compat import iteritems
+from ._compat import iterkeys
+from ._compat import iterlists
+from ._compat import itervalues
+from ._compat import make_literal_wrapper
+from ._compat import PY2
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import to_native
+from ._internal import _missing
+from .filesystem import get_filesystem_encoding
+
+_locale_delim_re = re.compile(r"[_-]")
+
+
+def is_immutable(self):
+    raise TypeError("%r objects are immutable" % self.__class__.__name__)
+
+
+def iter_multi_items(mapping):
+    """Iterates over the items of a mapping yielding keys and values
+    without dropping any from more complex structures.
+    """
+    if isinstance(mapping, MultiDict):
+        for item in iteritems(mapping, multi=True):
+            yield item
+    elif isinstance(mapping, dict):
+        for key, value in iteritems(mapping):
+            if isinstance(value, (tuple, list)):
+                for value in value:
+                    yield key, value
+            else:
+                yield key, value
+    else:
+        for item in mapping:
+            yield item
+
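+# Flattening sketch (illustrative): plain dicts with list/tuple values are
+# expanded into one ``(key, value)`` pair per item.
+#
+#     list(iter_multi_items({"a": [1, 2], "b": 3}))
+#     # -> [('a', 1), ('a', 2), ('b', 3)]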
+
+def native_itermethods(names):
+    if not PY2:
+        return lambda x: x
+
+    def setviewmethod(cls, name):
+        viewmethod_name = "view%s" % name
+        repr_name = "view_%s" % name
+
+        def viewmethod(self, *a, **kw):
+            return ViewItems(self, name, repr_name, *a, **kw)
+
+        viewmethod.__name__ = viewmethod_name
+        viewmethod.__doc__ = "`%s()` object providing a view on %s" % (
+            viewmethod_name,
+            name,
+        )
+        setattr(cls, viewmethod_name, viewmethod)
+
+    def setitermethod(cls, name):
+        itermethod = getattr(cls, name)
+        setattr(cls, "iter%s" % name, itermethod)
+
+        def listmethod(self, *a, **kw):
+            return list(itermethod(self, *a, **kw))
+
+        listmethod.__name__ = name
+        listmethod.__doc__ = "Like :py:meth:`iter%s`, but returns a list." % name
+        setattr(cls, name, listmethod)
+
+    def wrap(cls):
+        for name in names:
+            setitermethod(cls, name)
+            setviewmethod(cls, name)
+        return cls
+
+    return wrap
+
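+# Py2/Py3 shim note (descriptive): on Python 3 the decorator is a no-op.
+# On Python 2 it turns each named generator method (e.g. ``keys``) into
+# ``iterkeys``, installs a list-returning ``keys`` in its place, and adds a
+# ``viewkeys`` wrapper, so the classes below only define iterator variants.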
+
+class ImmutableListMixin(object):
+    """Makes a :class:`list` immutable.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    _hash_cache = None
+
+    def __hash__(self):
+        if self._hash_cache is not None:
+            return self._hash_cache
+        rv = self._hash_cache = hash(tuple(self))
+        return rv
+
+    def __reduce_ex__(self, protocol):
+        return type(self), (list(self),)
+
+    def __delitem__(self, key):
+        is_immutable(self)
+
+    def __iadd__(self, other):
+        is_immutable(self)
+
+    __imul__ = __iadd__
+
+    def __setitem__(self, key, value):
+        is_immutable(self)
+
+    def append(self, item):
+        is_immutable(self)
+
+    remove = append
+
+    def extend(self, iterable):
+        is_immutable(self)
+
+    def insert(self, pos, value):
+        is_immutable(self)
+
+    def pop(self, index=-1):
+        is_immutable(self)
+
+    def reverse(self):
+        is_immutable(self)
+
+    def sort(self, cmp=None, key=None, reverse=None):
+        is_immutable(self)
+
+
+class ImmutableList(ImmutableListMixin, list):
+    """An immutable :class:`list`.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    def __repr__(self):
+        return "%s(%s)" % (self.__class__.__name__, list.__repr__(self))
+
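+# Immutability sketch (illustrative): mutators raise, and instances are
+# hashable via the tuple of their items.
+#
+#     l = ImmutableList([1, 2, 3])
+#     l.append(4)                 # raises TypeError (objects are immutable)
+#     hash(l) == hash((1, 2, 3))  # True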
+
+class ImmutableDictMixin(object):
+    """Makes a :class:`dict` immutable.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    _hash_cache = None
+
+    @classmethod
+    def fromkeys(cls, keys, value=None):
+        instance = super(cls, cls).__new__(cls)
+        instance.__init__(zip(keys, repeat(value)))
+        return instance
+
+    def __reduce_ex__(self, protocol):
+        return type(self), (dict(self),)
+
+    def _iter_hashitems(self):
+        return iteritems(self)
+
+    def __hash__(self):
+        if self._hash_cache is not None:
+            return self._hash_cache
+        rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
+        return rv
+
+    def setdefault(self, key, default=None):
+        is_immutable(self)
+
+    def update(self, *args, **kwargs):
+        is_immutable(self)
+
+    def pop(self, key, default=None):
+        is_immutable(self)
+
+    def popitem(self):
+        is_immutable(self)
+
+    def __setitem__(self, key, value):
+        is_immutable(self)
+
+    def __delitem__(self, key):
+        is_immutable(self)
+
+    def clear(self):
+        is_immutable(self)
+
+
+class ImmutableMultiDictMixin(ImmutableDictMixin):
+    """Makes a :class:`MultiDict` immutable.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    def __reduce_ex__(self, protocol):
+        return type(self), (list(iteritems(self, multi=True)),)
+
+    def _iter_hashitems(self):
+        return iteritems(self, multi=True)
+
+    def add(self, key, value):
+        is_immutable(self)
+
+    def popitemlist(self):
+        is_immutable(self)
+
+    def poplist(self, key):
+        is_immutable(self)
+
+    def setlist(self, key, new_list):
+        is_immutable(self)
+
+    def setlistdefault(self, key, default_list=None):
+        is_immutable(self)
+
+
+class UpdateDictMixin(object):
+    """Makes dicts call `self.on_update` on modifications.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    on_update = None
+
+    def calls_update(name):  # noqa: B902
+        def oncall(self, *args, **kw):
+            rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
+            if self.on_update is not None:
+                self.on_update(self)
+            return rv
+
+        oncall.__name__ = name
+        return oncall
+
+    def setdefault(self, key, default=None):
+        modified = key not in self
+        rv = super(UpdateDictMixin, self).setdefault(key, default)
+        if modified and self.on_update is not None:
+            self.on_update(self)
+        return rv
+
+    def pop(self, key, default=_missing):
+        modified = key in self
+        if default is _missing:
+            rv = super(UpdateDictMixin, self).pop(key)
+        else:
+            rv = super(UpdateDictMixin, self).pop(key, default)
+        if modified and self.on_update is not None:
+            self.on_update(self)
+        return rv
+
+    __setitem__ = calls_update("__setitem__")
+    __delitem__ = calls_update("__delitem__")
+    clear = calls_update("clear")
+    popitem = calls_update("popitem")
+    update = calls_update("update")
+    del calls_update
+
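+# Callback sketch (illustrative; ``WatchedDict`` is a hypothetical name):
+# ``on_update`` fires once per mutating call, and only on real changes for
+# ``setdefault`` and ``pop``.
+#
+#     class WatchedDict(UpdateDictMixin, dict):
+#         pass
+#
+#     d = WatchedDict()
+#     d.on_update = lambda mapping: print("changed")
+#     d["a"] = 1             # prints "changed"
+#     d.setdefault("a", 2)   # key already present: no callback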
+
+class TypeConversionDict(dict):
+    """Works like a regular dict but the :meth:`get` method can perform
+    type conversions.  :class:`MultiDict` and :class:`CombinedMultiDict`
+    are subclasses of this class and provide the same feature.
+
+    .. versionadded:: 0.5
+    """
+
+    def get(self, key, default=None, type=None):
+        """Return the default value if the requested data doesn't exist.
+        If `type` is provided and is a callable it should convert the value,
+        return it or raise a :exc:`ValueError` if that is not possible.  In
+        this case the function will return the default as if the value was not
+        found:
+
+        >>> d = TypeConversionDict(foo='42', bar='blub')
+        >>> d.get('foo', type=int)
+        42
+        >>> d.get('bar', -1, type=int)
+        -1
+
+        :param key: The key to be looked up.
+        :param default: The default value to be returned if the key can't
+                        be looked up.  If not further specified `None` is
+                        returned.
+        :param type: A callable that is used to cast the value in the
+                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
+                     by this callable the default value is returned.
+        """
+        try:
+            rv = self[key]
+        except KeyError:
+            return default
+        if type is not None:
+            try:
+                rv = type(rv)
+            except ValueError:
+                rv = default
+        return rv
+
+
+class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
+    """Works like a :class:`TypeConversionDict` but does not support
+    modifications.
+
+    .. versionadded:: 0.5
+    """
+
+    def copy(self):
+        """Return a shallow mutable copy of this object.  Keep in mind that
+        the standard library's :func:`copy` function is a no-op for this class
+        like for any other python immutable type (eg: :class:`tuple`).
+        """
+        return TypeConversionDict(self)
+
+    def __copy__(self):
+        return self
+
+
+class ViewItems(object):
+    def __init__(self, multi_dict, method, repr_name, *a, **kw):
+        self.__multi_dict = multi_dict
+        self.__method = method
+        self.__repr_name = repr_name
+        self.__a = a
+        self.__kw = kw
+
+    def __get_items(self):
+        return getattr(self.__multi_dict, self.__method)(*self.__a, **self.__kw)
+
+    def __repr__(self):
+        return "%s(%r)" % (self.__repr_name, list(self.__get_items()))
+
+    def __iter__(self):
+        return iter(self.__get_items())
+
+
+@native_itermethods(["keys", "values", "items", "lists", "listvalues"])
+class MultiDict(TypeConversionDict):
+    """A :class:`MultiDict` is a dictionary subclass customized to deal with
+    multiple values for the same key which is for example used by the parsing
+    functions in the wrappers.  This is necessary because some HTML form
+    elements pass multiple values for the same key.
+
+    :class:`MultiDict` implements all standard dictionary methods.
+    Internally, it saves all values for a key as a list, but the standard dict
+    access methods will only return the first value for a key. If you want to
+    gain access to the other values, too, you have to use the `list` methods as
+    explained below.
+
+    Basic Usage:
+
+    >>> d = MultiDict([('a', 'b'), ('a', 'c')])
+    >>> d
+    MultiDict([('a', 'b'), ('a', 'c')])
+    >>> d['a']
+    'b'
+    >>> d.getlist('a')
+    ['b', 'c']
+    >>> 'a' in d
+    True
+
+    It behaves like a normal dict, thus all dict functions will only return the
+    first value when multiple values for one key are found.
+
+    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
+    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
+    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
+    exceptions.
+
+    A :class:`MultiDict` can be constructed from an iterable of
+    ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
+    onwards some keyword parameters.
+
+    :param mapping: the initial value for the :class:`MultiDict`.  Either a
+                    regular dict, an iterable of ``(key, value)`` tuples
+                    or `None`.
+    """
+
+    def __init__(self, mapping=None):
+        if isinstance(mapping, MultiDict):
+            dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
+        elif isinstance(mapping, dict):
+            tmp = {}
+            for key, value in iteritems(mapping):
+                if isinstance(value, (tuple, list)):
+                    if len(value) == 0:
+                        continue
+                    value = list(value)
+                else:
+                    value = [value]
+                tmp[key] = value
+            dict.__init__(self, tmp)
+        else:
+            tmp = {}
+            for key, value in mapping or ():
+                tmp.setdefault(key, []).append(value)
+            dict.__init__(self, tmp)
+
+    def __getstate__(self):
+        return dict(self.lists())
+
+    def __setstate__(self, value):
+        dict.clear(self)
+        dict.update(self, value)
+
+    def __getitem__(self, key):
+        """Return the first data value for this key;
+        raises KeyError if not found.
+
+        :param key: The key to be looked up.
+        :raise KeyError: if the key does not exist.
+        """
+
+        if key in self:
+            lst = dict.__getitem__(self, key)
+            if len(lst) > 0:
+                return lst[0]
+        raise exceptions.BadRequestKeyError(key)
+
+    def __setitem__(self, key, value):
+        """Like :meth:`add` but removes an existing key first.
+
+        :param key: the key for the value.
+        :param value: the value to set.
+        """
+        dict.__setitem__(self, key, [value])
+
+    def add(self, key, value):
+        """Adds a new value for the key.
+
+        .. versionadded:: 0.6
+
+        :param key: the key for the value.
+        :param value: the value to add.
+        """
+        dict.setdefault(self, key, []).append(value)
+
+    def getlist(self, key, type=None):
+        """Return the list of items for a given key. If that key is not in the
+        `MultiDict`, the return value will be an empty list.  Just as `get`
+        `getlist` accepts a `type` parameter.  All items will be converted
+        with the callable defined there.
+
+        :param key: The key to be looked up.
+        :param type: A callable that is used to cast the value in the
+                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
+                     by this callable the value will be removed from the list.
+        :return: a :class:`list` of all the values for the key.
+        """
+        try:
+            rv = dict.__getitem__(self, key)
+        except KeyError:
+            return []
+        if type is None:
+            return list(rv)
+        result = []
+        for item in rv:
+            try:
+                result.append(type(item))
+            except ValueError:
+                pass
+        return result
+
+    def setlist(self, key, new_list):
+        """Remove the old values for a key and add new ones.  Note that the list
+        you pass the values in will be shallow-copied before it is inserted in
+        the dictionary.
+
+        >>> d = MultiDict()
+        >>> d.setlist('foo', ['1', '2'])
+        >>> d['foo']
+        '1'
+        >>> d.getlist('foo')
+        ['1', '2']
+
+        :param key: The key for which the values are set.
+        :param new_list: An iterable with the new values for the key.  Old values
+                         are removed first.
+        """
+        dict.__setitem__(self, key, list(new_list))
+
+    def setdefault(self, key, default=None):
+        """Returns the value for the key if it is in the dict, otherwise it
+        returns `default` and sets that value for `key`.
+
+        :param key: The key to be looked up.
+        :param default: The default value to be returned if the key is not
+                        in the dict.  If not further specified it's `None`.
+        """
+        if key not in self:
+            self[key] = default
+        else:
+            default = self[key]
+        return default
+
+    def setlistdefault(self, key, default_list=None):
+        """Like `setdefault` but sets multiple values.  The list returned
+        is not a copy, but the list that is actually used internally.  This
+        means that you can put new values into the dict by appending items
+        to the list:
+
+        >>> d = MultiDict({"foo": 1})
+        >>> d.setlistdefault("foo").extend([2, 3])
+        >>> d.getlist("foo")
+        [1, 2, 3]
+
+        :param key: The key to be looked up.
+        :param default_list: An iterable of default values.  It is either copied
+                             (in case it was a list) or converted into a list
+                             before returned.
+        :return: a :class:`list`
+        """
+        if key not in self:
+            default_list = list(default_list or ())
+            dict.__setitem__(self, key, default_list)
+        else:
+            default_list = dict.__getitem__(self, key)
+        return default_list
+
+    def items(self, multi=False):
+        """Return an iterator of ``(key, value)`` pairs.
+
+        :param multi: If set to `True` the iterator returned will have a pair
+                      for each value of each key.  Otherwise it will only
+                      contain pairs for the first value of each key.
+        """
+
+        for key, values in iteritems(dict, self):
+            if multi:
+                for value in values:
+                    yield key, value
+            else:
+                yield key, values[0]
+
+    def lists(self):
+        """Return a iterator of ``(key, values)`` pairs, where values is the list
+        of all values associated with the key."""
+
+        for key, values in iteritems(dict, self):
+            yield key, list(values)
+
+    def keys(self):
+        return iterkeys(dict, self)
+
+    __iter__ = keys
+
+    def values(self):
+        """Returns an iterator of the first value on every key's value list."""
+        for values in itervalues(dict, self):
+            yield values[0]
+
+    def listvalues(self):
+        """Return an iterator of all values associated with a key.  Zipping
+        :meth:`keys` and this is the same as calling :meth:`lists`:
+
+        >>> d = MultiDict({"foo": [1, 2, 3]})
+        >>> zip(d.keys(), d.listvalues()) == d.lists()
+        True
+        """
+
+        return itervalues(dict, self)
+
+    def copy(self):
+        """Return a shallow copy of this object."""
+        return self.__class__(self)
+
+    def deepcopy(self, memo=None):
+        """Return a deep copy of this object."""
+        return self.__class__(deepcopy(self.to_dict(flat=False), memo))
+
+    def to_dict(self, flat=True):
+        """Return the contents as regular dict.  If `flat` is `True` the
+        returned dict will only have the first item present, if `flat` is
+        `False` all values will be returned as lists.
+
+        :param flat: If set to `False` the dict returned will have lists
+                     with all the values in it.  Otherwise it will only
+                     contain the first value for each key.
+        :return: a :class:`dict`
+        """
+        if flat:
+            return dict(iteritems(self))
+        return dict(self.lists())
+
+    def update(self, other_dict):
+        """update() extends rather than replaces existing key lists:
+
+        >>> a = MultiDict({'x': 1})
+        >>> b = MultiDict({'x': 2, 'y': 3})
+        >>> a.update(b)
+        >>> a
+        MultiDict([('y', 3), ('x', 1), ('x', 2)])
+
+        If the value list for a key in ``other_dict`` is empty, no new values
+        will be added to the dict and the key will not be created:
+
+        >>> x = {'empty_list': []}
+        >>> y = MultiDict()
+        >>> y.update(x)
+        >>> y
+        MultiDict([])
+        """
+        for key, value in iter_multi_items(other_dict):
+            MultiDict.add(self, key, value)
+
+    def pop(self, key, default=_missing):
+        """Pop the first item for a list on the dict.  Afterwards the
+        key is removed from the dict, so additional values are discarded:
+
+        >>> d = MultiDict({"foo": [1, 2, 3]})
+        >>> d.pop("foo")
+        1
+        >>> "foo" in d
+        False
+
+        :param key: the key to pop.
+        :param default: if provided the value to return if the key was
+                        not in the dictionary.
+        """
+        try:
+            lst = dict.pop(self, key)
+
+            if len(lst) == 0:
+                raise exceptions.BadRequestKeyError(key)
+
+            return lst[0]
+        except KeyError:
+            if default is not _missing:
+                return default
+            raise exceptions.BadRequestKeyError(key)
+
+    def popitem(self):
+        """Pop an item from the dict."""
+        try:
+            item = dict.popitem(self)
+
+            if len(item[1]) == 0:
+                raise exceptions.BadRequestKeyError(item)
+
+            return (item[0], item[1][0])
+        except KeyError as e:
+            raise exceptions.BadRequestKeyError(e.args[0])
+
+    def poplist(self, key):
+        """Pop the list for a key from the dict.  If the key is not in the dict
+        an empty list is returned.
+
+        .. versionchanged:: 0.5
+           If the key no longer exists, a list is returned instead of
+           raising an error.
+        """
+        return dict.pop(self, key, [])
+
+    def popitemlist(self):
+        """Pop a ``(key, list)`` tuple from the dict."""
+        try:
+            return dict.popitem(self)
+        except KeyError as e:
+            raise exceptions.BadRequestKeyError(e.args[0])
+
+    def __copy__(self):
+        return self.copy()
+
+    def __deepcopy__(self, memo):
+        return self.deepcopy(memo=memo)
+
+    def __repr__(self):
+        return "%s(%r)" % (self.__class__.__name__, list(iteritems(self, multi=True)))
+
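+# Flat vs. non-flat sketch (illustrative): ``to_dict`` keeps only the first
+# value per key unless ``flat=False`` is passed.
+#
+#     d = MultiDict([("a", 1), ("a", 2)])
+#     d.to_dict()            # {'a': 1}
+#     d.to_dict(flat=False)  # {'a': [1, 2]}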
+
+class _omd_bucket(object):
+    """Wraps values in the :class:`OrderedMultiDict`.  This makes it
+    possible to keep an order over multiple different keys.  It requires
+    a lot of extra memory and slows down access a lot, but makes it
+    possible to access elements in O(1) and iterate in O(n).
+    """
+
+    __slots__ = ("prev", "key", "value", "next")
+
+    def __init__(self, omd, key, value):
+        self.prev = omd._last_bucket
+        self.key = key
+        self.value = value
+        self.next = None
+
+        if omd._first_bucket is None:
+            omd._first_bucket = self
+        if omd._last_bucket is not None:
+            omd._last_bucket.next = self
+        omd._last_bucket = self
+
+    def unlink(self, omd):
+        if self.prev:
+            self.prev.next = self.next
+        if self.next:
+            self.next.prev = self.prev
+        if omd._first_bucket is self:
+            omd._first_bucket = self.next
+        if omd._last_bucket is self:
+            omd._last_bucket = self.prev
+
+
+@native_itermethods(["keys", "values", "items", "lists", "listvalues"])
+class OrderedMultiDict(MultiDict):
+    """Works like a regular :class:`MultiDict` but preserves the
+    order of the fields.  To convert the ordered multi dict into a
+    list you can use the :meth:`items` method and pass it ``multi=True``.
+
+    In general an :class:`OrderedMultiDict` is an order of magnitude
+    slower than a :class:`MultiDict`.
+
+    .. admonition:: note
+
+       Due to a limitation in Python you cannot convert an ordered
+       multi dict into a regular dict by using ``dict(multidict)``.
+       Instead you have to use the :meth:`to_dict` method, otherwise
+       the internal bucket objects are exposed.
+    """
+
+    def __init__(self, mapping=None):
+        dict.__init__(self)
+        self._first_bucket = self._last_bucket = None
+        if mapping is not None:
+            OrderedMultiDict.update(self, mapping)
+
+    def __eq__(self, other):
+        if not isinstance(other, MultiDict):
+            return NotImplemented
+        if isinstance(other, OrderedMultiDict):
+            iter1 = iteritems(self, multi=True)
+            iter2 = iteritems(other, multi=True)
+            try:
+                for k1, v1 in iter1:
+                    k2, v2 = next(iter2)
+                    if k1 != k2 or v1 != v2:
+                        return False
+            except StopIteration:
+                return False
+            try:
+                next(iter2)
+            except StopIteration:
+                return True
+            return False
+        if len(self) != len(other):
+            return False
+        for key, values in iterlists(self):
+            if other.getlist(key) != values:
+                return False
+        return True
+
+    __hash__ = None
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __reduce_ex__(self, protocol):
+        return type(self), (list(iteritems(self, multi=True)),)
+
+    def __getstate__(self):
+        return list(iteritems(self, multi=True))
+
+    def __setstate__(self, values):
+        dict.clear(self)
+        for key, value in values:
+            self.add(key, value)
+
+    def __getitem__(self, key):
+        if key in self:
+            return dict.__getitem__(self, key)[0].value
+        raise exceptions.BadRequestKeyError(key)
+
+    def __setitem__(self, key, value):
+        self.poplist(key)
+        self.add(key, value)
+
+    def __delitem__(self, key):
+        self.pop(key)
+
+    def keys(self):
+        return (key for key, value in iteritems(self))
+
+    __iter__ = keys
+
+    def values(self):
+        return (value for key, value in iteritems(self))
+
+    def items(self, multi=False):
+        ptr = self._first_bucket
+        if multi:
+            while ptr is not None:
+                yield ptr.key, ptr.value
+                ptr = ptr.next
+        else:
+            returned_keys = set()
+            while ptr is not None:
+                if ptr.key not in returned_keys:
+                    returned_keys.add(ptr.key)
+                    yield ptr.key, ptr.value
+                ptr = ptr.next
+
+    def lists(self):
+        returned_keys = set()
+        ptr = self._first_bucket
+        while ptr is not None:
+            if ptr.key not in returned_keys:
+                yield ptr.key, self.getlist(ptr.key)
+                returned_keys.add(ptr.key)
+            ptr = ptr.next
+
+    def listvalues(self):
+        for _key, values in iterlists(self):
+            yield values
+
+    def add(self, key, value):
+        dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
+
+    def getlist(self, key, type=None):
+        try:
+            rv = dict.__getitem__(self, key)
+        except KeyError:
+            return []
+        if type is None:
+            return [x.value for x in rv]
+        result = []
+        for item in rv:
+            try:
+                result.append(type(item.value))
+            except ValueError:
+                pass
+        return result
+
+    def setlist(self, key, new_list):
+        self.poplist(key)
+        for value in new_list:
+            self.add(key, value)
+
+    def setlistdefault(self, key, default_list=None):
+        raise TypeError("setlistdefault is unsupported for ordered multi dicts")
+
+    def update(self, mapping):
+        for key, value in iter_multi_items(mapping):
+            OrderedMultiDict.add(self, key, value)
+
+    def poplist(self, key):
+        buckets = dict.pop(self, key, ())
+        for bucket in buckets:
+            bucket.unlink(self)
+        return [x.value for x in buckets]
+
+    def pop(self, key, default=_missing):
+        try:
+            buckets = dict.pop(self, key)
+        except KeyError:
+            if default is not _missing:
+                return default
+            raise exceptions.BadRequestKeyError(key)
+        for bucket in buckets:
+            bucket.unlink(self)
+        return buckets[0].value
+
+    def popitem(self):
+        try:
+            key, buckets = dict.popitem(self)
+        except KeyError as e:
+            raise exceptions.BadRequestKeyError(e.args[0])
+        for bucket in buckets:
+            bucket.unlink(self)
+        return key, buckets[0].value
+
+    def popitemlist(self):
+        try:
+            key, buckets = dict.popitem(self)
+        except KeyError as e:
+            raise exceptions.BadRequestKeyError(e.args[0])
+        for bucket in buckets:
+            bucket.unlink(self)
+        return key, [x.value for x in buckets]
+
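+# Ordering sketch (illustrative): insertion order is kept across keys, and
+# ``to_dict`` must be used instead of ``dict(omd)``, which would expose the
+# internal ``_omd_bucket`` objects.
+#
+#     omd = OrderedMultiDict([("a", 1), ("b", 2), ("a", 3)])
+#     list(omd.items(multi=True))  # [('a', 1), ('b', 2), ('a', 3)]
+#     omd.to_dict(flat=False)      # {'a': [1, 3], 'b': [2]}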
+
+def _options_header_vkw(value, kw):
+    return dump_options_header(
+        value, dict((k.replace("_", "-"), v) for k, v in kw.items())
+    )
+
+
+def _unicodify_header_value(value):
+    if isinstance(value, bytes):
+        value = value.decode("latin-1")
+    if not isinstance(value, text_type):
+        value = text_type(value)
+    return value
+
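+# Normalization sketch (illustrative): bytes are decoded as latin-1 and
+# anything else is coerced to text.
+#
+#     _unicodify_header_value(b"text/html")  # u'text/html'
+#     _unicodify_header_value(42)            # u'42'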
+
+@native_itermethods(["keys", "values", "items"])
+class Headers(object):
+    """An object that stores some headers.  It has a dict-like interface
+    but is ordered and can store the same keys multiple times.
+
+    This data structure is useful if you want a nicer way to handle WSGI
+    headers which are stored as tuples in a list.
+
+    From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
+    also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
+    and will render a page for a ``400 BAD REQUEST`` if caught in a
+    catch-all for HTTP exceptions.
+
+    Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
+    class, with the exception of `__getitem__`.  :mod:`wsgiref` will return
+    `None` for ``headers['missing']``, whereas :class:`Headers` will raise
+    a :class:`KeyError`.
+
+    To create a new :class:`Headers` object pass it a list or dict of headers
+    which are used as default values.  This does not reuse the list passed
+    to the constructor for internal usage.
+
+    :param defaults: The list of default values for the :class:`Headers`.
+
+    .. versionchanged:: 0.9
+       This data structure now stores unicode values similar to how the
+       multi dicts do it.  The main difference is that bytes can be set
+       as well, which will automatically be latin1-decoded.
+
+    .. versionchanged:: 0.9
+       The :meth:`linked` function was removed without replacement as it
+       was an API that does not support the changes to the encoding model.
+    """
+
+    def __init__(self, defaults=None):
+        self._list = []
+        if defaults is not None:
+            if isinstance(defaults, (list, Headers)):
+                self._list.extend(defaults)
+            else:
+                self.extend(defaults)
+
+    def __getitem__(self, key, _get_mode=False):
+        if not _get_mode:
+            if isinstance(key, integer_types):
+                return self._list[key]
+            elif isinstance(key, slice):
+                return self.__class__(self._list[key])
+        if not isinstance(key, string_types):
+            raise exceptions.BadRequestKeyError(key)
+        ikey = key.lower()
+        for k, v in self._list:
+            if k.lower() == ikey:
+                return v
+        # micro optimization: if we are in get mode we will catch that
+        # exception one stack level down so we can raise a standard
+        # key error instead of our special one.
+        if _get_mode:
+            raise KeyError()
+        raise exceptions.BadRequestKeyError(key)
+
+    def __eq__(self, other):
+        return other.__class__ is self.__class__ and set(other._list) == set(self._list)
+
+    __hash__ = None
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def get(self, key, default=None, type=None, as_bytes=False):
+        """Return the default value if the requested data doesn't exist.
+        If `type` is provided and is a callable it should convert the value,
+        return it or raise a :exc:`ValueError` if that is not possible.  In
+        this case the function will return the default as if the value was not
+        found:
+
+        >>> d = Headers([('Content-Length', '42')])
+        >>> d.get('Content-Length', type=int)
+        42
+
+        If a headers object is bound you must not add unicode strings
+        because no encoding takes place.
+
+        .. versionadded:: 0.9
+           Added support for `as_bytes`.
+
+        :param key: The key to be looked up.
+        :param default: The default value to be returned if the key can't
+                        be looked up.  If not further specified `None` is
+                        returned.
+        :param type: A callable that is used to cast the value in the
+                     :class:`Headers`.  If a :exc:`ValueError` is raised
+                     by this callable the default value is returned.
+        :param as_bytes: return bytes instead of unicode strings.
+        """
+        try:
+            rv = self.__getitem__(key, _get_mode=True)
+        except KeyError:
+            return default
+        if as_bytes:
+            rv = rv.encode("latin1")
+        if type is None:
+            return rv
+        try:
+            return type(rv)
+        except ValueError:
+            return default
+
+    def getlist(self, key, type=None, as_bytes=False):
+        """Return the list of items for a given key. If that key is not in the
+        :class:`Headers`, the return value will be an empty list.  Just as
+        :meth:`get` :meth:`getlist` accepts a `type` parameter.  All items will
+        be converted with the callable defined there.
+
+        .. versionadded:: 0.9
+           Added support for `as_bytes`.
+
+        :param key: The key to be looked up.
+        :param type: A callable that is used to cast the value in the
+                     :class:`Headers`.  If a :exc:`ValueError` is raised
+                     by this callable the value will be removed from the list.
+        :return: a :class:`list` of all the values for the key.
+        :param as_bytes: return bytes instead of unicode strings.
+        """
+        ikey = key.lower()
+        result = []
+        for k, v in self:
+            if k.lower() == ikey:
+                if as_bytes:
+                    v = v.encode("latin1")
+                if type is not None:
+                    try:
+                        v = type(v)
+                    except ValueError:
+                        continue
+                result.append(v)
+        return result
+
+    def get_all(self, name):
+        """Return a list of all the values for the named field.
+
+        This method is compatible with the :mod:`wsgiref`
+        :meth:`~wsgiref.headers.Headers.get_all` method.
+        """
+        return self.getlist(name)
+
+    def items(self, lower=False):
+        for key, value in self:
+            if lower:
+                key = key.lower()
+            yield key, value
+
+    def keys(self, lower=False):
+        for key, _ in iteritems(self, lower):
+            yield key
+
+    def values(self):
+        for _, value in iteritems(self):
+            yield value
+
+    def extend(self, iterable):
+        """Extend the headers with a dict or an iterable yielding keys and
+        values.
+        """
+        if isinstance(iterable, dict):
+            for key, value in iteritems(iterable):
+                if isinstance(value, (tuple, list)):
+                    for v in value:
+                        self.add(key, v)
+                else:
+                    self.add(key, value)
+        else:
+            for key, value in iterable:
+                self.add(key, value)
+
+    def __delitem__(self, key, _index_operation=True):
+        if _index_operation and isinstance(key, (integer_types, slice)):
+            del self._list[key]
+            return
+        key = key.lower()
+        new = []
+        for k, v in self._list:
+            if k.lower() != key:
+                new.append((k, v))
+        self._list[:] = new
+
+    def remove(self, key):
+        """Remove a key.
+
+        :param key: The key to be removed.
+        """
+        return self.__delitem__(key, _index_operation=False)
+
+    def pop(self, key=None, default=_missing):
+        """Removes and returns a key or index.
+
+        :param key: The key to be popped.  If this is an integer the item at
+                    that position is removed; if it's a string, the value for
+                    that key is.  If the key is omitted or `None` the last
+                    item is removed.
+        :return: an item.
+        """
+        if key is None:
+            return self._list.pop()
+        if isinstance(key, integer_types):
+            return self._list.pop(key)
+        try:
+            rv = self[key]
+            self.remove(key)
+        except KeyError:
+            if default is not _missing:
+                return default
+            raise
+        return rv
+
+    def popitem(self):
+        """Removes a key or index and returns a (key, value) item."""
+        return self.pop()
+
+    def __contains__(self, key):
+        """Check if a key is present."""
+        try:
+            self.__getitem__(key, _get_mode=True)
+        except KeyError:
+            return False
+        return True
+
+    has_key = __contains__
+
+    def __iter__(self):
+        """Yield ``(key, value)`` tuples."""
+        return iter(self._list)
+
+    def __len__(self):
+        return len(self._list)
+
+    def add(self, _key, _value, **kw):
+        """Add a new header tuple to the list.
+
+        Keyword arguments can specify additional parameters for the header
+        value, with underscores converted to dashes::
+
+        >>> d = Headers()
+        >>> d.add('Content-Type', 'text/plain')
+        >>> d.add('Content-Disposition', 'attachment', filename='foo.png')
+
+        The keyword argument dumping uses :func:`dump_options_header`
+        behind the scenes.
+
+        .. versionadded:: 0.4.1
+            keyword arguments were added for :mod:`wsgiref` compatibility.
+        """
+        if kw:
+            _value = _options_header_vkw(_value, kw)
+        _key = _unicodify_header_value(_key)
+        _value = _unicodify_header_value(_value)
+        self._validate_value(_value)
+        self._list.append((_key, _value))
+
+    def _validate_value(self, value):
+        if not isinstance(value, text_type):
+            raise TypeError("Value should be unicode.")
+        if u"\n" in value or u"\r" in value:
+            raise ValueError(
+                "Detected newline in header value.  This is "
+                "a potential security problem"
+            )
+
+    def add_header(self, _key, _value, **_kw):
+        """Add a new header tuple to the list.
+
+        An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
+        :meth:`~wsgiref.headers.Headers.add_header` method.
+        """
+        self.add(_key, _value, **_kw)
+
+    def clear(self):
+        """Clears all headers."""
+        del self._list[:]
+
+    def set(self, _key, _value, **kw):
+        """Remove all header tuples for `key` and add a new one.  The newly
+        added key either appears at the end of the list if there was no
+        entry or replaces the first one.
+
+        Keyword arguments can specify additional parameters for the header
+        value, with underscores converted to dashes.  See :meth:`add` for
+        more information.
+
+        .. versionchanged:: 0.6.1
+           :meth:`set` now accepts the same arguments as :meth:`add`.
+
+        :param key: The key to be inserted.
+        :param value: The value to be inserted.
+        """
+        if kw:
+            _value = _options_header_vkw(_value, kw)
+        _key = _unicodify_header_value(_key)
+        _value = _unicodify_header_value(_value)
+        self._validate_value(_value)
+        if not self._list:
+            self._list.append((_key, _value))
+            return
+        listiter = iter(self._list)
+        ikey = _key.lower()
+        for idx, (old_key, _old_value) in enumerate(listiter):
+            if old_key.lower() == ikey:
+                # replace the first occurrence
+                self._list[idx] = (_key, _value)
+                break
+        else:
+            self._list.append((_key, _value))
+            return
+        self._list[idx + 1 :] = [t for t in listiter if t[0].lower() != ikey]
+
+    def setdefault(self, key, default):
+        """Returns the value for the key if it is in the dict, otherwise it
+        returns `default` and sets that value for `key`.
+
+        :param key: The key to be looked up.
+        :param default: The default value to be returned if the key is not
+                        in the dict.  If not further specified it's `None`.
+        """
+        if key in self:
+            return self[key]
+        self.set(key, default)
+        return default
+
+    def __setitem__(self, key, value):
+        """Like :meth:`set` but also supports index/slice based setting."""
+        if isinstance(key, (slice, integer_types)):
+            if isinstance(key, integer_types):
+                value = [value]
+            value = [
+                (_unicodify_header_value(k), _unicodify_header_value(v))
+                for (k, v) in value
+            ]
+            [self._validate_value(v) for (k, v) in value]
+            if isinstance(key, integer_types):
+                self._list[key] = value[0]
+            else:
+                self._list[key] = value
+        else:
+            self.set(key, value)
+
+    def to_list(self, charset="iso-8859-1"):
+        """Convert the headers into a list suitable for WSGI.
+
+        .. deprecated:: 0.9
+        """
+        from warnings import warn
+
+        warn(
+            "'to_list' deprecated as of version 0.9 and will be removed"
+            " in version 1.0. Use 'to_wsgi_list' instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return self.to_wsgi_list()
+
+    def to_wsgi_list(self):
+        """Convert the headers into a list suitable for WSGI.
+
+        The values are byte strings in Python 2 converted to latin1 and unicode
+        strings in Python 3 for the WSGI server to encode.
+
+        :return: list
+        """
+        if PY2:
+            return [(to_native(k), v.encode("latin1")) for k, v in self]
+        return list(self)
+
+    def copy(self):
+        return self.__class__(self._list)
+
+    def __copy__(self):
+        return self.copy()
+
+    def __str__(self):
+        """Returns formatted headers suitable for HTTP transmission."""
+        strs = []
+        for key, value in self.to_wsgi_list():
+            strs.append("%s: %s" % (key, value))
+        strs.append("\r\n")
+        return "\r\n".join(strs)
+
+    def __repr__(self):
+        return "%s(%r)" % (self.__class__.__name__, list(self))
+
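+# Usage sketch (illustrative): ``add`` appends and dumps keyword arguments
+# as header options, while ``set`` replaces all values for a key.
+#
+#     h = Headers()
+#     h.add("Content-Disposition", "attachment", filename="foo.csv")
+#     h["Content-Disposition"]   # 'attachment; filename=foo.csv'
+#     h.set("X-Version", "1")
+#     h.set("X-Version", "2")
+#     h.getlist("X-Version")     # ['2']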
+
+class ImmutableHeadersMixin(object):
+    """Makes a :class:`Headers` immutable.  We do not mark them as
+    hashable though since the only use case for this data structure
+    in Werkzeug is a view on a mutable structure.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    def __delitem__(self, key, **kwargs):
+        is_immutable(self)
+
+    def __setitem__(self, key, value):
+        is_immutable(self)
+
+    set = __setitem__
+
+    def add(self, item):
+        is_immutable(self)
+
+    remove = add_header = add
+
+    def extend(self, iterable):
+        is_immutable(self)
+
+    def insert(self, pos, value):
+        is_immutable(self)
+
+    def pop(self, index=-1):
+        is_immutable(self)
+
+    def popitem(self):
+        is_immutable(self)
+
+    def setdefault(self, key, default):
+        is_immutable(self)
+
+
+class EnvironHeaders(ImmutableHeadersMixin, Headers):
+    """Read only version of the headers from a WSGI environment.  This
+    provides the same interface as `Headers` and is constructed from
+    a WSGI environment.
+
+    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
+    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
+    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
+    HTTP exceptions.
+    """
+
+    def __init__(self, environ):
+        self.environ = environ
+
+    def __eq__(self, other):
+        return self.environ is other.environ
+
+    __hash__ = None
+
+    def __getitem__(self, key, _get_mode=False):
+        # _get_mode is a no-op for this class as there is no index, but
+        # it is used because get() calls it.
+        if not isinstance(key, string_types):
+            raise KeyError(key)
+        key = key.upper().replace("-", "_")
+        if key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
+            return _unicodify_header_value(self.environ[key])
+        return _unicodify_header_value(self.environ["HTTP_" + key])
+
+    def __len__(self):
+        # the iter is necessary because otherwise list calls our
+        # len which would call list again and so forth.
+        return len(list(iter(self)))
+
+    def __iter__(self):
+        for key, value in iteritems(self.environ):
+            if key.startswith("HTTP_") and key not in (
+                "HTTP_CONTENT_TYPE",
+                "HTTP_CONTENT_LENGTH",
+            ):
+                yield (
+                    key[5:].replace("_", "-").title(),
+                    _unicodify_header_value(value),
+                )
+            elif key in ("CONTENT_TYPE", "CONTENT_LENGTH") and value:
+                yield (key.replace("_", "-").title(), _unicodify_header_value(value))
+
+    def copy(self):
+        raise TypeError("cannot create %r copies" % self.__class__.__name__)
+
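+# Mapping sketch (illustrative): ``HTTP_*`` keys lose their prefix and are
+# title-cased with dashes; CONTENT_TYPE and CONTENT_LENGTH pass through.
+#
+#     environ = {"CONTENT_TYPE": "text/html",
+#                "HTTP_X_FORWARDED_FOR": "1.2.3.4"}
+#     h = EnvironHeaders(environ)
+#     h["X-Forwarded-For"]  # '1.2.3.4'
+#     sorted(h)  # [('Content-Type', 'text/html'),
+#                #  ('X-Forwarded-For', '1.2.3.4')]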
+
+@native_itermethods(["keys", "values", "items", "lists", "listvalues"])
+class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
+    """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
+    instances as sequence and it will combine the return values of all wrapped
+    dicts:
+
+    >>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
+    >>> post = MultiDict([('foo', 'bar')])
+    >>> get = MultiDict([('blub', 'blah')])
+    >>> combined = CombinedMultiDict([get, post])
+    >>> combined['foo']
+    'bar'
+    >>> combined['blub']
+    'blah'
+
+    This works for all read operations and will raise a `TypeError` for
+    methods that usually change data, which isn't possible here.
+
+    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
+    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
+    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
+    exceptions.
+    """
+
+    def __reduce_ex__(self, protocol):
+        return type(self), (self.dicts,)
+
+    def __init__(self, dicts=None):
+        self.dicts = dicts or []
+
+    @classmethod
+    def fromkeys(cls):
+        raise TypeError("cannot create %r instances by fromkeys" % cls.__name__)
+
+    def __getitem__(self, key):
+        for d in self.dicts:
+            if key in d:
+                return d[key]
+        raise exceptions.BadRequestKeyError(key)
+
+    def get(self, key, default=None, type=None):
+        for d in self.dicts:
+            if key in d:
+                if type is not None:
+                    try:
+                        return type(d[key])
+                    except ValueError:
+                        continue
+                return d[key]
+        return default
+
+    def getlist(self, key, type=None):
+        rv = []
+        for d in self.dicts:
+            rv.extend(d.getlist(key, type))
+        return rv
+
+    def _keys_impl(self):
+        """This function exists so __len__ can be implemented more efficiently,
+        saving one list creation from an iterator.
+
+        Using this for Python 2's ``dict.keys`` behavior would be useless since
+        `dict.keys` in Python 2 returns a list, while we have a set here.
+        """
+        rv = set()
+        for d in self.dicts:
+            rv.update(iterkeys(d))
+        return rv
+
+    def keys(self):
+        return iter(self._keys_impl())
+
+    __iter__ = keys
+
+    def items(self, multi=False):
+        found = set()
+        for d in self.dicts:
+            for key, value in iteritems(d, multi):
+                if multi:
+                    yield key, value
+                elif key not in found:
+                    found.add(key)
+                    yield key, value
+
+    def values(self):
+        for _key, value in iteritems(self):
+            yield value
+
+    def lists(self):
+        rv = {}
+        for d in self.dicts:
+            for key, values in iterlists(d):
+                rv.setdefault(key, []).extend(values)
+        return iteritems(rv)
+
+    def listvalues(self):
+        return (x[1] for x in self.lists())
+
+    def copy(self):
+        """Return a shallow mutable copy of this object.
+
+        This returns a :class:`MultiDict` representing the data at the
+        time of copying. The copy will no longer reflect changes to the
+        wrapped dicts.
+
+        .. versionchanged:: 0.15
+            Return a mutable :class:`MultiDict`.
+        """
+        return MultiDict(self)
+
+    def to_dict(self, flat=True):
+        """Return the contents as regular dict.  If `flat` is `True` the
+        returned dict will only have the first item present, if `flat` is
+        `False` all values will be returned as lists.
+
+        :param flat: If set to `False` the dict returned will have lists
+                     with all the values in it.  Otherwise it will only
+                     contain the first item for each key.
+        :return: a :class:`dict`
+        """
+        rv = {}
+        for d in reversed(self.dicts):
+            rv.update(d.to_dict(flat))
+        return rv
+
+    def __len__(self):
+        return len(self._keys_impl())
+
+    def __contains__(self, key):
+        for d in self.dicts:
+            if key in d:
+                return True
+        return False
+
+    has_key = __contains__
+
+    def __repr__(self):
+        return "%s(%r)" % (self.__class__.__name__, self.dicts)
+
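+# Precedence sketch (illustrative): lookups return the value from the first
+# wrapped dict that contains the key; ``getlist`` concatenates all of them.
+#
+#     args = MultiDict([("key", "from-args")])
+#     form = MultiDict([("key", "from-form")])
+#     cd = CombinedMultiDict([args, form])
+#     cd["key"]          # 'from-args'
+#     cd.getlist("key")  # ['from-args', 'from-form']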
+
+class FileMultiDict(MultiDict):
+    """A special :class:`MultiDict` that has convenience methods to add
+    files to it.  This is used for :class:`EnvironBuilder` and generally
+    useful for unittesting.
+
+    .. versionadded:: 0.5
+    """
+
+    def add_file(self, name, file, filename=None, content_type=None):
+        """Adds a new file to the dict.  `file` can be a file name or
+        a :class:`file`-like or a :class:`FileStorage` object.
+
+        :param name: the name of the field.
+        :param file: a filename or :class:`file`-like object
+        :param filename: an optional filename
+        :param content_type: an optional content type
+        """
+        if isinstance(file, FileStorage):
+            value = file
+        else:
+            if isinstance(file, string_types):
+                if filename is None:
+                    filename = file
+                file = open(file, "rb")
+            if filename and content_type is None:
+                content_type = (
+                    mimetypes.guess_type(filename)[0] or "application/octet-stream"
+                )
+            value = FileStorage(file, filename, name, content_type)
+
+        self.add(name, value)
+
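+# add_file sketch (illustrative): a stream plus filename is wrapped in a
+# :class:`FileStorage`, with the content type guessed from the filename.
+#
+#     fd = FileMultiDict()
+#     fd.add_file("avatar", BytesIO(b"..."), filename="avatar.png")
+#     fd["avatar"].content_type  # 'image/png'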
+
+class ImmutableDict(ImmutableDictMixin, dict):
+    """An immutable :class:`dict`.
+
+    .. versionadded:: 0.5
+    """
+
+    def __repr__(self):
+        return "%s(%s)" % (self.__class__.__name__, dict.__repr__(self))
+
+    def copy(self):
+        """Return a shallow mutable copy of this object.  Keep in mind that
+        the standard library's :func:`copy` function is a no-op for this class
+        like for any other python immutable type (eg: :class:`tuple`).
+        """
+        return dict(self)
+
+    def __copy__(self):
+        return self
+
+
+class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
+    """An immutable :class:`MultiDict`.
+
+    .. versionadded:: 0.5
+    """
+
+    def copy(self):
+        """Return a shallow mutable copy of this object.  Keep in mind that
+        the standard library's :func:`copy` function is a no-op for this class
+        like for any other python immutable type (eg: :class:`tuple`).
+        """
+        return MultiDict(self)
+
+    def __copy__(self):
+        return self
+
+
+class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
+    """An immutable :class:`OrderedMultiDict`.
+
+    .. versionadded:: 0.6
+    """
+
+    def _iter_hashitems(self):
+        return enumerate(iteritems(self, multi=True))
+
+    def copy(self):
+        """Return a shallow mutable copy of this object.  Keep in mind that
+        the standard library's :func:`copy` function is a no-op for this class
+        like for any other python immutable type (eg: :class:`tuple`).
+        """
+        return OrderedMultiDict(self)
+
+    def __copy__(self):
+        return self
+
+
+@native_itermethods(["values"])
+class Accept(ImmutableList):
+    """An :class:`Accept` object is just a list subclass for lists of
+    ``(value, quality)`` tuples.  It is automatically sorted by specificity
+    and quality.
+
+    All :class:`Accept` objects work similarly to a list but provide extra
+    functionality for working with the data.  Containment checks are
+    normalized to the rules of that header:
+
+    >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
+    >>> a.best
+    'ISO-8859-1'
+    >>> 'iso-8859-1' in a
+    True
+    >>> 'UTF8' in a
+    True
+    >>> 'utf7' in a
+    False
+
+    To get the quality for an item you can use normal item lookup:
+
+    >>> a['utf-8']
+    0.7
+    >>> a['utf7']
+    0
+
+    .. versionchanged:: 0.5
+       :class:`Accept` objects are forced immutable now.
+    """
+
+    def __init__(self, values=()):
+        if values is None:
+            list.__init__(self)
+            self.provided = False
+        elif isinstance(values, Accept):
+            self.provided = values.provided
+            list.__init__(self, values)
+        else:
+            self.provided = True
+            values = sorted(
+                values,
+                key=lambda x: (self._specificity(x[0]), x[1], x[0]),
+                reverse=True,
+            )
+            list.__init__(self, values)
+
+    def _specificity(self, value):
+        """Returns a tuple describing the value's specificity."""
+        return (value != "*",)
+
+    def _value_matches(self, value, item):
+        """Check if a value matches a given accept item."""
+        return item == "*" or item.lower() == value.lower()
+
+    def __getitem__(self, key):
+        """Besides index lookup (getting item n) you can also pass it a string
+        to get the quality for the item.  If the item is not in the list, the
+        returned quality is ``0``.
+        """
+        if isinstance(key, string_types):
+            return self.quality(key)
+        return list.__getitem__(self, key)
+
+    def quality(self, key):
+        """Returns the quality of the key.
+
+        .. versionadded:: 0.6
+           In previous versions you had to use the item-lookup syntax
+           (eg: ``obj[key]`` instead of ``obj.quality(key)``)
+        """
+        for item, quality in self:
+            if self._value_matches(key, item):
+                return quality
+        return 0
+
+    def __contains__(self, value):
+        for item, _quality in self:
+            if self._value_matches(value, item):
+                return True
+        return False
+
+    def __repr__(self):
+        return "%s([%s])" % (
+            self.__class__.__name__,
+            ", ".join("(%r, %s)" % (x, y) for x, y in self),
+        )
+
+    def index(self, key):
+        """Get the position of an entry or raise :exc:`ValueError`.
+
+        :param key: The key to be looked up.
+
+        .. versionchanged:: 0.5
+           This used to raise :exc:`IndexError`, which was inconsistent
+           with the list API.
+        """
+        if isinstance(key, string_types):
+            for idx, (item, _quality) in enumerate(self):
+                if self._value_matches(key, item):
+                    return idx
+            raise ValueError(key)
+        return list.index(self, key)
+
+    def find(self, key):
+        """Get the position of an entry or return -1.
+
+        :param key: The key to be looked up.
+        """
+        try:
+            return self.index(key)
+        except ValueError:
+            return -1
+
+    def values(self):
+        """Iterate over all values."""
+        for item in self:
+            yield item[0]
+
+    def to_header(self):
+        """Convert the header set into an HTTP header string."""
+        result = []
+        for value, quality in self:
+            if quality != 1:
+                value = "%s;q=%s" % (value, quality)
+            result.append(value)
+        return ",".join(result)
+
+    def __str__(self):
+        return self.to_header()
+
+    def _best_single_match(self, match):
+        for client_item, quality in self:
+            if self._value_matches(match, client_item):
+                # self is sorted by specificity descending, we can exit
+                return client_item, quality
+
+    def best_match(self, matches, default=None):
+        """Returns the best match from a list of possible matches based
+        on the specificity and quality of the client. If two items have the
+        same quality and specificity, the one is returned that comes first.
+
+        :param matches: a list of matches to check for
+        :param default: the value that is returned if none match
+        """
+        result = default
+        best_quality = -1
+        best_specificity = (-1,)
+        for server_item in matches:
+            match = self._best_single_match(server_item)
+            if not match:
+                continue
+            client_item, quality = match
+            specificity = self._specificity(client_item)
+            if quality <= 0 or quality < best_quality:
+                continue
+            # better quality or same quality but more specific => better match
+            if quality > best_quality or specificity > best_specificity:
+                result = server_item
+                best_quality = quality
+                best_specificity = specificity
+        return result
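+
+    # Usage sketch (illustrative, not from the original source): for a
+    # client sending ``Accept: text/html,application/json;q=0.8``::
+    #
+    #     accept = MIMEAccept([("text/html", 1), ("application/json", 0.8)])
+    #     accept.best_match(["application/json", "text/plain"])
+    #     # -> 'application/json' (text/plain is not acceptable at all)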
+
+    @property
+    def best(self):
+        """The best match as value."""
+        if self:
+            return self[0][0]
+
+
+class MIMEAccept(Accept):
+    """Like :class:`Accept` but with special methods and behavior for
+    mimetypes.
+    """
+
+    def _specificity(self, value):
+        return tuple(x != "*" for x in value.split("/", 1))
+
+    def _value_matches(self, value, item):
+        def _normalize(x):
+            x = x.lower()
+            return ("*", "*") if x == "*" else x.split("/", 1)
+
+        # This value comes from the application, which is trusted.  To avoid
+        # developer frustration we still check it for valid values.
+        if "/" not in value:
+            raise ValueError("invalid mimetype %r" % value)
+        value_type, value_subtype = _normalize(value)
+        if value_type == "*" and value_subtype != "*":
+            raise ValueError("invalid mimetype %r" % value)
+
+        if "/" not in item:
+            return False
+        item_type, item_subtype = _normalize(item)
+        if item_type == "*" and item_subtype != "*":
+            return False
+        return (
+            item_type == item_subtype == "*" or value_type == value_subtype == "*"
+        ) or (
+            item_type == value_type
+            and (
+                item_subtype == "*"
+                or value_subtype == "*"
+                or item_subtype == value_subtype
+            )
+        )
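+
+    # Matching sketch (illustrative): ``"text/html" in MIMEAccept([("text/*", 1)])``
+    # is ``True`` because a wildcard subtype matches any concrete subtype,
+    # while ``"image/png"`` would not match.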
+
+    @property
+    def accept_html(self):
+        """True if this object accepts HTML."""
+        return (
+            "text/html" in self or "application/xhtml+xml" in self or self.accept_xhtml
+        )
+
+    @property
+    def accept_xhtml(self):
+        """True if this object accepts XHTML."""
+        return "application/xhtml+xml" in self or "application/xml" in self
+
+    @property
+    def accept_json(self):
+        """True if this object accepts JSON."""
+        return "application/json" in self
+
+
+class LanguageAccept(Accept):
+    """Like :class:`Accept` but with normalization for languages."""
+
+    def _value_matches(self, value, item):
+        def _normalize(language):
+            return _locale_delim_re.split(language.lower())
+
+        return item == "*" or _normalize(value) == _normalize(item)
+
+
+class CharsetAccept(Accept):
+    """Like :class:`Accept` but with normalization for charsets."""
+
+    def _value_matches(self, value, item):
+        def _normalize(name):
+            try:
+                return codecs.lookup(name).name
+            except LookupError:
+                return name.lower()
+
+        return item == "*" or _normalize(value) == _normalize(item)
+
+
+def cache_property(key, empty, type):
+    """Return a new property object for a cache header.  Useful if you
+    want to add support for a cache extension in a subclass."""
+    return property(
+        lambda x: x._get_cache_value(key, empty, type),
+        lambda x, v: x._set_cache_value(key, v, type),
+        lambda x: x._del_cache_value(key),
+        "accessor for %r" % key,
+    )
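+
+# Illustrative sketch (not part of the original module): a subclass can use
+# ``cache_property`` to expose an extension directive, e.g. the RFC 8246
+# ``immutable`` response directive::
+#
+#     class ExtendedResponseCacheControl(ResponseCacheControl):
+#         immutable = cache_property("immutable", None, bool)
+#
+#     cc = ExtendedResponseCacheControl()
+#     cc.immutable = True
+#     cc.to_header()  # -> 'immutable'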
+
+
+class _CacheControl(UpdateDictMixin, dict):
+    """Subclass of a dict that stores values for a Cache-Control header.  It
+    has accessors for all the cache-control directives specified in RFC 2616.
+    The class does not differentiate between request and response directives.
+
+    Because the cache-control directives in the HTTP header use dashes, the
+    Python descriptors use underscores instead.
+
+    To get a header of the :class:`CacheControl` object again you can convert
+    the object into a string or call the :meth:`to_header` method.  If you plan
+    to subclass it and add your own items, have a look at the source code of
+    that class.
+
+    .. versionchanged:: 0.4
+
+       Setting `no_cache` or `private` to boolean `True` will set the implicit
+       none-value which is ``*``:
+
+       >>> cc = ResponseCacheControl()
+       >>> cc.no_cache = True
+       >>> cc
+       <ResponseCacheControl 'no-cache'>
+       >>> cc.no_cache
+       '*'
+       >>> cc.no_cache = None
+       >>> cc
+       <ResponseCacheControl ''>
+
+       In versions before 0.5 the behavior documented here affected the now
+       no longer existing `CacheControl` class.
+    """
+
+    no_cache = cache_property("no-cache", "*", None)
+    no_store = cache_property("no-store", None, bool)
+    max_age = cache_property("max-age", -1, int)
+    no_transform = cache_property("no-transform", None, None)
+
+    def __init__(self, values=(), on_update=None):
+        dict.__init__(self, values or ())
+        self.on_update = on_update
+        self.provided = values is not None
+
+    def _get_cache_value(self, key, empty, type):
+        """Used internally by the accessor properties."""
+        if type is bool:
+            return key in self
+        if key in self:
+            value = self[key]
+            if value is None:
+                return empty
+            elif type is not None:
+                try:
+                    value = type(value)
+                except ValueError:
+                    pass
+            return value
+
+    def _set_cache_value(self, key, value, type):
+        """Used internally by the accessor properties."""
+        if type is bool:
+            if value:
+                self[key] = None
+            else:
+                self.pop(key, None)
+        else:
+            if value is None:
+                self.pop(key, None)
+            elif value is True:
+                self[key] = None
+            else:
+                self[key] = value
+
+    def _del_cache_value(self, key):
+        """Used internally by the accessor properties."""
+        if key in self:
+            del self[key]
+
+    def to_header(self):
+        """Convert the stored values into a cache control header."""
+        return dump_header(self)
+
+    def __str__(self):
+        return self.to_header()
+
+    def __repr__(self):
+        return "<%s %s>" % (
+            self.__class__.__name__,
+            " ".join("%s=%r" % (k, v) for k, v in sorted(self.items())),
+        )
+
+
+class RequestCacheControl(ImmutableDictMixin, _CacheControl):
+    """A cache control for requests.  This is immutable and gives access
+    to all the request-relevant cache control headers.
+
+    To get a header of the :class:`RequestCacheControl` object again you can
+    convert the object into a string or call the :meth:`to_header` method.  If
+    you plan to subclass it and add your own items, have a look at the source
+    code of that class.
+
+    .. versionadded:: 0.5
+       In previous versions a `CacheControl` class existed that was used
+       both for request and response.
+    """
+
+    max_stale = cache_property("max-stale", "*", int)
+    min_fresh = cache_property("min-fresh", "*", int)
+    no_transform = cache_property("no-transform", None, None)
+    only_if_cached = cache_property("only-if-cached", None, bool)
+
+
+class ResponseCacheControl(_CacheControl):
+    """A cache control for responses.  Unlike :class:`RequestCacheControl`
+    this is mutable and gives access to response-relevant cache control
+    headers.
+
+    To get a header of the :class:`ResponseCacheControl` object again you can
+    convert the object into a string or call the :meth:`to_header` method.  If
+    you plan to subclass it and add your own items, have a look at the source
+    code of that class.
+
+    .. versionadded:: 0.5
+       In previous versions a `CacheControl` class existed that was used
+       both for request and response.
+    """
+
+    public = cache_property("public", None, bool)
+    private = cache_property("private", "*", None)
+    must_revalidate = cache_property("must-revalidate", None, bool)
+    proxy_revalidate = cache_property("proxy-revalidate", None, bool)
+    s_maxage = cache_property("s-maxage", None, None)
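+
+    # Usage sketch (illustrative, not from the original source)::
+    #
+    #     cc = ResponseCacheControl()
+    #     cc.public = True
+    #     cc.max_age = 3600
+    #     cc.to_header()  # -> 'public, max-age=3600'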
+
+
+# attach cache_property to the _CacheControl as staticmethod
+# so that others can reuse it.
+_CacheControl.cache_property = staticmethod(cache_property)
+
+
+class CallbackDict(UpdateDictMixin, dict):
+    """A dict that calls a function passed every time something is changed.
+    The function is passed the dict instance.
+    """
+
+    def __init__(self, initial=None, on_update=None):
+        dict.__init__(self, initial or ())
+        self.on_update = on_update
+
+    def __repr__(self):
+        return "<%s %s>" % (self.__class__.__name__, dict.__repr__(self))
+
+
+class HeaderSet(collections_abc.MutableSet):
+    """Similar to the :class:`ETags` class this implements a set-like structure.
+    Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
+    content-language headers.
+
+    If not constructed using the :func:`parse_set_header` function the
+    instantiation works like this:
+
+    >>> hs = HeaderSet(['foo', 'bar', 'baz'])
+    >>> hs
+    HeaderSet(['foo', 'bar', 'baz'])
+    """
+
+    def __init__(self, headers=None, on_update=None):
+        self._headers = list(headers or ())
+        self._set = set([x.lower() for x in self._headers])
+        self.on_update = on_update
+
+    def add(self, header):
+        """Add a new header to the set."""
+        self.update((header,))
+
+    def remove(self, header):
+        """Remove a header from the set.  This raises an :exc:`KeyError` if the
+        header is not in the set.
+
+        .. versionchanged:: 0.5
+            In older versions a :exc:`IndexError` was raised instead of a
+            :exc:`KeyError` if the object was missing.
+
+        :param header: the header to be removed.
+        """
+        key = header.lower()
+        if key not in self._set:
+            raise KeyError(header)
+        self._set.remove(key)
+        # Compare case insensitively against the lowered key so the stored
+        # header is found regardless of the casing passed in.
+        for idx, stored in enumerate(self._headers):
+            if stored.lower() == key:
+                del self._headers[idx]
+                break
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def update(self, iterable):
+        """Add all the headers from the iterable to the set.
+
+        :param iterable: updates the set with the items from the iterable.
+        """
+        inserted_any = False
+        for header in iterable:
+            key = header.lower()
+            if key not in self._set:
+                self._headers.append(header)
+                self._set.add(key)
+                inserted_any = True
+        if inserted_any and self.on_update is not None:
+            self.on_update(self)
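+
+    # Sketch (illustrative): updates are case insensitive, so after
+    # ``hs = HeaderSet(); hs.update(["Accept", "accept"])`` the set holds a
+    # single entry, ``'Accept'``.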
+
+    def discard(self, header):
+        """Like :meth:`remove` but ignores errors.
+
+        :param header: the header to be discarded.
+        """
+        try:
+            return self.remove(header)
+        except KeyError:
+            pass
+
+    def find(self, header):
+        """Return the index of the header in the set or return -1 if not found.
+
+        :param header: the header to be looked up.
+        """
+        header = header.lower()
+        for idx, item in enumerate(self._headers):
+            if item.lower() == header:
+                return idx
+        return -1
+
+    def index(self, header):
+        """Return the index of the header in the set or raise an
+        :exc:`IndexError`.
+
+        :param header: the header to be looked up.
+        """
+        rv = self.find(header)
+        if rv < 0:
+            raise IndexError(header)
+        return rv
+
+    def clear(self):
+        """Clear the set."""
+        self._set.clear()
+        del self._headers[:]
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def as_set(self, preserve_casing=False):
+        """Return the set as real python set type.  When calling this, all
+        the items are converted to lowercase and the ordering is lost.
+
+        :param preserve_casing: if set to `True` the items in the set returned
+                                will have the original case like in the
+                                :class:`HeaderSet`, otherwise they will
+                                be lowercase.
+        """
+        if preserve_casing:
+            return set(self._headers)
+        return set(self._set)
+
+    def to_header(self):
+        """Convert the header set into an HTTP header string."""
+        return ", ".join(map(quote_header_value, self._headers))
+
+    def __getitem__(self, idx):
+        return self._headers[idx]
+
+    def __delitem__(self, idx):
+        rv = self._headers.pop(idx)
+        self._set.remove(rv.lower())
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def __setitem__(self, idx, value):
+        old = self._headers[idx]
+        self._set.remove(old.lower())
+        self._headers[idx] = value
+        self._set.add(value.lower())
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def __contains__(self, header):
+        return header.lower() in self._set
+
+    def __len__(self):
+        return len(self._set)
+
+    def __iter__(self):
+        return iter(self._headers)
+
+    def __nonzero__(self):
+        return bool(self._set)
+
+    def __str__(self):
+        return self.to_header()
+
+    def __repr__(self):
+        return "%s(%r)" % (self.__class__.__name__, self._headers)
+
+
+class ETags(collections_abc.Container, collections_abc.Iterable):
+    """A set that can be used to check if one etag is present in a collection
+    of etags.
+    """
+
+    def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
+        self._strong = frozenset(not star_tag and strong_etags or ())
+        self._weak = frozenset(weak_etags or ())
+        self.star_tag = star_tag
+
+    def as_set(self, include_weak=False):
+        """Convert the `ETags` object into a python set.  Per default all the
+        weak etags are not part of this set."""
+        rv = set(self._strong)
+        if include_weak:
+            rv.update(self._weak)
+        return rv
+
+    def is_weak(self, etag):
+        """Check if an etag is weak."""
+        return etag in self._weak
+
+    def is_strong(self, etag):
+        """Check if an etag is strong."""
+        return etag in self._strong
+
+    def contains_weak(self, etag):
+        """Check if an etag is part of the set including weak and strong tags."""
+        return self.is_weak(etag) or self.contains(etag)
+
+    def contains(self, etag):
+        """Check if an etag is part of the set ignoring weak tags.
+        It is also possible to use the ``in`` operator.
+        """
+        if self.star_tag:
+            return True
+        return self.is_strong(etag)
+
+    def contains_raw(self, etag):
+        """When passed a quoted tag it will check if this tag is part of the
+        set.  If the tag is weak it is checked against weak and strong tags,
+        otherwise strong only."""
+        etag, weak = unquote_etag(etag)
+        if weak:
+            return self.contains_weak(etag)
+        return self.contains(etag)
+
+    def to_header(self):
+        """Convert the etags set into a HTTP header string."""
+        if self.star_tag:
+            return "*"
+        return ", ".join(
+            ['"%s"' % x for x in self._strong] + ['W/"%s"' % x for x in self._weak]
+        )
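+
+    # Sketch (illustrative): ``ETags(["abc"], ["def"]).to_header()`` yields
+    # ``'"abc", W/"def"'``, and ``contains_raw('W/"def"')`` is ``True`` because
+    # weak tags are checked against both the weak and strong sets.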
+
+    def __call__(self, etag=None, data=None, include_weak=False):
+        if [etag, data].count(None) != 1:
+            raise TypeError("exactly one of etag or data must be provided")
+        if etag is None:
+            etag = generate_etag(data)
+        if include_weak:
+            if etag in self._weak:
+                return True
+        return etag in self._strong
+
+    def __bool__(self):
+        return bool(self.star_tag or self._strong or self._weak)
+
+    __nonzero__ = __bool__
+
+    def __str__(self):
+        return self.to_header()
+
+    def __iter__(self):
+        return iter(self._strong)
+
+    def __contains__(self, etag):
+        return self.contains(etag)
+
+    def __repr__(self):
+        return "<%s %r>" % (self.__class__.__name__, str(self))
+
+
+class IfRange(object):
+    """Very simple object that represents the `If-Range` header in parsed
+    form.  It will either have neither a etag or date or one of either but
+    never both.
+
+    .. versionadded:: 0.7
+    """
+
+    def __init__(self, etag=None, date=None):
+        #: The etag parsed and unquoted.  Ranges always operate on strong
+        #: etags so the weakness information is not necessary.
+        self.etag = etag
+        #: The date in parsed format or `None`.
+        self.date = date
+
+    def to_header(self):
+        """Converts the object back into an HTTP header."""
+        if self.date is not None:
+            return http_date(self.date)
+        if self.etag is not None:
+            return quote_etag(self.etag)
+        return ""
+
+    def __str__(self):
+        return self.to_header()
+
+    def __repr__(self):
+        return "<%s %r>" % (self.__class__.__name__, str(self))
+
+
+class Range(object):
+    """Represents a ``Range`` header. All methods only support only
+    bytes as the unit. Stores a list of ranges if given, but the methods
+    only work if only one range is provided.
+
+    :raise ValueError: If the ranges provided are invalid.
+
+    .. versionchanged:: 0.15
+        The ranges passed in are validated.
+
+    .. versionadded:: 0.7
+    """
+
+    def __init__(self, units, ranges):
+        #: The units of this range.  Usually "bytes".
+        self.units = units
+        #: A list of ``(begin, end)`` tuples for the range header provided.
+        #: The ranges are non-inclusive.
+        self.ranges = ranges
+
+        for start, end in ranges:
+            if start is None or (end is not None and (start < 0 or start >= end)):
+                raise ValueError("{} is not a valid range.".format((start, end)))
+
+    def range_for_length(self, length):
+        """If the range is for bytes, the length is not None and there is
+        exactly one range and it is satisfiable it returns a ``(start, stop)``
+        tuple, otherwise `None`.
+        """
+        if self.units != "bytes" or length is None or len(self.ranges) != 1:
+            return None
+        start, end = self.ranges[0]
+        if end is None:
+            end = length
+            if start < 0:
+                start += length
+        if is_byte_range_valid(start, end, length):
+            return start, min(end, length)
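+
+    # Sketch (illustrative): ``Range: bytes=0-499`` parses to
+    # ``Range("bytes", [(0, 500)])`` since ranges are stored non-inclusive,
+    # so ``range_for_length(1000)`` returns ``(0, 500)``.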
+
+    def make_content_range(self, length):
+        """Creates a :class:`~werkzeug.datastructures.ContentRange` object
+        from the current range and given content length.
+        """
+        rng = self.range_for_length(length)
+        if rng is not None:
+            return ContentRange(self.units, rng[0], rng[1], length)
+
+    def to_header(self):
+        """Converts the object back into an HTTP header."""
+        ranges = []
+        for begin, end in self.ranges:
+            if end is None:
+                ranges.append("%s-" % begin if begin >= 0 else str(begin))
+            else:
+                ranges.append("%s-%s" % (begin, end - 1))
+        return "%s=%s" % (self.units, ",".join(ranges))
+
+    def to_content_range_header(self, length):
+        """Converts the object into `Content-Range` HTTP header,
+        based on given length
+        """
+        range_for_length = self.range_for_length(length)
+        if range_for_length is not None:
+            return "%s %d-%d/%d" % (
+                self.units,
+                range_for_length[0],
+                range_for_length[1] - 1,
+                length,
+            )
+        return None
+
+    def __str__(self):
+        return self.to_header()
+
+    def __repr__(self):
+        return "<%s %r>" % (self.__class__.__name__, str(self))
+
+
+class ContentRange(object):
+    """Represents the content range header.
+
+    .. versionadded:: 0.7
+    """
+
+    def __init__(self, units, start, stop, length=None, on_update=None):
+        assert is_byte_range_valid(start, stop, length), "Bad range provided"
+        self.on_update = on_update
+        self.set(start, stop, length, units)
+
+    def _callback_property(name):  # noqa: B902
+        def fget(self):
+            return getattr(self, name)
+
+        def fset(self, value):
+            setattr(self, name, value)
+            if self.on_update is not None:
+                self.on_update(self)
+
+        return property(fget, fset)
+
+    #: The units to use, usually "bytes"
+    units = _callback_property("_units")
+    #: The start point of the range or `None`.
+    start = _callback_property("_start")
+    #: The stop point of the range (non-inclusive) or `None`.  Can only be
+    #: `None` if also start is `None`.
+    stop = _callback_property("_stop")
+    #: The length of the range or `None`.
+    length = _callback_property("_length")
+    del _callback_property
+
+    def set(self, start, stop, length=None, units="bytes"):
+        """Simple method to update the ranges."""
+        assert is_byte_range_valid(start, stop, length), "Bad range provided"
+        self._units = units
+        self._start = start
+        self._stop = stop
+        self._length = length
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def unset(self):
+        """Sets the units to `None` which indicates that the header should
+        no longer be used.
+        """
+        self.set(None, None, units=None)
+
+    def to_header(self):
+        if self.units is None:
+            return ""
+        if self.length is None:
+            length = "*"
+        else:
+            length = self.length
+        if self.start is None:
+            return "%s */%s" % (self.units, length)
+        return "%s %s-%s/%s" % (self.units, self.start, self.stop - 1, length)
+
+    def __nonzero__(self):
+        return self.units is not None
+
+    __bool__ = __nonzero__
+
+    def __str__(self):
+        return self.to_header()
+
+    def __repr__(self):
+        return "<%s %r>" % (self.__class__.__name__, str(self))
+
+
+class Authorization(ImmutableDictMixin, dict):
+    """Represents an `Authorization` header sent by the client.  You should
+    not create this kind of object yourself but use it when it's returned by
+    the `parse_authorization_header` function.
+
+    This object is a dict subclass and can be altered by setting dict items
+    but it should be considered immutable as it's returned by the client and
+    not meant for modifications.
+
+    .. versionchanged:: 0.5
+       This object became immutable.
+    """
+
+    def __init__(self, auth_type, data=None):
+        dict.__init__(self, data or {})
+        self.type = auth_type
+
+    username = property(
+        lambda self: self.get("username"),
+        doc="""
+        The username transmitted.  This is set for both basic and digest
+        auth all the time.""",
+    )
+    password = property(
+        lambda self: self.get("password"),
+        doc="""
+        When the authentication type is basic this is the password
+        transmitted by the client, else `None`.""",
+    )
+    realm = property(
+        lambda self: self.get("realm"),
+        doc="""
+        This is the server realm sent back for HTTP digest auth.""",
+    )
+    nonce = property(
+        lambda self: self.get("nonce"),
+        doc="""
+        The nonce the server sent for digest auth, sent back by the client.
+        A nonce should be unique for every 401 response for HTTP digest
+        auth.""",
+    )
+    uri = property(
+        lambda self: self.get("uri"),
+        doc="""
+        The URI from Request-URI of the Request-Line; duplicated because
+        proxies are allowed to change the Request-Line in transit.  HTTP
+        digest auth only.""",
+    )
+    nc = property(
+        lambda self: self.get("nc"),
+        doc="""
+        The nonce count value transmitted by clients if a qop-header is
+        also transmitted.  HTTP digest auth only.""",
+    )
+    cnonce = property(
+        lambda self: self.get("cnonce"),
+        doc="""
+        If the server sent a qop-header in the ``WWW-Authenticate``
+        header, the client has to provide this value for HTTP digest auth.
+        See the RFC for more details.""",
+    )
+    response = property(
+        lambda self: self.get("response"),
+        doc="""
+        A string of 32 hex digits computed as defined in RFC 2617, which
+        proves that the user knows a password.  Digest auth only.""",
+    )
+    opaque = property(
+        lambda self: self.get("opaque"),
+        doc="""
+        The opaque header from the server returned unchanged by the client.
+        It is recommended that this string be base64 or hexadecimal data.
+        Digest auth only.""",
+    )
+    qop = property(
+        lambda self: self.get("qop"),
+        doc="""
+        Indicates what "quality of protection" the client has applied to
+        the message for HTTP digest auth. Note that this is a single token,
+        not a quoted list of alternatives as in WWW-Authenticate.""",
+    )
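+
+    # Sketch (illustrative): ``parse_authorization_header("Basic dXNlcjpwYXNz")``
+    # returns ``Authorization("basic", {"username": "user", "password": "pass"})``.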
+
+
+class WWWAuthenticate(UpdateDictMixin, dict):
+    """Provides simple access to `WWW-Authenticate` headers."""
+
+    #: list of keys that require quoting in the generated header
+    _require_quoting = frozenset(["domain", "nonce", "opaque", "realm", "qop"])
+
+    def __init__(self, auth_type=None, values=None, on_update=None):
+        dict.__init__(self, values or ())
+        if auth_type:
+            self["__auth_type__"] = auth_type
+        self.on_update = on_update
+
+    def set_basic(self, realm="authentication required"):
+        """Clear the auth info and enable basic auth."""
+        dict.clear(self)
+        dict.update(self, {"__auth_type__": "basic", "realm": realm})
+        if self.on_update:
+            self.on_update(self)
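+
+    # Sketch (illustrative): after ``set_basic("login")`` the
+    # :meth:`to_header` result is ``'Basic realm="login"'``.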
+
+    def set_digest(
+        self, realm, nonce, qop=("auth",), opaque=None, algorithm=None, stale=False
+    ):
+        """Clear the auth info and enable digest auth."""
+        d = {
+            "__auth_type__": "digest",
+            "realm": realm,
+            "nonce": nonce,
+            "qop": dump_header(qop),
+        }
+        if stale:
+            d["stale"] = "TRUE"
+        if opaque is not None:
+            d["opaque"] = opaque
+        if algorithm is not None:
+            d["algorithm"] = algorithm
+        dict.clear(self)
+        dict.update(self, d)
+        if self.on_update:
+            self.on_update(self)
+
+    def to_header(self):
+        """Convert the stored values into a WWW-Authenticate header."""
+        d = dict(self)
+        auth_type = d.pop("__auth_type__", None) or "basic"
+        return "%s %s" % (
+            auth_type.title(),
+            ", ".join(
+                [
+                    "%s=%s"
+                    % (
+                        key,
+                        quote_header_value(
+                            value, allow_token=key not in self._require_quoting
+                        ),
+                    )
+                    for key, value in iteritems(d)
+                ]
+            ),
+        )
+
+    def __str__(self):
+        return self.to_header()
+
+    def __repr__(self):
+        return "<%s %r>" % (self.__class__.__name__, self.to_header())
+
+    def auth_property(name, doc=None):  # noqa: B902
+        """A static helper function for subclasses to add extra authentication
+        system properties onto a class::
+
+            class FooAuthenticate(WWWAuthenticate):
+                special_realm = auth_property('special_realm')
+
+        For more information have a look at the source code to see how the
+        regular properties (:attr:`realm` etc.) are implemented.
+        """
+
+        def _set_value(self, value):
+            if value is None:
+                self.pop(name, None)
+            else:
+                self[name] = str(value)
+
+        return property(lambda x: x.get(name), _set_value, doc=doc)
+
+    def _set_property(name, doc=None):  # noqa: B902
+        def fget(self):
+            def on_update(header_set):
+                if not header_set and name in self:
+                    del self[name]
+                elif header_set:
+                    self[name] = header_set.to_header()
+
+            return parse_set_header(self.get(name), on_update)
+
+        return property(fget, doc=doc)
+
+    type = auth_property(
+        "__auth_type__",
+        doc="""The type of the auth mechanism. HTTP currently specifies
+        ``Basic`` and ``Digest``.""",
+    )
+    realm = auth_property(
+        "realm",
+        doc="""A string to be displayed to users so they know which
+        username and password to use. This string should contain at
+        least the name of the host performing the authentication and
+        might additionally indicate the collection of users who might
+        have access.""",
+    )
+    domain = _set_property(
+        "domain",
+        doc="""A list of URIs that define the protection space. If a URI
+        is an absolute path, it is relative to the canonical root URL of
+        the server being accessed.""",
+    )
+    nonce = auth_property(
+        "nonce",
+        doc="""
+        A server-specified data string which should be uniquely generated
+        each time a 401 response is made. It is recommended that this
+        string be base64 or hexadecimal data.""",
+    )
+    opaque = auth_property(
+        "opaque",
+        doc="""A string of data, specified by the server, which should
+        be returned by the client unchanged in the Authorization header
+        of subsequent requests with URIs in the same protection space.
+        It is recommended that this string be base64 or hexadecimal
+        data.""",
+    )
+    algorithm = auth_property(
+        "algorithm",
+        doc="""A string indicating a pair of algorithms used to produce
+        the digest and a checksum. If this is not present it is assumed
+        to be "MD5". If the algorithm is not understood, the challenge
+        should be ignored (and a different one used, if there is more
+        than one).""",
+    )
+    qop = _set_property(
+        "qop",
+        doc="""A set of quality-of-privacy directives such as auth and
+        auth-int.""",
+    )
+
+    @property
+    def stale(self):
+        """A flag, indicating that the previous request from the client
+        was rejected because the nonce value was stale.
+        """
+        val = self.get("stale")
+        if val is not None:
+            return val.lower() == "true"
+
+    @stale.setter
+    def stale(self, value):
+        if value is None:
+            self.pop("stale", None)
+        else:
+            self["stale"] = "TRUE" if value else "FALSE"
+
+    auth_property = staticmethod(auth_property)
+    del _set_property
+
+
+class FileStorage(object):
+    """The :class:`FileStorage` class is a thin wrapper over incoming files.
+    It is used by the request object to represent uploaded files.  All the
+    attributes of the wrapper stream are proxied by the file storage so
+    it's possible to do ``storage.read()`` instead of the long form
+    ``storage.stream.read()``.
+    """
+
+    def __init__(
+        self,
+        stream=None,
+        filename=None,
+        name=None,
+        content_type=None,
+        content_length=None,
+        headers=None,
+    ):
+        self.name = name
+        self.stream = stream or BytesIO()
+
+        # If no filename is provided we can attempt to get the filename
+        # from the stream object passed.  In that case we have to be careful
+        # to skip things like <fdopen>, <stderr> etc.  Python marks these
+        # special filenames with angle brackets.
+        if filename is None:
+            filename = getattr(stream, "name", None)
+            s = make_literal_wrapper(filename)
+            if filename and filename[0] == s("<") and filename[-1] == s(">"):
+                filename = None
+
+            # On Python 3 we want to make sure the filename is always unicode.
+            # This might not be if the name attribute is bytes due to the
+            # file being opened from the bytes API.
+            if not PY2 and isinstance(filename, bytes):
+                filename = filename.decode(get_filesystem_encoding(), "replace")
+
+        self.filename = filename
+        if headers is None:
+            headers = Headers()
+        self.headers = headers
+        if content_type is not None:
+            headers["Content-Type"] = content_type
+        if content_length is not None:
+            headers["Content-Length"] = str(content_length)
+
+    def _parse_content_type(self):
+        if not hasattr(self, "_parsed_content_type"):
+            self._parsed_content_type = parse_options_header(self.content_type)
+
+    @property
+    def content_type(self):
+        """The content-type sent in the header.  Usually not available"""
+        return self.headers.get("content-type")
+
+    @property
+    def content_length(self):
+        """The content-length sent in the header.  Usually not available"""
+        return int(self.headers.get("content-length") or 0)
+
+    @property
+    def mimetype(self):
+        """Like :attr:`content_type`, but without parameters (eg, without
+        charset, type etc.) and always lowercase.  For example if the content
+        type is ``text/HTML; charset=utf-8`` the mimetype would be
+        ``'text/html'``.
+
+        .. versionadded:: 0.7
+        """
+        self._parse_content_type()
+        return self._parsed_content_type[0].lower()
+
+    @property
+    def mimetype_params(self):
+        """The mimetype parameters as dict.  For example if the content
+        type is ``text/html; charset=utf-8`` the params would be
+        ``{'charset': 'utf-8'}``.
+
+        .. versionadded:: 0.7
+        """
+        self._parse_content_type()
+        return self._parsed_content_type[1]
+
+    def save(self, dst, buffer_size=16384):
+        """Save the file to a destination path or file object.  If the
+        destination is a file object you have to close it yourself after the
+        call.  The buffer size is the number of bytes held in memory during
+        the copy process.  It defaults to 16KB.
+
+        For secure file saving also have a look at :func:`secure_filename`.
+
+        :param dst: a filename or open file object the uploaded file
+                    is saved to.
+        :param buffer_size: the size of the buffer.  This works the same as
+                            the `length` parameter of
+                            :func:`shutil.copyfileobj`.
+        """
+        from shutil import copyfileobj
+
+        close_dst = False
+        if isinstance(dst, string_types):
+            dst = open(dst, "wb")
+            close_dst = True
+        try:
+            copyfileobj(self.stream, dst, buffer_size)
+        finally:
+            if close_dst:
+                dst.close()
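+
+    # Usage sketch (illustrative, not from the original source): in a request
+    # handler an upload is typically persisted like this (``upload_dir`` is a
+    # hypothetical destination directory)::
+    #
+    #     file = request.files["upload"]  # a FileStorage instance
+    #     file.save(os.path.join(upload_dir, secure_filename(file.filename)))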
+
+    def close(self):
+        """Close the underlying file if possible."""
+        try:
+            self.stream.close()
+        except Exception:
+            pass
+
+    def __nonzero__(self):
+        return bool(self.filename)
+
+    __bool__ = __nonzero__
+
+    def __getattr__(self, name):
+        try:
+            return getattr(self.stream, name)
+        except AttributeError:
+            # SpooledTemporaryFile doesn't implement IOBase, get the
+            # attribute from its backing file instead.
+            # https://github.com/python/cpython/pull/3249
+            if hasattr(self.stream, "_file"):
+                return getattr(self.stream._file, name)
+            raise
+
+    def __iter__(self):
+        return iter(self.stream)
+
+    def __repr__(self):
+        return "<%s: %r (%r)>" % (
+            self.__class__.__name__,
+            self.filename,
+            self.content_type,
+        )
+
+
+# circular dependencies
+from . import exceptions
+from .http import dump_header
+from .http import dump_options_header
+from .http import generate_etag
+from .http import http_date
+from .http import is_byte_range_valid
+from .http import parse_options_header
+from .http import parse_set_header
+from .http import quote_etag
+from .http import quote_header_value
+from .http import unquote_etag
diff --git a/lib/python3.7/site-packages/werkzeug/debug/__init__.py b/lib/python3.7/site-packages/werkzeug/debug/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bb374eaf1d51bf5bf8825d5255f87c2d319b6d7
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/debug/__init__.py
@@ -0,0 +1,511 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.debug
+    ~~~~~~~~~~~~~~
+
+    WSGI application traceback debugger.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import getpass
+import hashlib
+import json
+import mimetypes
+import os
+import pkgutil
+import re
+import sys
+import time
+import uuid
+from itertools import chain
+from os.path import basename
+from os.path import join
+
+from .._compat import text_type
+from .._internal import _log
+from ..http import parse_cookie
+from ..security import gen_salt
+from ..wrappers import BaseRequest as Request
+from ..wrappers import BaseResponse as Response
+from .console import Console
+from .repr import debug_repr as _debug_repr
+from .tbtools import get_current_traceback
+from .tbtools import render_console_html
+
+
+def debug_repr(*args, **kwargs):
+    import warnings
+
+    warnings.warn(
+        "'debug_repr' has moved to 'werkzeug.debug.repr.debug_repr'"
+        " as of version 0.7. This old import will be removed in version"
+        " 1.0.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return _debug_repr(*args, **kwargs)
+
+
+# A week
+PIN_TIME = 60 * 60 * 24 * 7
+
+
+def hash_pin(pin):
+    if isinstance(pin, text_type):
+        pin = pin.encode("utf-8", "replace")
+    return hashlib.md5(pin + b"shittysalt").hexdigest()[:12]
+
+
+_machine_id = None
+
+
+def get_machine_id():
+    global _machine_id
+    rv = _machine_id
+    if rv is not None:
+        return rv
+
+    def _generate():
+        # Potential sources of secret information on Linux.  The machine-id
+        # is stable across boots, the boot id is not.
+        for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
+            try:
+                with open(filename, "rb") as f:
+                    return f.readline().strip()
+            except IOError:
+                continue
+
+        # On OS X we can use the computer's serial number assuming that
+        # ioreg exists and can spit out that information.
+        try:
+            # Also catch import errors: subprocess may not be available, e.g.
+            # Google App Engine
+            # See https://github.com/pallets/werkzeug/issues/925
+            from subprocess import Popen, PIPE
+
+            dump = Popen(
+                ["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"], stdout=PIPE
+            ).communicate()[0]
+            match = re.search(b'"serial-number" = <([^>]+)', dump)
+            if match is not None:
+                return match.group(1)
+        except (OSError, ImportError):
+            pass
+
+        # On Windows we can use winreg to get the machine guid
+        wr = None
+        try:
+            import winreg as wr
+        except ImportError:
+            try:
+                import _winreg as wr
+            except ImportError:
+                pass
+        if wr is not None:
+            try:
+                with wr.OpenKey(
+                    wr.HKEY_LOCAL_MACHINE,
+                    "SOFTWARE\\Microsoft\\Cryptography",
+                    0,
+                    wr.KEY_READ | wr.KEY_WOW64_64KEY,
+                ) as rk:
+                    machineGuid, wrType = wr.QueryValueEx(rk, "MachineGuid")
+                    if wrType == wr.REG_SZ:
+                        return machineGuid.encode("utf-8")
+                    else:
+                        return machineGuid
+            except WindowsError:
+                pass
+
+    _machine_id = rv = _generate()
+    return rv
+
+
+class _ConsoleFrame(object):
+    """Helper class so that we can reuse the frame console code for the
+    standalone console.
+    """
+
+    def __init__(self, namespace):
+        self.console = Console(namespace)
+        self.id = 0
+
+
+def get_pin_and_cookie_name(app):
+    """Given an application object this returns a semi-stable 9 digit pin
+    code and a random key.  The hope is that this is stable between
+    restarts to not make debugging particularly frustrating.  If the pin
+    was forcefully disabled this returns `None`.
+
+    Second item in the resulting tuple is the cookie name for remembering.
+    """
+    pin = os.environ.get("WERKZEUG_DEBUG_PIN")
+    rv = None
+    num = None
+
+    # Pin was explicitly disabled
+    if pin == "off":
+        return None, None
+
+    # Pin was provided explicitly
+    if pin is not None and pin.replace("-", "").isdigit():
+        # If there are separators in the pin, return it directly
+        if "-" in pin:
+            rv = pin
+        else:
+            num = pin
+
+    modname = getattr(app, "__module__", getattr(app.__class__, "__module__"))
+
+    try:
+        # getuser imports the pwd module, which does not exist in Google
+        # App Engine. It may also raise a KeyError if the UID does not
+        # have a username, such as in Docker.
+        username = getpass.getuser()
+    except (ImportError, KeyError):
+        username = None
+
+    mod = sys.modules.get(modname)
+
+    # This information only exists to make the cookie unique on the
+    # computer, not as a security feature.
+    probably_public_bits = [
+        username,
+        modname,
+        getattr(app, "__name__", getattr(app.__class__, "__name__")),
+        getattr(mod, "__file__", None),
+    ]
+
+    # This information is here to make it harder for an attacker to
+    # guess the cookie name.  They are unlikely to be contained anywhere
+    # within the unauthenticated debug page.
+    private_bits = [str(uuid.getnode()), get_machine_id()]
+
+    h = hashlib.md5()
+    for bit in chain(probably_public_bits, private_bits):
+        if not bit:
+            continue
+        if isinstance(bit, text_type):
+            bit = bit.encode("utf-8")
+        h.update(bit)
+    h.update(b"cookiesalt")
+
+    cookie_name = "__wzd" + h.hexdigest()[:20]
+
+    # If we need to generate a pin we salt it a bit more so that we don't
+    # end up with the same value, and generate our 9 digits.
+    if num is None:
+        h.update(b"pinsalt")
+        num = ("%09d" % int(h.hexdigest(), 16))[:9]
+
+    # Format the pincode in groups of digits for easier remembering if
+    # we don't have a result yet.
+    if rv is None:
+        for group_size in 5, 4, 3:
+            if len(num) % group_size == 0:
+                rv = "-".join(
+                    num[x : x + group_size].rjust(group_size, "0")
+                    for x in range(0, len(num), group_size)
+                )
+                break
+        else:
+            rv = num
+
+    return rv, cookie_name
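+
+# Sketch (illustrative): the 9 digit pin is grouped with the first size in
+# ``(5, 4, 3)`` that divides its length evenly, so a generated value like
+# ``123456789`` is presented as ``123-456-789``.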
+
+
+class DebuggedApplication(object):
+    """Enables debugging support for a given application::
+
+        from werkzeug.debug import DebuggedApplication
+        from myapp import app
+        app = DebuggedApplication(app, evalex=True)
+
+    The `evalex` keyword argument allows evaluating expressions in a
+    traceback's frame context.
+
+    .. versionchanged:: 0.9
+       The `lodgeit_url` parameter was deprecated.
+
+    :param app: the WSGI application to run debugged.
+    :param evalex: enable exception evaluation feature (interactive
+                   debugging).  This requires a non-forking server.
+    :param request_key: The key that points to the request object in the
+                        environment.  This parameter is ignored in current
+                        versions.
+    :param console_path: the URL for a general purpose console.
+    :param console_init_func: the function that is executed before starting
+                              the general purpose console.  The return value
+                              is used as initial namespace.
+    :param show_hidden_frames: by default hidden traceback frames are skipped.
+                               You can show them by setting this parameter
+                               to `True`.
+    :param pin_security: can be used to disable the pin based security system.
+    :param pin_logging: enables the logging of the pin system.
+    """
+
+    def __init__(
+        self,
+        app,
+        evalex=False,
+        request_key="werkzeug.request",
+        console_path="/console",
+        console_init_func=None,
+        show_hidden_frames=False,
+        lodgeit_url=None,
+        pin_security=True,
+        pin_logging=True,
+    ):
+        if lodgeit_url is not None:
+            from warnings import warn
+
+            warn(
+                "'lodgeit_url' is no longer used as of version 0.9 and"
+                " will be removed in version 1.0. Werkzeug uses"
+                " https://gist.github.com/ instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+        if not console_init_func:
+            console_init_func = None
+        self.app = app
+        self.evalex = evalex
+        self.frames = {}
+        self.tracebacks = {}
+        self.request_key = request_key
+        self.console_path = console_path
+        self.console_init_func = console_init_func
+        self.show_hidden_frames = show_hidden_frames
+        self.secret = gen_salt(20)
+        self._failed_pin_auth = 0
+
+        self.pin_logging = pin_logging
+        if pin_security:
+            # Print out the pin for the debugger on standard out.
+            if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
+                _log("warning", " * Debugger is active!")
+                if self.pin is None:
+                    _log("warning", " * Debugger PIN disabled. DEBUGGER UNSECURED!")
+                else:
+                    _log("info", " * Debugger PIN: %s" % self.pin)
+        else:
+            self.pin = None
+
+    def _get_pin(self):
+        if not hasattr(self, "_pin"):
+            self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
+        return self._pin
+
+    def _set_pin(self, value):
+        self._pin = value
+
+    pin = property(_get_pin, _set_pin)
+    del _get_pin, _set_pin
+
+    @property
+    def pin_cookie_name(self):
+        """The name of the pin cookie."""
+        if not hasattr(self, "_pin_cookie"):
+            self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
+        return self._pin_cookie
+
+    def debug_application(self, environ, start_response):
+        """Run the application and conserve the traceback frames."""
+        app_iter = None
+        try:
+            app_iter = self.app(environ, start_response)
+            for item in app_iter:
+                yield item
+            if hasattr(app_iter, "close"):
+                app_iter.close()
+        except Exception:
+            if hasattr(app_iter, "close"):
+                app_iter.close()
+            traceback = get_current_traceback(
+                skip=1,
+                show_hidden_frames=self.show_hidden_frames,
+                ignore_system_exceptions=True,
+            )
+            for frame in traceback.frames:
+                self.frames[frame.id] = frame
+            self.tracebacks[traceback.id] = traceback
+
+            try:
+                start_response(
+                    "500 INTERNAL SERVER ERROR",
+                    [
+                        ("Content-Type", "text/html; charset=utf-8"),
+                        # Disable Chrome's XSS protection, the debug
+                        # output can cause false-positives.
+                        ("X-XSS-Protection", "0"),
+                    ],
+                )
+            except Exception:
+                # If we end up here there has been output, but an error
+                # occurred afterwards.  In that situation we can't do
+                # anything fancy any more; better to log something into the
+                # error log and fall back gracefully.
+                environ["wsgi.errors"].write(
+                    "Debugging middleware caught exception in streamed "
+                    "response at a point where response headers were already "
+                    "sent.\n"
+                )
+            else:
+                is_trusted = bool(self.check_pin_trust(environ))
+                yield traceback.render_full(
+                    evalex=self.evalex, evalex_trusted=is_trusted, secret=self.secret
+                ).encode("utf-8", "replace")
+
+            traceback.log(environ["wsgi.errors"])
+
+    def execute_command(self, request, command, frame):
+        """Execute a command in a console."""
+        return Response(frame.console.eval(command), mimetype="text/html")
+
+    def display_console(self, request):
+        """Display a standalone shell."""
+        if 0 not in self.frames:
+            if self.console_init_func is None:
+                ns = {}
+            else:
+                ns = dict(self.console_init_func())
+            ns.setdefault("app", self.app)
+            self.frames[0] = _ConsoleFrame(ns)
+        is_trusted = bool(self.check_pin_trust(request.environ))
+        return Response(
+            render_console_html(secret=self.secret, evalex_trusted=is_trusted),
+            mimetype="text/html",
+        )
+
+    def paste_traceback(self, request, traceback):
+        """Paste the traceback and return a JSON response."""
+        rv = traceback.paste()
+        return Response(json.dumps(rv), mimetype="application/json")
+
+    def get_resource(self, request, filename):
+        """Return a static resource from the shared folder."""
+        filename = join("shared", basename(filename))
+        try:
+            data = pkgutil.get_data(__package__, filename)
+        except OSError:
+            data = None
+        if data is not None:
+            mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
+            return Response(data, mimetype=mimetype)
+        return Response("Not Found", status=404)
+
+    def check_pin_trust(self, environ):
+        """Checks if the request passed the pin test.  This returns `True` if the
+        request is trusted on a pin/cookie basis and returns `False` if not.
+        Additionally if the cookie's stored pin hash is wrong it will return
+        `None` so that appropriate action can be taken.
+        """
+        if self.pin is None:
+            return True
+        val = parse_cookie(environ).get(self.pin_cookie_name)
+        if not val or "|" not in val:
+            return False
+        ts, pin_hash = val.split("|", 1)
+        if not ts.isdigit():
+            return False
+        if pin_hash != hash_pin(self.pin):
+            return None
+        return (time.time() - PIN_TIME) < int(ts)
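+
+    # Sketch (illustrative): the cookie value has the form
+    # ``"<unix timestamp>|<hash_pin(pin)>"``; trust expires once the timestamp
+    # is older than ``PIN_TIME`` (one week).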
+
+    def _fail_pin_auth(self):
+        time.sleep(5.0 if self._failed_pin_auth > 5 else 0.5)
+        self._failed_pin_auth += 1
+
+    def pin_auth(self, request):
+        """Authenticates with the pin."""
+        exhausted = False
+        auth = False
+        trust = self.check_pin_trust(request.environ)
+
+        # If the trust return value is `None` it means that the cookie is
+        # set but the stored pin hash value is bad.  This means that the
+        # pin was changed.  In this case we count a bad auth and unset the
+        # cookie.  This way it becomes harder to guess the cookie name
+        # instead of the pin as we still count up failures.
+        bad_cookie = False
+        if trust is None:
+            self._fail_pin_auth()
+            bad_cookie = True
+
+        # If we're trusted, we're authenticated.
+        elif trust:
+            auth = True
+
+        # If we failed too many times, then we're locked out.
+        elif self._failed_pin_auth > 10:
+            exhausted = True
+
+        # Otherwise go through pin based authentication
+        else:
+            # Default to an empty string so a missing "pin" argument fails
+            # authentication instead of raising an AttributeError.
+            entered_pin = request.args.get("pin", "")
+            if entered_pin.strip().replace("-", "") == self.pin.replace("-", ""):
+                self._failed_pin_auth = 0
+                auth = True
+            else:
+                self._fail_pin_auth()
+
+        rv = Response(
+            json.dumps({"auth": auth, "exhausted": exhausted}),
+            mimetype="application/json",
+        )
+        if auth:
+            rv.set_cookie(
+                self.pin_cookie_name,
+                "%s|%s" % (int(time.time()), hash_pin(self.pin)),
+                httponly=True,
+            )
+        elif bad_cookie:
+            rv.delete_cookie(self.pin_cookie_name)
+        return rv
+
+    def log_pin_request(self):
+        """Log the pin if needed."""
+        if self.pin_logging and self.pin is not None:
+            _log(
+                "info", " * To enable the debugger you need to enter the security pin:"
+            )
+            _log("info", " * Debugger pin code: %s" % self.pin)
+        return Response("")
+
+    def __call__(self, environ, start_response):
+        """Dispatch the requests."""
+        # important: don't ever access a function here that reads the incoming
+        # form data!  Otherwise the application won't have access to that data
+        # any more!
+        request = Request(environ)
+        response = self.debug_application
+        if request.args.get("__debugger__") == "yes":
+            cmd = request.args.get("cmd")
+            arg = request.args.get("f")
+            secret = request.args.get("s")
+            traceback = self.tracebacks.get(request.args.get("tb", type=int))
+            frame = self.frames.get(request.args.get("frm", type=int))
+            if cmd == "resource" and arg:
+                response = self.get_resource(request, arg)
+            elif cmd == "paste" and traceback is not None and secret == self.secret:
+                response = self.paste_traceback(request, traceback)
+            elif cmd == "pinauth" and secret == self.secret:
+                response = self.pin_auth(request)
+            elif cmd == "printpin" and secret == self.secret:
+                response = self.log_pin_request()
+            elif (
+                self.evalex
+                and cmd is not None
+                and frame is not None
+                and self.secret == secret
+                and self.check_pin_trust(environ)
+            ):
+                response = self.execute_command(request, cmd, frame)
+        elif (
+            self.evalex
+            and self.console_path is not None
+            and request.path == self.console_path
+        ):
+            response = self.display_console(request)
+        return response(environ, start_response)
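+
+
+# Illustrative note (not upstream code): with the dispatch above, a console
+# evaluation from debugger.js arrives as a GET like
+#     /?__debugger__=yes&cmd=<source code>&frm=<frame id>&s=<SECRET>
+# and is only executed once check_pin_trust() accepts the pin cookie.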
diff --git a/lib/python3.7/site-packages/werkzeug/debug/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/debug/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fdfa1fa5e3d2a87332658eb78fbb4373c1d97885
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/debug/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/debug/__pycache__/console.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/debug/__pycache__/console.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..600ee0ad97beabc088597ab094537c87463dbfc8
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/debug/__pycache__/console.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/debug/__pycache__/repr.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/debug/__pycache__/repr.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e67781ad6e0774a415cda8dd2b2084e3f44cb46b
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/debug/__pycache__/repr.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/debug/__pycache__/tbtools.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/debug/__pycache__/tbtools.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74efb64d90d7020c4b361f66fdf747e971e87eaa
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/debug/__pycache__/tbtools.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/debug/console.py b/lib/python3.7/site-packages/werkzeug/debug/console.py
new file mode 100644
index 0000000000000000000000000000000000000000..adbd170b75662ce28f4c1c01f34cd8be20eb7986
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/debug/console.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.debug.console
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    Interactive console support.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import code
+import sys
+from types import CodeType
+
+from ..local import Local
+from ..utils import escape
+from .repr import debug_repr
+from .repr import dump
+from .repr import helper
+
+
+_local = Local()
+
+
+class HTMLStringO(object):
+    """A StringO version that HTML escapes on write."""
+
+    def __init__(self):
+        self._buffer = []
+
+    def isatty(self):
+        return False
+
+    def close(self):
+        pass
+
+    def flush(self):
+        pass
+
+    def seek(self, n, mode=0):
+        pass
+
+    def readline(self):
+        if len(self._buffer) == 0:
+            return ""
+        ret = self._buffer[0]
+        del self._buffer[0]
+        return ret
+
+    def reset(self):
+        val = "".join(self._buffer)
+        del self._buffer[:]
+        return val
+
+    def _write(self, x):
+        if isinstance(x, bytes):
+            x = x.decode("utf-8", "replace")
+        self._buffer.append(x)
+
+    def write(self, x):
+        self._write(escape(x))
+
+    def writelines(self, x):
+        self._write(escape("".join(x)))
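+
+    # Illustrative note (not upstream code): write() escapes its input, so
+    # HTMLStringO().write("<b>") buffers "&lt;b&gt;"; _write() is the raw
+    # path for callers that already emit HTML (e.g. debug_repr output).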
+
+
+class ThreadedStream(object):
+    """Thread-local wrapper for sys.stdout for the interactive console."""
+
+    @staticmethod
+    def push():
+        if not isinstance(sys.stdout, ThreadedStream):
+            sys.stdout = ThreadedStream()
+        _local.stream = HTMLStringO()
+
+    @staticmethod
+    def fetch():
+        try:
+            stream = _local.stream
+        except AttributeError:
+            return ""
+        return stream.reset()
+
+    @staticmethod
+    def displayhook(obj):
+        try:
+            stream = _local.stream
+        except AttributeError:
+            return _displayhook(obj)
+        # stream._write bypasses escaping as debug_repr is
+        # already generating HTML for us.
+        if obj is not None:
+            _local._current_ipy.locals["_"] = obj
+            stream._write(debug_repr(obj))
+
+    def __setattr__(self, name, value):
+        raise AttributeError("read only attribute %s" % name)
+
+    def __dir__(self):
+        return dir(sys.__stdout__)
+
+    def __getattribute__(self, name):
+        if name == "__members__":
+            return dir(sys.__stdout__)
+        try:
+            stream = _local.stream
+        except AttributeError:
+            stream = sys.__stdout__
+        return getattr(stream, name)
+
+    def __repr__(self):
+        return repr(sys.__stdout__)
+
+
+# add the threaded stream as display hook
+_displayhook = sys.displayhook
+sys.displayhook = ThreadedStream.displayhook
+
+
+class _ConsoleLoader(object):
+    def __init__(self):
+        self._storage = {}
+
+    def register(self, code, source):
+        self._storage[id(code)] = source
+        # register code objects of wrapped functions too.
+        for var in code.co_consts:
+            if isinstance(var, CodeType):
+                self._storage[id(var)] = source
+
+    def get_source_by_code(self, code):
+        try:
+            return self._storage[id(code)]
+        except KeyError:
+            pass
+
+
+def _wrap_compiler(console):
+    compile = console.compile
+
+    def func(source, filename, symbol):
+        code = compile(source, filename, symbol)
+        console.loader.register(code, source)
+        return code
+
+    console.compile = func
+
+
+class _InteractiveConsole(code.InteractiveInterpreter):
+    def __init__(self, globals, locals):
+        code.InteractiveInterpreter.__init__(self, locals)
+        self.globals = dict(globals)
+        self.globals["dump"] = dump
+        self.globals["help"] = helper
+        self.globals["__loader__"] = self.loader = _ConsoleLoader()
+        self.more = False
+        self.buffer = []
+        _wrap_compiler(self)
+
+    def runsource(self, source):
+        source = source.rstrip() + "\n"
+        ThreadedStream.push()
+        prompt = "... " if self.more else ">>> "
+        try:
+            source_to_eval = "".join(self.buffer + [source])
+            if code.InteractiveInterpreter.runsource(
+                self, source_to_eval, "<debugger>", "single"
+            ):
+                self.more = True
+                self.buffer.append(source)
+            else:
+                self.more = False
+                del self.buffer[:]
+        finally:
+            output = ThreadedStream.fetch()
+        return prompt + escape(source) + output
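+        # Illustrative note (not upstream code): incomplete input such as
+        # "def f():" makes the base runsource() return True, so the line
+        # stays buffered and the whole buffer is re-compiled on the next
+        # call until it forms a complete unit.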
+
+    def runcode(self, code):
+        try:
+            eval(code, self.globals, self.locals)
+        except Exception:
+            self.showtraceback()
+
+    def showtraceback(self):
+        from .tbtools import get_current_traceback
+
+        tb = get_current_traceback(skip=1)
+        sys.stdout._write(tb.render_summary())
+
+    def showsyntaxerror(self, filename=None):
+        from .tbtools import get_current_traceback
+
+        tb = get_current_traceback(skip=4)
+        sys.stdout._write(tb.render_summary())
+
+    def write(self, data):
+        sys.stdout.write(data)
+
+
+class Console(object):
+    """An interactive console."""
+
+    def __init__(self, globals=None, locals=None):
+        if locals is None:
+            locals = {}
+        if globals is None:
+            globals = {}
+        self._ipy = _InteractiveConsole(globals, locals)
+
+    def eval(self, code):
+        _local._current_ipy = self._ipy
+        old_sys_stdout = sys.stdout
+        try:
+            return self._ipy.runsource(code)
+        finally:
+            sys.stdout = old_sys_stdout
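+
+
+# Illustrative usage (sketch, not upstream code):
+#     html = Console({"x": 40}).eval("x + 2")
+# returns an HTML string echoing the ">>> " prompt followed by the debug
+# repr of 42.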
diff --git a/lib/python3.7/site-packages/werkzeug/debug/repr.py b/lib/python3.7/site-packages/werkzeug/debug/repr.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7a7285ca95dfbc64d765d1a949580d07f2f7a44
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/debug/repr.py
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.debug.repr
+    ~~~~~~~~~~~~~~~~~~~
+
+    This module implements object representations for debugging purposes.
+    Unlike the default repr these reprs expose a lot more information and
+    produce HTML instead of ASCII.
+
+    Together with the CSS and JavaScript files of the debugger this gives
+    a colorful and more compact output.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import codecs
+import re
+import sys
+from collections import deque
+from traceback import format_exception_only
+
+from .._compat import integer_types
+from .._compat import iteritems
+from .._compat import PY2
+from .._compat import string_types
+from .._compat import text_type
+from ..utils import escape
+
+
+missing = object()
+_paragraph_re = re.compile(r"(?:\r\n|\r|\n){2,}")
+RegexType = type(_paragraph_re)
+
+
+HELP_HTML = """\
+<div class=box>
+  <h3>%(title)s</h3>
+  <pre class=help>%(text)s</pre>
+</div>\
+"""
+OBJECT_DUMP_HTML = """\
+<div class=box>
+  <h3>%(title)s</h3>
+  %(repr)s
+  <table>%(items)s</table>
+</div>\
+"""
+
+
+def debug_repr(obj):
+    """Creates a debug repr of an object as HTML unicode string."""
+    return DebugReprGenerator().repr(obj)
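+    # Illustrative note (not upstream code): debug_repr([1, "two"]) yields
+    #     [<span class="number">1</span>, <span class="string">'two'</span>]
+    # which the debugger's stylesheet colorizes.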
+
+
+def dump(obj=missing):
+    """Print the object details to ``sys.stdout._write`` (for the
+    interactive console of the web debugger).
+    """
+    gen = DebugReprGenerator()
+    if obj is missing:
+        rv = gen.dump_locals(sys._getframe(1).f_locals)
+    else:
+        rv = gen.dump_object(obj)
+    sys.stdout._write(rv)
+
+
+class _Helper(object):
+    """Displays an HTML version of the normal help.  Only usable from the
+    interactive debugger because it requires a patched ``sys.stdout``.
+    """
+
+    def __repr__(self):
+        return "Type help(object) for help about object."
+
+    def __call__(self, topic=None):
+        if topic is None:
+            sys.stdout._write("<span class=help>%s</span>" % repr(self))
+            return
+        import pydoc
+
+        pydoc.help(topic)
+        rv = sys.stdout.reset()
+        if isinstance(rv, bytes):
+            rv = rv.decode("utf-8", "ignore")
+        paragraphs = _paragraph_re.split(rv)
+        if len(paragraphs) > 1:
+            title = paragraphs[0]
+            text = "\n\n".join(paragraphs[1:])
+        else:  # pragma: no cover
+            title = "Help"
+            text = paragraphs[0]
+        sys.stdout._write(HELP_HTML % {"title": title, "text": text})
+
+
+helper = _Helper()
+
+
+def _add_subclass_info(inner, obj, base):
+    if isinstance(base, tuple):
+        # Avoid shadowing ``base`` while checking each candidate class.
+        for cls in base:
+            if type(obj) is cls:
+                return inner
+    elif type(obj) is base:
+        return inner
+    module = ""
+    if obj.__class__.__module__ not in ("__builtin__", "exceptions"):
+        module = '<span class="module">%s.</span>' % obj.__class__.__module__
+    return "%s%s(%s)" % (module, obj.__class__.__name__, inner)
+
+
+class DebugReprGenerator(object):
+    def __init__(self):
+        self._stack = []
+
+    def _sequence_repr_maker(left, right, base=object(), limit=8):  # noqa: B008, B902
+        def proxy(self, obj, recursive):
+            if recursive:
+                return _add_subclass_info(left + "..." + right, obj, base)
+            buf = [left]
+            have_extended_section = False
+            for idx, item in enumerate(obj):
+                if idx:
+                    buf.append(", ")
+                if idx == limit:
+                    buf.append('<span class="extended">')
+                    have_extended_section = True
+                buf.append(self.repr(item))
+            if have_extended_section:
+                buf.append("</span>")
+            buf.append(right)
+            return _add_subclass_info(u"".join(buf), obj, base)
+
+        return proxy
+
+    list_repr = _sequence_repr_maker("[", "]", list)
+    tuple_repr = _sequence_repr_maker("(", ")", tuple)
+    set_repr = _sequence_repr_maker("set([", "])", set)
+    frozenset_repr = _sequence_repr_maker("frozenset([", "])", frozenset)
+    deque_repr = _sequence_repr_maker(
+        '<span class="module">collections.</span>deque([', "])", deque
+    )
+    del _sequence_repr_maker
+
+    def regex_repr(self, obj):
+        pattern = repr(obj.pattern)
+        if PY2:
+            pattern = pattern.decode("string-escape", "ignore")
+        else:
+            pattern = codecs.decode(pattern, "unicode-escape", "ignore")
+        if pattern[:1] == "u":
+            pattern = "ur" + pattern[1:]
+        else:
+            pattern = "r" + pattern
+        return u're.compile(<span class="string regex">%s</span>)' % pattern
+
+    def string_repr(self, obj, limit=70):
+        buf = ['<span class="string">']
+        r = repr(obj)
+
+        # shorten the repr when the hidden part would be at least 3 chars
+        if len(r) - limit > 2:
+            buf.extend(
+                (
+                    escape(r[:limit]),
+                    '<span class="extended">',
+                    escape(r[limit:]),
+                    "</span>",
+                )
+            )
+        else:
+            buf.append(escape(r))
+
+        buf.append("</span>")
+        out = u"".join(buf)
+
+        # if the repr looks like a standard string, add subclass info if needed
+        if r[0] in "'\"" or (r[0] in "ub" and r[1] in "'\""):
+            return _add_subclass_info(out, obj, (bytes, text_type))
+
+        # otherwise, assume the repr distinguishes the subclass already
+        return out
+
+    def dict_repr(self, d, recursive, limit=5):
+        if recursive:
+            return _add_subclass_info(u"{...}", d, dict)
+        buf = ["{"]
+        have_extended_section = False
+        for idx, (key, value) in enumerate(iteritems(d)):
+            if idx:
+                buf.append(", ")
+            if idx == limit - 1:
+                buf.append('<span class="extended">')
+                have_extended_section = True
+            buf.append(
+                '<span class="pair"><span class="key">%s</span>: '
+                '<span class="value">%s</span></span>'
+                % (self.repr(key), self.repr(value))
+            )
+        if have_extended_section:
+            buf.append("</span>")
+        buf.append("}")
+        return _add_subclass_info(u"".join(buf), d, dict)
+
+    def object_repr(self, obj):
+        r = repr(obj)
+        if PY2:
+            r = r.decode("utf-8", "replace")
+        return u'<span class="object">%s</span>' % escape(r)
+
+    def dispatch_repr(self, obj, recursive):
+        if obj is helper:
+            return u'<span class="help">%r</span>' % helper
+        if isinstance(obj, (integer_types, float, complex)):
+            return u'<span class="number">%r</span>' % obj
+        if isinstance(obj, string_types) or isinstance(obj, bytes):
+            return self.string_repr(obj)
+        if isinstance(obj, RegexType):
+            return self.regex_repr(obj)
+        if isinstance(obj, list):
+            return self.list_repr(obj, recursive)
+        if isinstance(obj, tuple):
+            return self.tuple_repr(obj, recursive)
+        if isinstance(obj, set):
+            return self.set_repr(obj, recursive)
+        if isinstance(obj, frozenset):
+            return self.frozenset_repr(obj, recursive)
+        if isinstance(obj, dict):
+            return self.dict_repr(obj, recursive)
+        # ``deque`` is imported unconditionally above, so no None check is
+        # needed before the isinstance test.
+        if isinstance(obj, deque):
+            return self.deque_repr(obj, recursive)
+        return self.object_repr(obj)
+
+    def fallback_repr(self):
+        try:
+            info = "".join(format_exception_only(*sys.exc_info()[:2]))
+        except Exception:  # pragma: no cover
+            info = "?"
+        if PY2:
+            info = info.decode("utf-8", "ignore")
+        return u'<span class="brokenrepr">&lt;broken repr (%s)&gt;' u"</span>" % escape(
+            info.strip()
+        )
+
+    def repr(self, obj):
+        recursive = False
+        for item in self._stack:
+            if item is obj:
+                recursive = True
+                break
+        self._stack.append(obj)
+        try:
+            try:
+                return self.dispatch_repr(obj, recursive)
+            except Exception:
+                return self.fallback_repr()
+        finally:
+            self._stack.pop()
+
+    def dump_object(self, obj):
+        repr = items = None
+        if isinstance(obj, dict):
+            title = "Contents of"
+            items = []
+            for key, value in iteritems(obj):
+                if not isinstance(key, string_types):
+                    items = None
+                    break
+                items.append((key, self.repr(value)))
+        if items is None:
+            items = []
+            repr = self.repr(obj)
+            for key in dir(obj):
+                try:
+                    items.append((key, self.repr(getattr(obj, key))))
+                except Exception:
+                    pass
+            title = "Details for"
+        title += " " + object.__repr__(obj)[1:-1]
+        return self.render_object_dump(items, title, repr)
+
+    def dump_locals(self, d):
+        items = [(key, self.repr(value)) for key, value in d.items()]
+        return self.render_object_dump(items, "Local variables in frame")
+
+    def render_object_dump(self, items, title, repr=None):
+        html_items = []
+        for key, value in items:
+            html_items.append(
+                "<tr><th>%s<td><pre class=repr>%s</pre>" % (escape(key), value)
+            )
+        if not html_items:
+            html_items.append("<tr><td><em>Nothing</em>")
+        return OBJECT_DUMP_HTML % {
+            "title": escape(title),
+            "repr": "<pre class=repr>%s</pre>" % repr if repr else "",
+            "items": "\n".join(html_items),
+        }
diff --git a/lib/python3.7/site-packages/werkzeug/debug/shared/FONT_LICENSE b/lib/python3.7/site-packages/werkzeug/debug/shared/FONT_LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..ae78a8f94eae372cb1ca4e14b67244342fce604e
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/debug/shared/FONT_LICENSE
@@ -0,0 +1,96 @@
+-------------------------------
+UBUNTU FONT LICENCE Version 1.0
+-------------------------------
+
+PREAMBLE
+This licence allows the licensed fonts to be used, studied, modified and
+redistributed freely. The fonts, including any derivative works, can be
+bundled, embedded, and redistributed provided the terms of this licence
+are met. The fonts and derivatives, however, cannot be released under
+any other licence. The requirement for fonts to remain under this
+licence does not require any document created using the fonts or their
+derivatives to be published under this licence, as long as the primary
+purpose of the document is not to be a vehicle for the distribution of
+the fonts.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this licence and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Original Version" refers to the collection of Font Software components
+as received under this licence.
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to
+a new environment.
+
+"Copyright Holder(s)" refers to all individuals and companies who have a
+copyright ownership of the Font Software.
+
+"Substantially Changed" refers to Modified Versions which can be easily
+identified as dissimilar to the Font Software by users of the Font
+Software comparing the Original Version with the Modified Version.
+
+To "Propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification and with or without charging
+a redistribution fee), making available to the public, and in some
+countries other activities as well.
+
+PERMISSION & CONDITIONS
+This licence does not grant any rights under trademark law and all such
+rights are reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of the Font Software, to propagate the Font Software, subject to
+the below conditions:
+
+1) Each copy of the Font Software must contain the above copyright
+notice and this licence. These can be included either as stand-alone
+text files, human-readable headers or in the appropriate machine-
+readable metadata fields within text or binary files as long as those
+fields can be easily viewed by the user.
+
+2) The font name complies with the following:
+(a) The Original Version must retain its name, unmodified.
+(b) Modified Versions which are Substantially Changed must be renamed to
+avoid use of the name of the Original Version or similar names entirely.
+(c) Modified Versions which are not Substantially Changed must be
+renamed to both (i) retain the name of the Original Version and (ii) add
+additional naming elements to distinguish the Modified Version from the
+Original Version. The name of such Modified Versions must be the name of
+the Original Version, with "derivative X" where X represents the name of
+the new work, appended to that name.
+
+3) The name(s) of the Copyright Holder(s) and any contributor to the
+Font Software shall not be used to promote, endorse or advertise any
+Modified Version, except (i) as required by this licence, (ii) to
+acknowledge the contribution(s) of the Copyright Holder(s) or (iii) with
+their explicit written permission.
+
+4) The Font Software, modified or unmodified, in part or in whole, must
+be distributed entirely under this licence, and must not be distributed
+under any other licence. The requirement for fonts to remain under this
+licence does not affect any document created using the Font Software,
+except any version of the Font Software extracted from a document
+created using the Font Software may only be distributed under this
+licence.
+
+TERMINATION
+This licence becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
+COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER
+DEALINGS IN THE FONT SOFTWARE.
diff --git a/lib/python3.7/site-packages/werkzeug/debug/shared/console.png b/lib/python3.7/site-packages/werkzeug/debug/shared/console.png
new file mode 100644
index 0000000000000000000000000000000000000000..c28dd63812d80e416682f835652f8e5824bdccb2
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/debug/shared/console.png differ
diff --git a/lib/python3.7/site-packages/werkzeug/debug/shared/debugger.js b/lib/python3.7/site-packages/werkzeug/debug/shared/debugger.js
new file mode 100644
index 0000000000000000000000000000000000000000..ba267ed608934d13a567a001e321d1214f105fe6
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/debug/shared/debugger.js
@@ -0,0 +1,210 @@
+$(function() {
+  if (!EVALEX_TRUSTED) {
+    initPinBox();
+  }
+
+  /**
+   * If we are in console mode, show the console.
+   */
+  if (CONSOLE_MODE && EVALEX) {
+    openShell(null, $('div.console div.inner').empty(), 0);
+  }
+
+  $("div.detail").click(function() {
+    $("div.traceback").get(0).scrollIntoView(false);
+  });
+
+  $('div.traceback div.frame').each(function() {
+    var
+      target = $('pre', this),
+      consoleNode = null,
+      frameID = this.id.substring(6);
+
+    target.click(function() {
+      $(this).parent().toggleClass('expanded');
+    });
+
+    /**
+     * Add an interactive console to the frames
+     */
+    if (EVALEX && target.is('.current')) {
+      $('<img src="?__debugger__=yes&cmd=resource&f=console.png">')
+        .attr('title', 'Open an interactive python shell in this frame')
+        .click(function() {
+          consoleNode = openShell(consoleNode, target, frameID);
+          return false;
+        })
+        .prependTo(target);
+    }
+  });
+
+  /**
+   * Toggle traceback types on click.
+   */
+  $('h2.traceback').click(function() {
+    $(this).next().slideToggle('fast');
+    $('div.plain').slideToggle('fast');
+  }).css('cursor', 'pointer');
+  $('div.plain').hide();
+
+  /**
+   * Add extra info (this is here so that only users with JavaScript
+   * enabled see it).
+   */
+  $('span.nojavascript')
+    .removeClass('nojavascript')
+    .html('<p>To switch between the interactive traceback and the plaintext ' +
+          'one, you can click on the "Traceback" headline.  From the text ' +
+          'traceback you can also create a paste of it. ' + (!EVALEX ? '' :
+          'For code execution mouse-over the frame you want to debug and ' +
+          'click on the console icon on the right side.' +
+          '<p>You can execute arbitrary Python code in the stack frames and ' +
+          'there are some extra helpers available for introspection:' +
+          '<ul><li><code>dump()</code> shows all variables in the frame' +
+          '<li><code>dump(obj)</code> dumps all that\'s known about the object</ul>'));
+
+  /**
+   * Add the pastebin feature
+   */
+  $('div.plain form')
+    .submit(function() {
+      var label = $('input[type="submit"]', this);
+      var old_val = label.val();
+      label.val('submitting...');
+      $.ajax({
+        dataType:     'json',
+        url:          document.location.pathname,
+        data:         {__debugger__: 'yes', tb: TRACEBACK, cmd: 'paste',
+                       s: SECRET},
+        success:      function(data) {
+          $('div.plain span.pastemessage')
+            .removeClass('pastemessage')
+            .text('Paste created: ')
+            .append($('<a>#' + data.id + '</a>').attr('href', data.url));
+        },
+        error:        function() {
+          alert('Error: Could not submit paste.  No network connection?');
+          label.val(old_val);
+        }
+      });
+      return false;
+    });
+
+  // If we have JavaScript we submit via Ajax anyway, so the non-scaling
+  // textarea is not needed.
+  var plainTraceback = $('div.plain textarea');
+  plainTraceback.replaceWith($('<pre>').text(plainTraceback.text()));
+});
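+
+/**
+ * Note (not upstream code): EVALEX, EVALEX_TRUSTED, CONSOLE_MODE, TRACEBACK
+ * and SECRET are globals that the debugger's HTML page is expected to define
+ * before this script runs.
+ */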
+
+function initPinBox() {
+  $('.pin-prompt form').submit(function(evt) {
+    evt.preventDefault();
+    var pin = this.pin.value;
+    var btn = this.btn;
+    btn.disabled = true;
+    $.ajax({
+      dataType: 'json',
+      url: document.location.pathname,
+      data: {__debugger__: 'yes', cmd: 'pinauth', pin: pin,
+             s: SECRET},
+      success: function(data) {
+        btn.disabled = false;
+        if (data.auth) {
+          EVALEX_TRUSTED = true;
+          $('.pin-prompt').fadeOut();
+        } else {
+          if (data.exhausted) {
+            alert('Error: too many attempts.  Restart server to retry.');
+          } else {
+            alert('Error: incorrect pin');
+          }
+        }
+        console.log(data);
+      },
+      error: function() {
+        btn.disabled = false;
+        alert('Error: Could not verify PIN.  Network error?');
+      }
+    });
+  });
+}
+
+function promptForPin() {
+  if (!EVALEX_TRUSTED) {
+    $.ajax({
+      url: document.location.pathname,
+      data: {__debugger__: 'yes', cmd: 'printpin', s: SECRET}
+    });
+    $('.pin-prompt').fadeIn(function() {
+      $('.pin-prompt input[name="pin"]').focus();
+    });
+  }
+}
+
+
+/**
+ * Helper function for shell initialization
+ */
+function openShell(consoleNode, target, frameID) {
+  promptForPin();
+  if (consoleNode)
+    return consoleNode.slideToggle('fast');
+  consoleNode = $('<pre class="console">')
+    .appendTo(target.parent())
+    .hide();
+  var historyPos = 0, history = [''];
+  var output = $('<div class="output">[console ready]</div>')
+    .appendTo(consoleNode);
+  var form = $('<form>&gt;&gt;&gt; </form>')
+    .submit(function() {
+      var cmd = command.val();
+      $.get('', {
+          __debugger__: 'yes', cmd: cmd, frm: frameID, s: SECRET}, function(data) {
+        var tmp = $('<div>').html(data);
+        $('span.extended', tmp).each(function() {
+          var hidden = $(this).wrap('<span>').hide();
+          hidden
+            .parent()
+            .append($('<a href="#" class="toggle">&nbsp;&nbsp;</a>')
+              .click(function() {
+                hidden.toggle();
+                $(this).toggleClass('open');
+                return false;
+              }));
+        });
+        output.append(tmp);
+        command.focus();
+        consoleNode.scrollTop(consoleNode.get(0).scrollHeight);
+        var old = history.pop();
+        history.push(cmd);
+        if (typeof old != 'undefined')
+          history.push(old);
+        historyPos = history.length - 1;
+      });
+      command.val('');
+      return false;
+    })
+    .appendTo(consoleNode);
+
+  var command = $('<input type="text" autocomplete="off" autocorrect="off" autocapitalize="off" spellcheck="false">')
+    .appendTo(form)
+    .keydown(function(e) {
+      if (e.key == 'l' && e.ctrlKey) {
+        output.text('--- screen cleared ---');
+        return false;
+      }
+      else if (e.charCode == 0 && (e.keyCode == 38 || e.keyCode == 40)) {
+        // Handle the up and down arrow keys for history navigation; clamp
+        // at the newest entry so we never index past the history array.
+        if (e.keyCode == 38 && historyPos > 0)
+          historyPos--;
+        else if (e.keyCode == 40 && historyPos < history.length - 1)
+          historyPos++;
+        command.val(history[historyPos]);
+        return false;
+      }
+    });
+
+  return consoleNode.slideDown('fast', function() {
+    command.focus();
+  });
+}
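+
+/**
+ * Note (not upstream code): each console submission above is a GET carrying
+ * {__debugger__: 'yes', cmd: <source>, frm: <frameID>, s: SECRET}; the Python
+ * side dispatches it to execute_command() once the pin cookie is trusted.
+ */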
diff --git a/lib/python3.7/site-packages/werkzeug/debug/shared/jquery.js b/lib/python3.7/site-packages/werkzeug/debug/shared/jquery.js
new file mode 100644
index 0000000000000000000000000000000000000000..4d9b3a258759c53e7bc66b6fc554c51e2434437c
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/debug/shared/jquery.js
@@ -0,0 +1,2 @@
+/*! jQuery v3.3.1 | (c) JS Foundation and other contributors | jquery.org/license */
+!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(e,t){"use strict";var n=[],r=e.document,i=Object.getPrototypeOf,o=n.slice,a=n.concat,s=n.push,u=n.indexOf,l={},c=l.toString,f=l.hasOwnProperty,p=f.toString,d=p.call(Object),h={},g=function e(t){return"function"==typeof t&&"number"!=typeof t.nodeType},y=function e(t){return null!=t&&t===t.window},v={type:!0,src:!0,noModule:!0};function m(e,t,n){var i,o=(t=t||r).createElement("script");if(o.text=e,n)for(i in v)n[i]&&(o[i]=n[i]);t.head.appendChild(o).parentNode.removeChild(o)}function x(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?l[c.call(e)]||"object":typeof e}var b="3.3.1",w=function(e,t){return new w.fn.init(e,t)},T=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;w.fn=w.prototype={jquery:"3.3.1",constructor:w,length:0,toArray:function(){return o.call(this)},get:function(e){return null==e?o.call(this):e<0?this[e+this.length]:this[e]},pushStack:function(e){var t=w.merge(this.constructor(),e);return t.prevObject=this,t},each:function(e){return w.each(this,e)},map:function(e){return this.pushStack(w.map(this,function(t,n){return e.call(t,n,t)}))},slice:function(){return this.pushStack(o.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(e<0?t:0);return this.pushStack(n>=0&&n<t?[this[n]]:[])},end:function(){return this.prevObject||this.constructor()},push:s,sort:n.sort,splice:n.splice},w.extend=w.fn.extend=function(){var e,t,n,r,i,o,a=arguments[0]||{},s=1,u=arguments.length,l=!1;for("boolean"==typeof a&&(l=a,a=arguments[s]||{},s++),"object"==typeof a||g(a)||(a={}),s===u&&(a=this,s--);s<u;s++)if(null!=(e=arguments[s]))for(t in e)n=a[t],a!==(r=e[t])&&(l&&r&&(w.isPlainObject(r)||(i=Array.isArray(r)))?(i?(i=!1,o=n&&Array.isArray(n)?n:[]):o=n&&w.isPlainObject(n)?n:{},a[t]=w.extend(l,o,r)):void 0!==r&&(a[t]=r));return a},w.extend({expando:"jQuery"+("3.3.1"+Math.random()).replace(/\D/g,""),isReady:!0,error:function(e){throw new Error(e)},noop:function(){},isPlainObject:function(e){var t,n;return!(!e||"[object Object]"!==c.call(e))&&(!(t=i(e))||"function"==typeof(n=f.call(t,"constructor")&&t.constructor)&&p.call(n)===d)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},globalEval:function(e){m(e)},each:function(e,t){var n,r=0;if(C(e)){for(n=e.length;r<n;r++)if(!1===t.call(e[r],r,e[r]))break}else for(r in e)if(!1===t.call(e[r],r,e[r]))break;return e},trim:function(e){return null==e?"":(e+"").replace(T,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(C(Object(e))?w.merge(n,"string"==typeof e?[e]:e):s.call(n,e)),n},inArray:function(e,t,n){return null==t?-1:u.call(t,e,n)},merge:function(e,t){for(var n=+t.length,r=0,i=e.length;r<n;r++)e[i++]=t[r];return e.length=i,e},grep:function(e,t,n){for(var r,i=[],o=0,a=e.length,s=!n;o<a;o++)(r=!t(e[o],o))!==s&&i.push(e[o]);return i},map:function(e,t,n){var r,i,o=0,s=[];if(C(e))for(r=e.length;o<r;o++)null!=(i=t(e[o],o,n))&&s.push(i);else for(o in e)null!=(i=t(e[o],o,n))&&s.push(i);return a.apply([],s)},guid:1,support:h}),"function"==typeof Symbol&&(w.fn[Symbol.iterator]=n[Symbol.iterator]),w.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(e,t){l["[object "+t+"]"]=t.toLowerCase()});function C(e){var t=!!e&&"length"in 
e&&e.length,n=x(e);return!g(e)&&!y(e)&&("array"===n||0===t||"number"==typeof t&&t>0&&t-1 in e)}var E=function(e){var t,n,r,i,o,a,s,u,l,c,f,p,d,h,g,y,v,m,x,b="sizzle"+1*new Date,w=e.document,T=0,C=0,E=ae(),k=ae(),S=ae(),D=function(e,t){return e===t&&(f=!0),0},N={}.hasOwnProperty,A=[],j=A.pop,q=A.push,L=A.push,H=A.slice,O=function(e,t){for(var n=0,r=e.length;n<r;n++)if(e[n]===t)return n;return-1},P="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",R="(?:\\\\.|[\\w-]|[^\0-\\xa0])+",I="\\["+M+"*("+R+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+R+"))|)"+M+"*\\]",W=":("+R+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+I+")*)|.*)\\)|)",$=new RegExp(M+"+","g"),B=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),F=new RegExp("^"+M+"*,"+M+"*"),_=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),z=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),X=new RegExp(W),U=new RegExp("^"+R+"$"),V={ID:new RegExp("^#("+R+")"),CLASS:new RegExp("^\\.("+R+")"),TAG:new RegExp("^("+R+"|[*])"),ATTR:new RegExp("^"+I),PSEUDO:new RegExp("^"+W),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+P+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},G=/^(?:input|select|textarea|button)$/i,Y=/^h\d$/i,Q=/^[^{]+\{\s*\[native \w/,J=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,K=/[+~]/,Z=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ee=function(e,t,n){var r="0x"+t-65536;return r!==r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},te=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ne=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},re=function(){p()},ie=me(function(e){return!0===e.disabled&&("form"in e||"label"in e)},{dir:"parentNode",next:"legend"});try{L.apply(A=H.call(w.childNodes),w.childNodes),A[w.childNodes.length].nodeType}catch(e){L={apply:A.length?function(e,t){q.apply(e,H.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function oe(e,t,r,i){var o,s,l,c,f,h,v,m=t&&t.ownerDocument,T=t?t.nodeType:9;if(r=r||[],"string"!=typeof e||!e||1!==T&&9!==T&&11!==T)return r;if(!i&&((t?t.ownerDocument||t:w)!==d&&p(t),t=t||d,g)){if(11!==T&&(f=J.exec(e)))if(o=f[1]){if(9===T){if(!(l=t.getElementById(o)))return r;if(l.id===o)return r.push(l),r}else if(m&&(l=m.getElementById(o))&&x(t,l)&&l.id===o)return r.push(l),r}else{if(f[2])return L.apply(r,t.getElementsByTagName(e)),r;if((o=f[3])&&n.getElementsByClassName&&t.getElementsByClassName)return L.apply(r,t.getElementsByClassName(o)),r}if(n.qsa&&!S[e+" "]&&(!y||!y.test(e))){if(1!==T)m=t,v=e;else if("object"!==t.nodeName.toLowerCase()){(c=t.getAttribute("id"))?c=c.replace(te,ne):t.setAttribute("id",c=b),s=(h=a(e)).length;while(s--)h[s]="#"+c+" "+ve(h[s]);v=h.join(","),m=K.test(e)&&ge(t.parentNode)||t}if(v)try{return L.apply(r,m.querySelectorAll(v)),r}catch(e){}finally{c===b&&t.removeAttribute("id")}}}return u(e.replace(B,"$1"),t,r,i)}function ae(){var e=[];function t(n,i){return e.push(n+" ")>r.cacheLength&&delete t[e.shift()],t[n+" "]=i}return t}function se(e){return e[b]=!0,e}function ue(e){var 
t=d.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function le(e,t){var n=e.split("|"),i=n.length;while(i--)r.attrHandle[n[i]]=t}function ce(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function fe(e){return function(t){return"input"===t.nodeName.toLowerCase()&&t.type===e}}function pe(e){return function(t){var n=t.nodeName.toLowerCase();return("input"===n||"button"===n)&&t.type===e}}function de(e){return function(t){return"form"in t?t.parentNode&&!1===t.disabled?"label"in t?"label"in t.parentNode?t.parentNode.disabled===e:t.disabled===e:t.isDisabled===e||t.isDisabled!==!e&&ie(t)===e:t.disabled===e:"label"in t&&t.disabled===e}}function he(e){return se(function(t){return t=+t,se(function(n,r){var i,o=e([],n.length,t),a=o.length;while(a--)n[i=o[a]]&&(n[i]=!(r[i]=n[i]))})})}function ge(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}n=oe.support={},o=oe.isXML=function(e){var t=e&&(e.ownerDocument||e).documentElement;return!!t&&"HTML"!==t.nodeName},p=oe.setDocument=function(e){var t,i,a=e?e.ownerDocument||e:w;return a!==d&&9===a.nodeType&&a.documentElement?(d=a,h=d.documentElement,g=!o(d),w!==d&&(i=d.defaultView)&&i.top!==i&&(i.addEventListener?i.addEventListener("unload",re,!1):i.attachEvent&&i.attachEvent("onunload",re)),n.attributes=ue(function(e){return e.className="i",!e.getAttribute("className")}),n.getElementsByTagName=ue(function(e){return e.appendChild(d.createComment("")),!e.getElementsByTagName("*").length}),n.getElementsByClassName=Q.test(d.getElementsByClassName),n.getById=ue(function(e){return h.appendChild(e).id=b,!d.getElementsByName||!d.getElementsByName(b).length}),n.getById?(r.filter.ID=function(e){var t=e.replace(Z,ee);return function(e){return e.getAttribute("id")===t}},r.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&g){var n=t.getElementById(e);return n?[n]:[]}}):(r.filter.ID=function(e){var t=e.replace(Z,ee);return function(e){var n="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return n&&n.value===t}},r.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&g){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),r.find.TAG=n.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):n.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},r.find.CLASS=n.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&g)return t.getElementsByClassName(e)},v=[],y=[],(n.qsa=Q.test(d.querySelectorAll))&&(ue(function(e){h.appendChild(e).innerHTML="<a id='"+b+"'></a><select id='"+b+"-\r\\' msallowcapture=''><option selected=''></option></select>",e.querySelectorAll("[msallowcapture^='']").length&&y.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||y.push("\\["+M+"*(?:value|"+P+")"),e.querySelectorAll("[id~="+b+"-]").length||y.push("~="),e.querySelectorAll(":checked").length||y.push(":checked"),e.querySelectorAll("a#"+b+"+*").length||y.push(".#.+[+~]")}),ue(function(e){e.innerHTML="<a href='' disabled='disabled'></a><select disabled='disabled'><option/></select>";var 
t=d.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&y.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&y.push(":enabled",":disabled"),h.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&y.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),y.push(",.*:")})),(n.matchesSelector=Q.test(m=h.matches||h.webkitMatchesSelector||h.mozMatchesSelector||h.oMatchesSelector||h.msMatchesSelector))&&ue(function(e){n.disconnectedMatch=m.call(e,"*"),m.call(e,"[s!='']:x"),v.push("!=",W)}),y=y.length&&new RegExp(y.join("|")),v=v.length&&new RegExp(v.join("|")),t=Q.test(h.compareDocumentPosition),x=t||Q.test(h.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return f=!0,0;var r=!e.compareDocumentPosition-!t.compareDocumentPosition;return r||(1&(r=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!n.sortDetached&&t.compareDocumentPosition(e)===r?e===d||e.ownerDocument===w&&x(w,e)?-1:t===d||t.ownerDocument===w&&x(w,t)?1:c?O(c,e)-O(c,t):0:4&r?-1:1)}:function(e,t){if(e===t)return f=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===d?-1:t===d?1:i?-1:o?1:c?O(c,e)-O(c,t):0;if(i===o)return ce(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?ce(a[r],s[r]):a[r]===w?-1:s[r]===w?1:0},d):d},oe.matches=function(e,t){return oe(e,null,null,t)},oe.matchesSelector=function(e,t){if((e.ownerDocument||e)!==d&&p(e),t=t.replace(z,"='$1']"),n.matchesSelector&&g&&!S[t+" "]&&(!v||!v.test(t))&&(!y||!y.test(t)))try{var r=m.call(e,t);if(r||n.disconnectedMatch||e.document&&11!==e.document.nodeType)return r}catch(e){}return oe(t,d,null,[e]).length>0},oe.contains=function(e,t){return(e.ownerDocument||e)!==d&&p(e),x(e,t)},oe.attr=function(e,t){(e.ownerDocument||e)!==d&&p(e);var i=r.attrHandle[t.toLowerCase()],o=i&&N.call(r.attrHandle,t.toLowerCase())?i(e,t,!g):void 0;return void 0!==o?o:n.attributes||!g?e.getAttribute(t):(o=e.getAttributeNode(t))&&o.specified?o.value:null},oe.escape=function(e){return(e+"").replace(te,ne)},oe.error=function(e){throw new Error("Syntax error, unrecognized expression: "+e)},oe.uniqueSort=function(e){var t,r=[],i=0,o=0;if(f=!n.detectDuplicates,c=!n.sortStable&&e.slice(0),e.sort(D),f){while(t=e[o++])t===e[o]&&(i=r.push(o));while(i--)e.splice(r[i],1)}return c=null,e},i=oe.getText=function(e){var t,n="",r=0,o=e.nodeType;if(o){if(1===o||9===o||11===o){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=i(e)}else if(3===o||4===o)return e.nodeValue}else while(t=e[r++])n+=i(t);return n},(r=oe.selectors={cacheLength:50,createPseudo:se,match:V,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(Z,ee),e[3]=(e[3]||e[4]||e[5]||"").replace(Z,ee),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||oe.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&oe.error(e[0]),e},PSEUDO:function(e){var 
t,n=!e[6]&&e[2];return V.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=a(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(Z,ee).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=E[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&E(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(e,t,n){return function(r){var i=oe.attr(r,e);return null==i?"!="===t:!t||(i+="","="===t?i===n:"!="===t?i!==n:"^="===t?n&&0===i.indexOf(n):"*="===t?n&&i.indexOf(n)>-1:"$="===t?n&&i.slice(-n.length)===n:"~="===t?(" "+i.replace($," ")+" ").indexOf(n)>-1:"|="===t&&(i===n||i.slice(0,n.length+1)===n+"-"))}},CHILD:function(e,t,n,r,i){var o="nth"!==e.slice(0,3),a="last"!==e.slice(-4),s="of-type"===t;return 1===r&&0===i?function(e){return!!e.parentNode}:function(t,n,u){var l,c,f,p,d,h,g=o!==a?"nextSibling":"previousSibling",y=t.parentNode,v=s&&t.nodeName.toLowerCase(),m=!u&&!s,x=!1;if(y){if(o){while(g){p=t;while(p=p[g])if(s?p.nodeName.toLowerCase()===v:1===p.nodeType)return!1;h=g="only"===e&&!h&&"nextSibling"}return!0}if(h=[a?y.firstChild:y.lastChild],a&&m){x=(d=(l=(c=(f=(p=y)[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]||[])[0]===T&&l[1])&&l[2],p=d&&y.childNodes[d];while(p=++d&&p&&p[g]||(x=d=0)||h.pop())if(1===p.nodeType&&++x&&p===t){c[e]=[T,d,x];break}}else if(m&&(x=d=(l=(c=(f=(p=t)[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]||[])[0]===T&&l[1]),!1===x)while(p=++d&&p&&p[g]||(x=d=0)||h.pop())if((s?p.nodeName.toLowerCase()===v:1===p.nodeType)&&++x&&(m&&((c=(f=p[b]||(p[b]={}))[p.uniqueID]||(f[p.uniqueID]={}))[e]=[T,x]),p===t))break;return(x-=i)===r||x%r==0&&x/r>=0}}},PSEUDO:function(e,t){var n,i=r.pseudos[e]||r.setFilters[e.toLowerCase()]||oe.error("unsupported pseudo: "+e);return i[b]?i(t):i.length>1?(n=[e,e,"",t],r.setFilters.hasOwnProperty(e.toLowerCase())?se(function(e,n){var r,o=i(e,t),a=o.length;while(a--)e[r=O(e,o[a])]=!(n[r]=o[a])}):function(e){return i(e,0,n)}):i}},pseudos:{not:se(function(e){var t=[],n=[],r=s(e.replace(B,"$1"));return r[b]?se(function(e,t,n,i){var o,a=r(e,null,i,[]),s=e.length;while(s--)(o=a[s])&&(e[s]=!(t[s]=o))}):function(e,i,o){return t[0]=e,r(t,null,o,n),t[0]=null,!n.pop()}}),has:se(function(e){return function(t){return oe(e,t).length>0}}),contains:se(function(e){return e=e.replace(Z,ee),function(t){return(t.textContent||t.innerText||i(t)).indexOf(e)>-1}}),lang:se(function(e){return U.test(e||"")||oe.error("unsupported lang: "+e),e=e.replace(Z,ee).toLowerCase(),function(t){var n;do{if(n=g?t.lang:t.getAttribute("xml:lang")||t.getAttribute("lang"))return(n=n.toLowerCase())===e||0===n.indexOf(e+"-")}while((t=t.parentNode)&&1===t.nodeType);return!1}}),target:function(t){var n=e.location&&e.location.hash;return n&&n.slice(1)===t.id},root:function(e){return e===h},focus:function(e){return e===d.activeElement&&(!d.hasFocus||d.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:de(!1),disabled:de(!0),checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,!0===e.selected},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeType<6)return!1;return!0},parent:function(e){return!r.pseudos.empty(e)},header:function(e){return 
Y.test(e.nodeName)},input:function(e){return G.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||"text"===t.toLowerCase())},first:he(function(){return[0]}),last:he(function(e,t){return[t-1]}),eq:he(function(e,t,n){return[n<0?n+t:n]}),even:he(function(e,t){for(var n=0;n<t;n+=2)e.push(n);return e}),odd:he(function(e,t){for(var n=1;n<t;n+=2)e.push(n);return e}),lt:he(function(e,t,n){for(var r=n<0?n+t:n;--r>=0;)e.push(r);return e}),gt:he(function(e,t,n){for(var r=n<0?n+t:n;++r<t;)e.push(r);return e})}}).pseudos.nth=r.pseudos.eq;for(t in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})r.pseudos[t]=fe(t);for(t in{submit:!0,reset:!0})r.pseudos[t]=pe(t);function ye(){}ye.prototype=r.filters=r.pseudos,r.setFilters=new ye,a=oe.tokenize=function(e,t){var n,i,o,a,s,u,l,c=k[e+" "];if(c)return t?0:c.slice(0);s=e,u=[],l=r.preFilter;while(s){n&&!(i=F.exec(s))||(i&&(s=s.slice(i[0].length)||s),u.push(o=[])),n=!1,(i=_.exec(s))&&(n=i.shift(),o.push({value:n,type:i[0].replace(B," ")}),s=s.slice(n.length));for(a in r.filter)!(i=V[a].exec(s))||l[a]&&!(i=l[a](i))||(n=i.shift(),o.push({value:n,type:a,matches:i}),s=s.slice(n.length));if(!n)break}return t?s.length:s?oe.error(e):k(e,u).slice(0)};function ve(e){for(var t=0,n=e.length,r="";t<n;t++)r+=e[t].value;return r}function me(e,t,n){var r=t.dir,i=t.next,o=i||r,a=n&&"parentNode"===o,s=C++;return t.first?function(t,n,i){while(t=t[r])if(1===t.nodeType||a)return e(t,n,i);return!1}:function(t,n,u){var l,c,f,p=[T,s];if(u){while(t=t[r])if((1===t.nodeType||a)&&e(t,n,u))return!0}else while(t=t[r])if(1===t.nodeType||a)if(f=t[b]||(t[b]={}),c=f[t.uniqueID]||(f[t.uniqueID]={}),i&&i===t.nodeName.toLowerCase())t=t[r]||t;else{if((l=c[o])&&l[0]===T&&l[1]===s)return p[2]=l[2];if(c[o]=p,p[2]=e(t,n,u))return!0}return!1}}function xe(e){return e.length>1?function(t,n,r){var i=e.length;while(i--)if(!e[i](t,n,r))return!1;return!0}:e[0]}function be(e,t,n){for(var r=0,i=t.length;r<i;r++)oe(e,t[r],n);return n}function we(e,t,n,r,i){for(var o,a=[],s=0,u=e.length,l=null!=t;s<u;s++)(o=e[s])&&(n&&!n(o,r,i)||(a.push(o),l&&t.push(s)));return a}function Te(e,t,n,r,i,o){return r&&!r[b]&&(r=Te(r)),i&&!i[b]&&(i=Te(i,o)),se(function(o,a,s,u){var l,c,f,p=[],d=[],h=a.length,g=o||be(t||"*",s.nodeType?[s]:s,[]),y=!e||!o&&t?g:we(g,p,e,s,u),v=n?i||(o?e:h||r)?[]:a:y;if(n&&n(y,v,s,u),r){l=we(v,d),r(l,[],s,u),c=l.length;while(c--)(f=l[c])&&(v[d[c]]=!(y[d[c]]=f))}if(o){if(i||e){if(i){l=[],c=v.length;while(c--)(f=v[c])&&l.push(y[c]=f);i(null,v=[],l,u)}c=v.length;while(c--)(f=v[c])&&(l=i?O(o,f):p[c])>-1&&(o[l]=!(a[l]=f))}}else v=we(v===a?v.splice(h,v.length):v),i?i(null,a,v,u):L.apply(a,v)})}function Ce(e){for(var t,n,i,o=e.length,a=r.relative[e[0].type],s=a||r.relative[" "],u=a?1:0,c=me(function(e){return e===t},s,!0),f=me(function(e){return O(t,e)>-1},s,!0),p=[function(e,n,r){var i=!a&&(r||n!==l)||((t=n).nodeType?c(e,n,r):f(e,n,r));return t=null,i}];u<o;u++)if(n=r.relative[e[u].type])p=[me(xe(p),n)];else{if((n=r.filter[e[u].type].apply(null,e[u].matches))[b]){for(i=++u;i<o;i++)if(r.relative[e[i].type])break;return Te(u>1&&xe(p),u>1&&ve(e.slice(0,u-1).concat({value:" "===e[u-2].type?"*":""})).replace(B,"$1"),n,u<i&&Ce(e.slice(u,i)),i<o&&Ce(e=e.slice(i)),i<o&&ve(e))}p.push(n)}return xe(p)}function Ee(e,t){var n=t.length>0,i=e.length>0,o=function(o,a,s,u,c){var 
f,h,y,v=0,m="0",x=o&&[],b=[],w=l,C=o||i&&r.find.TAG("*",c),E=T+=null==w?1:Math.random()||.1,k=C.length;for(c&&(l=a===d||a||c);m!==k&&null!=(f=C[m]);m++){if(i&&f){h=0,a||f.ownerDocument===d||(p(f),s=!g);while(y=e[h++])if(y(f,a||d,s)){u.push(f);break}c&&(T=E)}n&&((f=!y&&f)&&v--,o&&x.push(f))}if(v+=m,n&&m!==v){h=0;while(y=t[h++])y(x,b,a,s);if(o){if(v>0)while(m--)x[m]||b[m]||(b[m]=j.call(u));b=we(b)}L.apply(u,b),c&&!o&&b.length>0&&v+t.length>1&&oe.uniqueSort(u)}return c&&(T=E,l=w),x};return n?se(o):o}return s=oe.compile=function(e,t){var n,r=[],i=[],o=S[e+" "];if(!o){t||(t=a(e)),n=t.length;while(n--)(o=Ce(t[n]))[b]?r.push(o):i.push(o);(o=S(e,Ee(i,r))).selector=e}return o},u=oe.select=function(e,t,n,i){var o,u,l,c,f,p="function"==typeof e&&e,d=!i&&a(e=p.selector||e);if(n=n||[],1===d.length){if((u=d[0]=d[0].slice(0)).length>2&&"ID"===(l=u[0]).type&&9===t.nodeType&&g&&r.relative[u[1].type]){if(!(t=(r.find.ID(l.matches[0].replace(Z,ee),t)||[])[0]))return n;p&&(t=t.parentNode),e=e.slice(u.shift().value.length)}o=V.needsContext.test(e)?0:u.length;while(o--){if(l=u[o],r.relative[c=l.type])break;if((f=r.find[c])&&(i=f(l.matches[0].replace(Z,ee),K.test(u[0].type)&&ge(t.parentNode)||t))){if(u.splice(o,1),!(e=i.length&&ve(u)))return L.apply(n,i),n;break}}}return(p||s(e,d))(i,t,!g,n,!t||K.test(e)&&ge(t.parentNode)||t),n},n.sortStable=b.split("").sort(D).join("")===b,n.detectDuplicates=!!f,p(),n.sortDetached=ue(function(e){return 1&e.compareDocumentPosition(d.createElement("fieldset"))}),ue(function(e){return e.innerHTML="<a href='#'></a>","#"===e.firstChild.getAttribute("href")})||le("type|href|height|width",function(e,t,n){if(!n)return e.getAttribute(t,"type"===t.toLowerCase()?1:2)}),n.attributes&&ue(function(e){return e.innerHTML="<input/>",e.firstChild.setAttribute("value",""),""===e.firstChild.getAttribute("value")})||le("value",function(e,t,n){if(!n&&"input"===e.nodeName.toLowerCase())return e.defaultValue}),ue(function(e){return null==e.getAttribute("disabled")})||le(P,function(e,t,n){var r;if(!n)return!0===e[t]?t.toLowerCase():(r=e.getAttributeNode(t))&&r.specified?r.value:null}),oe}(e);w.find=E,w.expr=E.selectors,w.expr[":"]=w.expr.pseudos,w.uniqueSort=w.unique=E.uniqueSort,w.text=E.getText,w.isXMLDoc=E.isXML,w.contains=E.contains,w.escapeSelector=E.escape;var k=function(e,t,n){var r=[],i=void 0!==n;while((e=e[t])&&9!==e.nodeType)if(1===e.nodeType){if(i&&w(e).is(n))break;r.push(e)}return r},S=function(e,t){for(var n=[];e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n},D=w.expr.match.needsContext;function N(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()}var A=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,t,n){return g(t)?w.grep(e,function(e,r){return!!t.call(e,r,e)!==n}):t.nodeType?w.grep(e,function(e){return e===t!==n}):"string"!=typeof t?w.grep(e,function(e){return u.call(t,e)>-1!==n}):w.filter(t,e,n)}w.filter=function(e,t,n){var r=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===r.nodeType?w.find.matchesSelector(r,e)?[r]:[]:w.find.matches(e,w.grep(t,function(e){return 1===e.nodeType}))},w.fn.extend({find:function(e){var t,n,r=this.length,i=this;if("string"!=typeof e)return this.pushStack(w(e).filter(function(){for(t=0;t<r;t++)if(w.contains(i[t],this))return!0}));for(n=this.pushStack([]),t=0;t<r;t++)w.find(e,i[t],n);return r>1?w.uniqueSort(n):n},filter:function(e){return this.pushStack(j(this,e||[],!1))},not:function(e){return this.pushStack(j(this,e||[],!0))},is:function(e){return!!j(this,"string"==typeof 
e&&D.test(e)?w(e):e||[],!1).length}});var q,L=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/;(w.fn.init=function(e,t,n){var i,o;if(!e)return this;if(n=n||q,"string"==typeof e){if(!(i="<"===e[0]&&">"===e[e.length-1]&&e.length>=3?[null,e,null]:L.exec(e))||!i[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(i[1]){if(t=t instanceof w?t[0]:t,w.merge(this,w.parseHTML(i[1],t&&t.nodeType?t.ownerDocument||t:r,!0)),A.test(i[1])&&w.isPlainObject(t))for(i in t)g(this[i])?this[i](t[i]):this.attr(i,t[i]);return this}return(o=r.getElementById(i[2]))&&(this[0]=o,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):g(e)?void 0!==n.ready?n.ready(e):e(w):w.makeArray(e,this)}).prototype=w.fn,q=w(r);var H=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};w.fn.extend({has:function(e){var t=w(e,this),n=t.length;return this.filter(function(){for(var e=0;e<n;e++)if(w.contains(this,t[e]))return!0})},closest:function(e,t){var n,r=0,i=this.length,o=[],a="string"!=typeof e&&w(e);if(!D.test(e))for(;r<i;r++)for(n=this[r];n&&n!==t;n=n.parentNode)if(n.nodeType<11&&(a?a.index(n)>-1:1===n.nodeType&&w.find.matchesSelector(n,e))){o.push(n);break}return this.pushStack(o.length>1?w.uniqueSort(o):o)},index:function(e){return e?"string"==typeof e?u.call(w(e),this[0]):u.call(this,e.jquery?e[0]:e):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){return this.pushStack(w.uniqueSort(w.merge(this.get(),w(e,t))))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}});function P(e,t){while((e=e[t])&&1!==e.nodeType);return e}w.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return k(e,"parentNode")},parentsUntil:function(e,t,n){return k(e,"parentNode",n)},next:function(e){return P(e,"nextSibling")},prev:function(e){return P(e,"previousSibling")},nextAll:function(e){return k(e,"nextSibling")},prevAll:function(e){return k(e,"previousSibling")},nextUntil:function(e,t,n){return k(e,"nextSibling",n)},prevUntil:function(e,t,n){return k(e,"previousSibling",n)},siblings:function(e){return S((e.parentNode||{}).firstChild,e)},children:function(e){return S(e.firstChild)},contents:function(e){return N(e,"iframe")?e.contentDocument:(N(e,"template")&&(e=e.content||e),w.merge([],e.childNodes))}},function(e,t){w.fn[e]=function(n,r){var i=w.map(this,t,n);return"Until"!==e.slice(-5)&&(r=n),r&&"string"==typeof r&&(i=w.filter(r,i)),this.length>1&&(O[e]||w.uniqueSort(i),H.test(e)&&i.reverse()),this.pushStack(i)}});var M=/[^\x20\t\r\n\f]+/g;function R(e){var t={};return w.each(e.match(M)||[],function(e,n){t[n]=!0}),t}w.Callbacks=function(e){e="string"==typeof e?R(e):w.extend({},e);var t,n,r,i,o=[],a=[],s=-1,u=function(){for(i=i||e.once,r=t=!0;a.length;s=-1){n=a.shift();while(++s<o.length)!1===o[s].apply(n[0],n[1])&&e.stopOnFalse&&(s=o.length,n=!1)}e.memory||(n=!1),t=!1,i&&(o=n?[]:"")},l={add:function(){return o&&(n&&!t&&(s=o.length-1,a.push(n)),function t(n){w.each(n,function(n,r){g(r)?e.unique&&l.has(r)||o.push(r):r&&r.length&&"string"!==x(r)&&t(r)})}(arguments),n&&!t&&u()),this},remove:function(){return w.each(arguments,function(e,t){var n;while((n=w.inArray(t,o,n))>-1)o.splice(n,1),n<=s&&s--}),this},has:function(e){return e?w.inArray(e,o)>-1:o.length>0},empty:function(){return o&&(o=[]),this},disable:function(){return i=a=[],o=n="",this},disabled:function(){return!o},lock:function(){return i=a=[],n||t||(o=n=""),this},locked:function(){return!!i},fireWith:function(e,n){return 
i||(n=[e,(n=n||[]).slice?n.slice():n],a.push(n),t||u()),this},fire:function(){return l.fireWith(this,arguments),this},fired:function(){return!!r}};return l};function I(e){return e}function W(e){throw e}function $(e,t,n,r){var i;try{e&&g(i=e.promise)?i.call(e).done(t).fail(n):e&&g(i=e.then)?i.call(e,t,n):t.apply(void 0,[e].slice(r))}catch(e){n.apply(void 0,[e])}}w.extend({Deferred:function(t){var n=[["notify","progress",w.Callbacks("memory"),w.Callbacks("memory"),2],["resolve","done",w.Callbacks("once memory"),w.Callbacks("once memory"),0,"resolved"],["reject","fail",w.Callbacks("once memory"),w.Callbacks("once memory"),1,"rejected"]],r="pending",i={state:function(){return r},always:function(){return o.done(arguments).fail(arguments),this},"catch":function(e){return i.then(null,e)},pipe:function(){var e=arguments;return w.Deferred(function(t){w.each(n,function(n,r){var i=g(e[r[4]])&&e[r[4]];o[r[1]](function(){var e=i&&i.apply(this,arguments);e&&g(e.promise)?e.promise().progress(t.notify).done(t.resolve).fail(t.reject):t[r[0]+"With"](this,i?[e]:arguments)})}),e=null}).promise()},then:function(t,r,i){var o=0;function a(t,n,r,i){return function(){var s=this,u=arguments,l=function(){var e,l;if(!(t<o)){if((e=r.apply(s,u))===n.promise())throw new TypeError("Thenable self-resolution");l=e&&("object"==typeof e||"function"==typeof e)&&e.then,g(l)?i?l.call(e,a(o,n,I,i),a(o,n,W,i)):(o++,l.call(e,a(o,n,I,i),a(o,n,W,i),a(o,n,I,n.notifyWith))):(r!==I&&(s=void 0,u=[e]),(i||n.resolveWith)(s,u))}},c=i?l:function(){try{l()}catch(e){w.Deferred.exceptionHook&&w.Deferred.exceptionHook(e,c.stackTrace),t+1>=o&&(r!==W&&(s=void 0,u=[e]),n.rejectWith(s,u))}};t?c():(w.Deferred.getStackHook&&(c.stackTrace=w.Deferred.getStackHook()),e.setTimeout(c))}}return w.Deferred(function(e){n[0][3].add(a(0,e,g(i)?i:I,e.notifyWith)),n[1][3].add(a(0,e,g(t)?t:I)),n[2][3].add(a(0,e,g(r)?r:W))}).promise()},promise:function(e){return null!=e?w.extend(e,i):i}},o={};return w.each(n,function(e,t){var a=t[2],s=t[5];i[t[1]]=a.add,s&&a.add(function(){r=s},n[3-e][2].disable,n[3-e][3].disable,n[0][2].lock,n[0][3].lock),a.add(t[3].fire),o[t[0]]=function(){return o[t[0]+"With"](this===o?void 0:this,arguments),this},o[t[0]+"With"]=a.fireWith}),i.promise(o),t&&t.call(o,o),o},when:function(e){var t=arguments.length,n=t,r=Array(n),i=o.call(arguments),a=w.Deferred(),s=function(e){return function(n){r[e]=this,i[e]=arguments.length>1?o.call(arguments):n,--t||a.resolveWith(r,i)}};if(t<=1&&($(e,a.done(s(n)).resolve,a.reject,!t),"pending"===a.state()||g(i[n]&&i[n].then)))return a.then();while(n--)$(i[n],s(n),a.reject);return a.promise()}});var B=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;w.Deferred.exceptionHook=function(t,n){e.console&&e.console.warn&&t&&B.test(t.name)&&e.console.warn("jQuery.Deferred exception: "+t.message,t.stack,n)},w.readyException=function(t){e.setTimeout(function(){throw t})};var F=w.Deferred();w.fn.ready=function(e){return F.then(e)["catch"](function(e){w.readyException(e)}),this},w.extend({isReady:!1,readyWait:1,ready:function(e){(!0===e?--w.readyWait:w.isReady)||(w.isReady=!0,!0!==e&&--w.readyWait>0||F.resolveWith(r,[w]))}}),w.ready.then=F.then;function _(){r.removeEventListener("DOMContentLoaded",_),e.removeEventListener("load",_),w.ready()}"complete"===r.readyState||"loading"!==r.readyState&&!r.documentElement.doScroll?e.setTimeout(w.ready):(r.addEventListener("DOMContentLoaded",_),e.addEventListener("load",_));var z=function(e,t,n,r,i,o,a){var s=0,u=e.length,l=null==n;if("object"===x(n)){i=!0;for(s in 
n)z(e,t,s,n[s],!0,o,a)}else if(void 0!==r&&(i=!0,g(r)||(a=!0),l&&(a?(t.call(e,r),t=null):(l=t,t=function(e,t,n){return l.call(w(e),n)})),t))for(;s<u;s++)t(e[s],n,a?r:r.call(e[s],s,t(e[s],n)));return i?e:l?t.call(e):u?t(e[0],n):o},X=/^-ms-/,U=/-([a-z])/g;function V(e,t){return t.toUpperCase()}function G(e){return e.replace(X,"ms-").replace(U,V)}var Y=function(e){return 1===e.nodeType||9===e.nodeType||!+e.nodeType};function Q(){this.expando=w.expando+Q.uid++}Q.uid=1,Q.prototype={cache:function(e){var t=e[this.expando];return t||(t={},Y(e)&&(e.nodeType?e[this.expando]=t:Object.defineProperty(e,this.expando,{value:t,configurable:!0}))),t},set:function(e,t,n){var r,i=this.cache(e);if("string"==typeof t)i[G(t)]=n;else for(r in t)i[G(r)]=t[r];return i},get:function(e,t){return void 0===t?this.cache(e):e[this.expando]&&e[this.expando][G(t)]},access:function(e,t,n){return void 0===t||t&&"string"==typeof t&&void 0===n?this.get(e,t):(this.set(e,t,n),void 0!==n?n:t)},remove:function(e,t){var n,r=e[this.expando];if(void 0!==r){if(void 0!==t){n=(t=Array.isArray(t)?t.map(G):(t=G(t))in r?[t]:t.match(M)||[]).length;while(n--)delete r[t[n]]}(void 0===t||w.isEmptyObject(r))&&(e.nodeType?e[this.expando]=void 0:delete e[this.expando])}},hasData:function(e){var t=e[this.expando];return void 0!==t&&!w.isEmptyObject(t)}};var J=new Q,K=new Q,Z=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,ee=/[A-Z]/g;function te(e){return"true"===e||"false"!==e&&("null"===e?null:e===+e+""?+e:Z.test(e)?JSON.parse(e):e)}function ne(e,t,n){var r;if(void 0===n&&1===e.nodeType)if(r="data-"+t.replace(ee,"-$&").toLowerCase(),"string"==typeof(n=e.getAttribute(r))){try{n=te(n)}catch(e){}K.set(e,t,n)}else n=void 0;return n}w.extend({hasData:function(e){return K.hasData(e)||J.hasData(e)},data:function(e,t,n){return K.access(e,t,n)},removeData:function(e,t){K.remove(e,t)},_data:function(e,t,n){return J.access(e,t,n)},_removeData:function(e,t){J.remove(e,t)}}),w.fn.extend({data:function(e,t){var n,r,i,o=this[0],a=o&&o.attributes;if(void 0===e){if(this.length&&(i=K.get(o),1===o.nodeType&&!J.get(o,"hasDataAttrs"))){n=a.length;while(n--)a[n]&&0===(r=a[n].name).indexOf("data-")&&(r=G(r.slice(5)),ne(o,r,i[r]));J.set(o,"hasDataAttrs",!0)}return i}return"object"==typeof e?this.each(function(){K.set(this,e)}):z(this,function(t){var n;if(o&&void 0===t){if(void 0!==(n=K.get(o,e)))return n;if(void 0!==(n=ne(o,e)))return n}else this.each(function(){K.set(this,e,t)})},null,t,arguments.length>1,null,!0)},removeData:function(e){return this.each(function(){K.remove(this,e)})}}),w.extend({queue:function(e,t,n){var r;if(e)return t=(t||"fx")+"queue",r=J.get(e,t),n&&(!r||Array.isArray(n)?r=J.access(e,t,w.makeArray(n)):r.push(n)),r||[]},dequeue:function(e,t){t=t||"fx";var n=w.queue(e,t),r=n.length,i=n.shift(),o=w._queueHooks(e,t),a=function(){w.dequeue(e,t)};"inprogress"===i&&(i=n.shift(),r--),i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,a,o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return J.get(e,n)||J.access(e,n,{empty:w.Callbacks("once memory").add(function(){J.remove(e,[t+"queue",n])})})}}),w.fn.extend({queue:function(e,t){var n=2;return"string"!=typeof e&&(t=e,e="fx",n--),arguments.length<n?w.queue(this[0],e):void 0===t?this:this.each(function(){var n=w.queue(this,e,t);w._queueHooks(this,e),"fx"===e&&"inprogress"!==n[0]&&w.dequeue(this,e)})},dequeue:function(e){return this.each(function(){w.dequeue(this,e)})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,t){var 
n,r=1,i=w.Deferred(),o=this,a=this.length,s=function(){--r||i.resolveWith(o,[o])};"string"!=typeof e&&(t=e,e=void 0),e=e||"fx";while(a--)(n=J.get(o[a],e+"queueHooks"))&&n.empty&&(r++,n.empty.add(s));return s(),i.promise(t)}});var re=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,ie=new RegExp("^(?:([+-])=|)("+re+")([a-z%]*)$","i"),oe=["Top","Right","Bottom","Left"],ae=function(e,t){return"none"===(e=t||e).style.display||""===e.style.display&&w.contains(e.ownerDocument,e)&&"none"===w.css(e,"display")},se=function(e,t,n,r){var i,o,a={};for(o in t)a[o]=e.style[o],e.style[o]=t[o];i=n.apply(e,r||[]);for(o in t)e.style[o]=a[o];return i};function ue(e,t,n,r){var i,o,a=20,s=r?function(){return r.cur()}:function(){return w.css(e,t,"")},u=s(),l=n&&n[3]||(w.cssNumber[t]?"":"px"),c=(w.cssNumber[t]||"px"!==l&&+u)&&ie.exec(w.css(e,t));if(c&&c[3]!==l){u/=2,l=l||c[3],c=+u||1;while(a--)w.style(e,t,c+l),(1-o)*(1-(o=s()/u||.5))<=0&&(a=0),c/=o;c*=2,w.style(e,t,c+l),n=n||[]}return n&&(c=+c||+u||0,i=n[1]?c+(n[1]+1)*n[2]:+n[2],r&&(r.unit=l,r.start=c,r.end=i)),i}var le={};function ce(e){var t,n=e.ownerDocument,r=e.nodeName,i=le[r];return i||(t=n.body.appendChild(n.createElement(r)),i=w.css(t,"display"),t.parentNode.removeChild(t),"none"===i&&(i="block"),le[r]=i,i)}function fe(e,t){for(var n,r,i=[],o=0,a=e.length;o<a;o++)(r=e[o]).style&&(n=r.style.display,t?("none"===n&&(i[o]=J.get(r,"display")||null,i[o]||(r.style.display="")),""===r.style.display&&ae(r)&&(i[o]=ce(r))):"none"!==n&&(i[o]="none",J.set(r,"display",n)));for(o=0;o<a;o++)null!=i[o]&&(e[o].style.display=i[o]);return e}w.fn.extend({show:function(){return fe(this,!0)},hide:function(){return fe(this)},toggle:function(e){return"boolean"==typeof e?e?this.show():this.hide():this.each(function(){ae(this)?w(this).show():w(this).hide()})}});var pe=/^(?:checkbox|radio)$/i,de=/<([a-z][^\/\0>\x20\t\r\n\f]+)/i,he=/^$|^module$|\/(?:java|ecma)script/i,ge={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};ge.optgroup=ge.option,ge.tbody=ge.tfoot=ge.colgroup=ge.caption=ge.thead,ge.th=ge.td;function ye(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&N(e,t)?w.merge([e],n):n}function ve(e,t){for(var n=0,r=e.length;n<r;n++)J.set(e[n],"globalEval",!t||J.get(t[n],"globalEval"))}var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d<h;d++)if((o=e[d])||0===o)if("object"===x(o))w.merge(p,o.nodeType?[o]:o);else if(me.test(o)){a=a||f.appendChild(t.createElement("div")),s=(de.exec(o)||["",""])[1].toLowerCase(),u=ge[s]||ge._default,a.innerHTML=u[1]+w.htmlPrefilter(o)+u[2],c=u[0];while(c--)a=a.lastChild;w.merge(p,a.childNodes),(a=f.firstChild).textContent=""}else p.push(t.createTextNode(o));f.textContent="",d=0;while(o=p[d++])if(r&&w.inArray(o,r)>-1)i&&i.push(o);else if(l=w.contains(o.ownerDocument,o),a=ye(f.appendChild(o),"script"),l&&ve(a),n){c=0;while(o=a[c++])he.test(o.type||"")&&n.push(o)}return f}!function(){var 
e=r.createDocumentFragment().appendChild(r.createElement("div")),t=r.createElement("input");t.setAttribute("type","radio"),t.setAttribute("checked","checked"),t.setAttribute("name","t"),e.appendChild(t),h.checkClone=e.cloneNode(!0).cloneNode(!0).lastChild.checked,e.innerHTML="<textarea>x</textarea>",h.noCloneChecked=!!e.cloneNode(!0).lastChild.defaultValue}();var be=r.documentElement,we=/^key/,Te=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ce=/^([^.]*)(?:\.(.+)|)/;function Ee(){return!0}function ke(){return!1}function Se(){try{return r.activeElement}catch(e){}}function De(e,t,n,r,i,o){var a,s;if("object"==typeof t){"string"!=typeof n&&(r=r||n,n=void 0);for(s in t)De(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=ke;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return w().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=w.guid++)),e.each(function(){w.event.add(this,t,i,r,n)})}w.event={global:{},add:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,y=J.get(e);if(y){n.handler&&(n=(o=n).handler,i=o.selector),i&&w.find.matchesSelector(be,i),n.guid||(n.guid=w.guid++),(u=y.events)||(u=y.events={}),(a=y.handle)||(a=y.handle=function(t){return"undefined"!=typeof w&&w.event.triggered!==t.type?w.event.dispatch.apply(e,arguments):void 0}),l=(t=(t||"").match(M)||[""]).length;while(l--)d=g=(s=Ce.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=w.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=w.event.special[d]||{},c=w.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&w.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(e,r,h,a)||e.addEventListener&&e.addEventListener(d,a)),f.add&&(f.add.call(e,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),w.event.global[d]=!0)}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,y=J.hasData(e)&&J.get(e);if(y&&(u=y.events)){l=(t=(t||"").match(M)||[""]).length;while(l--)if(s=Ce.exec(t[l])||[],d=g=s[1],h=(s[2]||"").split(".").sort(),d){f=w.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,y.handle)||w.removeEvent(e,d,y.handle),delete u[d])}else for(d in u)w.event.remove(e,d+t[l],n,r,!0);w.isEmptyObject(u)&&J.remove(e,"handle events")}},dispatch:function(e){var t=w.event.fix(e),n,r,i,o,a,s,u=new Array(arguments.length),l=(J.get(this,"events")||{})[t.type]||[],c=w.event.special[t.type]||{};for(u[0]=t,n=1;n<arguments.length;n++)u[n]=arguments[n];if(t.delegateTarget=this,!c.preDispatch||!1!==c.preDispatch.call(this,t)){s=w.event.handlers.call(this,t,l),n=0;while((o=s[n++])&&!t.isPropagationStopped()){t.currentTarget=o.elem,r=0;while((a=o.handlers[r++])&&!t.isImmediatePropagationStopped())t.rnamespace&&!t.rnamespace.test(a.namespace)||(t.handleObj=a,t.data=a.data,void 0!==(i=((w.event.special[a.origType]||{}).handle||a.handler).apply(o.elem,u))&&!1===(t.result=i)&&(t.preventDefault(),t.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,t),t.result}},handlers:function(e,t){var 
n,r,i,o,a,s=[],u=t.delegateCount,l=e.target;if(u&&l.nodeType&&!("click"===e.type&&e.button>=1))for(;l!==this;l=l.parentNode||this)if(1===l.nodeType&&("click"!==e.type||!0!==l.disabled)){for(o=[],a={},n=0;n<u;n++)void 0===a[i=(r=t[n]).selector+" "]&&(a[i]=r.needsContext?w(i,this).index(l)>-1:w.find(i,this,null,[l]).length),a[i]&&o.push(r);o.length&&s.push({elem:l,handlers:o})}return l=this,u<t.length&&s.push({elem:l,handlers:t.slice(u)}),s},addProp:function(e,t){Object.defineProperty(w.Event.prototype,e,{enumerable:!0,configurable:!0,get:g(t)?function(){if(this.originalEvent)return t(this.originalEvent)}:function(){if(this.originalEvent)return this.originalEvent[e]},set:function(t){Object.defineProperty(this,e,{enumerable:!0,configurable:!0,writable:!0,value:t})}})},fix:function(e){return e[w.expando]?e:new w.Event(e)},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==Se()&&this.focus)return this.focus(),!1},delegateType:"focusin"},blur:{trigger:function(){if(this===Se()&&this.blur)return this.blur(),!1},delegateType:"focusout"},click:{trigger:function(){if("checkbox"===this.type&&this.click&&N(this,"input"))return this.click(),!1},_default:function(e){return N(e.target,"a")}},beforeunload:{postDispatch:function(e){void 0!==e.result&&e.originalEvent&&(e.originalEvent.returnValue=e.result)}}}},w.removeEvent=function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n)},w.Event=function(e,t){if(!(this instanceof w.Event))return new w.Event(e,t);e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||void 0===e.defaultPrevented&&!1===e.returnValue?Ee:ke,this.target=e.target&&3===e.target.nodeType?e.target.parentNode:e.target,this.currentTarget=e.currentTarget,this.relatedTarget=e.relatedTarget):this.type=e,t&&w.extend(this,t),this.timeStamp=e&&e.timeStamp||Date.now(),this[w.expando]=!0},w.Event.prototype={constructor:w.Event,isDefaultPrevented:ke,isPropagationStopped:ke,isImmediatePropagationStopped:ke,isSimulated:!1,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=Ee,e&&!this.isSimulated&&e.preventDefault()},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=Ee,e&&!this.isSimulated&&e.stopPropagation()},stopImmediatePropagation:function(){var e=this.originalEvent;this.isImmediatePropagationStopped=Ee,e&&!this.isSimulated&&e.stopImmediatePropagation(),this.stopPropagation()}},w.each({altKey:!0,bubbles:!0,cancelable:!0,changedTouches:!0,ctrlKey:!0,detail:!0,eventPhase:!0,metaKey:!0,pageX:!0,pageY:!0,shiftKey:!0,view:!0,"char":!0,charCode:!0,key:!0,keyCode:!0,button:!0,buttons:!0,clientX:!0,clientY:!0,offsetX:!0,offsetY:!0,pointerId:!0,pointerType:!0,screenX:!0,screenY:!0,targetTouches:!0,toElement:!0,touches:!0,which:function(e){var t=e.button;return null==e.which&&we.test(e.type)?null!=e.charCode?e.charCode:e.keyCode:!e.which&&void 0!==t&&Te.test(e.type)?1&t?1:2&t?3:4&t?2:0:e.which}},w.event.addProp),w.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(e,t){w.event.special[e]={delegateType:t,bindType:t,handle:function(e){var n,r=this,i=e.relatedTarget,o=e.handleObj;return i&&(i===r||w.contains(r,i))||(e.type=o.origType,n=o.handler.apply(this,arguments),e.type=t),n}}}),w.fn.extend({on:function(e,t,n,r){return De(this,e,t,n,r)},one:function(e,t,n,r){return De(this,e,t,n,r,1)},off:function(e,t,n){var r,i;if(e&&e.preventDefault&&e.handleObj)return 
r=e.handleObj,w(e.delegateTarget).off(r.namespace?r.origType+"."+r.namespace:r.origType,r.selector,r.handler),this;if("object"==typeof e){for(i in e)this.off(i,t,e[i]);return this}return!1!==t&&"function"!=typeof t||(n=t,t=void 0),!1===n&&(n=ke),this.each(function(){w.event.remove(this,e,n,t)})}});var Ne=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,Ae=/<script|<style|<link/i,je=/checked\s*(?:[^=]|=\s*.checked.)/i,qe=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;function Le(e,t){return N(e,"table")&&N(11!==t.nodeType?t:t.firstChild,"tr")?w(e).children("tbody")[0]||e:e}function He(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Oe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Pe(e,t){var n,r,i,o,a,s,u,l;if(1===t.nodeType){if(J.hasData(e)&&(o=J.access(e),a=J.set(t,o),l=o.events)){delete a.handle,a.events={};for(i in l)for(n=0,r=l[i].length;n<r;n++)w.event.add(t,i,l[i][n])}K.hasData(e)&&(s=K.access(e),u=w.extend({},s),K.set(t,u))}}function Me(e,t){var n=t.nodeName.toLowerCase();"input"===n&&pe.test(e.type)?t.checked=e.checked:"input"!==n&&"textarea"!==n||(t.defaultValue=e.defaultValue)}function Re(e,t,n,r){t=a.apply([],t);var i,o,s,u,l,c,f=0,p=e.length,d=p-1,y=t[0],v=g(y);if(v||p>1&&"string"==typeof y&&!h.checkClone&&je.test(y))return e.each(function(i){var o=e.eq(i);v&&(t[0]=y.call(this,i,o.html())),Re(o,t,n,r)});if(p&&(i=xe(t,e[0].ownerDocument,!1,e,r),o=i.firstChild,1===i.childNodes.length&&(i=o),o||r)){for(u=(s=w.map(ye(i,"script"),He)).length;f<p;f++)l=i,f!==d&&(l=w.clone(l,!0,!0),u&&w.merge(s,ye(l,"script"))),n.call(e[f],l,f);if(u)for(c=s[s.length-1].ownerDocument,w.map(s,Oe),f=0;f<u;f++)l=s[f],he.test(l.type||"")&&!J.access(l,"globalEval")&&w.contains(c,l)&&(l.src&&"module"!==(l.type||"").toLowerCase()?w._evalUrl&&w._evalUrl(l.src):m(l.textContent.replace(qe,""),c,l))}return e}function Ie(e,t,n){for(var r,i=t?w.filter(t,e):e,o=0;null!=(r=i[o]);o++)n||1!==r.nodeType||w.cleanData(ye(r)),r.parentNode&&(n&&w.contains(r.ownerDocument,r)&&ve(ye(r,"script")),r.parentNode.removeChild(r));return e}w.extend({htmlPrefilter:function(e){return e.replace(Ne,"<$1></$2>")},clone:function(e,t,n){var r,i,o,a,s=e.cloneNode(!0),u=w.contains(e.ownerDocument,e);if(!(h.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||w.isXMLDoc(e)))for(a=ye(s),r=0,i=(o=ye(e)).length;r<i;r++)Me(o[r],a[r]);if(t)if(n)for(o=o||ye(e),a=a||ye(s),r=0,i=o.length;r<i;r++)Pe(o[r],a[r]);else Pe(e,s);return(a=ye(s,"script")).length>0&&ve(a,!u&&ye(e,"script")),s},cleanData:function(e){for(var t,n,r,i=w.event.special,o=0;void 0!==(n=e[o]);o++)if(Y(n)){if(t=n[J.expando]){if(t.events)for(r in t.events)i[r]?w.event.remove(n,r):w.removeEvent(n,r,t.handle);n[J.expando]=void 0}n[K.expando]&&(n[K.expando]=void 0)}}}),w.fn.extend({detach:function(e){return Ie(this,e,!0)},remove:function(e){return Ie(this,e)},text:function(e){return z(this,function(e){return void 0===e?w.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=e)})},null,e,arguments.length)},append:function(){return Re(this,arguments,function(e){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||Le(this,e).appendChild(e)})},prepend:function(){return Re(this,arguments,function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=Le(this,e);t.insertBefore(e,t.firstChild)}})},before:function(){return 
Re(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return Re(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},empty:function(){for(var e,t=0;null!=(e=this[t]);t++)1===e.nodeType&&(w.cleanData(ye(e,!1)),e.textContent="");return this},clone:function(e,t){return e=null!=e&&e,t=null==t?e:t,this.map(function(){return w.clone(this,e,t)})},html:function(e){return z(this,function(e){var t=this[0]||{},n=0,r=this.length;if(void 0===e&&1===t.nodeType)return t.innerHTML;if("string"==typeof e&&!Ae.test(e)&&!ge[(de.exec(e)||["",""])[1].toLowerCase()]){e=w.htmlPrefilter(e);try{for(;n<r;n++)1===(t=this[n]||{}).nodeType&&(w.cleanData(ye(t,!1)),t.innerHTML=e);t=0}catch(e){}}t&&this.empty().append(e)},null,e,arguments.length)},replaceWith:function(){var e=[];return Re(this,arguments,function(t){var n=this.parentNode;w.inArray(this,e)<0&&(w.cleanData(ye(this)),n&&n.replaceChild(t,this))},e)}}),w.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,t){w.fn[e]=function(e){for(var n,r=[],i=w(e),o=i.length-1,a=0;a<=o;a++)n=a===o?this:this.clone(!0),w(i[a])[t](n),s.apply(r,n.get());return this.pushStack(r)}});var We=new RegExp("^("+re+")(?!px)[a-z%]+$","i"),$e=function(t){var n=t.ownerDocument.defaultView;return n&&n.opener||(n=e),n.getComputedStyle(t)},Be=new RegExp(oe.join("|"),"i");!function(){function t(){if(c){l.style.cssText="position:absolute;left:-11111px;width:60px;margin-top:1px;padding:0;border:0",c.style.cssText="position:relative;display:block;box-sizing:border-box;overflow:scroll;margin:auto;border:1px;padding:1px;width:60%;top:1%",be.appendChild(l).appendChild(c);var t=e.getComputedStyle(c);i="1%"!==t.top,u=12===n(t.marginLeft),c.style.right="60%",s=36===n(t.right),o=36===n(t.width),c.style.position="absolute",a=36===c.offsetWidth||"absolute",be.removeChild(l),c=null}}function n(e){return Math.round(parseFloat(e))}var i,o,a,s,u,l=r.createElement("div"),c=r.createElement("div");c.style&&(c.style.backgroundClip="content-box",c.cloneNode(!0).style.backgroundClip="",h.clearCloneStyle="content-box"===c.style.backgroundClip,w.extend(h,{boxSizingReliable:function(){return t(),o},pixelBoxStyles:function(){return t(),s},pixelPosition:function(){return t(),i},reliableMarginLeft:function(){return t(),u},scrollboxSize:function(){return t(),a}}))}();function Fe(e,t,n){var r,i,o,a,s=e.style;return(n=n||$e(e))&&(""!==(a=n.getPropertyValue(t)||n[t])||w.contains(e.ownerDocument,e)||(a=w.style(e,t)),!h.pixelBoxStyles()&&We.test(a)&&Be.test(t)&&(r=s.width,i=s.minWidth,o=s.maxWidth,s.minWidth=s.maxWidth=s.width=a,a=n.width,s.width=r,s.minWidth=i,s.maxWidth=o)),void 0!==a?a+"":a}function _e(e,t){return{get:function(){if(!e())return(this.get=t).apply(this,arguments);delete this.get}}}var ze=/^(none|table(?!-c[ea]).+)/,Xe=/^--/,Ue={position:"absolute",visibility:"hidden",display:"block"},Ve={letterSpacing:"0",fontWeight:"400"},Ge=["Webkit","Moz","ms"],Ye=r.createElement("div").style;function Qe(e){if(e in Ye)return e;var t=e[0].toUpperCase()+e.slice(1),n=Ge.length;while(n--)if((e=Ge[n]+t)in Ye)return e}function Je(e){var t=w.cssProps[e];return t||(t=w.cssProps[e]=Qe(e)||e),t}function Ke(e,t,n){var r=ie.exec(t);return r?Math.max(0,r[2]-(n||0))+(r[3]||"px"):t}function Ze(e,t,n,r,i,o){var a="width"===t?1:0,s=0,u=0;if(n===(r?"border":"content"))return 
0;for(;a<4;a+=2)"margin"===n&&(u+=w.css(e,n+oe[a],!0,i)),r?("content"===n&&(u-=w.css(e,"padding"+oe[a],!0,i)),"margin"!==n&&(u-=w.css(e,"border"+oe[a]+"Width",!0,i))):(u+=w.css(e,"padding"+oe[a],!0,i),"padding"!==n?u+=w.css(e,"border"+oe[a]+"Width",!0,i):s+=w.css(e,"border"+oe[a]+"Width",!0,i));return!r&&o>=0&&(u+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-o-u-s-.5))),u}function et(e,t,n){var r=$e(e),i=Fe(e,t,r),o="border-box"===w.css(e,"boxSizing",!1,r),a=o;if(We.test(i)){if(!n)return i;i="auto"}return a=a&&(h.boxSizingReliable()||i===e.style[t]),("auto"===i||!parseFloat(i)&&"inline"===w.css(e,"display",!1,r))&&(i=e["offset"+t[0].toUpperCase()+t.slice(1)],a=!0),(i=parseFloat(i)||0)+Ze(e,t,n||(o?"border":"content"),a,r,i)+"px"}w.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=Fe(e,"opacity");return""===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,r){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var i,o,a,s=G(t),u=Xe.test(t),l=e.style;if(u||(t=Je(s)),a=w.cssHooks[t]||w.cssHooks[s],void 0===n)return a&&"get"in a&&void 0!==(i=a.get(e,!1,r))?i:l[t];"string"==(o=typeof n)&&(i=ie.exec(n))&&i[1]&&(n=ue(e,t,i),o="number"),null!=n&&n===n&&("number"===o&&(n+=i&&i[3]||(w.cssNumber[s]?"":"px")),h.clearCloneStyle||""!==n||0!==t.indexOf("background")||(l[t]="inherit"),a&&"set"in a&&void 0===(n=a.set(e,n,r))||(u?l.setProperty(t,n):l[t]=n))}},css:function(e,t,n,r){var i,o,a,s=G(t);return Xe.test(t)||(t=Je(s)),(a=w.cssHooks[t]||w.cssHooks[s])&&"get"in a&&(i=a.get(e,!0,n)),void 0===i&&(i=Fe(e,t,r)),"normal"===i&&t in Ve&&(i=Ve[t]),""===n||n?(o=parseFloat(i),!0===n||isFinite(o)?o||0:i):i}}),w.each(["height","width"],function(e,t){w.cssHooks[t]={get:function(e,n,r){if(n)return!ze.test(w.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?et(e,t,r):se(e,Ue,function(){return et(e,t,r)})},set:function(e,n,r){var i,o=$e(e),a="border-box"===w.css(e,"boxSizing",!1,o),s=r&&Ze(e,t,r,a,o);return a&&h.scrollboxSize()===o.position&&(s-=Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-parseFloat(o[t])-Ze(e,t,"border",!1,o)-.5)),s&&(i=ie.exec(n))&&"px"!==(i[3]||"px")&&(e.style[t]=n,n=w.css(e,t)),Ke(e,n,s)}}}),w.cssHooks.marginLeft=_e(h.reliableMarginLeft,function(e,t){if(t)return(parseFloat(Fe(e,"marginLeft"))||e.getBoundingClientRect().left-se(e,{marginLeft:0},function(){return e.getBoundingClientRect().left}))+"px"}),w.each({margin:"",padding:"",border:"Width"},function(e,t){w.cssHooks[e+t]={expand:function(n){for(var r=0,i={},o="string"==typeof n?n.split(" "):[n];r<4;r++)i[e+oe[r]+t]=o[r]||o[r-2]||o[0];return i}},"margin"!==e&&(w.cssHooks[e+t].set=Ke)}),w.fn.extend({css:function(e,t){return z(this,function(e,t,n){var r,i,o={},a=0;if(Array.isArray(t)){for(r=$e(e),i=t.length;a<i;a++)o[t[a]]=w.css(e,t[a],!1,r);return o}return void 0!==n?w.style(e,t,n):w.css(e,t)},e,t,arguments.length>1)}});function tt(e,t,n,r,i){return new tt.prototype.init(e,t,n,r,i)}w.Tween=tt,tt.prototype={constructor:tt,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||w.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(w.cssNumber[n]?"":"px")},cur:function(){var e=tt.propHooks[this.prop];return e&&e.get?e.get(this):tt.propHooks._default.get(this)},run:function(e){var t,n=tt.propHooks[this.prop];return 
this.options.duration?this.pos=t=w.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):tt.propHooks._default.set(this),this}},tt.prototype.init.prototype=tt.prototype,tt.propHooks={_default:{get:function(e){var t;return 1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=w.css(e.elem,e.prop,""))&&"auto"!==t?t:0},set:function(e){w.fx.step[e.prop]?w.fx.step[e.prop](e):1!==e.elem.nodeType||null==e.elem.style[w.cssProps[e.prop]]&&!w.cssHooks[e.prop]?e.elem[e.prop]=e.now:w.style(e.elem,e.prop,e.now+e.unit)}}},tt.propHooks.scrollTop=tt.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},w.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},w.fx=tt.prototype.init,w.fx.step={};var nt,rt,it=/^(?:toggle|show|hide)$/,ot=/queueHooks$/;function at(){rt&&(!1===r.hidden&&e.requestAnimationFrame?e.requestAnimationFrame(at):e.setTimeout(at,w.fx.interval),w.fx.tick())}function st(){return e.setTimeout(function(){nt=void 0}),nt=Date.now()}function ut(e,t){var n,r=0,i={height:e};for(t=t?1:0;r<4;r+=2-t)i["margin"+(n=oe[r])]=i["padding"+n]=e;return t&&(i.opacity=i.width=e),i}function lt(e,t,n){for(var r,i=(pt.tweeners[t]||[]).concat(pt.tweeners["*"]),o=0,a=i.length;o<a;o++)if(r=i[o].call(n,t,e))return r}function ct(e,t,n){var r,i,o,a,s,u,l,c,f="width"in t||"height"in t,p=this,d={},h=e.style,g=e.nodeType&&ae(e),y=J.get(e,"fxshow");n.queue||(null==(a=w._queueHooks(e,"fx")).unqueued&&(a.unqueued=0,s=a.empty.fire,a.empty.fire=function(){a.unqueued||s()}),a.unqueued++,p.always(function(){p.always(function(){a.unqueued--,w.queue(e,"fx").length||a.empty.fire()})}));for(r in t)if(i=t[r],it.test(i)){if(delete t[r],o=o||"toggle"===i,i===(g?"hide":"show")){if("show"!==i||!y||void 0===y[r])continue;g=!0}d[r]=y&&y[r]||w.style(e,r)}if((u=!w.isEmptyObject(t))||!w.isEmptyObject(d)){f&&1===e.nodeType&&(n.overflow=[h.overflow,h.overflowX,h.overflowY],null==(l=y&&y.display)&&(l=J.get(e,"display")),"none"===(c=w.css(e,"display"))&&(l?c=l:(fe([e],!0),l=e.style.display||l,c=w.css(e,"display"),fe([e]))),("inline"===c||"inline-block"===c&&null!=l)&&"none"===w.css(e,"float")&&(u||(p.done(function(){h.display=l}),null==l&&(c=h.display,l="none"===c?"":c)),h.display="inline-block")),n.overflow&&(h.overflow="hidden",p.always(function(){h.overflow=n.overflow[0],h.overflowX=n.overflow[1],h.overflowY=n.overflow[2]})),u=!1;for(r in d)u||(y?"hidden"in y&&(g=y.hidden):y=J.access(e,"fxshow",{display:l}),o&&(y.hidden=!g),g&&fe([e],!0),p.done(function(){g||fe([e]),J.remove(e,"fxshow");for(r in d)w.style(e,r,d[r])})),u=lt(g?y[r]:0,r,p),r in y||(y[r]=u.start,g&&(u.end=u.start,u.start=0))}}function ft(e,t){var n,r,i,o,a;for(n in e)if(r=G(n),i=t[r],o=e[n],Array.isArray(o)&&(i=o[1],o=e[n]=o[0]),n!==r&&(e[r]=o,delete e[n]),(a=w.cssHooks[r])&&"expand"in a){o=a.expand(o),delete e[r];for(n in o)n in e||(e[n]=o[n],t[n]=i)}else t[r]=i}function pt(e,t,n){var r,i,o=0,a=pt.prefilters.length,s=w.Deferred().always(function(){delete u.elem}),u=function(){if(i)return!1;for(var t=nt||st(),n=Math.max(0,l.startTime+l.duration-t),r=1-(n/l.duration||0),o=0,a=l.tweens.length;o<a;o++)l.tweens[o].run(r);return 
s.notifyWith(e,[l,r,n]),r<1&&a?n:(a||s.notifyWith(e,[l,1,0]),s.resolveWith(e,[l]),!1)},l=s.promise({elem:e,props:w.extend({},t),opts:w.extend(!0,{specialEasing:{},easing:w.easing._default},n),originalProperties:t,originalOptions:n,startTime:nt||st(),duration:n.duration,tweens:[],createTween:function(t,n){var r=w.Tween(e,l.opts,t,n,l.opts.specialEasing[t]||l.opts.easing);return l.tweens.push(r),r},stop:function(t){var n=0,r=t?l.tweens.length:0;if(i)return this;for(i=!0;n<r;n++)l.tweens[n].run(1);return t?(s.notifyWith(e,[l,1,0]),s.resolveWith(e,[l,t])):s.rejectWith(e,[l,t]),this}}),c=l.props;for(ft(c,l.opts.specialEasing);o<a;o++)if(r=pt.prefilters[o].call(l,e,c,l.opts))return g(r.stop)&&(w._queueHooks(l.elem,l.opts.queue).stop=r.stop.bind(r)),r;return w.map(c,lt,l),g(l.opts.start)&&l.opts.start.call(e,l),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always),w.fx.timer(w.extend(u,{elem:e,anim:l,queue:l.opts.queue})),l}w.Animation=w.extend(pt,{tweeners:{"*":[function(e,t){var n=this.createTween(e,t);return ue(n.elem,e,ie.exec(t),n),n}]},tweener:function(e,t){g(e)?(t=e,e=["*"]):e=e.match(M);for(var n,r=0,i=e.length;r<i;r++)n=e[r],pt.tweeners[n]=pt.tweeners[n]||[],pt.tweeners[n].unshift(t)},prefilters:[ct],prefilter:function(e,t){t?pt.prefilters.unshift(e):pt.prefilters.push(e)}}),w.speed=function(e,t,n){var r=e&&"object"==typeof e?w.extend({},e):{complete:n||!n&&t||g(e)&&e,duration:e,easing:n&&t||t&&!g(t)&&t};return w.fx.off?r.duration=0:"number"!=typeof r.duration&&(r.duration in w.fx.speeds?r.duration=w.fx.speeds[r.duration]:r.duration=w.fx.speeds._default),null!=r.queue&&!0!==r.queue||(r.queue="fx"),r.old=r.complete,r.complete=function(){g(r.old)&&r.old.call(this),r.queue&&w.dequeue(this,r.queue)},r},w.fn.extend({fadeTo:function(e,t,n,r){return this.filter(ae).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(e,t,n,r){var i=w.isEmptyObject(e),o=w.speed(t,n,r),a=function(){var t=pt(this,w.extend({},e),o);(i||J.get(this,"finish"))&&t.stop(!0)};return a.finish=a,i||!1===o.queue?this.each(a):this.queue(o.queue,a)},stop:function(e,t,n){var r=function(e){var t=e.stop;delete e.stop,t(n)};return"string"!=typeof e&&(n=t,t=e,e=void 0),t&&!1!==e&&this.queue(e||"fx",[]),this.each(function(){var t=!0,i=null!=e&&e+"queueHooks",o=w.timers,a=J.get(this);if(i)a[i]&&a[i].stop&&r(a[i]);else for(i in a)a[i]&&a[i].stop&&ot.test(i)&&r(a[i]);for(i=o.length;i--;)o[i].elem!==this||null!=e&&o[i].queue!==e||(o[i].anim.stop(n),t=!1,o.splice(i,1));!t&&n||w.dequeue(this,e)})},finish:function(e){return!1!==e&&(e=e||"fx"),this.each(function(){var t,n=J.get(this),r=n[e+"queue"],i=n[e+"queueHooks"],o=w.timers,a=r?r.length:0;for(n.finish=!0,w.queue(this,e,[]),i&&i.stop&&i.stop.call(this,!0),t=o.length;t--;)o[t].elem===this&&o[t].queue===e&&(o[t].anim.stop(!0),o.splice(t,1));for(t=0;t<a;t++)r[t]&&r[t].finish&&r[t].finish.call(this);delete n.finish})}}),w.each(["toggle","show","hide"],function(e,t){var n=w.fn[t];w.fn[t]=function(e,r,i){return null==e||"boolean"==typeof e?n.apply(this,arguments):this.animate(ut(t,!0),e,r,i)}}),w.each({slideDown:ut("show"),slideUp:ut("hide"),slideToggle:ut("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,t){w.fn[e]=function(e,n,r){return this.animate(t,e,n,r)}}),w.timers=[],w.fx.tick=function(){var e,t=0,n=w.timers;for(nt=Date.now();t<n.length;t++)(e=n[t])()||n[t]!==e||n.splice(t--,1);n.length||w.fx.stop(),nt=void 
0},w.fx.timer=function(e){w.timers.push(e),w.fx.start()},w.fx.interval=13,w.fx.start=function(){rt||(rt=!0,at())},w.fx.stop=function(){rt=null},w.fx.speeds={slow:600,fast:200,_default:400},w.fn.delay=function(t,n){return t=w.fx?w.fx.speeds[t]||t:t,n=n||"fx",this.queue(n,function(n,r){var i=e.setTimeout(n,t);r.stop=function(){e.clearTimeout(i)}})},function(){var e=r.createElement("input"),t=r.createElement("select").appendChild(r.createElement("option"));e.type="checkbox",h.checkOn=""!==e.value,h.optSelected=t.selected,(e=r.createElement("input")).value="t",e.type="radio",h.radioValue="t"===e.value}();var dt,ht=w.expr.attrHandle;w.fn.extend({attr:function(e,t){return z(this,w.attr,e,t,arguments.length>1)},removeAttr:function(e){return this.each(function(){w.removeAttr(this,e)})}}),w.extend({attr:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return"undefined"==typeof e.getAttribute?w.prop(e,t,n):(1===o&&w.isXMLDoc(e)||(i=w.attrHooks[t.toLowerCase()]||(w.expr.match.bool.test(t)?dt:void 0)),void 0!==n?null===n?void w.removeAttr(e,t):i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:(e.setAttribute(t,n+""),n):i&&"get"in i&&null!==(r=i.get(e,t))?r:null==(r=w.find.attr(e,t))?void 0:r)},attrHooks:{type:{set:function(e,t){if(!h.radioValue&&"radio"===t&&N(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,r=0,i=t&&t.match(M);if(i&&1===e.nodeType)while(n=i[r++])e.removeAttribute(n)}}),dt={set:function(e,t,n){return!1===t?w.removeAttr(e,n):e.setAttribute(n,n),n}},w.each(w.expr.match.bool.source.match(/\w+/g),function(e,t){var n=ht[t]||w.find.attr;ht[t]=function(e,t,r){var i,o,a=t.toLowerCase();return r||(o=ht[a],ht[a]=i,i=null!=n(e,t,r)?a:null,ht[a]=o),i}});var gt=/^(?:input|select|textarea|button)$/i,yt=/^(?:a|area)$/i;w.fn.extend({prop:function(e,t){return z(this,w.prop,e,t,arguments.length>1)},removeProp:function(e){return this.each(function(){delete this[w.propFix[e]||e]})}}),w.extend({prop:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return 1===o&&w.isXMLDoc(e)||(t=w.propFix[t]||t,i=w.propHooks[t]),void 0!==n?i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:e[t]=n:i&&"get"in i&&null!==(r=i.get(e,t))?r:e[t]},propHooks:{tabIndex:{get:function(e){var t=w.find.attr(e,"tabindex");return t?parseInt(t,10):gt.test(e.nodeName)||yt.test(e.nodeName)&&e.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),h.optSelected||(w.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),w.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){w.propFix[this.toLowerCase()]=this});function vt(e){return(e.match(M)||[]).join(" ")}function mt(e){return e.getAttribute&&e.getAttribute("class")||""}function xt(e){return Array.isArray(e)?e:"string"==typeof e?e.match(M)||[]:[]}w.fn.extend({addClass:function(e){var t,n,r,i,o,a,s,u=0;if(g(e))return this.each(function(t){w(this).addClass(e.call(this,t,mt(this)))});if((t=xt(e)).length)while(n=this[u++])if(i=mt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=t[a++])r.indexOf(" "+o+" ")<0&&(r+=o+" ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},removeClass:function(e){var t,n,r,i,o,a,s,u=0;if(g(e))return this.each(function(t){w(this).removeClass(e.call(this,t,mt(this)))});if(!arguments.length)return 
this.attr("class","");if((t=xt(e)).length)while(n=this[u++])if(i=mt(n),r=1===n.nodeType&&" "+vt(i)+" "){a=0;while(o=t[a++])while(r.indexOf(" "+o+" ")>-1)r=r.replace(" "+o+" "," ");i!==(s=vt(r))&&n.setAttribute("class",s)}return this},toggleClass:function(e,t){var n=typeof e,r="string"===n||Array.isArray(e);return"boolean"==typeof t&&r?t?this.addClass(e):this.removeClass(e):g(e)?this.each(function(n){w(this).toggleClass(e.call(this,n,mt(this),t),t)}):this.each(function(){var t,i,o,a;if(r){i=0,o=w(this),a=xt(e);while(t=a[i++])o.hasClass(t)?o.removeClass(t):o.addClass(t)}else void 0!==e&&"boolean"!==n||((t=mt(this))&&J.set(this,"__className__",t),this.setAttribute&&this.setAttribute("class",t||!1===e?"":J.get(this,"__className__")||""))})},hasClass:function(e){var t,n,r=0;t=" "+e+" ";while(n=this[r++])if(1===n.nodeType&&(" "+vt(mt(n))+" ").indexOf(t)>-1)return!0;return!1}});var bt=/\r/g;w.fn.extend({val:function(e){var t,n,r,i=this[0];{if(arguments.length)return r=g(e),this.each(function(n){var i;1===this.nodeType&&(null==(i=r?e.call(this,n,w(this).val()):e)?i="":"number"==typeof i?i+="":Array.isArray(i)&&(i=w.map(i,function(e){return null==e?"":e+""})),(t=w.valHooks[this.type]||w.valHooks[this.nodeName.toLowerCase()])&&"set"in t&&void 0!==t.set(this,i,"value")||(this.value=i))});if(i)return(t=w.valHooks[i.type]||w.valHooks[i.nodeName.toLowerCase()])&&"get"in t&&void 0!==(n=t.get(i,"value"))?n:"string"==typeof(n=i.value)?n.replace(bt,""):null==n?"":n}}}),w.extend({valHooks:{option:{get:function(e){var t=w.find.attr(e,"value");return null!=t?t:vt(w.text(e))}},select:{get:function(e){var t,n,r,i=e.options,o=e.selectedIndex,a="select-one"===e.type,s=a?null:[],u=a?o+1:i.length;for(r=o<0?u:a?o:0;r<u;r++)if(((n=i[r]).selected||r===o)&&!n.disabled&&(!n.parentNode.disabled||!N(n.parentNode,"optgroup"))){if(t=w(n).val(),a)return t;s.push(t)}return s},set:function(e,t){var n,r,i=e.options,o=w.makeArray(t),a=i.length;while(a--)((r=i[a]).selected=w.inArray(w.valHooks.option.get(r),o)>-1)&&(n=!0);return n||(e.selectedIndex=-1),o}}}}),w.each(["radio","checkbox"],function(){w.valHooks[this]={set:function(e,t){if(Array.isArray(t))return e.checked=w.inArray(w(e).val(),t)>-1}},h.checkOn||(w.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})}),h.focusin="onfocusin"in e;var wt=/^(?:focusinfocus|focusoutblur)$/,Tt=function(e){e.stopPropagation()};w.extend(w.event,{trigger:function(t,n,i,o){var a,s,u,l,c,p,d,h,v=[i||r],m=f.call(t,"type")?t.type:t,x=f.call(t,"namespace")?t.namespace.split("."):[];if(s=h=u=i=i||r,3!==i.nodeType&&8!==i.nodeType&&!wt.test(m+w.event.triggered)&&(m.indexOf(".")>-1&&(m=(x=m.split(".")).shift(),x.sort()),c=m.indexOf(":")<0&&"on"+m,t=t[w.expando]?t:new w.Event(m,"object"==typeof t&&t),t.isTrigger=o?2:3,t.namespace=x.join("."),t.rnamespace=t.namespace?new RegExp("(^|\\.)"+x.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,t.result=void 0,t.target||(t.target=i),n=null==n?[t]:w.makeArray(n,[t]),d=w.event.special[m]||{},o||!d.trigger||!1!==d.trigger.apply(i,n))){if(!o&&!d.noBubble&&!y(i)){for(l=d.delegateType||m,wt.test(l+m)||(s=s.parentNode);s;s=s.parentNode)v.push(s),u=s;u===(i.ownerDocument||r)&&v.push(u.defaultView||u.parentWindow||e)}a=0;while((s=v[a++])&&!t.isPropagationStopped())h=s,t.type=a>1?l:d.bindType||m,(p=(J.get(s,"events")||{})[t.type]&&J.get(s,"handle"))&&p.apply(s,n),(p=c&&s[c])&&p.apply&&Y(s)&&(t.result=p.apply(s,n),!1===t.result&&t.preventDefault());return 
t.type=m,o||t.isDefaultPrevented()||d._default&&!1!==d._default.apply(v.pop(),n)||!Y(i)||c&&g(i[m])&&!y(i)&&((u=i[c])&&(i[c]=null),w.event.triggered=m,t.isPropagationStopped()&&h.addEventListener(m,Tt),i[m](),t.isPropagationStopped()&&h.removeEventListener(m,Tt),w.event.triggered=void 0,u&&(i[c]=u)),t.result}},simulate:function(e,t,n){var r=w.extend(new w.Event,n,{type:e,isSimulated:!0});w.event.trigger(r,null,t)}}),w.fn.extend({trigger:function(e,t){return this.each(function(){w.event.trigger(e,t,this)})},triggerHandler:function(e,t){var n=this[0];if(n)return w.event.trigger(e,t,n,!0)}}),h.focusin||w.each({focus:"focusin",blur:"focusout"},function(e,t){var n=function(e){w.event.simulate(t,e.target,w.event.fix(e))};w.event.special[t]={setup:function(){var r=this.ownerDocument||this,i=J.access(r,t);i||r.addEventListener(e,n,!0),J.access(r,t,(i||0)+1)},teardown:function(){var r=this.ownerDocument||this,i=J.access(r,t)-1;i?J.access(r,t,i):(r.removeEventListener(e,n,!0),J.remove(r,t))}}});var Ct=e.location,Et=Date.now(),kt=/\?/;w.parseXML=function(t){var n;if(!t||"string"!=typeof t)return null;try{n=(new e.DOMParser).parseFromString(t,"text/xml")}catch(e){n=void 0}return n&&!n.getElementsByTagName("parsererror").length||w.error("Invalid XML: "+t),n};var St=/\[\]$/,Dt=/\r?\n/g,Nt=/^(?:submit|button|image|reset|file)$/i,At=/^(?:input|select|textarea|keygen)/i;function jt(e,t,n,r){var i;if(Array.isArray(t))w.each(t,function(t,i){n||St.test(e)?r(e,i):jt(e+"["+("object"==typeof i&&null!=i?t:"")+"]",i,n,r)});else if(n||"object"!==x(t))r(e,t);else for(i in t)jt(e+"["+i+"]",t[i],n,r)}w.param=function(e,t){var n,r=[],i=function(e,t){var n=g(t)?t():t;r[r.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(Array.isArray(e)||e.jquery&&!w.isPlainObject(e))w.each(e,function(){i(this.name,this.value)});else for(n in e)jt(n,e[n],t,i);return r.join("&")},w.fn.extend({serialize:function(){return w.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=w.prop(this,"elements");return e?w.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!w(this).is(":disabled")&&At.test(this.nodeName)&&!Nt.test(e)&&(this.checked||!pe.test(e))}).map(function(e,t){var n=w(this).val();return null==n?null:Array.isArray(n)?w.map(n,function(e){return{name:t.name,value:e.replace(Dt,"\r\n")}}):{name:t.name,value:n.replace(Dt,"\r\n")}}).get()}});var qt=/%20/g,Lt=/#.*$/,Ht=/([?&])_=[^&]*/,Ot=/^(.*?):[ \t]*([^\r\n]*)$/gm,Pt=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Mt=/^(?:GET|HEAD)$/,Rt=/^\/\//,It={},Wt={},$t="*/".concat("*"),Bt=r.createElement("a");Bt.href=Ct.href;function Ft(e){return function(t,n){"string"!=typeof t&&(n=t,t="*");var r,i=0,o=t.toLowerCase().match(M)||[];if(g(n))while(r=o[i++])"+"===r[0]?(r=r.slice(1)||"*",(e[r]=e[r]||[]).unshift(n)):(e[r]=e[r]||[]).push(n)}}function _t(e,t,n,r){var i={},o=e===Wt;function a(s){var u;return i[s]=!0,w.each(e[s]||[],function(e,s){var l=s(t,n,r);return"string"!=typeof l||o||i[l]?o?!(u=l):void 0:(t.dataTypes.unshift(l),a(l),!1)}),u}return a(t.dataTypes[0])||!i["*"]&&a("*")}function zt(e,t){var n,r,i=w.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((i[n]?e:r||(r={}))[n]=t[n]);return r&&w.extend(!0,e,r),e}function Xt(e,t,n){var r,i,o,a,s=e.contents,u=e.dataTypes;while("*"===u[0])u.shift(),void 0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(i in s)if(s[i]&&s[i].test(r)){u.unshift(i);break}if(u[0]in n)o=u[0];else{for(i in n){if(!u[0]||e.converters[i+" 
"+u[0]]){o=i;break}a||(a=i)}o=o||a}if(o)return o!==u[0]&&u.unshift(o),n[o]}function Ut(e,t,n,r){var i,o,a,s,u,l={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)l[a.toLowerCase()]=e.converters[a];o=c.shift();while(o)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!u&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u=o,o=c.shift())if("*"===o)o=u;else if("*"!==u&&u!==o){if(!(a=l[u+" "+o]||l["* "+o]))for(i in l)if((s=i.split(" "))[1]===o&&(a=l[u+" "+s[0]]||l["* "+s[0]])){!0===a?a=l[i]:!0!==l[i]&&(o=s[0],c.unshift(s[1]));break}if(!0!==a)if(a&&e["throws"])t=a(t);else try{t=a(t)}catch(e){return{state:"parsererror",error:a?e:"No conversion from "+u+" to "+o}}}return{state:"success",data:t}}w.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Ct.href,type:"GET",isLocal:Pt.test(Ct.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":$t,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":w.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?zt(zt(e,w.ajaxSettings),t):zt(w.ajaxSettings,e)},ajaxPrefilter:Ft(It),ajaxTransport:Ft(Wt),ajax:function(t,n){"object"==typeof t&&(n=t,t=void 0),n=n||{};var i,o,a,s,u,l,c,f,p,d,h=w.ajaxSetup({},n),g=h.context||h,y=h.context&&(g.nodeType||g.jquery)?w(g):w.event,v=w.Deferred(),m=w.Callbacks("once memory"),x=h.statusCode||{},b={},T={},C="canceled",E={readyState:0,getResponseHeader:function(e){var t;if(c){if(!s){s={};while(t=Ot.exec(a))s[t[1].toLowerCase()]=t[2]}t=s[e.toLowerCase()]}return null==t?null:t},getAllResponseHeaders:function(){return c?a:null},setRequestHeader:function(e,t){return null==c&&(e=T[e.toLowerCase()]=T[e.toLowerCase()]||e,b[e]=t),this},overrideMimeType:function(e){return null==c&&(h.mimeType=e),this},statusCode:function(e){var t;if(e)if(c)E.always(e[E.status]);else for(t in e)x[t]=[x[t],e[t]];return this},abort:function(e){var t=e||C;return i&&i.abort(t),k(0,t),this}};if(v.promise(E),h.url=((t||h.url||Ct.href)+"").replace(Rt,Ct.protocol+"//"),h.type=n.method||n.type||h.method||h.type,h.dataTypes=(h.dataType||"*").toLowerCase().match(M)||[""],null==h.crossDomain){l=r.createElement("a");try{l.href=h.url,l.href=l.href,h.crossDomain=Bt.protocol+"//"+Bt.host!=l.protocol+"//"+l.host}catch(e){h.crossDomain=!0}}if(h.data&&h.processData&&"string"!=typeof h.data&&(h.data=w.param(h.data,h.traditional)),_t(It,h,n,E),c)return E;(f=w.event&&h.global)&&0==w.active++&&w.event.trigger("ajaxStart"),h.type=h.type.toUpperCase(),h.hasContent=!Mt.test(h.type),o=h.url.replace(Lt,""),h.hasContent?h.data&&h.processData&&0===(h.contentType||"").indexOf("application/x-www-form-urlencoded")&&(h.data=h.data.replace(qt,"+")):(d=h.url.slice(o.length),h.data&&(h.processData||"string"==typeof h.data)&&(o+=(kt.test(o)?"&":"?")+h.data,delete h.data),!1===h.cache&&(o=o.replace(Ht,"$1"),d=(kt.test(o)?"&":"?")+"_="+Et+++d),h.url=o+d),h.ifModified&&(w.lastModified[o]&&E.setRequestHeader("If-Modified-Since",w.lastModified[o]),w.etag[o]&&E.setRequestHeader("If-None-Match",w.etag[o])),(h.data&&h.hasContent&&!1!==h.contentType||n.contentType)&&E.setRequestHeader("Content-Type",h.contentType),E.setRequestHeader("Accept",h.dataTypes[0]&&h.accepts[h.dataTypes[0]]?h.accepts[h.dataTypes[0]]+("*"!==h.dataTypes[0]?", "+$t+"; 
q=0.01":""):h.accepts["*"]);for(p in h.headers)E.setRequestHeader(p,h.headers[p]);if(h.beforeSend&&(!1===h.beforeSend.call(g,E,h)||c))return E.abort();if(C="abort",m.add(h.complete),E.done(h.success),E.fail(h.error),i=_t(Wt,h,n,E)){if(E.readyState=1,f&&y.trigger("ajaxSend",[E,h]),c)return E;h.async&&h.timeout>0&&(u=e.setTimeout(function(){E.abort("timeout")},h.timeout));try{c=!1,i.send(b,k)}catch(e){if(c)throw e;k(-1,e)}}else k(-1,"No Transport");function k(t,n,r,s){var l,p,d,b,T,C=n;c||(c=!0,u&&e.clearTimeout(u),i=void 0,a=s||"",E.readyState=t>0?4:0,l=t>=200&&t<300||304===t,r&&(b=Xt(h,E,r)),b=Ut(h,b,E,l),l?(h.ifModified&&((T=E.getResponseHeader("Last-Modified"))&&(w.lastModified[o]=T),(T=E.getResponseHeader("etag"))&&(w.etag[o]=T)),204===t||"HEAD"===h.type?C="nocontent":304===t?C="notmodified":(C=b.state,p=b.data,l=!(d=b.error))):(d=C,!t&&C||(C="error",t<0&&(t=0))),E.status=t,E.statusText=(n||C)+"",l?v.resolveWith(g,[p,C,E]):v.rejectWith(g,[E,C,d]),E.statusCode(x),x=void 0,f&&y.trigger(l?"ajaxSuccess":"ajaxError",[E,h,l?p:d]),m.fireWith(g,[E,C]),f&&(y.trigger("ajaxComplete",[E,h]),--w.active||w.event.trigger("ajaxStop")))}return E},getJSON:function(e,t,n){return w.get(e,t,n,"json")},getScript:function(e,t){return w.get(e,void 0,t,"script")}}),w.each(["get","post"],function(e,t){w[t]=function(e,n,r,i){return g(n)&&(i=i||r,r=n,n=void 0),w.ajax(w.extend({url:e,type:t,dataType:i,data:n,success:r},w.isPlainObject(e)&&e))}}),w._evalUrl=function(e){return w.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,"throws":!0})},w.fn.extend({wrapAll:function(e){var t;return this[0]&&(g(e)&&(e=e.call(this[0])),t=w(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){var e=this;while(e.firstElementChild)e=e.firstElementChild;return e}).append(this)),this},wrapInner:function(e){return g(e)?this.each(function(t){w(this).wrapInner(e.call(this,t))}):this.each(function(){var t=w(this),n=t.contents();n.length?n.wrapAll(e):t.append(e)})},wrap:function(e){var t=g(e);return this.each(function(n){w(this).wrapAll(t?e.call(this,n):e)})},unwrap:function(e){return this.parent(e).not("body").each(function(){w(this).replaceWith(this.childNodes)}),this}}),w.expr.pseudos.hidden=function(e){return!w.expr.pseudos.visible(e)},w.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},w.ajaxSettings.xhr=function(){try{return new e.XMLHttpRequest}catch(e){}};var Vt={0:200,1223:204},Gt=w.ajaxSettings.xhr();h.cors=!!Gt&&"withCredentials"in Gt,h.ajax=Gt=!!Gt,w.ajaxTransport(function(t){var n,r;if(h.cors||Gt&&!t.crossDomain)return{send:function(i,o){var a,s=t.xhr();if(s.open(t.type,t.url,t.async,t.username,t.password),t.xhrFields)for(a in t.xhrFields)s[a]=t.xhrFields[a];t.mimeType&&s.overrideMimeType&&s.overrideMimeType(t.mimeType),t.crossDomain||i["X-Requested-With"]||(i["X-Requested-With"]="XMLHttpRequest");for(a in i)s.setRequestHeader(a,i[a]);n=function(e){return function(){n&&(n=r=s.onload=s.onerror=s.onabort=s.ontimeout=s.onreadystatechange=null,"abort"===e?s.abort():"error"===e?"number"!=typeof s.status?o(0,"error"):o(s.status,s.statusText):o(Vt[s.status]||s.status,s.statusText,"text"!==(s.responseType||"text")||"string"!=typeof s.responseText?{binary:s.response}:{text:s.responseText},s.getAllResponseHeaders()))}},s.onload=n(),r=s.onerror=s.ontimeout=n("error"),void 
0!==s.onabort?s.onabort=r:s.onreadystatechange=function(){4===s.readyState&&e.setTimeout(function(){n&&r()})},n=n("abort");try{s.send(t.hasContent&&t.data||null)}catch(e){if(n)throw e}},abort:function(){n&&n()}}}),w.ajaxPrefilter(function(e){e.crossDomain&&(e.contents.script=!1)}),w.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return w.globalEval(e),e}}}),w.ajaxPrefilter("script",function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")}),w.ajaxTransport("script",function(e){if(e.crossDomain){var t,n;return{send:function(i,o){t=w("<script>").prop({charset:e.scriptCharset,src:e.url}).on("load error",n=function(e){t.remove(),n=null,e&&o("error"===e.type?404:200,e.type)}),r.head.appendChild(t[0])},abort:function(){n&&n()}}}});var Yt=[],Qt=/(=)\?(?=&|$)|\?\?/;w.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Yt.pop()||w.expando+"_"+Et++;return this[e]=!0,e}}),w.ajaxPrefilter("json jsonp",function(t,n,r){var i,o,a,s=!1!==t.jsonp&&(Qt.test(t.url)?"url":"string"==typeof t.data&&0===(t.contentType||"").indexOf("application/x-www-form-urlencoded")&&Qt.test(t.data)&&"data");if(s||"jsonp"===t.dataTypes[0])return i=t.jsonpCallback=g(t.jsonpCallback)?t.jsonpCallback():t.jsonpCallback,s?t[s]=t[s].replace(Qt,"$1"+i):!1!==t.jsonp&&(t.url+=(kt.test(t.url)?"&":"?")+t.jsonp+"="+i),t.converters["script json"]=function(){return a||w.error(i+" was not called"),a[0]},t.dataTypes[0]="json",o=e[i],e[i]=function(){a=arguments},r.always(function(){void 0===o?w(e).removeProp(i):e[i]=o,t[i]&&(t.jsonpCallback=n.jsonpCallback,Yt.push(i)),a&&g(o)&&o(a[0]),a=o=void 0}),"script"}),h.createHTMLDocument=function(){var e=r.implementation.createHTMLDocument("").body;return e.innerHTML="<form></form><form></form>",2===e.childNodes.length}(),w.parseHTML=function(e,t,n){if("string"!=typeof e)return[];"boolean"==typeof t&&(n=t,t=!1);var i,o,a;return t||(h.createHTMLDocument?((i=(t=r.implementation.createHTMLDocument("")).createElement("base")).href=r.location.href,t.head.appendChild(i)):t=r),o=A.exec(e),a=!n&&[],o?[t.createElement(o[1])]:(o=xe([e],t,a),a&&a.length&&w(a).remove(),w.merge([],o.childNodes))},w.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return s>-1&&(r=vt(e.slice(s)),e=e.slice(0,s)),g(t)?(n=t,t=void 0):t&&"object"==typeof t&&(i="POST"),a.length>0&&w.ajax({url:e,type:i||"GET",dataType:"html",data:t}).done(function(e){o=arguments,a.html(r?w("<div>").append(w.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},w.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){w.fn[t]=function(e){return this.on(t,e)}}),w.expr.pseudos.animated=function(e){return w.grep(w.timers,function(t){return e===t.elem}).length},w.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l,c=w.css(e,"position"),f=w(e),p={};"static"===c&&(e.style.position="relative"),s=f.offset(),o=w.css(e,"top"),u=w.css(e,"left"),(l=("absolute"===c||"fixed"===c)&&(o+u).indexOf("auto")>-1)?(a=(r=f.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),g(t)&&(t=t.call(e,n,w.extend({},s))),null!=t.top&&(p.top=t.top-s.top+a),null!=t.left&&(p.left=t.left-s.left+i),"using"in t?t.using.call(e,p):f.css(p)}},w.fn.extend({offset:function(e){if(arguments.length)return void 0===e?this:this.each(function(t){w.offset.setOffset(this,e,t)});var t,n,r=this[0];if(r)return 
r.getClientRects().length?(t=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:t.top+n.pageYOffset,left:t.left+n.pageXOffset}):{top:0,left:0}},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===w.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===w.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=w(e).offset()).top+=w.css(e,"borderTopWidth",!0),i.left+=w.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-w.css(r,"marginTop",!0),left:t.left-i.left-w.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===w.css(e,"position"))e=e.offsetParent;return e||be})}}),w.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(e,t){var n="pageYOffset"===t;w.fn[e]=function(r){return z(this,function(e,r,i){var o;if(y(e)?o=e:9===e.nodeType&&(o=e.defaultView),void 0===i)return o?o[t]:e[r];o?o.scrollTo(n?o.pageXOffset:i,n?i:o.pageYOffset):e[r]=i},e,r,arguments.length)}}),w.each(["top","left"],function(e,t){w.cssHooks[t]=_e(h.pixelPosition,function(e,n){if(n)return n=Fe(e,t),We.test(n)?w(e).position()[t]+"px":n})}),w.each({Height:"height",Width:"width"},function(e,t){w.each({padding:"inner"+e,content:t,"":"outer"+e},function(n,r){w.fn[r]=function(i,o){var a=arguments.length&&(n||"boolean"!=typeof i),s=n||(!0===i||!0===o?"margin":"border");return z(this,function(t,n,i){var o;return y(t)?0===r.indexOf("outer")?t["inner"+e]:t.document.documentElement["client"+e]:9===t.nodeType?(o=t.documentElement,Math.max(t.body["scroll"+e],o["scroll"+e],t.body["offset"+e],o["offset"+e],o["client"+e])):void 0===i?w.css(t,n,s):w.style(t,n,i,s)},t,a?i:void 0,a)}})}),w.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,t){w.fn[t]=function(e,n){return arguments.length>0?this.on(t,null,e,n):this.trigger(t)}}),w.fn.extend({hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),w.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)}}),w.proxy=function(e,t){var n,r,i;if("string"==typeof t&&(n=e[t],t=e,e=n),g(e))return r=o.call(arguments,2),i=function(){return e.apply(t||this,r.concat(o.call(arguments)))},i.guid=e.guid=e.guid||w.guid++,i},w.holdReady=function(e){e?w.readyWait++:w.ready(!0)},w.isArray=Array.isArray,w.parseJSON=JSON.parse,w.nodeName=N,w.isFunction=g,w.isWindow=y,w.camelCase=G,w.type=x,w.now=Date.now,w.isNumeric=function(e){var t=w.type(e);return("number"===t||"string"===t)&&!isNaN(e-parseFloat(e))},"function"==typeof define&&define.amd&&define("jquery",[],function(){return w});var Jt=e.jQuery,Kt=e.$;return w.noConflict=function(t){return e.$===w&&(e.$=Kt),t&&e.jQuery===w&&(e.jQuery=Jt),w},t||(e.jQuery=e.$=w),w});
diff --git a/lib/python3.7/site-packages/werkzeug/debug/shared/less.png b/lib/python3.7/site-packages/werkzeug/debug/shared/less.png
new file mode 100644
index 0000000000000000000000000000000000000000..5efefd62b43e4f11dd300be4355a4b413c7a70d2
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/debug/shared/less.png differ
diff --git a/lib/python3.7/site-packages/werkzeug/debug/shared/more.png b/lib/python3.7/site-packages/werkzeug/debug/shared/more.png
new file mode 100644
index 0000000000000000000000000000000000000000..804fa226fe3ed9e6cc2bd044a848f33a2d7b4e4f
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/debug/shared/more.png differ
diff --git a/lib/python3.7/site-packages/werkzeug/debug/shared/source.png b/lib/python3.7/site-packages/werkzeug/debug/shared/source.png
new file mode 100644
index 0000000000000000000000000000000000000000..f7ea90419d950f9e69d977a1f5847456d96a5f0b
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/debug/shared/source.png differ
diff --git a/lib/python3.7/site-packages/werkzeug/debug/shared/style.css b/lib/python3.7/site-packages/werkzeug/debug/shared/style.css
new file mode 100644
index 0000000000000000000000000000000000000000..fac09ce83af3888d71c3d4e2026c214b312f95aa
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/debug/shared/style.css
@@ -0,0 +1,154 @@
+@font-face {
+  font-family: 'Ubuntu';
+  font-style: normal;
+  font-weight: normal;
+  src: local('Ubuntu'), local('Ubuntu-Regular'),
+    url('?__debugger__=yes&cmd=resource&f=ubuntu.ttf') format('truetype');
+}
+
+body, input  { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+               'Verdana', sans-serif; color: #000; text-align: center;
+               margin: 1em; padding: 0; font-size: 15px; }
+h1, h2, h3   { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
+               'Geneva', 'Verdana', sans-serif; font-weight: normal; }
+
+input        { background-color: #fff; margin: 0; text-align: left;
+               outline: none !important; }
+input[type="submit"] { padding: 3px 6px; }
+a            { color: #11557C; }
+a:hover      { color: #177199; }
+pre, code,
+textarea     { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
+               monospace; font-size: 14px; }
+
+div.debugger { text-align: left; padding: 12px; margin: auto;
+               background-color: white; }
+h1           { font-size: 36px; margin: 0 0 0.3em 0; }
+div.detail { cursor: pointer; }
+div.detail p { margin: 0 0 8px 13px; font-size: 14px; white-space: pre-wrap;
+               font-family: monospace; }
+div.explanation { margin: 20px 13px; font-size: 15px; color: #555; }
+div.footer   { font-size: 13px; text-align: right; margin: 30px 0;
+               color: #86989B; }
+
+h2           { font-size: 16px; margin: 1.3em 0 0.0 0; padding: 9px;
+               background-color: #11557C; color: white; }
+h2 em, h3 em { font-style: normal; color: #A5D6D9; font-weight: normal; }
+
+div.traceback, div.plain { border: 1px solid #ddd; margin: 0 0 1em 0; padding: 10px; }
+div.plain p      { margin: 0; }
+div.plain textarea,
+div.plain pre { margin: 10px 0 0 0; padding: 4px;
+                background-color: #E8EFF0; border: 1px solid #D3E7E9; }
+div.plain textarea { width: 99%; height: 300px; }
+div.traceback h3 { font-size: 1em; margin: 0 0 0.8em 0; }
+div.traceback ul { list-style: none; margin: 0; padding: 0 0 0 1em; }
+div.traceback h4 { font-size: 13px; font-weight: normal; margin: 0.7em 0 0.1em 0; }
+div.traceback pre { margin: 0; padding: 5px 0 3px 15px;
+                    background-color: #E8EFF0; border: 1px solid #D3E7E9; }
+div.traceback .library .current { background: white; color: #555; }
+div.traceback .expanded .current { background: #E8EFF0; color: black; }
+div.traceback pre:hover { background-color: #DDECEE; color: black; cursor: pointer; }
+div.traceback div.source.expanded pre + pre { border-top: none; }
+
+div.traceback span.ws { display: none; }
+div.traceback pre.before, div.traceback pre.after { display: none; background: white; }
+div.traceback div.source.expanded pre.before,
+div.traceback div.source.expanded pre.after {
+    display: block;
+}
+
+div.traceback div.source.expanded span.ws {
+    display: inline;
+}
+
+div.traceback blockquote { margin: 1em 0 0 0; padding: 0; }
+div.traceback img { float: right; padding: 2px; margin: -3px 2px 0 0; display: none; }
+div.traceback img:hover { background-color: #ddd; cursor: pointer;
+                          border-color: #BFDDE0; }
+div.traceback pre:hover img { display: block; }
+div.traceback cite.filename { font-style: normal; color: #3B666B; }
+
+pre.console { border: 1px solid #ccc; background: white!important;
+              color: black; padding: 5px!important;
+              margin: 3px 0 0 0!important; cursor: default!important;
+              max-height: 400px; overflow: auto; }
+pre.console form { color: #555; }
+pre.console input { background-color: transparent; color: #555;
+                    width: 90%; font-family: 'Consolas', 'Deja Vu Sans Mono',
+                    'Bitstream Vera Sans Mono', monospace; font-size: 14px;
+                     border: none!important; }
+
+span.string { color: #30799B; }
+span.number { color: #9C1A1C; }
+span.help   { color: #3A7734; }
+span.object { color: #485F6E; }
+span.extended { opacity: 0.5; }
+span.extended:hover { opacity: 1; }
+a.toggle { text-decoration: none; background-repeat: no-repeat;
+           background-position: center center;
+           background-image: url(?__debugger__=yes&cmd=resource&f=more.png); }
+a.toggle:hover { background-color: #444; }
+a.open { background-image: url(?__debugger__=yes&cmd=resource&f=less.png); }
+
+pre.console div.traceback,
+pre.console div.box { margin: 5px 10px; white-space: normal;
+                      border: 1px solid #11557C; padding: 10px;
+                      font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+                      'Verdana', sans-serif;  }
+pre.console div.box h3,
+pre.console div.traceback h3 { margin: -10px -10px 10px -10px; padding: 5px;
+                               background: #11557C; color: white; }
+
+pre.console div.traceback pre:hover { cursor: default; background: #E8EFF0; }
+pre.console div.traceback pre.syntaxerror { background: inherit; border: none;
+                                            margin: 20px -10px -10px -10px;
+                                            padding: 10px; border-top: 1px solid #BFDDE0;
+                                            background: #E8EFF0; }
+pre.console div.noframe-traceback pre.syntaxerror { margin-top: -10px; border: none; }
+
+pre.console div.box pre.repr { padding: 0; margin: 0; background-color: white; border: none; }
+pre.console div.box table { margin-top: 6px; }
+pre.console div.box pre { border: none; }
+pre.console div.box pre.help { background-color: white; }
+pre.console div.box pre.help:hover { cursor: default; }
+pre.console table tr { vertical-align: top; }
+div.console { border: 1px solid #ccc; padding: 4px; background-color: #fafafa; }
+
+div.traceback pre, div.console pre {
+    white-space: pre-wrap;       /* css-3 should we be so lucky... */
+    white-space: -moz-pre-wrap;  /* Mozilla, since 1999 */
+    white-space: -pre-wrap;      /* Opera 4-6 ?? */
+    white-space: -o-pre-wrap;    /* Opera 7 ?? */
+    word-wrap: break-word;       /* Internet Explorer 5.5+ */
+    _white-space: pre;           /* IE only hack to re-specify in
+                                 addition to word-wrap  */
+}
+
+
+div.pin-prompt {
+    position: absolute;
+    display: none;
+    top: 0;
+    bottom: 0;
+    left: 0;
+    right: 0;
+    background: rgba(255, 255, 255, 0.8);
+}
+
+div.pin-prompt .inner {
+    background: #eee;
+    padding: 10px 50px;
+    width: 350px;
+    margin: 10% auto 0 auto;
+    border: 1px solid #ccc;
+    border-radius: 2px;
+}
+
+div.exc-divider {
+    margin: 0.7em 0 0 -1em;
+    padding: 0.5em;
+    background: #11557C;
+    color: #ddd;
+    border: 1px solid #ddd;
+}
diff --git a/lib/python3.7/site-packages/werkzeug/debug/shared/ubuntu.ttf b/lib/python3.7/site-packages/werkzeug/debug/shared/ubuntu.ttf
new file mode 100644
index 0000000000000000000000000000000000000000..8079f938c9fa5aede9a151439f136e267958ccd1
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/debug/shared/ubuntu.ttf differ
diff --git a/lib/python3.7/site-packages/werkzeug/debug/tbtools.py b/lib/python3.7/site-packages/werkzeug/debug/tbtools.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6af4e307008ced74d6ebcd2bfadbcd90480a210
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/debug/tbtools.py
@@ -0,0 +1,627 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.debug.tbtools
+    ~~~~~~~~~~~~~~~~~~~~~~
+
+    This module provides various traceback related utility functions.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import codecs
+import inspect
+import json
+import os
+import re
+import sys
+import sysconfig
+import traceback
+from tokenize import TokenError
+
+from .._compat import PY2
+from .._compat import range_type
+from .._compat import reraise
+from .._compat import string_types
+from .._compat import text_type
+from .._compat import to_native
+from .._compat import to_unicode
+from ..filesystem import get_filesystem_encoding
+from ..utils import cached_property
+from ..utils import escape
+from .console import Console
+
+
+_coding_re = re.compile(br"coding[:=]\s*([-\w.]+)")
+_line_re = re.compile(br"^(.*?)$", re.MULTILINE)
+_funcdef_re = re.compile(r"^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)")
+UTF8_COOKIE = b"\xef\xbb\xbf"
+
+system_exceptions = (SystemExit, KeyboardInterrupt)
+try:
+    system_exceptions += (GeneratorExit,)
+except NameError:
+    pass
+
+
+HEADER = u"""\
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+  "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+  <head>
+    <title>%(title)s // Werkzeug Debugger</title>
+    <link rel="stylesheet" href="?__debugger__=yes&amp;cmd=resource&amp;f=style.css"
+        type="text/css">
+    <!-- We need to make sure this has a favicon so that the debugger does
+         not by accident trigger a request to /favicon.ico which might
+         change the application state. -->
+    <link rel="shortcut icon"
+        href="?__debugger__=yes&amp;cmd=resource&amp;f=console.png">
+    <script src="?__debugger__=yes&amp;cmd=resource&amp;f=jquery.js"></script>
+    <script src="?__debugger__=yes&amp;cmd=resource&amp;f=debugger.js"></script>
+    <script type="text/javascript">
+      var TRACEBACK = %(traceback_id)d,
+          CONSOLE_MODE = %(console)s,
+          EVALEX = %(evalex)s,
+          EVALEX_TRUSTED = %(evalex_trusted)s,
+          SECRET = "%(secret)s";
+    </script>
+  </head>
+  <body style="background-color: #fff">
+    <div class="debugger">
+"""
+FOOTER = u"""\
+      <div class="footer">
+        Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
+        friendly Werkzeug powered traceback interpreter.
+      </div>
+    </div>
+
+    <div class="pin-prompt">
+      <div class="inner">
+        <h3>Console Locked</h3>
+        <p>
+          The console is locked and needs to be unlocked by entering the PIN.
+          You can find the PIN printed out on the standard output of your
+          shell that runs the server.
+        <form>
+          <p>PIN:
+            <input type=text name=pin size=14>
+            <input type=submit name=btn value="Confirm Pin">
+        </form>
+      </div>
+    </div>
+  </body>
+</html>
+"""
+
+PAGE_HTML = (
+    HEADER
+    + u"""\
+<h1>%(exception_type)s</h1>
+<div class="detail">
+  <p class="errormsg">%(exception)s</p>
+</div>
+<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
+%(summary)s
+<div class="plain">
+  <form action="/?__debugger__=yes&amp;cmd=paste" method="post">
+    <p>
+      <input type="hidden" name="language" value="pytb">
+      This is the Copy/Paste friendly version of the traceback.  <span
+      class="pastemessage">You can also paste this traceback into
+      a <a href="https://gist.github.com/">gist</a>:
+      <input type="submit" value="create paste"></span>
+    </p>
+    <textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
+  </form>
+</div>
+<div class="explanation">
+  The debugger caught an exception in your WSGI application.  You can now
+  look at the traceback which led to the error.  <span class="nojavascript">
+  If you enable JavaScript you can also use additional features such as code
+  execution (if the evalex feature is enabled), automatic pasting of the
+  exceptions and much more.</span>
+</div>
+"""
+    + FOOTER
+    + """
+<!--
+
+%(plaintext_cs)s
+
+-->
+"""
+)
+
+CONSOLE_HTML = (
+    HEADER
+    + u"""\
+<h1>Interactive Console</h1>
+<div class="explanation">
+In this console you can execute Python expressions in the context of the
+application.  The initial namespace was created by the debugger automatically.
+</div>
+<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
+"""
+    + FOOTER
+)
+
+SUMMARY_HTML = u"""\
+<div class="%(classes)s">
+  %(title)s
+  <ul>%(frames)s</ul>
+  %(description)s
+</div>
+"""
+
+FRAME_HTML = u"""\
+<div class="frame" id="frame-%(id)d">
+  <h4>File <cite class="filename">"%(filename)s"</cite>,
+      line <em class="line">%(lineno)s</em>,
+      in <code class="function">%(function_name)s</code></h4>
+  <div class="source %(library)s">%(lines)s</div>
+</div>
+"""
+
+SOURCE_LINE_HTML = u"""\
+<tr class="%(classes)s">
+  <td class=lineno>%(lineno)s</td>
+  <td>%(code)s</td>
+</tr>
+"""
+
+
+def render_console_html(secret, evalex_trusted=True):
+    return CONSOLE_HTML % {
+        "evalex": "true",
+        "evalex_trusted": "true" if evalex_trusted else "false",
+        "console": "true",
+        "title": "Console",
+        "secret": secret,
+        "traceback_id": -1,
+    }
+
+
+def get_current_traceback(
+    ignore_system_exceptions=False, show_hidden_frames=False, skip=0
+):
+    """Get the current exception info as `Traceback` object.  Per default
+    calling this method will reraise system exceptions such as generator exit,
+    system exit or others.  This behavior can be disabled by passing `False`
+    to the function as first parameter.
+    """
+    exc_type, exc_value, tb = sys.exc_info()
+    if ignore_system_exceptions and exc_type in system_exceptions:
+        reraise(exc_type, exc_value, tb)
+    for _ in range_type(skip):
+        if tb.tb_next is None:
+            break
+        tb = tb.tb_next
+    tb = Traceback(exc_type, exc_value, tb)
+    if not show_hidden_frames:
+        tb.filter_hidden_frames()
+    return tb
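+
+
+# A minimal usage sketch for the helper above: capture the active exception
+# as a `Traceback` and log it.  The ZeroDivisionError is only illustrative.
+def _demo_get_current_traceback():  # illustrative sketch, unused by Werkzeug
+    try:
+        1 / 0
+    except ZeroDivisionError:
+        tb = get_current_traceback(ignore_system_exceptions=True)
+        tb.log()  # writes the plaintext traceback to sys.stderr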
+
+
+class Line(object):
+    """Helper for the source renderer."""
+
+    __slots__ = ("lineno", "code", "in_frame", "current")
+
+    def __init__(self, lineno, code):
+        self.lineno = lineno
+        self.code = code
+        self.in_frame = False
+        self.current = False
+
+    @property
+    def classes(self):
+        rv = ["line"]
+        if self.in_frame:
+            rv.append("in-frame")
+        if self.current:
+            rv.append("current")
+        return rv
+
+    def render(self):
+        return SOURCE_LINE_HTML % {
+            "classes": u" ".join(self.classes),
+            "lineno": self.lineno,
+            "code": escape(self.code),
+        }
+
+
+class Traceback(object):
+    """Wraps a traceback."""
+
+    def __init__(self, exc_type, exc_value, tb):
+        self.exc_type = exc_type
+        self.exc_value = exc_value
+        self.tb = tb
+
+        exception_type = exc_type.__name__
+        if exc_type.__module__ not in {"builtins", "__builtin__", "exceptions"}:
+            exception_type = exc_type.__module__ + "." + exception_type
+        self.exception_type = exception_type
+
+        self.groups = []
+        while True:
+            self.groups.append(Group(exc_type, exc_value, tb))
+            if PY2:
+                break
+            exc_value = exc_value.__cause__ or exc_value.__context__
+            if exc_value is None:
+                break
+            exc_type = type(exc_value)
+            tb = exc_value.__traceback__
+        self.groups.reverse()
+        self.frames = [frame for group in self.groups for frame in group.frames]
+
+    def filter_hidden_frames(self):
+        """Remove the frames according to the paste spec."""
+        for group in self.groups:
+            group.filter_hidden_frames()
+
+        self.frames[:] = [frame for group in self.groups for frame in group.frames]
+
+    @property
+    def is_syntax_error(self):
+        """Is it a syntax error?"""
+        return isinstance(self.exc_value, SyntaxError)
+
+    @property
+    def exception(self):
+        """String representation of the final exception."""
+        return self.groups[-1].exception
+
+    def log(self, logfile=None):
+        """Log the ASCII traceback into a file object."""
+        if logfile is None:
+            logfile = sys.stderr
+        tb = self.plaintext.rstrip() + u"\n"
+        logfile.write(to_native(tb, "utf-8", "replace"))
+
+    def paste(self):
+        """Create a paste and return the paste id."""
+        data = json.dumps(
+            {
+                "description": "Werkzeug Internal Server Error",
+                "public": False,
+                "files": {"traceback.txt": {"content": self.plaintext}},
+            }
+        ).encode("utf-8")
+        try:
+            from urllib2 import urlopen
+        except ImportError:
+            from urllib.request import urlopen
+        rv = urlopen("https://api.github.com/gists", data=data)
+        resp = json.loads(rv.read().decode("utf-8"))
+        rv.close()
+        return {"url": resp["html_url"], "id": resp["id"]}
+
+    def render_summary(self, include_title=True):
+        """Render the traceback for the interactive console."""
+        title = ""
+        classes = ["traceback"]
+        if not self.frames:
+            classes.append("noframe-traceback")
+            frames = []
+        else:
+            library_frames = sum(frame.is_library for frame in self.frames)
+            mark_lib = 0 < library_frames < len(self.frames)
+            frames = [group.render(mark_lib=mark_lib) for group in self.groups]
+
+        if include_title:
+            if self.is_syntax_error:
+                title = u"Syntax Error"
+            else:
+                title = u"Traceback <em>(most recent call last)</em>:"
+
+        if self.is_syntax_error:
+            description_wrapper = u"<pre class=syntaxerror>%s</pre>"
+        else:
+            description_wrapper = u"<blockquote>%s</blockquote>"
+
+        return SUMMARY_HTML % {
+            "classes": u" ".join(classes),
+            "title": u"<h3>%s</h3>" % title if title else u"",
+            "frames": u"\n".join(frames),
+            "description": description_wrapper % escape(self.exception),
+        }
+
+    def render_full(self, evalex=False, secret=None, evalex_trusted=True):
+        """Render the Full HTML page with the traceback info."""
+        exc = escape(self.exception)
+        return PAGE_HTML % {
+            "evalex": "true" if evalex else "false",
+            "evalex_trusted": "true" if evalex_trusted else "false",
+            "console": "false",
+            "title": exc,
+            "exception": exc,
+            "exception_type": escape(self.exception_type),
+            "summary": self.render_summary(include_title=False),
+            "plaintext": escape(self.plaintext),
+            "plaintext_cs": re.sub("-{2,}", "-", self.plaintext),
+            "traceback_id": self.id,
+            "secret": secret,
+        }
+
+    @cached_property
+    def plaintext(self):
+        return u"\n".join([group.render_text() for group in self.groups])
+
+    @property
+    def id(self):
+        return id(self)
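+
+
+# Sketch: the same `Traceback` object can render the full HTML debugger page
+# or a plain-text representation.  The secret value is a dummy placeholder.
+def _demo_render_traceback():  # illustrative sketch, unused by Werkzeug
+    try:
+        raise ValueError("boom")
+    except ValueError:
+        tb = get_current_traceback()
+    html = tb.render_full(evalex=False, secret="dummy-secret")
+    return html, tb.plaintext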
+
+
+class Group(object):
+    """A group of frames for an exception in a traceback. On Python 3,
+    if the exception has a ``__cause__`` or ``__context__``, there are
+    multiple exception groups.
+    """
+
+    def __init__(self, exc_type, exc_value, tb):
+        self.exc_type = exc_type
+        self.exc_value = exc_value
+        self.info = None
+        if not PY2:
+            if exc_value.__cause__ is not None:
+                self.info = (
+                    u"The above exception was the direct cause of the"
+                    u" following exception"
+                )
+            elif exc_value.__context__ is not None:
+                self.info = (
+                    u"During handling of the above exception, another"
+                    u" exception occurred"
+                )
+
+        self.frames = []
+        while tb is not None:
+            self.frames.append(Frame(exc_type, exc_value, tb))
+            tb = tb.tb_next
+
+    def filter_hidden_frames(self):
+        new_frames = []
+        hidden = False
+
+        for frame in self.frames:
+            hide = frame.hide
+            if hide in ("before", "before_and_this"):
+                new_frames = []
+                hidden = False
+                if hide == "before_and_this":
+                    continue
+            elif hide in ("reset", "reset_and_this"):
+                hidden = False
+                if hide == "reset_and_this":
+                    continue
+            elif hide in ("after", "after_and_this"):
+                hidden = True
+                if hide == "after_and_this":
+                    continue
+            elif hide or hidden:
+                continue
+            new_frames.append(frame)
+
+        # if we only have one frame and that frame is from the codeop
+        # module, remove it.
+        if len(new_frames) == 1 and self.frames[0].module == "codeop":
+            del self.frames[:]
+
+        # if the last frame is missing, something went terribly wrong :(
+        elif self.frames[-1] in new_frames:
+            self.frames[:] = new_frames
+
+    @property
+    def exception(self):
+        """String representation of the exception."""
+        buf = traceback.format_exception_only(self.exc_type, self.exc_value)
+        rv = "".join(buf).strip()
+        return to_unicode(rv, "utf-8", "replace")
+
+    def render(self, mark_lib=True):
+        out = []
+        if self.info is not None:
+            out.append(u'<li><div class="exc-divider">%s:</div>' % self.info)
+        for frame in self.frames:
+            out.append(
+                u"<li%s>%s"
+                % (
+                    u' title="%s"' % escape(frame.info) if frame.info else u"",
+                    frame.render(mark_lib=mark_lib),
+                )
+            )
+        return u"\n".join(out)
+
+    def render_text(self):
+        out = []
+        if self.info is not None:
+            out.append(u"\n%s:\n" % self.info)
+        out.append(u"Traceback (most recent call last):")
+        for frame in self.frames:
+            out.append(frame.render_text())
+        out.append(self.exception)
+        return u"\n".join(out)
+
+
+class Frame(object):
+    """A single frame in a traceback."""
+
+    def __init__(self, exc_type, exc_value, tb):
+        self.lineno = tb.tb_lineno
+        self.function_name = tb.tb_frame.f_code.co_name
+        self.locals = tb.tb_frame.f_locals
+        self.globals = tb.tb_frame.f_globals
+
+        fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
+        if fn[-4:] in (".pyo", ".pyc"):
+            fn = fn[:-1]
+        # if it's a file on the file system resolve the real filename.
+        if os.path.isfile(fn):
+            fn = os.path.realpath(fn)
+        self.filename = to_unicode(fn, get_filesystem_encoding())
+        self.module = self.globals.get("__name__")
+        self.loader = self.globals.get("__loader__")
+        self.code = tb.tb_frame.f_code
+
+        # support for paste's traceback extensions
+        self.hide = self.locals.get("__traceback_hide__", False)
+        info = self.locals.get("__traceback_info__")
+        if info is not None:
+            info = to_unicode(info, "utf-8", "replace")
+        self.info = info
+
+    def render(self, mark_lib=True):
+        """Render a single frame in a traceback."""
+        return FRAME_HTML % {
+            "id": self.id,
+            "filename": escape(self.filename),
+            "lineno": self.lineno,
+            "function_name": escape(self.function_name),
+            "lines": self.render_line_context(),
+            "library": "library" if mark_lib and self.is_library else "",
+        }
+
+    @cached_property
+    def is_library(self):
+        return any(
+            self.filename.startswith(path) for path in sysconfig.get_paths().values()
+        )
+
+    def render_text(self):
+        return u'  File "%s", line %s, in %s\n    %s' % (
+            self.filename,
+            self.lineno,
+            self.function_name,
+            self.current_line.strip(),
+        )
+
+    def render_line_context(self):
+        before, current, after = self.get_context_lines()
+        rv = []
+
+        def render_line(line, cls):
+            line = line.expandtabs().rstrip()
+            stripped_line = line.strip()
+            prefix = len(line) - len(stripped_line)
+            rv.append(
+                '<pre class="line %s"><span class="ws">%s</span>%s</pre>'
+                % (cls, " " * prefix, escape(stripped_line) or " ")
+            )
+
+        for line in before:
+            render_line(line, "before")
+        render_line(current, "current")
+        for line in after:
+            render_line(line, "after")
+
+        return "\n".join(rv)
+
+    def get_annotated_lines(self):
+        """Helper function that returns lines with extra information."""
+        lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
+
+        # find function definition and mark lines
+        if hasattr(self.code, "co_firstlineno"):
+            lineno = self.code.co_firstlineno - 1
+            while lineno > 0:
+                if _funcdef_re.match(lines[lineno].code):
+                    break
+                lineno -= 1
+            try:
+                offset = len(inspect.getblock([x.code + "\n" for x in lines[lineno:]]))
+            except TokenError:
+                offset = 0
+            for line in lines[lineno : lineno + offset]:
+                line.in_frame = True
+
+        # mark current line
+        try:
+            lines[self.lineno - 1].current = True
+        except IndexError:
+            pass
+
+        return lines
+
+    def eval(self, code, mode="single"):
+        """Evaluate code in the context of the frame."""
+        if isinstance(code, string_types):
+            if PY2 and isinstance(code, text_type):  # noqa
+                code = UTF8_COOKIE + code.encode("utf-8")
+            code = compile(code, "<interactive>", mode)
+        return eval(code, self.globals, self.locals)
+
+    @cached_property
+    def sourcelines(self):
+        """The sourcecode of the file as list of unicode strings."""
+        # get sourcecode from loader or file
+        source = None
+        if self.loader is not None:
+            try:
+                if hasattr(self.loader, "get_source"):
+                    source = self.loader.get_source(self.module)
+                elif hasattr(self.loader, "get_source_by_code"):
+                    source = self.loader.get_source_by_code(self.code)
+            except Exception:
+                # we munch the exception so that we don't cause troubles
+                # if the loader is broken.
+                pass
+
+        if source is None:
+            try:
+                f = open(to_native(self.filename, get_filesystem_encoding()), mode="rb")
+            except IOError:
+                return []
+            try:
+                source = f.read()
+            finally:
+                f.close()
+
+        # already unicode?  return right away
+        if isinstance(source, text_type):
+            return source.splitlines()
+
+        # the source should be ascii, but we don't want to reject too many
+        # characters in the debugger if something breaks
+        charset = "utf-8"
+        if source.startswith(UTF8_COOKIE):
+            source = source[3:]
+        else:
+            for idx, match in enumerate(_line_re.finditer(source)):
+                match = _coding_re.search(match.group())
+                if match is not None:
+                    charset = match.group(1)
+                    break
+                if idx > 1:
+                    break
+
+        # on broken cookies we fall back to utf-8 too
+        charset = to_native(charset)
+        try:
+            codecs.lookup(charset)
+        except LookupError:
+            charset = "utf-8"
+
+        return source.decode(charset, "replace").splitlines()
+
+    def get_context_lines(self, context=5):
+        before = self.sourcelines[self.lineno - context - 1 : self.lineno - 1]
+        past = self.sourcelines[self.lineno : self.lineno + context]
+        return (before, self.current_line, past)
+
+    @property
+    def current_line(self):
+        try:
+            return self.sourcelines[self.lineno - 1]
+        except IndexError:
+            return u""
+
+    @cached_property
+    def console(self):
+        return Console(self.globals, self.locals)
+
+    @property
+    def id(self):
+        return id(self)
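+
+
+# Sketch: evaluating an expression in a captured frame's namespace, the
+# mechanism behind the interactive debugger console.
+def _demo_frame_eval():  # illustrative sketch, unused by Werkzeug
+    try:
+        x = 42
+        raise RuntimeError("inspect me")
+    except RuntimeError:
+        tb = get_current_traceback()
+    frame = tb.frames[-1]
+    return frame.eval("x * 2", mode="eval")  # evaluated in frame locals -> 84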
diff --git a/lib/python3.7/site-packages/werkzeug/exceptions.py b/lib/python3.7/site-packages/werkzeug/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..5642b99d26203f6f6f8202b0741dc2dd948f3559
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/exceptions.py
@@ -0,0 +1,764 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.exceptions
+    ~~~~~~~~~~~~~~~~~~~
+
+    This module implements a number of Python exceptions you can raise from
+    within your views to trigger a standard non-200 response.
+
+
+    Usage Example
+    -------------
+
+    ::
+
+        from werkzeug.wrappers import BaseRequest
+        from werkzeug.wsgi import responder
+        from werkzeug.exceptions import HTTPException, NotFound
+
+        def view(request):
+            raise NotFound()
+
+        @responder
+        def application(environ, start_response):
+            request = BaseRequest(environ)
+            try:
+                return view(request)
+            except HTTPException as e:
+                return e
+
+
+    As you can see from this example those exceptions are callable WSGI
+    applications.  Because of Python 2.4 compatibility those do not extend
+    from the response objects but only from the python exception class.
+
+    As a matter of fact they are not Werkzeug response objects.  However you
+    can get a response object by calling ``get_response()`` on an HTTP
+    exception.
+
+    Keep in mind that you have to pass an environment to ``get_response()``
+    because some errors fetch additional information from the WSGI
+    environment.
+
+    If you want to hook in a different exception page for, say, a 404 status
+    code, you can add a second except clause for that specific subclass of
+    the error::
+
+        @responder
+        def application(environ, start_response):
+            request = BaseRequest(environ)
+            try:
+                return view(request)
+            except NotFound as e:
+                return not_found(request)
+            except HTTPException as e:
+                return e
+
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import sys
+
+import werkzeug
+
+# Because of bootstrapping reasons we need to manually patch ourselves
+# onto our parent module.
+werkzeug.exceptions = sys.modules[__name__]
+
+from ._compat import implements_to_string
+from ._compat import integer_types
+from ._compat import iteritems
+from ._compat import text_type
+from ._internal import _get_environ
+from .wrappers import Response
+
+
+@implements_to_string
+class HTTPException(Exception):
+    """Baseclass for all HTTP exceptions.  This exception can be called as WSGI
+    application to render a default error page or you can catch the subclasses
+    of it independently and render nicer error messages.
+    """
+
+    code = None
+    description = None
+
+    def __init__(self, description=None, response=None):
+        super(HTTPException, self).__init__()
+        if description is not None:
+            self.description = description
+        self.response = response
+
+    @classmethod
+    def wrap(cls, exception, name=None):
+        """Create an exception that is a subclass of the calling HTTP
+        exception and the ``exception`` argument.
+
+        The first argument to the class will be passed to the
+        wrapped ``exception``, the rest to the HTTP exception. If
+        ``self.args`` is not empty, the wrapped exception message is
+        added to the HTTP exception description.
+
+        .. versionchanged:: 0.15
+            The description includes the wrapped exception message.
+        """
+
+        class newcls(cls, exception):
+            def __init__(self, arg=None, *args, **kwargs):
+                super(cls, self).__init__(*args, **kwargs)
+
+                if arg is None:
+                    exception.__init__(self)
+                else:
+                    exception.__init__(self, arg)
+
+            def get_description(self, environ=None):
+                out = super(cls, self).get_description(environ=environ)
+
+                if self.args:
+                    out += "<p><pre><code>{}: {}</code></pre></p>".format(
+                        exception.__name__, escape(exception.__str__(self))
+                    )
+
+                return out
+
+        newcls.__module__ = sys._getframe(1).f_globals.get("__name__")
+        newcls.__name__ = name or cls.__name__ + exception.__name__
+        return newcls
+
+    @property
+    def name(self):
+        """The status name."""
+        return HTTP_STATUS_CODES.get(self.code, "Unknown Error")
+
+    def get_description(self, environ=None):
+        """Get the description."""
+        return u"<p>%s</p>" % escape(self.description)
+
+    def get_body(self, environ=None):
+        """Get the HTML body."""
+        return text_type(
+            (
+                u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
+                u"<title>%(code)s %(name)s</title>\n"
+                u"<h1>%(name)s</h1>\n"
+                u"%(description)s\n"
+            )
+            % {
+                "code": self.code,
+                "name": escape(self.name),
+                "description": self.get_description(environ),
+            }
+        )
+
+    def get_headers(self, environ=None):
+        """Get a list of headers."""
+        return [("Content-Type", "text/html")]
+
+    def get_response(self, environ=None):
+        """Get a response object.  If one was passed to the exception
+        it's returned directly.
+
+        :param environ: the optional environ for the request.  This
+                        can be used to modify the response depending
+                        on how the request looked like.
+        :return: a :class:`Response` object or a subclass thereof.
+        """
+        if self.response is not None:
+            return self.response
+        if environ is not None:
+            environ = _get_environ(environ)
+        headers = self.get_headers(environ)
+        return Response(self.get_body(environ), self.code, headers)
+
+    def __call__(self, environ, start_response):
+        """Call the exception as WSGI application.
+
+        :param environ: the WSGI environment.
+        :param start_response: the response callable provided by the WSGI
+                               server.
+        """
+        response = self.get_response(environ)
+        return response(environ, start_response)
+
+    def __str__(self):
+        code = self.code if self.code is not None else "???"
+        return "%s %s: %s" % (code, self.name, self.description)
+
+    def __repr__(self):
+        code = self.code if self.code is not None else "???"
+        return "<%s '%s: %s'>" % (self.__class__.__name__, code, self.name)
+
+
+class BadRequest(HTTPException):
+    """*400* `Bad Request`
+
+    Raise if the browser sends something to the application the application
+    or server cannot handle.
+    """
+
+    code = 400
+    description = (
+        "The browser (or proxy) sent a request that this server could "
+        "not understand."
+    )
+
+
+class ClientDisconnected(BadRequest):
+    """Internal exception that is raised if Werkzeug detects a disconnected
+    client.  Since the client is already gone at that point attempting to
+    send the error message to the client might not work and might ultimately
+    result in another exception in the server.  Mainly this is here so that
+    it is silenced by default as far as Werkzeug is concerned.
+
+    Since disconnections cannot be reliably detected and are largely
+    unspecified by WSGI, this might or might not be raised when a client
+    is gone.
+
+    .. versionadded:: 0.8
+    """
+
+
+class SecurityError(BadRequest):
+    """Raised if something triggers a security error.  This is otherwise
+    exactly like a bad request error.
+
+    .. versionadded:: 0.9
+    """
+
+
+class BadHost(BadRequest):
+    """Raised if the submitted host is badly formatted.
+
+    .. versionadded:: 0.11.2
+    """
+
+
+class Unauthorized(HTTPException):
+    """*401* ``Unauthorized``
+
+    Raise if the user is not authorized to access a resource.
+
+    The ``www_authenticate`` argument should be used to set the
+    ``WWW-Authenticate`` header. This is used for HTTP basic auth and
+    other schemes. Use :class:`~werkzeug.datastructures.WWWAuthenticate`
+    to create correctly formatted values. Strictly speaking a 401
+    response is invalid if it doesn't provide at least one value for
+    this header, although real clients typically don't care.
+
+    :param description: Override the default message used for the body
+        of the response.
+    :param www_authenticate: A single value, or list of values, for the
+        WWW-Authenticate header.
+
+    .. versionchanged:: 0.15.1
+        ``description`` was moved back as the first argument, restoring
+        its previous position.
+
+    .. versionchanged:: 0.15.0
+        ``www_authenticate`` was added as the first argument, ahead of
+        ``description``.
+    """
+
+    code = 401
+    description = (
+        "The server could not verify that you are authorized to access"
+        " the URL requested. You either supplied the wrong credentials"
+        " (e.g. a bad password), or your browser doesn't understand"
+        " how to supply the credentials required."
+    )
+
+    def __init__(self, description=None, www_authenticate=None):
+        HTTPException.__init__(self, description)
+        if not isinstance(www_authenticate, (tuple, list)):
+            www_authenticate = (www_authenticate,)
+        self.www_authenticate = www_authenticate
+
+    def get_headers(self, environ=None):
+        headers = HTTPException.get_headers(self, environ)
+        if self.www_authenticate:
+            headers.append(
+                ("WWW-Authenticate", ", ".join([str(x) for x in self.www_authenticate]))
+            )
+        return headers
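+
+
+# Sketch: raising a 401 with a preformatted ``WWW-Authenticate`` challenge.
+# The realm string is only an example value.
+def _demo_unauthorized():  # illustrative sketch, unused by Werkzeug
+    raise Unauthorized(www_authenticate='Basic realm="login required"')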
+
+
+class Forbidden(HTTPException):
+    """*403* `Forbidden`
+
+    Raise if the user doesn't have the permission for the requested resource
+    but was authenticated.
+    """
+
+    code = 403
+    description = (
+        "You don't have the permission to access the requested"
+        " resource. It is either read-protected or not readable by the"
+        " server."
+    )
+
+
+class NotFound(HTTPException):
+    """*404* `Not Found`
+
+    Raise if a resource does not exist and never existed.
+    """
+
+    code = 404
+    description = (
+        "The requested URL was not found on the server. If you entered"
+        " the URL manually please check your spelling and try again."
+    )
+
+
+class MethodNotAllowed(HTTPException):
+    """*405* `Method Not Allowed`
+
+    Raise if the client used a method the resource does not handle.  For
+    example `POST` if the resource is view only.  Especially useful for REST.
+
+    The first argument for this exception should be a list of allowed methods.
+    Strictly speaking the response would be invalid without an ``Allow``
+    header listing the valid methods, which is what that list is used for.
+    """
+
+    code = 405
+    description = "The method is not allowed for the requested URL."
+
+    def __init__(self, valid_methods=None, description=None):
+        """Takes an optional list of valid http methods
+        starting with werkzeug 0.3 the list will be mandatory."""
+        HTTPException.__init__(self, description)
+        self.valid_methods = valid_methods
+
+    def get_headers(self, environ=None):
+        headers = HTTPException.get_headers(self, environ)
+        if self.valid_methods:
+            headers.append(("Allow", ", ".join(self.valid_methods)))
+        return headers
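+
+
+# Sketch: a 405 that advertises the handled methods, which get_headers above
+# turns into an ``Allow: GET, HEAD`` header on the response.
+def _demo_method_not_allowed():  # illustrative sketch, unused by Werkzeug
+    raise MethodNotAllowed(valid_methods=["GET", "HEAD"])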
+
+
+class NotAcceptable(HTTPException):
+    """*406* `Not Acceptable`
+
+    Raise if the server can't return any content conforming to the
+    `Accept` headers of the client.
+    """
+
+    code = 406
+
+    description = (
+        "The resource identified by the request is only capable of"
+        " generating response entities which have content"
+        " characteristics not acceptable according to the accept"
+        " headers sent in the request."
+    )
+
+
+class RequestTimeout(HTTPException):
+    """*408* `Request Timeout`
+
+    Raise to signal a timeout.
+    """
+
+    code = 408
+    description = (
+        "The server closed the network connection because the browser"
+        " didn't finish the request within the specified time."
+    )
+
+
+class Conflict(HTTPException):
+    """*409* `Conflict`
+
+    Raise to signal that a request cannot be completed because it conflicts
+    with the current state on the server.
+
+    .. versionadded:: 0.7
+    """
+
+    code = 409
+    description = (
+        "A conflict happened while processing the request. The"
+        " resource might have been modified while the request was being"
+        " processed."
+    )
+
+
+class Gone(HTTPException):
+    """*410* `Gone`
+
+    Raise if a resource existed previously and went away without new location.
+    """
+
+    code = 410
+    description = (
+        "The requested URL is no longer available on this server and"
+        " there is no forwarding address. If you followed a link from a"
+        " foreign page, please contact the author of this page."
+    )
+
+
+class LengthRequired(HTTPException):
+    """*411* `Length Required`
+
+    Raise if the browser submitted data but no ``Content-Length`` header which
+    is required for the kind of processing the server does.
+    """
+
+    code = 411
+    description = (
+        "A request with this method requires a valid <code>Content-"
+        "Length</code> header."
+    )
+
+
+class PreconditionFailed(HTTPException):
+    """*412* `Precondition Failed`
+
+    Status code used in combination with ``If-Match``, ``If-None-Match``, or
+    ``If-Unmodified-Since``.
+    """
+
+    code = 412
+    description = (
+        "The precondition on the request for the URL failed positive evaluation."
+    )
+
+
+class RequestEntityTooLarge(HTTPException):
+    """*413* `Request Entity Too Large`
+
+    The status code one should return if the data submitted exceeded a given
+    limit.
+    """
+
+    code = 413
+    description = "The data value transmitted exceeds the capacity limit."
+
+
+class RequestURITooLarge(HTTPException):
+    """*414* `Request URI Too Large`
+
+    Like *413* but for too long URLs.
+    """
+
+    code = 414
+    description = (
+        "The length of the requested URL exceeds the capacity limit for"
+        " this server. The request cannot be processed."
+    )
+
+
+class UnsupportedMediaType(HTTPException):
+    """*415* `Unsupported Media Type`
+
+    The status code returned if the server is unable to handle the media type
+    the client transmitted.
+    """
+
+    code = 415
+    description = (
+        "The server does not support the media type transmitted in the request."
+    )
+
+
+class RequestedRangeNotSatisfiable(HTTPException):
+    """*416* `Requested Range Not Satisfiable`
+
+    The client asked for an invalid part of the file.
+
+    .. versionadded:: 0.7
+    """
+
+    code = 416
+    description = "The server cannot provide the requested range."
+
+    def __init__(self, length=None, units="bytes", description=None):
+        """Takes an optional `Content-Range` header value based on ``length``
+        parameter.
+        """
+        HTTPException.__init__(self, description)
+        self.length = length
+        self.units = units
+
+    def get_headers(self, environ=None):
+        headers = HTTPException.get_headers(self, environ)
+        if self.length is not None:
+            headers.append(("Content-Range", "%s */%d" % (self.units, self.length)))
+        return headers
+
+
+class ExpectationFailed(HTTPException):
+    """*417* `Expectation Failed`
+
+    The server cannot meet the requirements of the Expect request-header.
+
+    .. versionadded:: 0.7
+    """
+
+    code = 417
+    description = "The server could not meet the requirements of the Expect header"
+
+
+class ImATeapot(HTTPException):
+    """*418* `I'm a teapot`
+
+    The server should return this if it is a teapot and someone attempted
+    to brew coffee with it.
+
+    .. versionadded:: 0.7
+    """
+
+    code = 418
+    description = "This server is a teapot, not a coffee machine"
+
+
+class UnprocessableEntity(HTTPException):
+    """*422* `Unprocessable Entity`
+
+    Used if the request is well formed, but the instructions are otherwise
+    incorrect.
+    """
+
+    code = 422
+    description = (
+        "The request was well-formed but was unable to be followed due"
+        " to semantic errors."
+    )
+
+
+class Locked(HTTPException):
+    """*423* `Locked`
+
+    Used if the resource that is being accessed is locked.
+    """
+
+    code = 423
+    description = "The resource that is being accessed is locked."
+
+
+class FailedDependency(HTTPException):
+    """*424* `Failed Dependency`
+
+    Used if the method could not be performed on the resource
+    because the requested action depended on another action and that action failed.
+    """
+
+    code = 424
+    description = (
+        "The method could not be performed on the resource because the"
+        " requested action depended on another action and that action"
+        " failed."
+    )
+
+
+class PreconditionRequired(HTTPException):
+    """*428* `Precondition Required`
+
+    The server requires this request to be conditional, typically to prevent
+    the lost update problem, which is a race condition between two or more
+    clients attempting to update a resource through PUT or DELETE. By requiring
+    each client to include a conditional header ("If-Match" or "If-Unmodified-
+    Since") with the proper value retained from a recent GET request, the
+    server ensures that each client has at least seen the previous revision of
+    the resource.
+    """
+
+    code = 428
+    description = (
+        "This request is required to be conditional; try using"
+        ' "If-Match" or "If-Unmodified-Since".'
+    )
+
+
+class TooManyRequests(HTTPException):
+    """*429* `Too Many Requests`
+
+    The server is limiting the rate at which this user receives responses, and
+    this request exceeds that rate. (The server may use any convenient method
+    to identify users and their request rates). The server may include a
+    "Retry-After" header to indicate how long the user should wait before
+    retrying.
+    """
+
+    code = 429
+    description = "This user has exceeded an allotted request count. Try again later."
+
+
+class RequestHeaderFieldsTooLarge(HTTPException):
+    """*431* `Request Header Fields Too Large`
+
+    The server refuses to process the request because the header fields are too
+    large. One or more individual fields may be too large, or the set of all
+    headers is too large.
+    """
+
+    code = 431
+    description = "One or more header fields exceeds the maximum size."
+
+
+class UnavailableForLegalReasons(HTTPException):
+    """*451* `Unavailable For Legal Reasons`
+
+    This status code indicates that the server is denying access to the
+    resource as a consequence of a legal demand.
+    """
+
+    code = 451
+    description = "Unavailable for legal reasons."
+
+
+class InternalServerError(HTTPException):
+    """*500* `Internal Server Error`
+
+    Raise if an internal server error occurred.  This is a good fallback if an
+    unknown error occurred in the dispatcher.
+    """
+
+    code = 500
+    description = (
+        "The server encountered an internal error and was unable to"
+        " complete your request. Either the server is overloaded or"
+        " there is an error in the application."
+    )
+
+
+class NotImplemented(HTTPException):
+    """*501* `Not Implemented`
+
+    Raise if the application does not support the action requested by the
+    browser.
+    """
+
+    code = 501
+    description = "The server does not support the action requested by the browser."
+
+
+class BadGateway(HTTPException):
+    """*502* `Bad Gateway`
+
+    If you do proxying in your application you should return this status code
+    if you received an invalid response from the upstream server it accessed
+    in attempting to fulfill the request.
+    """
+
+    code = 502
+    description = (
+        "The proxy server received an invalid response from an upstream server."
+    )
+
+
+class ServiceUnavailable(HTTPException):
+    """*503* `Service Unavailable`
+
+    Status code you should return if a service is temporarily unavailable.
+    """
+
+    code = 503
+    description = (
+        "The server is temporarily unable to service your request due"
+        " to maintenance downtime or capacity problems. Please try"
+        " again later."
+    )
+
+
+class GatewayTimeout(HTTPException):
+    """*504* `Gateway Timeout`
+
+    Status code you should return if a connection to an upstream server
+    times out.
+    """
+
+    code = 504
+    description = "The connection to an upstream server timed out."
+
+
+class HTTPVersionNotSupported(HTTPException):
+    """*505* `HTTP Version Not Supported`
+
+    The server does not support the HTTP protocol version used in the request.
+    """
+
+    code = 505
+    description = (
+        "The server does not support the HTTP protocol version used in the request."
+    )
+
+
+default_exceptions = {}
+__all__ = ["HTTPException"]
+
+
+def _find_exceptions():
+    for _name, obj in iteritems(globals()):
+        try:
+            is_http_exception = issubclass(obj, HTTPException)
+        except TypeError:
+            is_http_exception = False
+        if not is_http_exception or obj.code is None:
+            continue
+        __all__.append(obj.__name__)
+        old_obj = default_exceptions.get(obj.code, None)
+        if old_obj is not None and issubclass(obj, old_obj):
+            continue
+        default_exceptions[obj.code] = obj
+
+
+_find_exceptions()
+del _find_exceptions
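+
+
+# Sketch: after ``_find_exceptions`` ran, ``default_exceptions`` maps status
+# codes to exception classes, e.g. 404 to ``NotFound``.
+def _demo_default_exceptions():  # illustrative sketch, unused by Werkzeug
+    cls = default_exceptions[404]
+    assert cls is NotFound
+    return cls()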
+
+
+class Aborter(object):
+    """When passed a dict of code -> exception items it can be used as
+    callable that raises exceptions.  If the first argument to the
+    callable is an integer it will be looked up in the mapping, if it's
+    a WSGI application it will be raised in a proxy exception.
+
+    The rest of the arguments are forwarded to the exception constructor.
+    """
+
+    def __init__(self, mapping=None, extra=None):
+        if mapping is None:
+            mapping = default_exceptions
+        self.mapping = dict(mapping)
+        if extra is not None:
+            self.mapping.update(extra)
+
+    def __call__(self, code, *args, **kwargs):
+        if not args and not kwargs and not isinstance(code, integer_types):
+            raise HTTPException(response=code)
+        if code not in self.mapping:
+            raise LookupError("no exception for %r" % code)
+        raise self.mapping[code](*args, **kwargs)
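+
+
+# Sketch: extending an ``Aborter`` with an extra mapping entry.  The 425
+# class here is hypothetical and shown only for illustration.
+def _demo_custom_aborter():  # illustrative sketch, unused by Werkzeug
+    class TooEarly(HTTPException):
+        code = 425
+        description = "The request was sent too early."
+
+    custom_abort = Aborter(extra={425: TooEarly})
+    custom_abort(425)  # raises TooEarly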
+
+
+def abort(status, *args, **kwargs):
+    """Raises an :py:exc:`HTTPException` for the given status code or WSGI
+    application::
+
+        abort(404)  # 404 Not Found
+        abort(Response('Hello World'))
+
+    Can be passed a WSGI application or a status code.  If a status code is
+    given it's looked up in the list of exceptions and that exception is
+    raised; if passed a WSGI application it's wrapped in a proxy WSGI
+    exception which is then raised.
+    """
+    return _aborter(status, *args, **kwargs)
+
+
+_aborter = Aborter()
+
+
+#: an exception that is used internally to signal both a key error and a
+#: bad request.  Used by a lot of the datastructures.
+BadRequestKeyError = BadRequest.wrap(KeyError)
+
+# imported here because of circular dependencies of werkzeug.utils
+from .http import HTTP_STATUS_CODES
+from .utils import escape
diff --git a/lib/python3.7/site-packages/werkzeug/filesystem.py b/lib/python3.7/site-packages/werkzeug/filesystem.py
new file mode 100644
index 0000000000000000000000000000000000000000..d016caea6abc0a25593906097971fc715eb33536
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/filesystem.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.filesystem
+    ~~~~~~~~~~~~~~~~~~~
+
+    Various utilities for the local filesystem.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import codecs
+import sys
+import warnings
+
+# We do not trust traditional unixes.
+has_likely_buggy_unicode_filesystem = (
+    sys.platform.startswith("linux") or "bsd" in sys.platform
+)
+
+
+def _is_ascii_encoding(encoding):
+    """Given an encoding this figures out if the encoding is actually ASCII (which
+    is something we don't actually want in most cases). This is necessary
+    because ASCII comes under many names such as ANSI_X3.4-1968.
+    """
+    if encoding is None:
+        return False
+    try:
+        return codecs.lookup(encoding).name == "ascii"
+    except LookupError:
+        return False
+
+
+class BrokenFilesystemWarning(RuntimeWarning, UnicodeWarning):
+    """The warning used by Werkzeug to signal a broken filesystem. Will only be
+    used once per runtime."""
+
+
+_warned_about_filesystem_encoding = False
+
+
+def get_filesystem_encoding():
+    """Returns the filesystem encoding that should be used. Note that this is
+    different from the Python understanding of the filesystem encoding which
+    might be deeply flawed. Do not use this value against Python's unicode APIs
+    because it might be different. See :ref:`filesystem-encoding` for the exact
+    behavior.
+
+    The concept of a filesystem encoding in general is not something you
+    should rely on.  As such, if you ever need to use this function for
+    anything other than writing wrapper code, reconsider.
+    """
+    global _warned_about_filesystem_encoding
+    rv = sys.getfilesystemencoding()
+    if has_likely_buggy_unicode_filesystem and (not rv or _is_ascii_encoding(rv)):
+        if not _warned_about_filesystem_encoding:
+            warnings.warn(
+                "Detected a misconfigured UNIX filesystem: Will use"
+                " UTF-8 as filesystem encoding instead of {0!r}".format(rv),
+                BrokenFilesystemWarning,
+            )
+            _warned_about_filesystem_encoding = True
+        return "utf-8"
+    return rv
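+
+
+# Sketch: callers can use the returned name directly when decoding raw
+# filesystem paths; the example bytes are arbitrary.
+def _demo_filesystem_encoding():  # illustrative sketch, unused by Werkzeug
+    enc = get_filesystem_encoding()
+    return b"some/path".decode(enc, "replace")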
diff --git a/lib/python3.7/site-packages/werkzeug/formparser.py b/lib/python3.7/site-packages/werkzeug/formparser.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ddc5c8ff2be0332fe8afac40e3c9dccf0f599e3
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/formparser.py
@@ -0,0 +1,586 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.formparser
+    ~~~~~~~~~~~~~~~~~~~
+
+    This module implements the form parsing.  It supports url-encoded forms
+    as well as non-nested multipart uploads.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import codecs
+import re
+from functools import update_wrapper
+from itertools import chain
+from itertools import repeat
+from itertools import tee
+
+from ._compat import BytesIO
+from ._compat import text_type
+from ._compat import to_native
+from .datastructures import FileStorage
+from .datastructures import Headers
+from .datastructures import MultiDict
+from .http import parse_options_header
+from .urls import url_decode_stream
+from .wsgi import get_content_length
+from .wsgi import get_input_stream
+from .wsgi import make_line_iter
+
+# there are some platforms where SpooledTemporaryFile is not available.
+# In that case we need to provide a fallback.
+try:
+    from tempfile import SpooledTemporaryFile
+except ImportError:
+    from tempfile import TemporaryFile
+
+    SpooledTemporaryFile = None
+
+
+#: an iterator that yields empty strings
+_empty_string_iter = repeat("")
+
+#: a regular expression for multipart boundaries
+_multipart_boundary_re = re.compile("^[ -~]{0,200}[!-~]$")
+
+#: HTTP transfer encodings we support for multipart messages (these are
+#: also available as Python codecs).
+_supported_multipart_encodings = frozenset(["base64", "quoted-printable"])
+
+
+def default_stream_factory(
+    total_content_length, filename, content_type, content_length=None
+):
+    """The stream factory that is used per default."""
+    max_size = 1024 * 500
+    if SpooledTemporaryFile is not None:
+        return SpooledTemporaryFile(max_size=max_size, mode="wb+")
+    if total_content_length is None or total_content_length > max_size:
+        return TemporaryFile("wb+")
+    return BytesIO()
+
+
+def parse_form_data(
+    environ,
+    stream_factory=None,
+    charset="utf-8",
+    errors="replace",
+    max_form_memory_size=None,
+    max_content_length=None,
+    cls=None,
+    silent=True,
+):
+    """Parse the form data in the environ and return it as tuple in the form
+    ``(stream, form, files)``.  You should only call this method if the
+    transport method is `POST`, `PUT`, or `PATCH`.
+
+    If the mimetype of the data transmitted is `multipart/form-data` the
+    files multidict will be filled with `FileStorage` objects.  If the
+    mimetype is unknown the input stream is wrapped and returned as the
+    first item of the tuple; otherwise the returned stream is empty.
+
+    This is a shortcut for the common usage of :class:`FormDataParser`.
+
+    Have a look at :ref:`dealing-with-request-data` for more details.
+
+    .. versionadded:: 0.5
+       The `max_form_memory_size`, `max_content_length` and
+       `cls` parameters were added.
+
+    .. versionadded:: 0.5.1
+       The optional `silent` flag was added.
+
+    :param environ: the WSGI environment to be used for parsing.
+    :param stream_factory: An optional callable that returns a new read and
+                           writeable file descriptor.  This callable works
+                           the same as :meth:`~BaseResponse._get_file_stream`.
+    :param charset: The character set for URL and url encoded form data.
+    :param errors: The encoding error behavior.
+    :param max_form_memory_size: the maximum number of bytes to be accepted for
+                           in-memory stored form data.  If the data
+                           exceeds the value specified an
+                           :exc:`~exceptions.RequestEntityTooLarge`
+                           exception is raised.
+    :param max_content_length: If this is provided and the transmitted data
+                               is longer than this value an
+                               :exc:`~exceptions.RequestEntityTooLarge`
+                               exception is raised.
+    :param cls: an optional dict class to use.  If this is not specified
+                       or `None` the default :class:`MultiDict` is used.
+    :param silent: If set to False parsing errors will not be caught.
+    :return: A tuple in the form ``(stream, form, files)``.
+    """
+    return FormDataParser(
+        stream_factory,
+        charset,
+        errors,
+        max_form_memory_size,
+        max_content_length,
+        cls,
+        silent,
+    ).parse_from_environ(environ)
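As a quick illustration (editor's sketch, not part of the file), ``parse_form_data`` can be exercised with a hand-built WSGI environ:

    from io import BytesIO

    from werkzeug.formparser import parse_form_data

    body = b"name=value&name=other"
    environ = {
        "REQUEST_METHOD": "POST",
        "CONTENT_TYPE": "application/x-www-form-urlencoded",
        "CONTENT_LENGTH": str(len(body)),
        "wsgi.input": BytesIO(body),
    }
    stream, form, files = parse_form_data(environ)
    assert form.getlist("name") == ["value", "other"]
    assert not files  # no multipart parts in an urlencoded body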
+
+
+def exhaust_stream(f):
+    """Helper decorator for methods that exhausts the stream on return."""
+
+    def wrapper(self, stream, *args, **kwargs):
+        try:
+            return f(self, stream, *args, **kwargs)
+        finally:
+            exhaust = getattr(stream, "exhaust", None)
+            if exhaust is not None:
+                exhaust()
+            else:
+                while 1:
+                    chunk = stream.read(1024 * 64)
+                    if not chunk:
+                        break
+
+    return update_wrapper(wrapper, f)
+
+
+class FormDataParser(object):
+    """This class implements parsing of form data for Werkzeug.  By itself
+    it can parse multipart and url encoded form data.  It can be subclassed
+    and extended but for most mimetypes it is a better idea to use the
+    untouched stream and expose it as separate attributes on a request
+    object.
+
+    .. versionadded:: 0.8
+
+    :param stream_factory: An optional callable that returns a new read and
+                           writeable file descriptor.  This callable works
+                           the same as :meth:`~BaseResponse._get_file_stream`.
+    :param charset: The character set for URL and url encoded form data.
+    :param errors: The encoding error behavior.
+    :param max_form_memory_size: the maximum number of bytes to be accepted for
+                           in-memory stored form data.  If the data
+                           exceeds the value specified an
+                           :exc:`~exceptions.RequestEntityTooLarge`
+                           exception is raised.
+    :param max_content_length: If this is provided and the transmitted data
+                               is longer than this value an
+                               :exc:`~exceptions.RequestEntityTooLarge`
+                               exception is raised.
+    :param cls: an optional dict class to use.  If this is not specified
+                       or `None` the default :class:`MultiDict` is used.
+    :param silent: If set to False parsing errors will not be caught.
+    """
+
+    def __init__(
+        self,
+        stream_factory=None,
+        charset="utf-8",
+        errors="replace",
+        max_form_memory_size=None,
+        max_content_length=None,
+        cls=None,
+        silent=True,
+    ):
+        if stream_factory is None:
+            stream_factory = default_stream_factory
+        self.stream_factory = stream_factory
+        self.charset = charset
+        self.errors = errors
+        self.max_form_memory_size = max_form_memory_size
+        self.max_content_length = max_content_length
+        if cls is None:
+            cls = MultiDict
+        self.cls = cls
+        self.silent = silent
+
+    def get_parse_func(self, mimetype, options):
+        return self.parse_functions.get(mimetype)
+
+    def parse_from_environ(self, environ):
+        """Parses the information from the environment as form data.
+
+        :param environ: the WSGI environment to be used for parsing.
+        :return: A tuple in the form ``(stream, form, files)``.
+        """
+        content_type = environ.get("CONTENT_TYPE", "")
+        content_length = get_content_length(environ)
+        mimetype, options = parse_options_header(content_type)
+        return self.parse(get_input_stream(environ), mimetype, content_length, options)
+
+    def parse(self, stream, mimetype, content_length, options=None):
+        """Parses the information from the given stream, mimetype,
+        content length and mimetype parameters.
+
+        :param stream: an input stream
+        :param mimetype: the mimetype of the data
+        :param content_length: the content length of the incoming data
+        :param options: optional mimetype parameters (used for
+                        the multipart boundary for instance)
+        :return: A tuple in the form ``(stream, form, files)``.
+        """
+        if (
+            self.max_content_length is not None
+            and content_length is not None
+            and content_length > self.max_content_length
+        ):
+            raise exceptions.RequestEntityTooLarge()
+        if options is None:
+            options = {}
+
+        parse_func = self.get_parse_func(mimetype, options)
+        if parse_func is not None:
+            try:
+                return parse_func(self, stream, mimetype, content_length, options)
+            except ValueError:
+                if not self.silent:
+                    raise
+
+        return stream, self.cls(), self.cls()
+
+    @exhaust_stream
+    def _parse_multipart(self, stream, mimetype, content_length, options):
+        parser = MultiPartParser(
+            self.stream_factory,
+            self.charset,
+            self.errors,
+            max_form_memory_size=self.max_form_memory_size,
+            cls=self.cls,
+        )
+        boundary = options.get("boundary")
+        if boundary is None:
+            raise ValueError("Missing boundary")
+        if isinstance(boundary, text_type):
+            boundary = boundary.encode("ascii")
+        form, files = parser.parse(stream, boundary, content_length)
+        return stream, form, files
+
+    @exhaust_stream
+    def _parse_urlencoded(self, stream, mimetype, content_length, options):
+        if (
+            self.max_form_memory_size is not None
+            and content_length is not None
+            and content_length > self.max_form_memory_size
+        ):
+            raise exceptions.RequestEntityTooLarge()
+        form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls)
+        return stream, form, self.cls()
+
+    #: mapping of mimetypes to parsing functions
+    parse_functions = {
+        "multipart/form-data": _parse_multipart,
+        "application/x-www-form-urlencoded": _parse_urlencoded,
+        "application/x-url-encoded": _parse_urlencoded,
+    }
+
+
+def is_valid_multipart_boundary(boundary):
+    """Checks if the string given is a valid multipart boundary."""
+    return _multipart_boundary_re.match(boundary) is not None
+
+
+def _line_parse(line):
+    """Removes line ending characters and returns a tuple (`stripped_line`,
+    `is_terminated`).
+    """
+    if line[-2:] in ["\r\n", b"\r\n"]:
+        return line[:-2], True
+    elif line[-1:] in ["\r", "\n", b"\r", b"\n"]:
+        return line[:-1], True
+    return line, False
+
+
+def parse_multipart_headers(iterable):
+    """Parses multipart headers from an iterable that yields lines (including
+    the trailing newline symbol).  The iterable has to be newline terminated.
+
+    The iterable will stop at the line where the headers ended so it can be
+    further consumed.
+
+    :param iterable: iterable of strings that are newline terminated
+    """
+    result = []
+    for line in iterable:
+        line = to_native(line)
+        line, line_terminated = _line_parse(line)
+        if not line_terminated:
+            raise ValueError("unexpected end of line in multipart header")
+        if not line:
+            break
+        elif line[0] in " \t" and result:
+            key, value = result[-1]
+            result[-1] = (key, value + "\n " + line[1:])
+        else:
+            parts = line.split(":", 1)
+            if len(parts) == 2:
+                result.append((parts[0].strip(), parts[1].strip()))
+
+    # we link the list to the headers; no need to create a copy, the
+    # list was not shared anyway.
+    return Headers(result)
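An illustrative sketch of the contract described above (editor's addition): the iterator is only consumed up to the blank line that terminates the header block:

    from werkzeug.formparser import parse_multipart_headers

    lines = iter(
        [
            b'Content-Disposition: form-data; name="upload"\r\n',
            b"Content-Type: text/plain\r\n",
            b"\r\n",  # blank line ends the headers
            b"payload follows\r\n",
        ]
    )
    headers = parse_multipart_headers(lines)
    assert headers["Content-Type"] == "text/plain"
    assert next(lines) == b"payload follows\r\n"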
+
+
+_begin_form = "begin_form"
+_begin_file = "begin_file"
+_cont = "cont"
+_end = "end"
+
+
+class MultiPartParser(object):
+    def __init__(
+        self,
+        stream_factory=None,
+        charset="utf-8",
+        errors="replace",
+        max_form_memory_size=None,
+        cls=None,
+        buffer_size=64 * 1024,
+    ):
+        self.charset = charset
+        self.errors = errors
+        self.max_form_memory_size = max_form_memory_size
+        self.stream_factory = (
+            default_stream_factory if stream_factory is None else stream_factory
+        )
+        self.cls = MultiDict if cls is None else cls
+
+        # make sure the buffer size is divisible by four so that we can base64
+        # decode chunk by chunk
+        assert buffer_size % 4 == 0, "buffer size has to be divisible by 4"
+        # the buffer size also has to be at least 1024 bytes, or overly
+        # long headers will not fit into the buffer
+        assert buffer_size >= 1024, "buffer size has to be at least 1KB"
+
+        self.buffer_size = buffer_size
+
+    def _fix_ie_filename(self, filename):
+        """Internet Explorer 6 transmits the full file name if a file is
+        uploaded.  This function strips the full path if it thinks the
+        filename is Windows-like absolute.
+        """
+        if filename[1:3] == ":\\" or filename[:2] == "\\\\":
+            return filename.split("\\")[-1]
+        return filename
+
+    def _find_terminator(self, iterator):
+        """The terminator might have some additional newlines before it.
+        There is at least one application that sends additional newlines
+        before headers (the python setuptools package).
+        """
+        for line in iterator:
+            if not line:
+                break
+            line = line.strip()
+            if line:
+                return line
+        return b""
+
+    def fail(self, message):
+        raise ValueError(message)
+
+    def get_part_encoding(self, headers):
+        transfer_encoding = headers.get("content-transfer-encoding")
+        if (
+            transfer_encoding is not None
+            and transfer_encoding in _supported_multipart_encodings
+        ):
+            return transfer_encoding
+
+    def get_part_charset(self, headers):
+        # Figure out input charset for current part
+        content_type = headers.get("content-type")
+        if content_type:
+            mimetype, ct_params = parse_options_header(content_type)
+            return ct_params.get("charset", self.charset)
+        return self.charset
+
+    def start_file_streaming(self, filename, headers, total_content_length):
+        if isinstance(filename, bytes):
+            filename = filename.decode(self.charset, self.errors)
+        filename = self._fix_ie_filename(filename)
+        content_type = headers.get("content-type")
+        try:
+            content_length = int(headers["content-length"])
+        except (KeyError, ValueError):
+            content_length = 0
+        container = self.stream_factory(
+            total_content_length=total_content_length,
+            filename=filename,
+            content_type=content_type,
+            content_length=content_length,
+        )
+        return filename, container
+
+    def in_memory_threshold_reached(self, bytes):
+        raise exceptions.RequestEntityTooLarge()
+
+    def validate_boundary(self, boundary):
+        if not boundary:
+            self.fail("Missing boundary")
+        if not is_valid_multipart_boundary(boundary):
+            self.fail("Invalid boundary: %s" % boundary)
+        if len(boundary) > self.buffer_size:  # pragma: no cover
+            # this should never happen because we check for a minimum size
+            # of 1024 and boundaries may not be longer than 200.  The only
+            # situation when this happens is for non debug builds where
+            # the assert is skipped.
+            self.fail("Boundary longer than buffer size")
+
+    def parse_lines(self, file, boundary, content_length, cap_at_buffer=True):
+        """Generate parts of
+        ``('begin_form', (headers, name))``
+        ``('begin_file', (headers, name, filename))``
+        ``('cont', bytestring)``
+        ``('end', None)``
+
+        Always obeys the grammar
+        parts = ( begin_form cont* end |
+                  begin_file cont* end )*
+        """
+        next_part = b"--" + boundary
+        last_part = next_part + b"--"
+
+        iterator = chain(
+            make_line_iter(
+                file,
+                limit=content_length,
+                buffer_size=self.buffer_size,
+                cap_at_buffer=cap_at_buffer,
+            ),
+            _empty_string_iter,
+        )
+
+        terminator = self._find_terminator(iterator)
+
+        if terminator == last_part:
+            return
+        elif terminator != next_part:
+            self.fail("Expected boundary at start of multipart data")
+
+        while terminator != last_part:
+            headers = parse_multipart_headers(iterator)
+
+            disposition = headers.get("content-disposition")
+            if disposition is None:
+                self.fail("Missing Content-Disposition header")
+            disposition, extra = parse_options_header(disposition)
+            transfer_encoding = self.get_part_encoding(headers)
+            name = extra.get("name")
+            filename = extra.get("filename")
+
+            # if no filename is given we stream the part into memory.  A
+            # list is used as a temporary container.
+            if filename is None:
+                yield _begin_form, (headers, name)
+
+            # otherwise we parse the rest of the headers and ask the stream
+            # factory for something we can write in.
+            else:
+                yield _begin_file, (headers, name, filename)
+
+            buf = b""
+            for line in iterator:
+                if not line:
+                    self.fail("unexpected end of stream")
+
+                if line[:2] == b"--":
+                    terminator = line.rstrip()
+                    if terminator in (next_part, last_part):
+                        break
+
+                if transfer_encoding is not None:
+                    if transfer_encoding == "base64":
+                        transfer_encoding = "base64_codec"
+                    try:
+                        line = codecs.decode(line, transfer_encoding)
+                    except Exception:
+                        self.fail("could not decode transfer encoded chunk")
+
+                # we have something in the buffer from the last iteration.
+                # this is usually a newline delimiter.
+                if buf:
+                    yield _cont, buf
+                    buf = b""
+
+                # If the line ends with a Windows CRLF we write everything
+                # except the last two bytes.  In all other cases we write
+                # everything except the last byte.  If that byte was a
+                # newline, that's fine; otherwise it does not matter
+                # because we will write it out on the next iteration.
+                # This ensures we never write the final newline into the
+                # stream, so we do not have to truncate the stream
+                # afterwards.  We do, however, have to make sure that
+                # anything other than a newline in the buffer is written
+                # out.
+                if line[-2:] == b"\r\n":
+                    buf = b"\r\n"
+                    cutoff = -2
+                else:
+                    buf = line[-1:]
+                    cutoff = -1
+                yield _cont, line[:cutoff]
+
+            else:  # pragma: no cover
+                raise ValueError("unexpected end of part")
+
+            # if there is a leftover in the buffer that is not a newline
+            # character we have to flush it, otherwise we will chop off
+            # certain values.
+            if buf not in (b"", b"\r", b"\n", b"\r\n"):
+                yield _cont, buf
+
+            yield _end, None
+
+    def parse_parts(self, file, boundary, content_length):
+        """Generate ``('file', (name, val))`` and
+        ``('form', (name, val))`` parts.
+        """
+        in_memory = 0
+
+        for ellt, ell in self.parse_lines(file, boundary, content_length):
+            if ellt == _begin_file:
+                headers, name, filename = ell
+                is_file = True
+                guard_memory = False
+                filename, container = self.start_file_streaming(
+                    filename, headers, content_length
+                )
+                _write = container.write
+
+            elif ellt == _begin_form:
+                headers, name = ell
+                is_file = False
+                container = []
+                _write = container.append
+                guard_memory = self.max_form_memory_size is not None
+
+            elif ellt == _cont:
+                _write(ell)
+                # if we write into memory and there is a memory size limit we
+                # count the number of bytes in memory and raise an exception if
+                # there is too much data in memory.
+                if guard_memory:
+                    in_memory += len(ell)
+                    if in_memory > self.max_form_memory_size:
+                        self.in_memory_threshold_reached(in_memory)
+
+            elif ellt == _end:
+                if is_file:
+                    container.seek(0)
+                    yield (
+                        "file",
+                        (name, FileStorage(container, filename, name, headers=headers)),
+                    )
+                else:
+                    part_charset = self.get_part_charset(headers)
+                    yield (
+                        "form",
+                        (name, b"".join(container).decode(part_charset, self.errors)),
+                    )
+
+    def parse(self, file, boundary, content_length):
+        formstream, filestream = tee(
+            self.parse_parts(file, boundary, content_length), 2
+        )
+        form = (p[1] for p in formstream if p[0] == "form")
+        files = (p[1] for p in filestream if p[0] == "file")
+        return self.cls(form), self.cls(files)
+
+
+from . import exceptions
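To make the event grammar of ``parse_lines`` concrete, here is an illustrative round trip through ``MultiPartParser.parse`` with a tiny handcrafted body (editor's sketch, assuming the vendored package is importable):

    from io import BytesIO

    from werkzeug.formparser import MultiPartParser

    boundary = b"simpleboundary"
    body = (
        b"--simpleboundary\r\n"
        b'Content-Disposition: form-data; name="field"\r\n'
        b"\r\n"
        b"value\r\n"
        b"--simpleboundary--\r\n"
    )
    form, files = MultiPartParser().parse(BytesIO(body), boundary, len(body))
    assert form["field"] == "value"
    assert not files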
diff --git a/lib/python3.7/site-packages/werkzeug/http.py b/lib/python3.7/site-packages/werkzeug/http.py
new file mode 100644
index 0000000000000000000000000000000000000000..af3200750f9d8985b9ea0009c398f4752e914f11
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/http.py
@@ -0,0 +1,1303 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.http
+    ~~~~~~~~~~~~~
+
+    Werkzeug comes with a bunch of utilities for dealing with HTTP data.
+    Most of the classes and functions provided by this module are used by
+    the wrappers, but they are useful on their own, too, especially if the
+    response and request objects are not used.
+
+    This covers some of the more HTTP-centric features of WSGI; some other
+    utilities, such as cookie handling, are documented in the
+    `werkzeug.utils` module.
+
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import base64
+import re
+import warnings
+from datetime import datetime
+from datetime import timedelta
+from hashlib import md5
+from time import gmtime
+from time import time
+
+from ._compat import integer_types
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import to_bytes
+from ._compat import to_unicode
+from ._compat import try_coerce_native
+from ._internal import _cookie_parse_impl
+from ._internal import _cookie_quote
+from ._internal import _make_cookie_domain
+
+try:
+    from email.utils import parsedate_tz
+except ImportError:
+    from email.Utils import parsedate_tz
+
+try:
+    from urllib.request import parse_http_list as _parse_list_header
+    from urllib.parse import unquote_to_bytes as _unquote
+except ImportError:
+    from urllib2 import parse_http_list as _parse_list_header
+    from urllib2 import unquote as _unquote
+
+_cookie_charset = "latin1"
+_basic_auth_charset = "utf-8"
+# for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231
+_accept_re = re.compile(
+    r"""
+    (                       # media-range capturing-parenthesis
+      [^\s;,]+              # type/subtype
+      (?:[ \t]*;[ \t]*      # ";"
+        (?:                 # parameter non-capturing-parenthesis
+          [^\s;,q][^\s;,]*  # token that doesn't start with "q"
+        |                   # or
+          q[^\s;,=][^\s;,]* # token that is more than just "q"
+        )
+      )*                    # zero or more parameters
+    )                       # end of media-range
+    (?:[ \t]*;[ \t]*q=      # weight is a "q" parameter
+      (\d*(?:\.\d+)?)       # qvalue capturing-parentheses
+      [^,]*                 # "extension" accept params: who cares?
+    )?                      # accept params are optional
+    """,
+    re.VERBOSE,
+)
+_token_chars = frozenset(
+    "!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~"
+)
+_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
+_unsafe_header_chars = set('()<>@,;:"/[]?={} \t')
+_option_header_piece_re = re.compile(
+    r"""
+    ;\s*,?\s*  # newlines were replaced with commas
+    (?P<key>
+        "[^"\\]*(?:\\.[^"\\]*)*"  # quoted string
+    |
+        [^\s;,=*]+  # token
+    )
+    (?:\*(?P<count>\d+))?  # *1, optional continuation index
+    \s*
+    (?:  # optionally followed by =value
+        (?:  # equals sign, possibly with encoding
+            \*\s*=\s*  # * indicates extended notation
+            (?:  # optional encoding
+                (?P<encoding>[^\s]+?)
+                '(?P<language>[^\s]*?)'
+            )?
+        |
+            =\s*  # basic notation
+        )
+        (?P<value>
+            "[^"\\]*(?:\\.[^"\\]*)*"  # quoted string
+        |
+            [^;,]+  # token
+        )?
+    )?
+    \s*
+    """,
+    flags=re.VERBOSE,
+)
+_option_header_start_mime_type = re.compile(r",\s*([^;,\s]+)([;,]\s*.+)?")
+
+_entity_headers = frozenset(
+    [
+        "allow",
+        "content-encoding",
+        "content-language",
+        "content-length",
+        "content-location",
+        "content-md5",
+        "content-range",
+        "content-type",
+        "expires",
+        "last-modified",
+    ]
+)
+_hop_by_hop_headers = frozenset(
+    [
+        "connection",
+        "keep-alive",
+        "proxy-authenticate",
+        "proxy-authorization",
+        "te",
+        "trailer",
+        "transfer-encoding",
+        "upgrade",
+    ]
+)
+
+
+HTTP_STATUS_CODES = {
+    100: "Continue",
+    101: "Switching Protocols",
+    102: "Processing",
+    200: "OK",
+    201: "Created",
+    202: "Accepted",
+    203: "Non Authoritative Information",
+    204: "No Content",
+    205: "Reset Content",
+    206: "Partial Content",
+    207: "Multi Status",
+    226: "IM Used",  # see RFC 3229
+    300: "Multiple Choices",
+    301: "Moved Permanently",
+    302: "Found",
+    303: "See Other",
+    304: "Not Modified",
+    305: "Use Proxy",
+    307: "Temporary Redirect",
+    308: "Permanent Redirect",
+    400: "Bad Request",
+    401: "Unauthorized",
+    402: "Payment Required",  # unused
+    403: "Forbidden",
+    404: "Not Found",
+    405: "Method Not Allowed",
+    406: "Not Acceptable",
+    407: "Proxy Authentication Required",
+    408: "Request Timeout",
+    409: "Conflict",
+    410: "Gone",
+    411: "Length Required",
+    412: "Precondition Failed",
+    413: "Request Entity Too Large",
+    414: "Request URI Too Long",
+    415: "Unsupported Media Type",
+    416: "Requested Range Not Satisfiable",
+    417: "Expectation Failed",
+    418: "I'm a teapot",  # see RFC 2324
+    421: "Misdirected Request",  # see RFC 7540
+    422: "Unprocessable Entity",
+    423: "Locked",
+    424: "Failed Dependency",
+    426: "Upgrade Required",
+    428: "Precondition Required",  # see RFC 6585
+    429: "Too Many Requests",
+    431: "Request Header Fields Too Large",
+    449: "Retry With",  # proprietary MS extension
+    451: "Unavailable For Legal Reasons",
+    500: "Internal Server Error",
+    501: "Not Implemented",
+    502: "Bad Gateway",
+    503: "Service Unavailable",
+    504: "Gateway Timeout",
+    505: "HTTP Version Not Supported",
+    507: "Insufficient Storage",
+    510: "Not Extended",
+}
+
+
+def wsgi_to_bytes(data):
+    """coerce wsgi unicode represented bytes to real ones"""
+    if isinstance(data, bytes):
+        return data
+    return data.encode("latin1")  # XXX: utf8 fallback?
+
+
+def bytes_to_wsgi(data):
+    assert isinstance(data, bytes), "data must be bytes"
+    if isinstance(data, str):
+        return data
+    else:
+        return data.decode("latin1")
+
+
+def quote_header_value(value, extra_chars="", allow_token=True):
+    """Quote a header value if necessary.
+
+    .. versionadded:: 0.5
+
+    :param value: the value to quote.
+    :param extra_chars: a list of extra characters to skip quoting.
+    :param allow_token: if this is enabled token values are returned
+                        unchanged.
+    """
+    if isinstance(value, bytes):
+        value = bytes_to_wsgi(value)
+    value = str(value)
+    if allow_token:
+        token_chars = _token_chars | set(extra_chars)
+        if set(value).issubset(token_chars):
+            return value
+    return '"%s"' % value.replace("\\", "\\\\").replace('"', '\\"')
+
+
+def unquote_header_value(value, is_filename=False):
+    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
+    This does not use the real unquoting but what browsers are actually
+    using for quoting.
+
+    .. versionadded:: 0.5
+
+    :param value: the header value to unquote.
+    """
+    if value and value[0] == value[-1] == '"':
+        # this is not the real unquoting, but fixing this so that the
+        # RFC is met will result in bugs with internet explorer and
+        # probably some other browsers as well.  IE for example is
+        # uploading files with "C:\foo\bar.txt" as filename
+        value = value[1:-1]
+
+        # if this is a filename and the starting characters look like
+        # a UNC path, then just return the value without quotes.  Using the
+        # replace sequence below on a UNC path has the effect of turning
+        # the leading double slash into a single slash and then
+        # _fix_ie_filename() doesn't work correctly.  See #458.
+        if not is_filename or value[:2] != "\\\\":
+            return value.replace("\\\\", "\\").replace('\\"', '"')
+    return value
+
+
+def dump_options_header(header, options):
+    """The reverse function to :func:`parse_options_header`.
+
+    :param header: the header to dump
+    :param options: a dict of options to append.
+    """
+    segments = []
+    if header is not None:
+        segments.append(header)
+    for key, value in iteritems(options):
+        if value is None:
+            segments.append(key)
+        else:
+            segments.append("%s=%s" % (key, quote_header_value(value)))
+    return "; ".join(segments)
+
+
+def dump_header(iterable, allow_token=True):
+    """Dump an HTTP header again.  This is the reversal of
+    :func:`parse_list_header`, :func:`parse_set_header` and
+    :func:`parse_dict_header`.  This also quotes strings that include an
+    equals sign unless you pass it as dict of key, value pairs.
+
+    >>> dump_header({'foo': 'bar baz'})
+    'foo="bar baz"'
+    >>> dump_header(('foo', 'bar baz'))
+    'foo, "bar baz"'
+
+    :param iterable: the iterable or dict of values to quote.
+    :param allow_token: if set to `False` tokens as values are disallowed.
+                        See :func:`quote_header_value` for more details.
+    """
+    if isinstance(iterable, dict):
+        items = []
+        for key, value in iteritems(iterable):
+            if value is None:
+                items.append(key)
+            else:
+                items.append(
+                    "%s=%s" % (key, quote_header_value(value, allow_token=allow_token))
+                )
+    else:
+        items = [quote_header_value(x, allow_token=allow_token) for x in iterable]
+    return ", ".join(items)
+
+
+def parse_list_header(value):
+    """Parse lists as described by RFC 2068 Section 2.
+
+    In particular, parse comma-separated lists where the elements of
+    the list may include quoted-strings.  A quoted-string could
+    contain a comma.  A non-quoted string could have quotes in the
+    middle.  Quotes are removed automatically after parsing.
+
+    It works like :func:`parse_set_header`, except that items may appear
+    multiple times and case sensitivity is preserved.
+
+    The return value is a standard :class:`list`:
+
+    >>> parse_list_header('token, "quoted value"')
+    ['token', 'quoted value']
+
+    To create a header from the :class:`list` again, use the
+    :func:`dump_header` function.
+
+    :param value: a string with a list header.
+    :return: :class:`list`
+    """
+    result = []
+    for item in _parse_list_header(value):
+        if item[:1] == item[-1:] == '"':
+            item = unquote_header_value(item[1:-1])
+        result.append(item)
+    return result
+
+
+def parse_dict_header(value, cls=dict):
+    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
+    convert them into a python dict (or any other mapping object created from
+    the type with a dict like interface provided by the `cls` argument):
+
+    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
+    >>> type(d) is dict
+    True
+    >>> sorted(d.items())
+    [('bar', 'as well'), ('foo', 'is a fish')]
+
+    If there is no value for a key it will be `None`:
+
+    >>> parse_dict_header('key_without_value')
+    {'key_without_value': None}
+
+    To create a header from the :class:`dict` again, use the
+    :func:`dump_header` function.
+
+    .. versionchanged:: 0.9
+       Added support for `cls` argument.
+
+    :param value: a string with a dict header.
+    :param cls: callable to use for storage of parsed results.
+    :return: an instance of `cls`
+    """
+    result = cls()
+    if not isinstance(value, text_type):
+        # XXX: validate
+        value = bytes_to_wsgi(value)
+    for item in _parse_list_header(value):
+        if "=" not in item:
+            result[item] = None
+            continue
+        name, value = item.split("=", 1)
+        if value[:1] == value[-1:] == '"':
+            value = unquote_header_value(value[1:-1])
+        result[name] = value
+    return result
+
+
+def parse_options_header(value, multiple=False):
+    """Parse a ``Content-Type`` like header into a tuple with the content
+    type and the options:
+
+    >>> parse_options_header('text/html; charset=utf8')
+    ('text/html', {'charset': 'utf8'})
+
+    This should not be used to parse ``Cache-Control`` like headers that use
+    a slightly different format.  For these headers use the
+    :func:`parse_dict_header` function.
+
+    .. versionchanged:: 0.15
+        :rfc:`2231` parameter continuations are handled.
+
+    .. versionadded:: 0.5
+
+    :param value: the header to parse.
+    :param multiple: Whether to try to parse and return multiple MIME types
+    :return: (mimetype, options) or (mimetype, options, mimetype, options, …)
+             if multiple=True
+    """
+    if not value:
+        return "", {}
+
+    result = []
+
+    value = "," + value.replace("\n", ",")
+    while value:
+        match = _option_header_start_mime_type.match(value)
+        if not match:
+            break
+        result.append(match.group(1))  # mimetype
+        options = {}
+        # Parse options
+        rest = match.group(2)
+        continued_encoding = None
+        while rest:
+            optmatch = _option_header_piece_re.match(rest)
+            if not optmatch:
+                break
+            option, count, encoding, language, option_value = optmatch.groups()
+            # Continuations don't have to supply the encoding after the
+            # first line. If we're in a continuation, track the current
+            # encoding to use for subsequent lines. Reset it when the
+            # continuation ends.
+            if not count:
+                continued_encoding = None
+            else:
+                if not encoding:
+                    encoding = continued_encoding
+                continued_encoding = encoding
+            option = unquote_header_value(option)
+            if option_value is not None:
+                option_value = unquote_header_value(option_value, option == "filename")
+                if encoding is not None:
+                    option_value = _unquote(option_value).decode(encoding)
+            if count:
+                # Continuations append to the existing value. For
+                # simplicity, this ignores the possibility of
+                # out-of-order indices, which shouldn't happen anyway.
+                options[option] = options.get(option, "") + option_value
+            else:
+                options[option] = option_value
+            rest = rest[optmatch.end() :]
+        result.append(options)
+        if multiple is False:
+            return tuple(result)
+        value = rest
+
+    return tuple(result) if result else ("", {})
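A brief sketch of the :rfc:`2231` extended-value handling mentioned in the changelog note above (editor's illustration, not part of the file); the percent-encoded, charset-tagged filename is decoded transparently:

    from werkzeug.http import parse_options_header

    mimetype, options = parse_options_header(
        "attachment; filename*=UTF-8''%e2%82%ac%20rates.txt"
    )
    assert mimetype == "attachment"
    assert options["filename"] == "\u20ac rates.txt"  # euro sign decoded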
+
+
+def parse_accept_header(value, cls=None):
+    """Parses an HTTP Accept-* header.  This does not implement a complete
+    valid algorithm but one that supports at least value and quality
+    extraction.
+
+    Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
+    tuples sorted by the quality with some additional accessor methods).
+
+    The second parameter can be a subclass of :class:`Accept` that is created
+    with the parsed values and returned.
+
+    :param value: the accept header string to be parsed.
+    :param cls: the wrapper class for the return value (can be
+                         :class:`Accept` or a subclass thereof)
+    :return: an instance of `cls`.
+    """
+    if cls is None:
+        cls = Accept
+
+    if not value:
+        return cls(None)
+
+    result = []
+    for match in _accept_re.finditer(value):
+        quality = match.group(2)
+        if not quality:
+            quality = 1
+        else:
+            quality = max(min(float(quality), 1), 0)
+        result.append((match.group(1), quality))
+    return cls(result)
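An illustration of the quality handling (editor's sketch): missing qualities default to 1 and lookups by value return the parsed quality:

    from werkzeug.http import parse_accept_header

    accept = parse_accept_header("text/html,application/json;q=0.5")
    assert accept.best == "text/html"          # highest quality wins
    assert accept["application/json"] == 0.5   # quality lookup by value
    assert accept.quality("text/plain") == 0   # absent values have quality 0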
+
+
+def parse_cache_control_header(value, on_update=None, cls=None):
+    """Parse a cache control header.  The RFC differs between response and
+    request cache control, this method does not.  It's your responsibility
+    to not use the wrong control statements.
+
+    .. versionadded:: 0.5
+       The `cls` was added.  If not specified an immutable
+       :class:`~werkzeug.datastructures.RequestCacheControl` is returned.
+
+    :param value: a cache control header to be parsed.
+    :param on_update: an optional callable that is called every time a value
+                      on the :class:`~werkzeug.datastructures.CacheControl`
+                      object is changed.
+    :param cls: the class for the returned object.  By default
+                :class:`~werkzeug.datastructures.RequestCacheControl` is used.
+    :return: a `cls` object.
+    """
+    if cls is None:
+        cls = RequestCacheControl
+    if not value:
+        return cls(None, on_update)
+    return cls(parse_dict_header(value), on_update)
+
+
+def parse_set_header(value, on_update=None):
+    """Parse a set-like header and return a
+    :class:`~werkzeug.datastructures.HeaderSet` object:
+
+    >>> hs = parse_set_header('token, "quoted value"')
+
+    The return value is an object that treats the items case-insensitively
+    and keeps the order of the items:
+
+    >>> 'TOKEN' in hs
+    True
+    >>> hs.index('quoted value')
+    1
+    >>> hs
+    HeaderSet(['token', 'quoted value'])
+
+    To create a header from the :class:`HeaderSet` again, use the
+    :func:`dump_header` function.
+
+    :param value: a set header to be parsed.
+    :param on_update: an optional callable that is called every time a
+                      value on the :class:`~werkzeug.datastructures.HeaderSet`
+                      object is changed.
+    :return: a :class:`~werkzeug.datastructures.HeaderSet`
+    """
+    if not value:
+        return HeaderSet(None, on_update)
+    return HeaderSet(parse_list_header(value), on_update)
+
+
+def parse_authorization_header(value):
+    """Parse an HTTP basic/digest authorization header transmitted by the web
+    browser.  The return value is either `None` if the header was invalid or
+    not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
+    object.
+
+    :param value: the authorization header to parse.
+    :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
+    """
+    if not value:
+        return
+    value = wsgi_to_bytes(value)
+    try:
+        auth_type, auth_info = value.split(None, 1)
+        auth_type = auth_type.lower()
+    except ValueError:
+        return
+    if auth_type == b"basic":
+        try:
+            username, password = base64.b64decode(auth_info).split(b":", 1)
+        except Exception:
+            return
+        return Authorization(
+            "basic",
+            {
+                "username": to_unicode(username, _basic_auth_charset),
+                "password": to_unicode(password, _basic_auth_charset),
+            },
+        )
+    elif auth_type == b"digest":
+        auth_map = parse_dict_header(auth_info)
+        for key in "username", "realm", "nonce", "uri", "response":
+            if key not in auth_map:
+                return
+        if "qop" in auth_map:
+            if not auth_map.get("nc") or not auth_map.get("cnonce"):
+                return
+        return Authorization("digest", auth_map)
+
+
+def parse_www_authenticate_header(value, on_update=None):
+    """Parse an HTTP WWW-Authenticate header into a
+    :class:`~werkzeug.datastructures.WWWAuthenticate` object.
+
+    :param value: a WWW-Authenticate header to parse.
+    :param on_update: an optional callable that is called every time a value
+                      on the :class:`~werkzeug.datastructures.WWWAuthenticate`
+                      object is changed.
+    :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
+    """
+    if not value:
+        return WWWAuthenticate(on_update=on_update)
+    try:
+        auth_type, auth_info = value.split(None, 1)
+        auth_type = auth_type.lower()
+    except (ValueError, AttributeError):
+        return WWWAuthenticate(value.strip().lower(), on_update=on_update)
+    return WWWAuthenticate(auth_type, parse_dict_header(auth_info), on_update)
+
+
+def parse_if_range_header(value):
+    """Parses an if-range header which can be an etag or a date.  Returns
+    a :class:`~werkzeug.datastructures.IfRange` object.
+
+    .. versionadded:: 0.7
+    """
+    if not value:
+        return IfRange()
+    date = parse_date(value)
+    if date is not None:
+        return IfRange(date=date)
+    # drop weakness information
+    return IfRange(unquote_etag(value)[0])
+
+
+def parse_range_header(value, make_inclusive=True):
+    """Parses a range header into a :class:`~werkzeug.datastructures.Range`
+    object.  If the header is missing or malformed `None` is returned.
+    `ranges` is a list of ``(start, stop)`` tuples where `stop` is
+    exclusive.
+
+    .. versionadded:: 0.7
+    """
+    if not value or "=" not in value:
+        return None
+
+    ranges = []
+    last_end = 0
+    units, rng = value.split("=", 1)
+    units = units.strip().lower()
+
+    for item in rng.split(","):
+        item = item.strip()
+        if "-" not in item:
+            return None
+        if item.startswith("-"):
+            if last_end < 0:
+                return None
+            try:
+                begin = int(item)
+            except ValueError:
+                return None
+            end = None
+            last_end = -1
+        elif "-" in item:
+            begin, end = item.split("-", 1)
+            begin = begin.strip()
+            end = end.strip()
+            if not begin.isdigit():
+                return None
+            begin = int(begin)
+            if begin < last_end or last_end < 0:
+                return None
+            if end:
+                if not end.isdigit():
+                    return None
+                end = int(end) + 1
+                if begin >= end:
+                    return None
+            else:
+                end = None
+            last_end = end
+        ranges.append((begin, end))
+
+    return Range(units, ranges)
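Illustrative examples of the inclusive-to-exclusive conversion (editor's sketch):

    from werkzeug.http import parse_range_header

    rng = parse_range_header("bytes=0-499")
    assert rng.units == "bytes"
    assert rng.ranges == [(0, 500)]  # inclusive header, exclusive stop
    assert parse_range_header("bytes=-500").ranges == [(-500, None)]  # suffix
    assert parse_range_header("bytes=abc") is None  # malformed -> None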
+
+
+def parse_content_range_header(value, on_update=None):
+    """Parses a range header into a
+    :class:`~werkzeug.datastructures.ContentRange` object or `None` if
+    parsing is not possible.
+
+    .. versionadded:: 0.7
+
+    :param value: a content range header to be parsed.
+    :param on_update: an optional callable that is called every time a value
+                      on the :class:`~werkzeug.datastructures.ContentRange`
+                      object is changed.
+    """
+    if value is None:
+        return None
+    try:
+        units, rangedef = (value or "").strip().split(None, 1)
+    except ValueError:
+        return None
+
+    if "/" not in rangedef:
+        return None
+    rng, length = rangedef.split("/", 1)
+    if length == "*":
+        length = None
+    elif length.isdigit():
+        length = int(length)
+    else:
+        return None
+
+    if rng == "*":
+        return ContentRange(units, None, None, length, on_update=on_update)
+    elif "-" not in rng:
+        return None
+
+    start, stop = rng.split("-", 1)
+    try:
+        start = int(start)
+        stop = int(stop) + 1
+    except ValueError:
+        return None
+
+    if is_byte_range_valid(start, stop, length):
+        return ContentRange(units, start, stop, length, on_update=on_update)
+
+
+def quote_etag(etag, weak=False):
+    """Quote an etag.
+
+    :param etag: the etag to quote.
+    :param weak: set to `True` to tag it "weak".
+    """
+    if '"' in etag:
+        raise ValueError("invalid etag")
+    etag = '"%s"' % etag
+    if weak:
+        etag = "W/" + etag
+    return etag
+
+
+def unquote_etag(etag):
+    """Unquote a single etag:
+
+    >>> unquote_etag('W/"bar"')
+    ('bar', True)
+    >>> unquote_etag('"bar"')
+    ('bar', False)
+
+    :param etag: the etag identifier to unquote.
+    :return: a ``(etag, weak)`` tuple.
+    """
+    if not etag:
+        return None, None
+    etag = etag.strip()
+    weak = False
+    if etag.startswith(("W/", "w/")):
+        weak = True
+        etag = etag[2:]
+    if etag[:1] == etag[-1:] == '"':
+        etag = etag[1:-1]
+    return etag, weak
+
+
+def parse_etags(value):
+    """Parse an etag header.
+
+    :param value: the tag header to parse
+    :return: an :class:`~werkzeug.datastructures.ETags` object.
+    """
+    if not value:
+        return ETags()
+    strong = []
+    weak = []
+    end = len(value)
+    pos = 0
+    while pos < end:
+        match = _etag_re.match(value, pos)
+        if match is None:
+            break
+        is_weak, quoted, raw = match.groups()
+        if raw == "*":
+            return ETags(star_tag=True)
+        elif quoted:
+            raw = quoted
+        if is_weak:
+            weak.append(raw)
+        else:
+            strong.append(raw)
+        pos = match.end()
+    return ETags(strong, weak)
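An illustration of strong versus weak comparison on the resulting ``ETags`` object (editor's sketch):

    from werkzeug.http import parse_etags

    etags = parse_etags('W/"v1", "v2"')
    assert "v2" in etags              # strong comparison
    assert "v1" not in etags          # weak tags never match strongly
    assert etags.contains_weak("v1")  # weak comparison matches both
    assert parse_etags("*").star_tag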
+
+
+def generate_etag(data):
+    """Generate an etag for some data."""
+    return md5(data).hexdigest()
+
+
+def parse_date(value):
+    """Parse one of the following date formats into a datetime object:
+
+    .. sourcecode:: text
+
+        Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
+        Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
+        Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format
+
+    If parsing fails the return value is `None`.
+
+    :param value: a string with a supported date format.
+    :return: a :class:`datetime.datetime` object.
+    """
+    if value:
+        t = parsedate_tz(value.strip())
+        if t is not None:
+            try:
+                year = t[0]
+                # unfortunately that function does not tell us if two digit
+                # years were part of the string, or if they were prefixed
+                # with two zeroes.  So what we do is assume that 69-99
+                # refer to the 1900s, and everything below to the 2000s
+                if year >= 0 and year <= 68:
+                    year += 2000
+                elif year >= 69 and year <= 99:
+                    year += 1900
+                return datetime(*((year,) + t[1:7])) - timedelta(seconds=t[-1] or 0)
+            except (ValueError, OverflowError):
+                return None
+
+
+def _dump_date(d, delim):
+    """Used for `http_date` and `cookie_date`."""
+    if d is None:
+        d = gmtime()
+    elif isinstance(d, datetime):
+        d = d.utctimetuple()
+    elif isinstance(d, (integer_types, float)):
+        d = gmtime(d)
+    return "%s, %02d%s%s%s%s %02d:%02d:%02d GMT" % (
+        ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[d.tm_wday],
+        d.tm_mday,
+        delim,
+        (
+            "Jan",
+            "Feb",
+            "Mar",
+            "Apr",
+            "May",
+            "Jun",
+            "Jul",
+            "Aug",
+            "Sep",
+            "Oct",
+            "Nov",
+            "Dec",
+        )[d.tm_mon - 1],
+        delim,
+        str(d.tm_year),
+        d.tm_hour,
+        d.tm_min,
+        d.tm_sec,
+    )
+
+
+def cookie_date(expires=None):
+    """Formats the time to ensure compatibility with Netscape's cookie
+    standard.
+
+    Accepts a floating point number expressed in seconds since the epoch, a
+    datetime object or a timetuple.  All times are in UTC.  The
+    :func:`parse_date` function can be used to parse such a date.
+
+    Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
+
+    :param expires: If provided, that date is used; otherwise the current time.
+    """
+    return _dump_date(expires, "-")
+
+
+def http_date(timestamp=None):
+    """Formats the time to match the RFC1123 date format.
+
+    Accepts a floating point number expressed in seconds since the epoch, a
+    datetime object or a timetuple.  All times are in UTC.  The
+    :func:`parse_date` function can be used to parse such a date.
+
+    Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.
+
+    :param timestamp: If provided, that date is used; otherwise the current time.
+    """
+    return _dump_date(timestamp, " ")
+
+
+def parse_age(value=None):
+    """Parses a base-10 integer count of seconds into a timedelta.
+
+    If parsing fails, the return value is `None`.
+
+    :param value: a string consisting of an integer represented in base-10
+    :return: a :class:`datetime.timedelta` object or `None`.
+    """
+    if not value:
+        return None
+    try:
+        seconds = int(value)
+    except ValueError:
+        return None
+    if seconds < 0:
+        return None
+    try:
+        return timedelta(seconds=seconds)
+    except OverflowError:
+        return None
+
+
+def dump_age(age=None):
+    """Formats the duration as a base-10 integer.
+
+    :param age: should be an integer number of seconds,
+                a :class:`datetime.timedelta` object, or,
+                if the age is unknown, `None` (default).
+    """
+    if age is None:
+        return
+    if isinstance(age, timedelta):
+        # do the equivalent of Python 2.7's timedelta.total_seconds(),
+        # but disregarding fractional seconds
+        age = age.seconds + (age.days * 24 * 3600)
+
+    age = int(age)
+    if age < 0:
+        raise ValueError("age cannot be negative")
+
+    return str(age)
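An illustration of the symmetry between the two age helpers (editor's sketch):

    from datetime import timedelta

    from werkzeug.http import dump_age, parse_age

    assert parse_age("3600") == timedelta(hours=1)
    assert dump_age(timedelta(hours=1)) == "3600"
    assert parse_age("-1") is None  # negative ages are rejected
    assert dump_age(None) is None   # unknown age stays unknown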
+
+
+def is_resource_modified(
+    environ, etag=None, data=None, last_modified=None, ignore_if_range=True
+):
+    """Convenience method for conditional requests.
+
+    :param environ: the WSGI environment of the request to be checked.
+    :param etag: the etag for the response for comparison.
+    :param data: or alternatively the data of the response to automatically
+                 generate an etag using :func:`generate_etag`.
+    :param last_modified: an optional date of the last modification.
+    :param ignore_if_range: If `False`, `If-Range` header will be taken into
+                            account.
+    :return: `True` if the resource was modified, otherwise `False`.
+    """
+    if etag is None and data is not None:
+        etag = generate_etag(data)
+    elif data is not None:
+        raise TypeError("both data and etag given")
+    if environ["REQUEST_METHOD"] not in ("GET", "HEAD"):
+        return False
+
+    unmodified = False
+    if isinstance(last_modified, string_types):
+        last_modified = parse_date(last_modified)
+
+    # ensure that microsecond is zero because the HTTP spec does not transmit
+    # that either and we might have some false positives.  See issue #39
+    if last_modified is not None:
+        last_modified = last_modified.replace(microsecond=0)
+
+    if_range = None
+    if not ignore_if_range and "HTTP_RANGE" in environ:
+        # https://tools.ietf.org/html/rfc7233#section-3.2
+        # A server MUST ignore an If-Range header field received in a request
+        # that does not contain a Range header field.
+        if_range = parse_if_range_header(environ.get("HTTP_IF_RANGE"))
+
+    if if_range is not None and if_range.date is not None:
+        modified_since = if_range.date
+    else:
+        modified_since = parse_date(environ.get("HTTP_IF_MODIFIED_SINCE"))
+
+    if modified_since and last_modified and last_modified <= modified_since:
+        unmodified = True
+
+    if etag:
+        etag, _ = unquote_etag(etag)
+        if if_range is not None and if_range.etag is not None:
+            unmodified = parse_etags(if_range.etag).contains(etag)
+        else:
+            if_none_match = parse_etags(environ.get("HTTP_IF_NONE_MATCH"))
+            if if_none_match:
+                # https://tools.ietf.org/html/rfc7232#section-3.2
+                # "A recipient MUST use the weak comparison function when comparing
+                # entity-tags for If-None-Match"
+                unmodified = if_none_match.contains_weak(etag)
+
+            # https://tools.ietf.org/html/rfc7232#section-3.1
+            # "Origin server MUST use the strong comparison function when
+            # comparing entity-tags for If-Match"
+            if_match = parse_etags(environ.get("HTTP_IF_MATCH"))
+            if if_match:
+                unmodified = not if_match.is_strong(etag)
+
+    return not unmodified
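A minimal conditional-request sketch (editor's illustration):

    from werkzeug.http import is_resource_modified

    environ = {"REQUEST_METHOD": "GET", "HTTP_IF_NONE_MATCH": '"abc"'}
    assert not is_resource_modified(environ, etag="abc")  # client is current
    assert is_resource_modified(environ, etag="other")    # changed, resend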
+
+
+def remove_entity_headers(headers, allowed=("expires", "content-location")):
+    """Remove all entity headers from a list or :class:`Headers` object.  This
+    operation works in-place.  `Expires` and `Content-Location` headers are
+    by default not removed.  The reason for this is :rfc:`2616` section
+    10.3.5 which specifies some entity headers that should be sent.
+
+    .. versionchanged:: 0.5
+       added `allowed` parameter.
+
+    :param headers: a list or :class:`Headers` object.
+    :param allowed: a list of headers that should still be allowed even though
+                    they are entity headers.
+    """
+    allowed = set(x.lower() for x in allowed)
+    headers[:] = [
+        (key, value)
+        for key, value in headers
+        if not is_entity_header(key) or key.lower() in allowed
+    ]
+
+
+def remove_hop_by_hop_headers(headers):
+    """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
+    :class:`Headers` object.  This operation works in-place.
+
+    .. versionadded:: 0.5
+
+    :param headers: a list or :class:`Headers` object.
+    """
+    headers[:] = [
+        (key, value) for key, value in headers if not is_hop_by_hop_header(key)
+    ]
+
+
+def is_entity_header(header):
+    """Check if a header is an entity header.
+
+    .. versionadded:: 0.5
+
+    :param header: the header to test.
+    :return: `True` if it's an entity header, `False` otherwise.
+    """
+    return header.lower() in _entity_headers
+
+
+def is_hop_by_hop_header(header):
+    """Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
+
+    .. versionadded:: 0.5
+
+    :param header: the header to test.
+    :return: `True` if it's an HTTP/1.1 "Hop-by-Hop" header, `False` otherwise.
+    """
+    return header.lower() in _hop_by_hop_headers
+
+
+def parse_cookie(header, charset="utf-8", errors="replace", cls=None):
+    """Parse a cookie.  Either from a string or WSGI environ.
+
+    By default encoding errors are replaced.  If you want a different
+    behavior you can set `errors` to ``'ignore'`` or ``'strict'``.  In
+    strict mode a :exc:`HTTPUnicodeError` is raised.
+
+    .. versionchanged:: 0.5
+       This function now returns a :class:`TypeConversionDict` instead of a
+       regular dict.  The `cls` parameter was added.
+
+    :param header: the header to be used to parse the cookie.  Alternatively
+                   this can be a WSGI environment.
+    :param charset: the charset for the cookie values.
+    :param errors: the error behavior for the charset decoding.
+    :param cls: an optional dict class to use.  If this is not specified
+                or `None` the default :class:`TypeConversionDict` is
+                used.
+    """
+    if isinstance(header, dict):
+        header = header.get("HTTP_COOKIE", "")
+    elif header is None:
+        header = ""
+
+    # If the value is a unicode string it's mangled through latin1.  This
+    # is done because per PEP 3333, on Python 3 all headers are assumed to
+    # be latin1, which is incorrect for cookies, which are sent in the page
+    # encoding.  As a result we re-encode the header back to latin1 bytes
+    # and decode the cookie values with the intended charset below.
+    if isinstance(header, text_type):
+        header = header.encode("latin1", "replace")
+
+    if cls is None:
+        cls = TypeConversionDict
+
+    def _parse_pairs():
+        for key, val in _cookie_parse_impl(header):
+            key = to_unicode(key, charset, errors, allow_none_charset=True)
+            if not key:
+                continue
+            val = to_unicode(val, charset, errors, allow_none_charset=True)
+            yield try_coerce_native(key), val
+
+    return cls(_parse_pairs())
+
+
+def dump_cookie(
+    key,
+    value="",
+    max_age=None,
+    expires=None,
+    path="/",
+    domain=None,
+    secure=False,
+    httponly=False,
+    charset="utf-8",
+    sync_expires=True,
+    max_size=4093,
+    samesite=None,
+):
+    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix.
+    The parameters are the same as in the cookie `Morsel` object in the
+    Python standard library but it accepts unicode data, too.
+
+    On Python 3 the return value of this function will be a unicode
+    string, on Python 2 it will be a native string.  In both cases the
+    return value is usually restricted to ascii as the vast majority of
+    values are properly escaped, but that is no guarantee.  If a unicode
+    string is returned it's tunneled through latin1 as required by
+    PEP 3333.
+
+    The return value is not ASCII safe if the key contains unicode
+    characters.  This is technically against the specification but
+    happens in the wild.  It's strongly recommended to not use
+    non-ASCII values for the keys.
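+
+    A small example; ``sync_expires`` is disabled so the output contains
+    no timestamp::
+
+        >>> dump_cookie("lang", "en", max_age=3600, sync_expires=False)
+        'lang=en; Max-Age=3600; Path=/'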
+
+    :param max_age: should be a number of seconds, or `None` (default) if
+                    the cookie should last only as long as the client's
+                    browser session.  `timedelta` objects are also
+                    accepted.
+    :param expires: should be a `datetime` object or unix timestamp.
+    :param path: limits the cookie to a given path; by default it will
+                 span the whole domain.
+    :param domain: Use this if you want to set a cross-domain cookie. For
+                   example, ``domain=".example.com"`` will set a cookie
+                   that is readable by the domain ``www.example.com``,
+                   ``foo.example.com`` etc. Otherwise, a cookie will only
+                   be readable by the domain that set it.
+    :param secure: The cookie will only be available via HTTPS
+    :param httponly: disallow JavaScript to access the cookie.  This is an
+                     extension to the cookie standard and probably not
+                     supported by all browsers.
+    :param charset: the encoding for unicode values.
+    :param sync_expires: automatically set expires if max_age is defined
+                         but expires not.
+    :param max_size: Warn if the final header value exceeds this size. The
+        default, 4093, should be safely `supported by most browsers
+        <cookie_>`_. Set to 0 to disable this check.
+    :param samesite: Limits the scope of the cookie such that it will only
+                     be attached to requests if those requests are "same-site".
+
+    .. _`cookie`: http://browsercookielimits.squawky.net/
+    """
+    key = to_bytes(key, charset)
+    value = to_bytes(value, charset)
+
+    if path is not None:
+        path = iri_to_uri(path, charset)
+    domain = _make_cookie_domain(domain)
+    if isinstance(max_age, timedelta):
+        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
+    if expires is not None:
+        if not isinstance(expires, string_types):
+            expires = cookie_date(expires)
+    elif max_age is not None and sync_expires:
+        expires = to_bytes(cookie_date(time() + max_age))
+
+    samesite = samesite.title() if samesite else None
+    if samesite not in ("Strict", "Lax", None):
+        raise ValueError("invalid SameSite value; must be 'Strict', 'Lax' or None")
+
+    buf = [key + b"=" + _cookie_quote(value)]
+
+    # XXX: In theory all of these parameters that are not marked with `None`
+    # should be quoted.  Because the stdlib did not quote them before,
+    # quoting is not introduced here now either.
+    for k, v, q in (
+        (b"Domain", domain, True),
+        (b"Expires", expires, False),
+        (b"Max-Age", max_age, False),
+        (b"Secure", secure, None),
+        (b"HttpOnly", httponly, None),
+        (b"Path", path, False),
+        (b"SameSite", samesite, False),
+    ):
+        if q is None:
+            if v:
+                buf.append(k)
+            continue
+
+        if v is None:
+            continue
+
+        tmp = bytearray(k)
+        if not isinstance(v, (bytes, bytearray)):
+            v = to_bytes(text_type(v), charset)
+        if q:
+            v = _cookie_quote(v)
+        tmp += b"=" + v
+        buf.append(bytes(tmp))
+
+    # The return value will be an incorrectly encoded latin1 header on
+    # Python 3 for consistency with the headers object and a bytestring
+    # on Python 2 because that's how the API makes more sense.
+    rv = b"; ".join(buf)
+    if not PY2:
+        rv = rv.decode("latin1")
+
+    # Warn if the final value of the cookie exceeds the limit. If the
+    # cookie is too large, then it may be silently ignored by the browser,
+    # which can be quite hard to debug.
+    cookie_size = len(rv)
+
+    if max_size and cookie_size > max_size:
+        value_size = len(value)
+        warnings.warn(
+            'The "{key}" cookie is too large: the value was {value_size} bytes'
+            " but the header required {extra_size} extra bytes. The final size"
+            " was {cookie_size} bytes but the limit is {max_size} bytes."
+            " Browsers may silently ignore cookies larger than this.".format(
+                key=key,
+                value_size=value_size,
+                extra_size=cookie_size - value_size,
+                cookie_size=cookie_size,
+                max_size=max_size,
+            ),
+            stacklevel=2,
+        )
+
+    return rv
+
+
+def is_byte_range_valid(start, stop, length):
+    """Checks if a given byte content range is valid for the given length.
+
+    .. versionadded:: 0.7
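+
+    For example::
+
+        >>> is_byte_range_valid(0, 500, 1000)
+        True
+        >>> is_byte_range_valid(500, 400, 1000)
+        False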
+    """
+    if (start is None) != (stop is None):
+        return False
+    elif start is None:
+        return length is None or length >= 0
+    elif length is None:
+        return 0 <= start < stop
+    elif start >= stop:
+        return False
+    return 0 <= start < length
+
+
+# circular dependency fun
+from .datastructures import Accept
+from .datastructures import Authorization
+from .datastructures import ContentRange
+from .datastructures import ETags
+from .datastructures import HeaderSet
+from .datastructures import IfRange
+from .datastructures import Range
+from .datastructures import RequestCacheControl
+from .datastructures import TypeConversionDict
+from .datastructures import WWWAuthenticate
+from .urls import iri_to_uri
+
+# DEPRECATED
+from .datastructures import CharsetAccept as _CharsetAccept
+from .datastructures import Headers as _Headers
+from .datastructures import LanguageAccept as _LanguageAccept
+from .datastructures import MIMEAccept as _MIMEAccept
+
+
+class MIMEAccept(_MIMEAccept):
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'werkzeug.http.MIMEAccept' has moved to 'werkzeug"
+            ".datastructures.MIMEAccept' as of version 0.5. This old"
+            " import will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(MIMEAccept, self).__init__(*args, **kwargs)
+
+
+class CharsetAccept(_CharsetAccept):
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'werkzeug.http.CharsetAccept' has moved to 'werkzeug"
+            ".datastructures.CharsetAccept' as of version 0.5. This old"
+            " import will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(CharsetAccept, self).__init__(*args, **kwargs)
+
+
+class LanguageAccept(_LanguageAccept):
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'werkzeug.http.LanguageAccept' has moved to 'werkzeug"
+            ".datastructures.LanguageAccept' as of version 0.5. This"
+            " old import will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(LanguageAccept, self).__init__(*args, **kwargs)
+
+
+class Headers(_Headers):
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'werkzeug.http.Headers' has moved to 'werkzeug"
+            ".datastructures.Headers' as of version 0.5. This old"
+            " import will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(Headers, self).__init__(*args, **kwargs)
diff --git a/lib/python3.7/site-packages/werkzeug/local.py b/lib/python3.7/site-packages/werkzeug/local.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a6088ccf44df65a5bd19603a710c33d1a41aafa
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/local.py
@@ -0,0 +1,421 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.local
+    ~~~~~~~~~~~~~~
+
+    This module implements context-local objects.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import copy
+from functools import update_wrapper
+
+from ._compat import implements_bool
+from ._compat import PY2
+from .wsgi import ClosingIterator
+
+# Since each thread has its own greenlet, we can use those as identifiers
+# for the context.  If greenlets are not available, we fall back to the
+# current thread ident, imported from wherever it lives on this Python
+# version ("thread" on Python 2, "_thread" on Python 3).
+try:
+    from greenlet import getcurrent as get_ident
+except ImportError:
+    try:
+        from thread import get_ident
+    except ImportError:
+        from _thread import get_ident
+
+
+def release_local(local):
+    """Releases the contents of the local for the current context.
+    This makes it possible to use locals without a manager.
+
+    Example::
+
+        >>> loc = Local()
+        >>> loc.foo = 42
+        >>> release_local(loc)
+        >>> hasattr(loc, 'foo')
+        False
+
+    With this function one can release :class:`Local` objects as well
+    as :class:`LocalStack` objects.  However, it is not possible to
+    release data held by proxies that way; one always has to retain
+    a reference to the underlying local object in order to be able
+    to release it.
+
+    .. versionadded:: 0.6.1
+    """
+    local.__release_local__()
+
+
+class Local(object):
+    __slots__ = ("__storage__", "__ident_func__")
+
+    def __init__(self):
+        object.__setattr__(self, "__storage__", {})
+        object.__setattr__(self, "__ident_func__", get_ident)
+
+    def __iter__(self):
+        return iter(self.__storage__.items())
+
+    def __call__(self, proxy):
+        """Create a proxy for a name."""
+        return LocalProxy(self, proxy)
+
+    def __release_local__(self):
+        self.__storage__.pop(self.__ident_func__(), None)
+
+    def __getattr__(self, name):
+        try:
+            return self.__storage__[self.__ident_func__()][name]
+        except KeyError:
+            raise AttributeError(name)
+
+    def __setattr__(self, name, value):
+        ident = self.__ident_func__()
+        storage = self.__storage__
+        try:
+            storage[ident][name] = value
+        except KeyError:
+            storage[ident] = {name: value}
+
+    def __delattr__(self, name):
+        try:
+            del self.__storage__[self.__ident_func__()][name]
+        except KeyError:
+            raise AttributeError(name)
+
+
+class LocalStack(object):
+    """This class works similar to a :class:`Local` but keeps a stack
+    of objects instead.  This is best explained with an example::
+
+        >>> ls = LocalStack()
+        >>> ls.push(42)
+        >>> ls.top
+        42
+        >>> ls.push(23)
+        >>> ls.top
+        23
+        >>> ls.pop()
+        23
+        >>> ls.top
+        42
+
+    They can be force-released by using a :class:`LocalManager` or with
+    the :func:`release_local` function, but the correct way is to pop the
+    item from the stack after use.  When the stack is empty it will
+    no longer be bound to the current context (and as such released).
+
+    By calling the stack without arguments it returns a proxy that resolves to
+    the topmost item on the stack.
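+
+    For example (names are illustrative)::
+
+        _request_stack = LocalStack()
+        request = _request_stack()  # proxy to the topmost item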
+
+    .. versionadded:: 0.6.1
+    """
+
+    def __init__(self):
+        self._local = Local()
+
+    def __release_local__(self):
+        self._local.__release_local__()
+
+    def _get__ident_func__(self):
+        return self._local.__ident_func__
+
+    def _set__ident_func__(self, value):
+        object.__setattr__(self._local, "__ident_func__", value)
+
+    __ident_func__ = property(_get__ident_func__, _set__ident_func__)
+    del _get__ident_func__, _set__ident_func__
+
+    def __call__(self):
+        def _lookup():
+            rv = self.top
+            if rv is None:
+                raise RuntimeError("object unbound")
+            return rv
+
+        return LocalProxy(_lookup)
+
+    def push(self, obj):
+        """Pushes a new item to the stack"""
+        rv = getattr(self._local, "stack", None)
+        if rv is None:
+            self._local.stack = rv = []
+        rv.append(obj)
+        return rv
+
+    def pop(self):
+        """Removes the topmost item from the stack, will return the
+        old value or `None` if the stack was already empty.
+        """
+        stack = getattr(self._local, "stack", None)
+        if stack is None:
+            return None
+        elif len(stack) == 1:
+            release_local(self._local)
+            return stack[-1]
+        else:
+            return stack.pop()
+
+    @property
+    def top(self):
+        """The topmost item on the stack.  If the stack is empty,
+        `None` is returned.
+        """
+        try:
+            return self._local.stack[-1]
+        except (AttributeError, IndexError):
+            return None
+
+
+class LocalManager(object):
+    """Local objects cannot manage themselves. For that you need a local
+    manager.  You can pass a local manager multiple locals or add them later
+    by appending them to `manager.locals`.  Every time the manager cleans up,
+    it will clean up all the data left in the locals for this context.
+
+    The `ident_func` parameter can be added to override the default ident
+    function for the wrapped locals.
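+
+    A minimal usage sketch (``wsgi_app`` is an assumed application)::
+
+        local = Local()
+        local_manager = LocalManager([local])
+        app = local_manager.make_middleware(wsgi_app)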
+
+    .. versionchanged:: 0.6.1
+       Instead of a manager the :func:`release_local` function can be used
+       as well.
+
+    .. versionchanged:: 0.7
+       `ident_func` was added.
+    """
+
+    def __init__(self, locals=None, ident_func=None):
+        if locals is None:
+            self.locals = []
+        elif isinstance(locals, Local):
+            self.locals = [locals]
+        else:
+            self.locals = list(locals)
+        if ident_func is not None:
+            self.ident_func = ident_func
+            for local in self.locals:
+                object.__setattr__(local, "__ident_func__", ident_func)
+        else:
+            self.ident_func = get_ident
+
+    def get_ident(self):
+        """Return the context identifier the local objects use internally for
+        this context.  You cannot override this method to change the behavior
+        but use it to link other context local objects (such as SQLAlchemy's
+        scoped sessions) to the Werkzeug locals.
+
+        .. versionchanged:: 0.7
+           You can pass a different ident function to the local manager that
+           will then be propagated to all the locals passed to the
+           constructor.
+        """
+        return self.ident_func()
+
+    def cleanup(self):
+        """Manually clean up the data in the locals for this context.  Call
+        this at the end of the request or use `make_middleware()`.
+        """
+        for local in self.locals:
+            release_local(local)
+
+    def make_middleware(self, app):
+        """Wrap a WSGI application so that cleaning up happens after
+        request end.
+        """
+
+        def application(environ, start_response):
+            return ClosingIterator(app(environ, start_response), self.cleanup)
+
+        return application
+
+    def middleware(self, func):
+        """Like `make_middleware` but for decorating functions.
+
+        Example usage::
+
+            @manager.middleware
+            def application(environ, start_response):
+                ...
+
+        The difference to `make_middleware` is that the function passed
+        will have all the arguments copied from the inner application
+        (name, docstring, module).
+        """
+        return update_wrapper(self.make_middleware(func), func)
+
+    def __repr__(self):
+        return "<%s storages: %d>" % (self.__class__.__name__, len(self.locals))
+
+
+@implements_bool
+class LocalProxy(object):
+    """Acts as a proxy for a werkzeug local.  Forwards all operations to
+    a proxied object.  The only operations not supported for forwarding
+    are right handed operands and any kind of assignment.
+
+    Example usage::
+
+        from werkzeug.local import Local
+        l = Local()
+
+        # these are proxies
+        request = l('request')
+        user = l('user')
+
+
+        from werkzeug.local import LocalStack
+        _response_local = LocalStack()
+
+        # this is a proxy
+        response = _response_local()
+
+    Whenever something is bound to l.user / l.request the proxy objects
+    will forward all operations.  If no object is bound a :exc:`RuntimeError`
+    will be raised.
+
+    To create proxies to :class:`Local` or :class:`LocalStack` objects,
+    call the object as shown above.  If you want to have a proxy to an
+    object looked up by a function, you can (as of Werkzeug 0.6.1) pass
+    a function to the :class:`LocalProxy` constructor::
+
+        session = LocalProxy(lambda: get_current_request().session)
+
+    .. versionchanged:: 0.6.1
+       The class can be instantiated with a callable as well now.
+    """
+
+    __slots__ = ("__local", "__dict__", "__name__", "__wrapped__")
+
+    def __init__(self, local, name=None):
+        object.__setattr__(self, "_LocalProxy__local", local)
+        object.__setattr__(self, "__name__", name)
+        if callable(local) and not hasattr(local, "__release_local__"):
+            # "local" is a callable that is not an instance of Local or
+            # LocalManager: mark it as a wrapped function.
+            object.__setattr__(self, "__wrapped__", local)
+
+    def _get_current_object(self):
+        """Return the current object.  This is useful if you want the real
+        object behind the proxy at a time for performance reasons or because
+        you want to pass the object into a different context.
+        """
+        if not hasattr(self.__local, "__release_local__"):
+            return self.__local()
+        try:
+            return getattr(self.__local, self.__name__)
+        except AttributeError:
+            raise RuntimeError("no object bound to %s" % self.__name__)
+
+    @property
+    def __dict__(self):
+        try:
+            return self._get_current_object().__dict__
+        except RuntimeError:
+            raise AttributeError("__dict__")
+
+    def __repr__(self):
+        try:
+            obj = self._get_current_object()
+        except RuntimeError:
+            return "<%s unbound>" % self.__class__.__name__
+        return repr(obj)
+
+    def __bool__(self):
+        try:
+            return bool(self._get_current_object())
+        except RuntimeError:
+            return False
+
+    def __unicode__(self):
+        try:
+            return unicode(self._get_current_object())  # noqa
+        except RuntimeError:
+            return repr(self)
+
+    def __dir__(self):
+        try:
+            return dir(self._get_current_object())
+        except RuntimeError:
+            return []
+
+    def __getattr__(self, name):
+        if name == "__members__":
+            return dir(self._get_current_object())
+        return getattr(self._get_current_object(), name)
+
+    def __setitem__(self, key, value):
+        self._get_current_object()[key] = value
+
+    def __delitem__(self, key):
+        del self._get_current_object()[key]
+
+    if PY2:
+        __getslice__ = lambda x, i, j: x._get_current_object()[i:j]
+
+        def __setslice__(self, i, j, seq):
+            self._get_current_object()[i:j] = seq
+
+        def __delslice__(self, i, j):
+            del self._get_current_object()[i:j]
+
+    __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
+    __delattr__ = lambda x, n: delattr(x._get_current_object(), n)
+    __str__ = lambda x: str(x._get_current_object())
+    __lt__ = lambda x, o: x._get_current_object() < o
+    __le__ = lambda x, o: x._get_current_object() <= o
+    __eq__ = lambda x, o: x._get_current_object() == o
+    __ne__ = lambda x, o: x._get_current_object() != o
+    __gt__ = lambda x, o: x._get_current_object() > o
+    __ge__ = lambda x, o: x._get_current_object() >= o
+    __cmp__ = lambda x, o: cmp(x._get_current_object(), o)  # noqa
+    __hash__ = lambda x: hash(x._get_current_object())
+    __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
+    __len__ = lambda x: len(x._get_current_object())
+    __getitem__ = lambda x, i: x._get_current_object()[i]
+    __iter__ = lambda x: iter(x._get_current_object())
+    __contains__ = lambda x, i: i in x._get_current_object()
+    __add__ = lambda x, o: x._get_current_object() + o
+    __sub__ = lambda x, o: x._get_current_object() - o
+    __mul__ = lambda x, o: x._get_current_object() * o
+    __floordiv__ = lambda x, o: x._get_current_object() // o
+    __mod__ = lambda x, o: x._get_current_object() % o
+    __divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
+    __pow__ = lambda x, o: x._get_current_object() ** o
+    __lshift__ = lambda x, o: x._get_current_object() << o
+    __rshift__ = lambda x, o: x._get_current_object() >> o
+    __and__ = lambda x, o: x._get_current_object() & o
+    __xor__ = lambda x, o: x._get_current_object() ^ o
+    __or__ = lambda x, o: x._get_current_object() | o
+    __div__ = lambda x, o: x._get_current_object().__div__(o)
+    __truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
+    __neg__ = lambda x: -(x._get_current_object())
+    __pos__ = lambda x: +(x._get_current_object())
+    __abs__ = lambda x: abs(x._get_current_object())
+    __invert__ = lambda x: ~(x._get_current_object())
+    __complex__ = lambda x: complex(x._get_current_object())
+    __int__ = lambda x: int(x._get_current_object())
+    __long__ = lambda x: long(x._get_current_object())  # noqa
+    __float__ = lambda x: float(x._get_current_object())
+    __oct__ = lambda x: oct(x._get_current_object())
+    __hex__ = lambda x: hex(x._get_current_object())
+    __index__ = lambda x: x._get_current_object().__index__()
+    __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
+    __enter__ = lambda x: x._get_current_object().__enter__()
+    __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
+    __radd__ = lambda x, o: o + x._get_current_object()
+    __rsub__ = lambda x, o: o - x._get_current_object()
+    __rmul__ = lambda x, o: o * x._get_current_object()
+    __rdiv__ = lambda x, o: o / x._get_current_object()
+    if PY2:
+        __rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
+    else:
+        __rtruediv__ = __rdiv__
+    __rfloordiv__ = lambda x, o: o // x._get_current_object()
+    __rmod__ = lambda x, o: o % x._get_current_object()
+    __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
+    __copy__ = lambda x: copy.copy(x._get_current_object())
+    __deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/__init__.py b/lib/python3.7/site-packages/werkzeug/middleware/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e049f5ee97070ee39bcc46db88c37b4334d32d6
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/middleware/__init__.py
@@ -0,0 +1,25 @@
+"""
+Middleware
+==========
+
+A WSGI middleware is a WSGI application that wraps another application
+in order to observe or change its behavior. Werkzeug provides some
+middleware for common use cases.
+
+.. toctree::
+    :maxdepth: 1
+
+    proxy_fix
+    shared_data
+    dispatcher
+    http_proxy
+    lint
+    profiler
+
+The :doc:`interactive debugger </debug>` is also a middleware that can
+be applied manually, although it is typically used automatically with
+the :doc:`development server </serving>`.
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dfc7ed3152fde94d7de86b57d7cb0d320af0ccbe
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/dispatcher.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/dispatcher.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9df8ad54f69350b46bf92cf65a9b92f8403ad92d
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/dispatcher.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/http_proxy.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/http_proxy.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dead790f5b5a4ca227c1ddc080cbb44623617f75
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/http_proxy.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/lint.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/lint.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f0aefb45f4494fb19da48e58e2cf76e673e96a70
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/lint.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/profiler.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/profiler.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf525919dfad409d5d815bf9eaaef10e94bff12f
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/profiler.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/proxy_fix.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/proxy_fix.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72798c7e64406d8dfe9dd9fbc8c65e73d334eed0
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/proxy_fix.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/shared_data.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/shared_data.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de80acbd87753000908927f9816fcd3f56aa3285
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/shared_data.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/dispatcher.py b/lib/python3.7/site-packages/werkzeug/middleware/dispatcher.py
new file mode 100644
index 0000000000000000000000000000000000000000..2eb173e0c1186e6269e093798a721766d71f0d35
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/middleware/dispatcher.py
@@ -0,0 +1,66 @@
+"""
+Application Dispatcher
+======================
+
+This middleware creates a single WSGI application that dispatches to
+multiple other WSGI applications mounted at different URL paths.
+
+A common example is writing a Single Page Application, where you have a
+backend API and a frontend written in JavaScript that does the routing
+in the browser rather than requesting different pages from the server.
+The frontend is a single HTML and JS file that should be served for any
+path besides "/api".
+
+This example dispatches to an API app under "/api", an admin app
+under "/admin", and an app that serves frontend files for all other
+requests::
+
+    app = DispatcherMiddleware(serve_frontend, {
+        '/api': api_app,
+        '/admin': admin_app,
+    })
+
+In production, you might instead handle this at the HTTP server level,
+serving files or proxying to application servers based on location. The
+API and admin apps would each be deployed with a separate WSGI server,
+and the static files would be served directly by the HTTP server.
+
+.. autoclass:: DispatcherMiddleware
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+
+
+class DispatcherMiddleware(object):
+    """Combine multiple applications as a single WSGI application.
+    Requests are dispatched to an application based on the path it is
+    mounted under.
+
+    :param app: The WSGI application to dispatch to if the request
+        doesn't match a mounted path.
+    :param mounts: Maps path prefixes to applications for dispatching.
+    """
+
+    def __init__(self, app, mounts=None):
+        self.app = app
+        self.mounts = mounts or {}
+
+    def __call__(self, environ, start_response):
+        script = environ.get("PATH_INFO", "")
+        path_info = ""
+
+        while "/" in script:
+            if script in self.mounts:
+                app = self.mounts[script]
+                break
+
+            script, last_item = script.rsplit("/", 1)
+            path_info = "/%s%s" % (last_item, path_info)
+        else:
+            app = self.mounts.get(script, self.app)
+
+        original_script_name = environ.get("SCRIPT_NAME", "")
+        environ["SCRIPT_NAME"] = original_script_name + script
+        environ["PATH_INFO"] = path_info
+        return app(environ, start_response)
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/http_proxy.py b/lib/python3.7/site-packages/werkzeug/middleware/http_proxy.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfdc071202bc39021644288d10c9568ae54e6fa9
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/middleware/http_proxy.py
@@ -0,0 +1,219 @@
+"""
+Basic HTTP Proxy
+================
+
+.. autoclass:: ProxyMiddleware
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+import socket
+
+from ..datastructures import EnvironHeaders
+from ..http import is_hop_by_hop_header
+from ..urls import url_parse
+from ..urls import url_quote
+from ..wsgi import get_input_stream
+
+try:
+    from http import client
+except ImportError:
+    import httplib as client
+
+
+class ProxyMiddleware(object):
+    """Proxy requests under a path to an external server, routing other
+    requests to the app.
+
+    This middleware can only proxy HTTP requests, as that is the only
+    protocol handled by the WSGI server. Other protocols, such as
+    WebSocket requests, cannot be proxied at this layer. This should
+    only be used for development; in production, a real proxy server
+    should be used.
+
+    The middleware takes a dict that maps a path prefix to a dict
+    describing the host to be proxied to::
+
+        app = ProxyMiddleware(app, {
+            "/static/": {
+                "target": "http://127.0.0.1:5001/",
+            }
+        })
+
+    Each host has the following options:
+
+    ``target``:
+        The target URL to dispatch to. This is required.
+    ``remove_prefix``:
+        Whether to remove the prefix from the URL before dispatching it
+        to the target. The default is ``False``.
+    ``host``:
+        ``"<auto>"`` (default):
+            The host header is automatically rewritten to the URL of the
+            target.
+        ``None``:
+            The host header is unmodified from the client request.
+        Any other value:
+            The host header is overwritten with the value.
+    ``headers``:
+        A dictionary of headers to be sent with the request to the
+        target. The default is ``{}``.
+    ``ssl_context``:
+        A :class:`ssl.SSLContext` defining how to verify requests if the
+        target is HTTPS. The default is ``None``.
+
+    In the example above, everything under ``"/static/"`` is proxied to
+    the server on port 5001, and the host header is rewritten to the
+    target. Set ``remove_prefix`` to ``True`` to also strip the
+    ``"/static/"`` prefix from the proxied URLs.
+
+    :param app: The WSGI application to wrap.
+    :param targets: Proxy target configurations. See description above.
+    :param chunk_size: Size of chunks to read from input stream and
+        write to target.
+    :param timeout: Seconds before an operation to a target fails.
+
+    .. versionadded:: 0.14
+    """
+
+    def __init__(self, app, targets, chunk_size=2 << 13, timeout=10):
+        def _set_defaults(opts):
+            opts.setdefault("remove_prefix", False)
+            opts.setdefault("host", "<auto>")
+            opts.setdefault("headers", {})
+            opts.setdefault("ssl_context", None)
+            return opts
+
+        self.app = app
+        self.targets = dict(
+            ("/%s/" % k.strip("/"), _set_defaults(v)) for k, v in targets.items()
+        )
+        self.chunk_size = chunk_size
+        self.timeout = timeout
+
+    def proxy_to(self, opts, path, prefix):
+        target = url_parse(opts["target"])
+
+        def application(environ, start_response):
+            headers = list(EnvironHeaders(environ).items())
+            headers[:] = [
+                (k, v)
+                for k, v in headers
+                if not is_hop_by_hop_header(k)
+                and k.lower() not in ("content-length", "host")
+            ]
+            headers.append(("Connection", "close"))
+
+            if opts["host"] == "<auto>":
+                headers.append(("Host", target.ascii_host))
+            elif opts["host"] is None:
+                headers.append(("Host", environ["HTTP_HOST"]))
+            else:
+                headers.append(("Host", opts["host"]))
+
+            headers.extend(opts["headers"].items())
+            remote_path = path
+
+            if opts["remove_prefix"]:
+                remote_path = "%s/%s" % (
+                    target.path.rstrip("/"),
+                    remote_path[len(prefix) :].lstrip("/"),
+                )
+
+            content_length = environ.get("CONTENT_LENGTH")
+            chunked = False
+
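+            # Forward a known Content-Length unchanged. An empty value
+            # means the length is unknown, so switch to chunked transfer
+            # encoding; a missing value means there is no request body.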
+            if content_length not in ("", None):
+                headers.append(("Content-Length", content_length))
+            elif content_length is not None:
+                headers.append(("Transfer-Encoding", "chunked"))
+                chunked = True
+
+            try:
+                if target.scheme == "http":
+                    con = client.HTTPConnection(
+                        target.ascii_host, target.port or 80, timeout=self.timeout
+                    )
+                elif target.scheme == "https":
+                    con = client.HTTPSConnection(
+                        target.ascii_host,
+                        target.port or 443,
+                        timeout=self.timeout,
+                        context=opts["ssl_context"],
+                    )
+                else:
+                    raise RuntimeError(
+                        "Target scheme must be 'http' or 'https', got '{}'.".format(
+                            target.scheme
+                        )
+                    )
+
+                con.connect()
+                remote_url = url_quote(remote_path)
+                querystring = environ["QUERY_STRING"]
+
+                if querystring:
+                    remote_url = remote_url + "?" + querystring
+
+                con.putrequest(environ["REQUEST_METHOD"], remote_url, skip_host=True)
+
+                for k, v in headers:
+                    if k.lower() == "connection":
+                        v = "close"
+
+                    con.putheader(k, v)
+
+                con.endheaders()
+                stream = get_input_stream(environ)
+
+                while 1:
+                    data = stream.read(self.chunk_size)
+
+                    if not data:
+                        break
+
+                    if chunked:
+                        con.send(b"%x\r\n%s\r\n" % (len(data), data))
+                    else:
+                        con.send(data)
+
+                resp = con.getresponse()
+            except socket.error:
+                from ..exceptions import BadGateway
+
+                return BadGateway()(environ, start_response)
+
+            start_response(
+                "%d %s" % (resp.status, resp.reason),
+                [
+                    (k.title(), v)
+                    for k, v in resp.getheaders()
+                    if not is_hop_by_hop_header(k)
+                ],
+            )
+
+            def read():
+                while 1:
+                    try:
+                        data = resp.read(self.chunk_size)
+                    except socket.error:
+                        break
+
+                    if not data:
+                        break
+
+                    yield data
+
+            return read()
+
+        return application
+
+    def __call__(self, environ, start_response):
+        path = environ["PATH_INFO"]
+        app = self.app
+
+        for prefix, opts in self.targets.items():
+            if path.startswith(prefix):
+                app = self.proxy_to(opts, path, prefix)
+                break
+
+        return app(environ, start_response)
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/lint.py b/lib/python3.7/site-packages/werkzeug/middleware/lint.py
new file mode 100644
index 0000000000000000000000000000000000000000..15372819be07d61c42512dbe90fbfec6608459ce
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/middleware/lint.py
@@ -0,0 +1,402 @@
+"""
+WSGI Protocol Linter
+====================
+
+This module provides a middleware that performs sanity checks on the
+behavior of the WSGI server and application. It checks that the
+:pep:`3333` WSGI spec is properly implemented. It also warns on some
+common HTTP errors such as non-empty responses for 304 status codes.
+
+.. autoclass:: LintMiddleware
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+from warnings import warn
+
+from .._compat import string_types
+from ..datastructures import Headers
+from ..http import is_entity_header
+from ..wsgi import FileWrapper
+
+try:
+    from urllib.parse import urlparse
+except ImportError:
+    from urlparse import urlparse
+
+
+class WSGIWarning(Warning):
+    """Warning class for WSGI warnings."""
+
+
+class HTTPWarning(Warning):
+    """Warning class for HTTP warnings."""
+
+
+def check_string(context, obj, stacklevel=3):
+    if type(obj) is not str:
+        warn(
+            "'%s' requires strings, got '%s'" % (context, type(obj).__name__),
+            WSGIWarning,
+            stacklevel=stacklevel,
+        )
+
+
+class InputStream(object):
+    def __init__(self, stream):
+        self._stream = stream
+
+    def read(self, *args):
+        if len(args) == 0:
+            warn(
+                "WSGI does not guarantee an EOF marker on the input stream, thus making"
+                " calls to 'wsgi.input.read()' unsafe. Conforming servers may never"
+                " return from this call.",
+                WSGIWarning,
+                stacklevel=2,
+            )
+        elif len(args) != 1:
+            warn(
+                "Too many parameters passed to 'wsgi.input.read()'.",
+                WSGIWarning,
+                stacklevel=2,
+            )
+        return self._stream.read(*args)
+
+    def readline(self, *args):
+        if len(args) == 0:
+            warn(
+                "Calls to 'wsgi.input.readline()' without arguments are unsafe. Use"
+                " 'wsgi.input.read()' instead.",
+                WSGIWarning,
+                stacklevel=2,
+            )
+        elif len(args) == 1:
+            warn(
+                "'wsgi.input.readline()' was called with a size hint. WSGI does not"
+                " support this, although it's available on all major servers.",
+                WSGIWarning,
+                stacklevel=2,
+            )
+        else:
+            raise TypeError("Too many arguments passed to 'wsgi.input.readline()'.")
+        return self._stream.readline(*args)
+
+    def __iter__(self):
+        try:
+            return iter(self._stream)
+        except TypeError:
+            warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2)
+            return iter(())
+
+    def close(self):
+        warn("The application closed the input stream!", WSGIWarning, stacklevel=2)
+        self._stream.close()
+
+
+class ErrorStream(object):
+    def __init__(self, stream):
+        self._stream = stream
+
+    def write(self, s):
+        check_string("wsgi.error.write()", s)
+        self._stream.write(s)
+
+    def flush(self):
+        self._stream.flush()
+
+    def writelines(self, seq):
+        for line in seq:
+            self.write(line)
+
+    def close(self):
+        warn("The application closed the error stream!", WSGIWarning, stacklevel=2)
+        self._stream.close()
+
+
+class GuardedWrite(object):
+    def __init__(self, write, chunks):
+        self._write = write
+        self._chunks = chunks
+
+    def __call__(self, s):
+        check_string("write()", s)
+        # '_write' is the callable returned by start_response(), not a
+        # file-like object, so call it directly.
+        self._write(s)
+        self._chunks.append(len(s))
+
+
+class GuardedIterator(object):
+    def __init__(self, iterator, headers_set, chunks):
+        self._iterator = iterator
+        # Keep a real iterator and advance it with the builtin next() so
+        # this works on both Python 2 and Python 3.
+        self._iter = iter(iterator)
+        self.closed = False
+        self.headers_set = headers_set
+        self.chunks = chunks
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.closed:
+            warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)
+
+        rv = next(self._iter)
+
+        if not self.headers_set:
+            warn(
+                "The application returned before it started the response.",
+                WSGIWarning,
+                stacklevel=2,
+            )
+
+        check_string("application iterator items", rv)
+        self.chunks.append(len(rv))
+        return rv
+
+    next = __next__  # Python 2 compatibility
+
+    def close(self):
+        self.closed = True
+
+        if hasattr(self._iterator, "close"):
+            self._iterator.close()
+
+        if self.headers_set:
+            status_code, headers = self.headers_set
+            bytes_sent = sum(self.chunks)
+            content_length = headers.get("content-length", type=int)
+
+            if status_code == 304:
+                for key, _value in headers:
+                    key = key.lower()
+                    if key not in ("expires", "content-location") and is_entity_header(
+                        key
+                    ):
+                        warn(
+                            "Entity header %r found in 304 response." % key, HTTPWarning
+                        )
+                if bytes_sent:
+                    warn("304 responses must not have a body.", HTTPWarning)
+            elif 100 <= status_code < 200 or status_code == 204:
+                if content_length != 0:
+                    warn(
+                        "%r responses must have an empty content length." % status_code,
+                        HTTPWarning,
+                    )
+                if bytes_sent:
+                    warn(
+                        "%r responses must not have a body." % status_code, HTTPWarning
+                    )
+            elif content_length is not None and content_length != bytes_sent:
+                warn(
+                    "Content-Length and the number of bytes sent to the client do not"
+                    " match.",
+                    WSGIWarning,
+                )
+
+    def __del__(self):
+        if not self.closed:
+            try:
+                warn(
+                    "Iterator was garbage collected before it was closed.", WSGIWarning
+                )
+            except Exception:
+                pass
+
+
+class LintMiddleware(object):
+    """Warns about common errors in the WSGI and HTTP behavior of the
+    server and wrapped application. Some of the issues it checks are:
+
+    -   invalid status codes
+    -   non-bytestrings sent to the WSGI server
+    -   strings returned from the WSGI application
+    -   non-empty conditional responses
+    -   unquoted etags
+    -   relative URLs in the Location header
+    -   unsafe calls to wsgi.input
+    -   unclosed iterators
+
+    Error information is emitted using the :mod:`warnings` module.
+
+    :param app: The WSGI application to wrap.
+
+    .. code-block:: python
+
+        from werkzeug.middleware.lint import LintMiddleware
+        app = LintMiddleware(app)
+    """
+
+    def __init__(self, app):
+        self.app = app
+
+    def check_environ(self, environ):
+        if type(environ) is not dict:
+            warn(
+                "WSGI environment is not a standard Python dict.",
+                WSGIWarning,
+                stacklevel=4,
+            )
+        for key in (
+            "REQUEST_METHOD",
+            "SERVER_NAME",
+            "SERVER_PORT",
+            "wsgi.version",
+            "wsgi.input",
+            "wsgi.errors",
+            "wsgi.multithread",
+            "wsgi.multiprocess",
+            "wsgi.run_once",
+        ):
+            if key not in environ:
+                warn(
+                    "Required environment key %r not found" % key,
+                    WSGIWarning,
+                    stacklevel=3,
+                )
+        if environ["wsgi.version"] != (1, 0):
+            warn("Environ is not a WSGI 1.0 environ.", WSGIWarning, stacklevel=3)
+
+        script_name = environ.get("SCRIPT_NAME", "")
+        path_info = environ.get("PATH_INFO", "")
+
+        if script_name and script_name[0] != "/":
+            warn(
+                "'SCRIPT_NAME' does not start with a slash: %r" % script_name,
+                WSGIWarning,
+                stacklevel=3,
+            )
+
+        if path_info and path_info[0] != "/":
+            warn(
+                "'PATH_INFO' does not start with a slash: %r" % path_info,
+                WSGIWarning,
+                stacklevel=3,
+            )
+
+    def check_start_response(self, status, headers, exc_info):
+        check_string("status", status)
+        status_code = status.split(None, 1)[0]
+
+        if len(status_code) != 3 or not status_code.isdigit():
+            warn(WSGIWarning("Status code must be three digits"), stacklevel=3)
+
+        if len(status) < 4 or status[3] != " ":
+            warn(
+                WSGIWarning(
+                    "Invalid value for status %r.  Valid status strings are"
+                    " three digits, a space and a status explanation." % status
+                ),
+                stacklevel=3,
+            )
+
+        status_code = int(status_code)
+
+        if status_code < 100:
+            warn(WSGIWarning("status code < 100 detected"), stacklevel=3)
+
+        if type(headers) is not list:
+            warn(WSGIWarning("header list is not a list"), stacklevel=3)
+
+        for item in headers:
+            if type(item) is not tuple or len(item) != 2:
+                warn(WSGIWarning("Header items must be 2-item tuples"), stacklevel=3)
+            name, value = item
+            if type(name) is not str or type(value) is not str:
+                warn(WSGIWarning("header items must be strings"), stacklevel=3)
+            if name.lower() == "status":
+                warn(
+                    WSGIWarning(
+                        "The status header is not supported due to "
+                        "conflicts with the CGI spec."
+                    ),
+                    stacklevel=3,
+                )
+
+        if exc_info is not None and not isinstance(exc_info, tuple):
+            warn(WSGIWarning("invalid value for exc_info"), stacklevel=3)
+
+        headers = Headers(headers)
+        self.check_headers(headers)
+
+        return status_code, headers
+
+    def check_headers(self, headers):
+        etag = headers.get("etag")
+
+        if etag is not None:
+            if etag.startswith(("W/", "w/")):
+                if etag.startswith("w/"):
+                    warn(
+                        HTTPWarning("weak etag indicator should be uppercase."),
+                        stacklevel=4,
+                    )
+
+                etag = etag[2:]
+
+            if not (etag[:1] == etag[-1:] == '"'):
+                warn(HTTPWarning("unquoted etag emitted."), stacklevel=4)
+
+        location = headers.get("location")
+
+        if location is not None:
+            if not urlparse(location).netloc:
+                warn(
+                    HTTPWarning("absolute URLs required for location header"),
+                    stacklevel=4,
+                )
+
+    def check_iterator(self, app_iter):
+        if isinstance(app_iter, string_types):
+            warn(
+                "The application returned a string. The response will send one"
+                " character at a time to the client, which will kill performance."
+                " Return a list or iterable instead.",
+                WSGIWarning,
+                stacklevel=3,
+            )
+
+    def __call__(self, *args, **kwargs):
+        if len(args) != 2:
+            warn("A WSGI app takes two arguments.", WSGIWarning, stacklevel=2)
+
+        if kwargs:
+            warn(
+                "A WSGI app does not take keyword arguments.", WSGIWarning, stacklevel=2
+            )
+
+        environ, start_response = args
+
+        self.check_environ(environ)
+        environ["wsgi.input"] = InputStream(environ["wsgi.input"])
+        environ["wsgi.errors"] = ErrorStream(environ["wsgi.errors"])
+
+        # Hook our own file wrapper in so that applications will always
+        # iterate to the end and we can check the content length.
+        environ["wsgi.file_wrapper"] = FileWrapper
+
+        headers_set = []
+        chunks = []
+
+        def checking_start_response(*args, **kwargs):
+            if len(args) not in (2, 3):
+                warn(
+                    "Invalid number of arguments: %s, expected 2 or 3." % len(args),
+                    WSGIWarning,
+                    stacklevel=2,
+                )
+
+            if kwargs:
+                warn("'start_response' does not take keyword arguments.", WSGIWarning)
+
+            status, headers = args[:2]
+
+            if len(args) == 3:
+                exc_info = args[2]
+            else:
+                exc_info = None
+
+            headers_set[:] = self.check_start_response(status, headers, exc_info)
+            return GuardedWrite(start_response(status, headers, exc_info), chunks)
+
+        app_iter = self.app(environ, checking_start_response)
+        self.check_iterator(app_iter)
+        return GuardedIterator(app_iter, headers_set, chunks)
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/profiler.py b/lib/python3.7/site-packages/werkzeug/middleware/profiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e2edc20a4992a5fe350e6f0b9d1efb24e134ee1
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/middleware/profiler.py
@@ -0,0 +1,132 @@
+"""
+Application Profiler
+====================
+
+This module provides a middleware that profiles each request with the
+:mod:`cProfile` module. This can help identify bottlenecks in your code
+that may be slowing down your application.
+
+.. autoclass:: ProfilerMiddleware
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+from __future__ import print_function
+
+import os.path
+import sys
+import time
+from pstats import Stats
+
+try:
+    from cProfile import Profile
+except ImportError:
+    from profile import Profile
+
+
+class ProfilerMiddleware(object):
+    """Wrap a WSGI application and profile the execution of each
+    request. Responses are buffered so that timings are more exact.
+
+    If ``stream`` is given, :class:`pstats.Stats` are written to it
+    after each request. If ``profile_dir`` is given, :mod:`cProfile`
+    data files are saved to that directory, one file per request.
+
+    The filename can be customized by passing ``filename_format``. If
+    it is a string, it will be formatted using :meth:`str.format` with
+    the following fields available:
+
+    -   ``{method}`` - The request method; GET, POST, etc.
+    -   ``{path}`` - The request path, or ``'root'`` if the path is empty.
+    -   ``{elapsed}`` - The elapsed time of the request.
+    -   ``{time}`` - The time of the request.
+
+    If it is a callable, it will be called with the WSGI ``environ``
+    dict and should return a filename.
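+
+    A sketch of a callable format (the helper name is illustrative)::
+
+        def profile_name(environ):
+            return "%s.prof" % environ["REQUEST_METHOD"]
+
+        app = ProfilerMiddleware(
+            app, profile_dir="./profiles", filename_format=profile_name
+        )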
+
+    :param app: The WSGI application to wrap.
+    :param stream: Write stats to this stream. Disable with ``None``.
+    :param sort_by: A tuple of columns to sort stats by. See
+        :meth:`pstats.Stats.sort_stats`.
+    :param restrictions: A tuple of restrictions to filter stats by. See
+        :meth:`pstats.Stats.print_stats`.
+    :param profile_dir: Save profile data files to this directory.
+    :param filename_format: Format string for profile data file names,
+        or a callable returning a name. See explanation above.
+
+    .. code-block:: python
+
+        from werkzeug.middleware.profiler import ProfilerMiddleware
+        app = ProfilerMiddleware(app)
+
+    .. versionchanged:: 0.15
+        Stats are written even if ``profile_dir`` is given, and can be
+        disabled by passing ``stream=None``.
+
+    .. versionadded:: 0.15
+        Added ``filename_format``.
+
+    .. versionadded:: 0.9
+        Added ``restrictions`` and ``profile_dir``.
+    """
+
+    def __init__(
+        self,
+        app,
+        stream=sys.stdout,
+        sort_by=("time", "calls"),
+        restrictions=(),
+        profile_dir=None,
+        filename_format="{method}.{path}.{elapsed:06d}ms.{time:d}.prof",
+    ):
+        self._app = app
+        self._stream = stream
+        self._sort_by = sort_by
+        self._restrictions = restrictions
+        self._profile_dir = profile_dir
+        self._filename_format = filename_format
+
+    def __call__(self, environ, start_response):
+        response_body = []
+
+        def catching_start_response(status, headers, exc_info=None):
+            start_response(status, headers, exc_info)
+            return response_body.append
+
+        def runapp():
+            app_iter = self._app(environ, catching_start_response)
+            response_body.extend(app_iter)
+
+            if hasattr(app_iter, "close"):
+                app_iter.close()
+
+        profile = Profile()
+        start = time.time()
+        profile.runcall(runapp)
+        body = b"".join(response_body)
+        elapsed = time.time() - start
+
+        if self._profile_dir is not None:
+            if callable(self._filename_format):
+                filename = self._filename_format(environ)
+            else:
+                filename = self._filename_format.format(
+                    method=environ["REQUEST_METHOD"],
+                    path=(
+                        environ.get("PATH_INFO").strip("/").replace("/", ".") or "root"
+                    ),
+                    elapsed=elapsed * 1000.0,
+                    time=time.time(),
+                )
+            filename = os.path.join(self._profile_dir, filename)
+            profile.dump_stats(filename)
+
+        if self._stream is not None:
+            stats = Stats(profile, stream=self._stream)
+            stats.sort_stats(*self._sort_by)
+            print("-" * 80, file=self._stream)
+            print("PATH: {!r}".format(environ.get("PATH_INFO", "")), file=self._stream)
+            stats.print_stats(*self._restrictions)
+            print("-" * 80 + "\n", file=self._stream)
+
+        return [body]
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/proxy_fix.py b/lib/python3.7/site-packages/werkzeug/middleware/proxy_fix.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc1dacc8c8eb1c47117e47ecad35542764264449
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/middleware/proxy_fix.py
@@ -0,0 +1,228 @@
+"""
+X-Forwarded-For Proxy Fix
+=========================
+
+This module provides a middleware that adjusts the WSGI environ based on
+``X-Forwarded-`` headers that proxies in front of an application may
+set.
+
+When an application is running behind a proxy server, WSGI may see the
+request as coming from that server rather than the real client. Proxies
+set various headers to track where the request actually came from.
+
+This middleware should only be applied if the application is actually
+behind such a proxy, and should be configured with the number of proxies
+that are chained in front of it. Not all proxies set all the headers.
+Since incoming headers can be faked, you must set how many proxies are
+setting each header so the middleware knows what to trust.
+
+.. autoclass:: ProxyFix
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+import warnings
+
+
+class ProxyFix(object):
+    """Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in
+    front of the application may set.
+
+    -   ``X-Forwarded-For`` sets ``REMOTE_ADDR``.
+    -   ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``.
+    -   ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and
+        ``SERVER_PORT``.
+    -   ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``.
+    -   ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``.
+
+    You must tell the middleware how many proxies set each header so it
+    knows what values to trust. It is a security issue to trust values
+    that came from the client rather than a proxy.
+
+    The original values of the headers are stored in the WSGI
+    environ as ``werkzeug.proxy_fix.orig``, a dict.
+
+    :param app: The WSGI application to wrap.
+    :param x_for: Number of values to trust for ``X-Forwarded-For``.
+    :param x_proto: Number of values to trust for ``X-Forwarded-Proto``.
+    :param x_host: Number of values to trust for ``X-Forwarded-Host``.
+    :param x_port: Number of values to trust for ``X-Forwarded-Port``.
+    :param x_prefix: Number of values to trust for
+        ``X-Forwarded-Prefix``.
+    :param num_proxies: Deprecated, use ``x_for`` instead.
+
+    .. code-block:: python
+
+        from werkzeug.middleware.proxy_fix import ProxyFix
+        # App is behind one proxy that sets the -For and -Host headers.
+        app = ProxyFix(app, x_for=1, x_host=1)
+
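+    With a load balancer and one additional proxy in front, each setting
+    ``X-Forwarded-For`` and ``X-Forwarded-Proto`` (a sketch; adjust the
+    counts to match your own chain):
+
+    .. code-block:: python
+
+        app = ProxyFix(app, x_for=2, x_proto=2)
+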
+    .. versionchanged:: 0.15
+        All headers support multiple values. The ``num_proxies``
+        argument is deprecated. Each header is configured with a
+        separate number of trusted proxies.
+
+    .. versionchanged:: 0.15
+        Original WSGI environ values are stored in the
+        ``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``,
+        ``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated
+        and will be removed in 1.0.
+
+    .. versionchanged:: 0.15
+        Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.
+
+    .. versionchanged:: 0.15
+        ``X-Forwarded-Host`` and ``X-Forwarded-Port`` modify
+        ``SERVER_NAME`` and ``SERVER_PORT``.
+    """
+
+    def __init__(
+        self, app, num_proxies=None, x_for=1, x_proto=0, x_host=0, x_port=0, x_prefix=0
+    ):
+        self.app = app
+        self.x_for = x_for
+        self.x_proto = x_proto
+        self.x_host = x_host
+        self.x_port = x_port
+        self.x_prefix = x_prefix
+        self.num_proxies = num_proxies
+
+    @property
+    def num_proxies(self):
+        """The number of proxies setting ``X-Forwarded-For`` in front
+        of the application.
+
+        .. deprecated:: 0.15
+            A separate number of trusted proxies is configured for each
+            header. ``num_proxies`` maps to ``x_for``. This method will
+            be removed in 1.0.
+
+        :internal:
+        """
+        warnings.warn(
+            "'num_proxies' is deprecated as of version 0.15 and will be"
+            " removed in version 1.0. Use 'x_for' instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return self.x_for
+
+    @num_proxies.setter
+    def num_proxies(self, value):
+        if value is not None:
+            warnings.warn(
+                "'num_proxies' is deprecated as of version 0.15 and"
+                " will be removed in version 1.0. Use 'x_for' instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            self.x_for = value
+
+    def get_remote_addr(self, forwarded_for):
+        """Get the real ``remote_addr`` by looking backwards ``x_for``
+        number of values in the ``X-Forwarded-For`` header.
+
+        :param forwarded_for: List of values parsed from the
+            ``X-Forwarded-For`` header.
+        :return: The real ``remote_addr``, or ``None`` if there were not
+            at least ``x_for`` values.
+
+        .. deprecated:: 0.15
+            This is handled internally for each header. This method will
+            be removed in 1.0.
+
+        .. versionchanged:: 0.9
+            Use ``num_proxies`` instead of always picking the first
+            value.
+
+        .. versionadded:: 0.8
+        """
+        warnings.warn(
+            "'get_remote_addr' is deprecated as of version 0.15 and"
+            " will be removed in version 1.0. It is now handled"
+            " internally for each header.",
+            DeprecationWarning,
+        )
+        return self._get_trusted_comma(self.x_for, ",".join(forwarded_for))
+
+    def _get_trusted_comma(self, trusted, value):
+        """Get the real value from a comma-separated header based on the
+        configured number of trusted proxies.
+
+        :param trusted: Number of values to trust in the header.
+        :param value: Header value to parse.
+        :return: The real value, or ``None`` if there are fewer values
+            than the number of trusted proxies.
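+
+        For example (illustrative values), with ``trusted=2`` and the
+        value ``"client, proxy1, proxy2"`` this returns ``"proxy1"``,
+        the second entry from the end.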
+
+        .. versionadded:: 0.15
+        """
+        if not (trusted and value):
+            return
+        values = [x.strip() for x in value.split(",")]
+        if len(values) >= trusted:
+            return values[-trusted]
+
+    def __call__(self, environ, start_response):
+        """Modify the WSGI environ based on the various ``Forwarded``
+        headers before calling the wrapped application. Store the
+        original environ values in ``werkzeug.proxy_fix.orig_{key}``.
+        """
+        environ_get = environ.get
+        orig_remote_addr = environ_get("REMOTE_ADDR")
+        orig_wsgi_url_scheme = environ_get("wsgi.url_scheme")
+        orig_http_host = environ_get("HTTP_HOST")
+        environ.update(
+            {
+                "werkzeug.proxy_fix.orig": {
+                    "REMOTE_ADDR": orig_remote_addr,
+                    "wsgi.url_scheme": orig_wsgi_url_scheme,
+                    "HTTP_HOST": orig_http_host,
+                    "SERVER_NAME": environ_get("SERVER_NAME"),
+                    "SERVER_PORT": environ_get("SERVER_PORT"),
+                    "SCRIPT_NAME": environ_get("SCRIPT_NAME"),
+                },
+                # todo: remove deprecated keys
+                "werkzeug.proxy_fix.orig_remote_addr": orig_remote_addr,
+                "werkzeug.proxy_fix.orig_wsgi_url_scheme": orig_wsgi_url_scheme,
+                "werkzeug.proxy_fix.orig_http_host": orig_http_host,
+            }
+        )
+
+        x_for = self._get_trusted_comma(self.x_for, environ_get("HTTP_X_FORWARDED_FOR"))
+        if x_for:
+            environ["REMOTE_ADDR"] = x_for
+
+        x_proto = self._get_trusted_comma(
+            self.x_proto, environ_get("HTTP_X_FORWARDED_PROTO")
+        )
+        if x_proto:
+            environ["wsgi.url_scheme"] = x_proto
+
+        x_host = self._get_trusted_comma(
+            self.x_host, environ_get("HTTP_X_FORWARDED_HOST")
+        )
+        if x_host:
+            environ["HTTP_HOST"] = x_host
+            parts = x_host.split(":", 1)
+            environ["SERVER_NAME"] = parts[0]
+            if len(parts) == 2:
+                environ["SERVER_PORT"] = parts[1]
+
+        x_port = self._get_trusted_comma(
+            self.x_port, environ_get("HTTP_X_FORWARDED_PORT")
+        )
+        if x_port:
+            host = environ.get("HTTP_HOST")
+            if host:
+                parts = host.split(":", 1)
+                host = parts[0] if len(parts) == 2 else host
+                environ["HTTP_HOST"] = "%s:%s" % (host, x_port)
+            environ["SERVER_PORT"] = x_port
+
+        x_prefix = self._get_trusted_comma(
+            self.x_prefix, environ_get("HTTP_X_FORWARDED_PREFIX")
+        )
+        if x_prefix:
+            environ["SCRIPT_NAME"] = x_prefix
+
+        return self.app(environ, start_response)
diff --git a/lib/python3.7/site-packages/werkzeug/middleware/shared_data.py b/lib/python3.7/site-packages/werkzeug/middleware/shared_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..a902281dacd781dc944261847424b3fc7599e7db
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/middleware/shared_data.py
@@ -0,0 +1,260 @@
+"""
+Serve Shared Static Files
+=========================
+
+.. autoclass:: SharedDataMiddleware
+    :members: is_allowed
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+import mimetypes
+import os
+import posixpath
+from datetime import datetime
+from io import BytesIO
+from time import mktime
+from time import time
+from zlib import adler32
+
+from .._compat import PY2
+from .._compat import string_types
+from ..filesystem import get_filesystem_encoding
+from ..http import http_date
+from ..http import is_resource_modified
+from ..wsgi import get_path_info
+from ..wsgi import wrap_file
+
+
+class SharedDataMiddleware(object):
+
+    """A WSGI middleware that provides static content for development
+    environments or simple server setups. Usage is quite simple::
+
+        import os
+        from werkzeug.middleware.shared_data import SharedDataMiddleware
+
+        app = SharedDataMiddleware(app, {
+            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
+        })
+
+    The contents of the folder ``./shared`` will now be available on
+    ``http://example.com/shared/``.  This is pretty useful during development
+    because a standalone media server is not required.  One can also mount
+    files on the root folder and still continue to use the application because
+    the shared data middleware forwards all unhandled requests to the
+    application, even if the requests are below one of the shared folders.
+
+    If `pkg_resources` is available you can also tell the middleware to serve
+    files from package data::
+
+        app = SharedDataMiddleware(app, {
+            '/static': ('myapplication', 'static')
+        })
+
+    This will then serve the ``static`` folder in the `myapplication`
+    Python package.
+
+    The optional `disallow` parameter can be an :func:`~fnmatch.fnmatch`
+    pattern for files that are not accessible from the web.  If `cache` is
+    set to `False` no caching headers are sent.
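+
+    For example, to keep Python source files from being served (an
+    illustrative pattern, not part of the original docs)::
+
+        app = SharedDataMiddleware(app, {
+            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
+        }, disallow='*.py')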
+
+    Currently the middleware does not support non-ASCII filenames.  If the
+    encoding on the file system happens to match the encoding of the URI it
+    may work, but that would be by accident.  We strongly suggest using
+    ASCII-only file names for static files.
+
+    The middleware will guess the mimetype using the Python `mimetypes`
+    module.  If it's unable to figure out the mimetype it will fall back
+    to `fallback_mimetype`.
+
+    .. versionchanged:: 0.5
+       The cache timeout is configurable now.
+
+    .. versionadded:: 0.6
+       The `fallback_mimetype` parameter was added.
+
+    :param app: the application to wrap.  If you don't want to wrap an
+                application you can pass it :exc:`NotFound`.
+    :param exports: a list or dict of exported files and folders.
+    :param disallow: a :func:`~fnmatch.fnmatch` pattern for files that
+        may not be accessed.
+    :param fallback_mimetype: the fallback mimetype for unknown files.
+    :param cache: enable or disable caching headers.
+    :param cache_timeout: the cache timeout in seconds for the headers.
+    """
+
+    def __init__(
+        self,
+        app,
+        exports,
+        disallow=None,
+        cache=True,
+        cache_timeout=60 * 60 * 12,
+        fallback_mimetype="text/plain",
+    ):
+        self.app = app
+        self.exports = []
+        self.cache = cache
+        self.cache_timeout = cache_timeout
+
+        if hasattr(exports, "items"):
+            exports = exports.items()
+
+        for key, value in exports:
+            if isinstance(value, tuple):
+                loader = self.get_package_loader(*value)
+            elif isinstance(value, string_types):
+                if os.path.isfile(value):
+                    loader = self.get_file_loader(value)
+                else:
+                    loader = self.get_directory_loader(value)
+            else:
+                raise TypeError("unknown def %r" % value)
+
+            self.exports.append((key, loader))
+
+        if disallow is not None:
+            from fnmatch import fnmatch
+
+            self.is_allowed = lambda x: not fnmatch(x, disallow)
+
+        self.fallback_mimetype = fallback_mimetype
+
+    def is_allowed(self, filename):
+        """Subclasses can override this method to disallow the access to
+        certain files.  However by providing `disallow` in the constructor
+        this method is overwritten.
+        """
+        return True
+
+    def _opener(self, filename):
+        return lambda: (
+            open(filename, "rb"),
+            datetime.utcfromtimestamp(os.path.getmtime(filename)),
+            int(os.path.getsize(filename)),
+        )
+
+    def get_file_loader(self, filename):
+        return lambda x: (os.path.basename(filename), self._opener(filename))
+
+    def get_package_loader(self, package, package_path):
+        from pkg_resources import DefaultProvider, ResourceManager, get_provider
+
+        loadtime = datetime.utcnow()
+        provider = get_provider(package)
+        manager = ResourceManager()
+        filesystem_bound = isinstance(provider, DefaultProvider)
+
+        def loader(path):
+            if path is None:
+                return None, None
+
+            path = posixpath.join(package_path, path)
+
+            if not provider.has_resource(path):
+                return None, None
+
+            basename = posixpath.basename(path)
+
+            if filesystem_bound:
+                return (
+                    basename,
+                    self._opener(provider.get_resource_filename(manager, path)),
+                )
+
+            s = provider.get_resource_string(manager, path)
+            return basename, lambda: (BytesIO(s), loadtime, len(s))
+
+        return loader
+
+    def get_directory_loader(self, directory):
+        def loader(path):
+            if path is not None:
+                path = os.path.join(directory, path)
+            else:
+                path = directory
+
+            if os.path.isfile(path):
+                return os.path.basename(path), self._opener(path)
+
+            return None, None
+
+        return loader
+
+    def generate_etag(self, mtime, file_size, real_filename):
+        if not isinstance(real_filename, bytes):
+            real_filename = real_filename.encode(get_filesystem_encoding())
+
+        return "wzsdm-%d-%s-%s" % (
+            mktime(mtime.timetuple()),
+            file_size,
+            adler32(real_filename) & 0xFFFFFFFF,
+        )
+
+    def __call__(self, environ, start_response):
+        cleaned_path = get_path_info(environ)
+
+        if PY2:
+            cleaned_path = cleaned_path.encode(get_filesystem_encoding())
+
+        # sanitize the path for non unix systems
+        cleaned_path = cleaned_path.strip("/")
+
+        for sep in os.sep, os.altsep:
+            if sep and sep != "/":
+                cleaned_path = cleaned_path.replace(sep, "/")
+
+        path = "/" + "/".join(x for x in cleaned_path.split("/") if x and x != "..")
+        file_loader = None
+
+        for search_path, loader in self.exports:
+            if search_path == path:
+                real_filename, file_loader = loader(None)
+
+                if file_loader is not None:
+                    break
+
+            if not search_path.endswith("/"):
+                search_path += "/"
+
+            if path.startswith(search_path):
+                real_filename, file_loader = loader(path[len(search_path) :])
+
+                if file_loader is not None:
+                    break
+
+        if file_loader is None or not self.is_allowed(real_filename):
+            return self.app(environ, start_response)
+
+        guessed_type = mimetypes.guess_type(real_filename)
+        mime_type = guessed_type[0] or self.fallback_mimetype
+        f, mtime, file_size = file_loader()
+
+        headers = [("Date", http_date())]
+
+        if self.cache:
+            timeout = self.cache_timeout
+            etag = self.generate_etag(mtime, file_size, real_filename)
+            headers += [
+                ("Etag", '"%s"' % etag),
+                ("Cache-Control", "max-age=%d, public" % timeout),
+            ]
+
+            if not is_resource_modified(environ, etag, last_modified=mtime):
+                f.close()
+                start_response("304 Not Modified", headers)
+                return []
+
+            headers.append(("Expires", http_date(time() + timeout)))
+        else:
+            headers.append(("Cache-Control", "public"))
+
+        headers.extend(
+            (
+                ("Content-Type", mime_type),
+                ("Content-Length", str(file_size)),
+                ("Last-Modified", http_date(mtime)),
+            )
+        )
+        start_response("200 OK", headers)
+        return wrap_file(environ, f)
diff --git a/lib/python3.7/site-packages/werkzeug/posixemulation.py b/lib/python3.7/site-packages/werkzeug/posixemulation.py
new file mode 100644
index 0000000000000000000000000000000000000000..696b45622025ee66566d3a293f4cd8b8f819a4b5
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/posixemulation.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+r"""
+    werkzeug.posixemulation
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    Provides a POSIX emulation for some features that are relevant to
+    web applications.  The main purpose is to simplify support for
+    systems such as Windows NT that are not 100% POSIX compatible.
+
+    Currently this only implements a :func:`rename` function that
+    follows POSIX semantics.  Eg: if the target file already exists it
+    will be replaced without asking.
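+
+    Usage mirrors :func:`os.rename` (a sketch; the file names are
+    illustrative)::
+
+        from werkzeug.posixemulation import rename
+
+        rename('config.ini.tmp', 'config.ini')  # replaces any existing target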
+
+    This module was introduced in 0.6.1 and is not a public interface.
+    It might become one in later versions of Werkzeug.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import errno
+import os
+import random
+import sys
+import time
+
+from ._compat import to_unicode
+from .filesystem import get_filesystem_encoding
+
+can_rename_open_file = False
+
+if os.name == "nt":
+    try:
+        import ctypes
+
+        _MOVEFILE_REPLACE_EXISTING = 0x1
+        _MOVEFILE_WRITE_THROUGH = 0x8
+        _MoveFileEx = ctypes.windll.kernel32.MoveFileExW
+
+        def _rename(src, dst):
+            src = to_unicode(src, get_filesystem_encoding())
+            dst = to_unicode(dst, get_filesystem_encoding())
+            if _rename_atomic(src, dst):
+                return True
+            retry = 0
+            rv = False
+            while not rv and retry < 100:
+                rv = _MoveFileEx(
+                    src, dst, _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH
+                )
+                if not rv:
+                    time.sleep(0.001)
+                    retry += 1
+            return rv
+
+        # new in Vista and Windows Server 2008
+        _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
+        _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
+        _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
+        _CloseHandle = ctypes.windll.kernel32.CloseHandle
+        can_rename_open_file = True
+
+        def _rename_atomic(src, dst):
+            ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, "Werkzeug rename")
+            if ta == -1:
+                return False
+            try:
+                retry = 0
+                rv = False
+                while not rv and retry < 100:
+                    rv = _MoveFileTransacted(
+                        src,
+                        dst,
+                        None,
+                        None,
+                        _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH,
+                        ta,
+                    )
+                    if rv:
+                        rv = _CommitTransaction(ta)
+                        break
+                    else:
+                        time.sleep(0.001)
+                        retry += 1
+                return rv
+            finally:
+                _CloseHandle(ta)
+
+    except Exception:
+
+        def _rename(src, dst):
+            return False
+
+        def _rename_atomic(src, dst):
+            return False
+
+    def rename(src, dst):
+        # Try atomic or pseudo-atomic rename
+        if _rename(src, dst):
+            return
+        # Fall back to "move away and replace"
+        try:
+            os.rename(src, dst)
+        except OSError as e:
+            if e.errno != errno.EEXIST:
+                raise
+            old = "%s-%08x" % (dst, random.randint(0, sys.maxsize))
+            os.rename(dst, old)
+            os.rename(src, dst)
+            try:
+                os.unlink(old)
+            except Exception:
+                pass
+
+
+else:
+    rename = os.rename
+    can_rename_open_file = True
diff --git a/lib/python3.7/site-packages/werkzeug/routing.py b/lib/python3.7/site-packages/werkzeug/routing.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7cff94d4ffa98bab6b47999b53637b5fb0d8ff6
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/routing.py
@@ -0,0 +1,2200 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.routing
+    ~~~~~~~~~~~~~~~~
+
+    When it comes to combining multiple controller or view functions (however
+    you want to call them) you need a dispatcher.  A simple way would be
+    applying regular expression tests on the ``PATH_INFO`` and calling
+    registered callback functions that return the value then.
+
+    This module implements a much more powerful system than simple regular
+    expression matching because it can also convert values in the URLs and
+    build URLs.
+
+    Here is a simple example that creates a URL map for an application with
+    two subdomains (www and kb) and some URL rules:
+
+    >>> m = Map([
+    ...     # Static URLs
+    ...     Rule('/', endpoint='static/index'),
+    ...     Rule('/about', endpoint='static/about'),
+    ...     Rule('/help', endpoint='static/help'),
+    ...     # Knowledge Base
+    ...     Subdomain('kb', [
+    ...         Rule('/', endpoint='kb/index'),
+    ...         Rule('/browse/', endpoint='kb/browse'),
+    ...         Rule('/browse/<int:id>/', endpoint='kb/browse'),
+    ...         Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
+    ...     ])
+    ... ], default_subdomain='www')
+
+    If the application doesn't use subdomains it's perfectly fine to not set
+    the default subdomain and not use the `Subdomain` rule factory.  The endpoint
+    in the rules can be anything, for example import paths or unique
+    identifiers.  The WSGI application can use those endpoints to get the
+    handler for that URL.  It doesn't have to be a string at all but it's
+    recommended.
+
+    Now it's possible to create a URL adapter for one of the subdomains and
+    build URLs:
+
+    >>> c = m.bind('example.com')
+    >>> c.build("kb/browse", dict(id=42))
+    'http://kb.example.com/browse/42/'
+    >>> c.build("kb/browse", dict())
+    'http://kb.example.com/browse/'
+    >>> c.build("kb/browse", dict(id=42, page=3))
+    'http://kb.example.com/browse/42/3'
+    >>> c.build("static/about")
+    '/about'
+    >>> c.build("static/index", force_external=True)
+    'http://www.example.com/'
+
+    >>> c = m.bind('example.com', subdomain='kb')
+    >>> c.build("static/about")
+    'http://www.example.com/about'
+
+    The first argument to bind is the server name *without* the subdomain.
+    By default it will assume that the script is mounted on the root, but
+    often that's not the case, so you can provide the real mount point as
+    the second argument:
+
+    >>> c = m.bind('example.com', '/applications/example')
+
+    The third argument can be the subdomain; if not given, the default
+    subdomain is used.  For more details about binding have a look at the
+    documentation of the `MapAdapter`.
+
+    And here is how you can match URLs:
+
+    >>> c = m.bind('example.com')
+    >>> c.match("/")
+    ('static/index', {})
+    >>> c.match("/about")
+    ('static/about', {})
+    >>> c = m.bind('example.com', '/', 'kb')
+    >>> c.match("/")
+    ('kb/index', {})
+    >>> c.match("/browse/42/23")
+    ('kb/browse', {'id': 42, 'page': 23})
+
+    If matching fails you get a `NotFound` exception; if the rule thinks
+    it's a good idea to redirect (for example because the URL was defined
+    to have a slash at the end but the request was missing that slash) it
+    will raise a `RequestRedirect` exception.  Both are subclasses of
+    `HTTPException`, so you can use those errors as responses in the
+    application.
+
+    If matching succeeded but the URL rule was incompatible with the given
+    method (for example there were only rules for `GET` and `HEAD` and the
+    routing system tried to match a `POST` request) a `MethodNotAllowed`
+    exception is raised.
+
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import difflib
+import dis
+import posixpath
+import re
+import sys
+import types
+import uuid
+from functools import partial
+from pprint import pformat
+from threading import Lock
+
+from ._compat import implements_to_string
+from ._compat import iteritems
+from ._compat import itervalues
+from ._compat import native_string_result
+from ._compat import PY2
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import to_bytes
+from ._compat import to_unicode
+from ._compat import wsgi_decoding_dance
+from ._internal import _encode_idna
+from ._internal import _get_environ
+from .datastructures import ImmutableDict
+from .datastructures import MultiDict
+from .exceptions import BadHost
+from .exceptions import HTTPException
+from .exceptions import MethodNotAllowed
+from .exceptions import NotFound
+from .urls import _fast_url_quote
+from .urls import url_encode
+from .urls import url_join
+from .urls import url_quote
+from .utils import cached_property
+from .utils import format_string
+from .utils import redirect
+from .wsgi import get_host
+
+_rule_re = re.compile(
+    r"""
+    (?P<static>[^<]*)                           # static rule data
+    <
+    (?:
+        (?P<converter>[a-zA-Z_][a-zA-Z0-9_]*)   # converter name
+        (?:\((?P<args>.*?)\))?                  # converter arguments
+        \:                                      # variable delimiter
+    )?
+    (?P<variable>[a-zA-Z_][a-zA-Z0-9_]*)        # variable name
+    >
+    """,
+    re.VERBOSE,
+)
+_simple_rule_re = re.compile(r"<([^>]+)>")
+_converter_args_re = re.compile(
+    r"""
+    ((?P<name>\w+)\s*=\s*)?
+    (?P<value>
+        True|False|
+        \d+\.\d+|
+        \d+\.|
+        \d+|
+        [\w\d_.]+|
+        [urUR]?(?P<stringval>"[^"]*?"|'[^']*')
+    )\s*,
+    """,
+    re.VERBOSE | re.UNICODE,
+)
+
+
+_PYTHON_CONSTANTS = {"None": None, "True": True, "False": False}
+
+
+def _pythonize(value):
+    if value in _PYTHON_CONSTANTS:
+        return _PYTHON_CONSTANTS[value]
+    for convert in int, float:
+        try:
+            return convert(value)
+        except ValueError:
+            pass
+    if value[:1] == value[-1:] and value[0] in "\"'":
+        value = value[1:-1]
+    return text_type(value)
+
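+# Illustrative conversions (not part of the original source):
+# _pythonize("True") -> True; _pythonize("42") -> 42; _pythonize("3.5") -> 3.5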
+
+def parse_converter_args(argstr):
+    argstr += ","
+    args = []
+    kwargs = {}
+
+    for item in _converter_args_re.finditer(argstr):
+        value = item.group("stringval")
+        if value is None:
+            value = item.group("value")
+        value = _pythonize(value)
+        if not item.group("name"):
+            args.append(value)
+        else:
+            name = item.group("name")
+            kwargs[name] = value
+
+    return tuple(args), kwargs
+
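+# An illustrative call (not in the original source):
+# parse_converter_args("2, 5, allow_slash=True") == ((2, 5), {"allow_slash": True})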
+
+def parse_rule(rule):
+    """Parse a rule and return it as generator. Each iteration yields tuples
+    in the form ``(converter, arguments, variable)``. If the converter is
+    `None` it's a static url part, otherwise it's a dynamic one.
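+
+    For example (an illustrative call, not from the original docs),
+    ``parse_rule("/page/<int:id>")`` yields ``(None, None, '/page/')``
+    followed by ``('int', None, 'id')``.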
+
+    :internal:
+    """
+    pos = 0
+    end = len(rule)
+    do_match = _rule_re.match
+    used_names = set()
+    while pos < end:
+        m = do_match(rule, pos)
+        if m is None:
+            break
+        data = m.groupdict()
+        if data["static"]:
+            yield None, None, data["static"]
+        variable = data["variable"]
+        converter = data["converter"] or "default"
+        if variable in used_names:
+            raise ValueError("variable name %r used twice." % variable)
+        used_names.add(variable)
+        yield converter, data["args"] or None, variable
+        pos = m.end()
+    if pos < end:
+        remaining = rule[pos:]
+        if ">" in remaining or "<" in remaining:
+            raise ValueError("malformed url rule: %r" % rule)
+        yield None, None, remaining
+
+
+class RoutingException(Exception):
+    """Special exceptions that require the application to redirect, notifying
+    about missing urls, etc.
+
+    :internal:
+    """
+
+
+class RequestRedirect(HTTPException, RoutingException):
+    """Raise if the map requests a redirect. This is for example the case if
+    `strict_slashes` are activated and an url that requires a trailing slash.
+
+    The attribute `new_url` contains the absolute destination url.
+    """
+
+    code = 308
+
+    def __init__(self, new_url):
+        RoutingException.__init__(self, new_url)
+        self.new_url = new_url
+
+    def get_response(self, environ):
+        return redirect(self.new_url, self.code)
+
+
+class RequestSlash(RoutingException):
+    """Internal exception."""
+
+
+class RequestAliasRedirect(RoutingException):  # noqa: B903
+    """This rule is an alias and wants to redirect to the canonical URL."""
+
+    def __init__(self, matched_values):
+        self.matched_values = matched_values
+
+
+@implements_to_string
+class BuildError(RoutingException, LookupError):
+    """Raised if the build system cannot find a URL for an endpoint with the
+    values provided.
+    """
+
+    def __init__(self, endpoint, values, method, adapter=None):
+        LookupError.__init__(self, endpoint, values, method)
+        self.endpoint = endpoint
+        self.values = values
+        self.method = method
+        self.adapter = adapter
+
+    @cached_property
+    def suggested(self):
+        return self.closest_rule(self.adapter)
+
+    def closest_rule(self, adapter):
+        def _score_rule(rule):
+            return sum(
+                [
+                    0.98
+                    * difflib.SequenceMatcher(
+                        None, rule.endpoint, self.endpoint
+                    ).ratio(),
+                    0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
+                    0.01 * bool(rule.methods and self.method in rule.methods),
+                ]
+            )
+
+        if adapter and adapter.map._rules:
+            return max(adapter.map._rules, key=_score_rule)
+
+    def __str__(self):
+        message = []
+        message.append("Could not build url for endpoint %r" % self.endpoint)
+        if self.method:
+            message.append(" (%r)" % self.method)
+        if self.values:
+            message.append(" with values %r" % sorted(self.values.keys()))
+        message.append(".")
+        if self.suggested:
+            if self.endpoint == self.suggested.endpoint:
+                if self.method and self.method not in self.suggested.methods:
+                    message.append(
+                        " Did you mean to use methods %r?"
+                        % sorted(self.suggested.methods)
+                    )
+                missing_values = self.suggested.arguments.union(
+                    set(self.suggested.defaults or ())
+                ) - set(self.values.keys())
+                if missing_values:
+                    message.append(
+                        " Did you forget to specify values %r?" % sorted(missing_values)
+                    )
+            else:
+                message.append(" Did you mean %r instead?" % self.suggested.endpoint)
+        return u"".join(message)
+
+
+class ValidationError(ValueError):
+    """Validation error.  If a rule converter raises this exception the rule
+    does not match the current URL and the next URL is tried.
+    """
+
+
+class RuleFactory(object):
+    """As soon as you have more complex URL setups it's a good idea to use rule
+    factories to avoid repetitive tasks.  Some of them are builtin, others can
+    be added by subclassing `RuleFactory` and overriding `get_rules`.
+    """
+
+    def get_rules(self, map):
+        """Subclasses of `RuleFactory` have to override this method and return
+        an iterable of rules."""
+        raise NotImplementedError()
+
+
+class Subdomain(RuleFactory):
+    """All URLs provided by this factory have the subdomain set to a
+    specific domain. For example if you want to use the subdomain for
+    the current language this can be a good setup::
+
+        url_map = Map([
+            Rule('/', endpoint='#select_language'),
+            Subdomain('<string(length=2):lang_code>', [
+                Rule('/', endpoint='index'),
+                Rule('/about', endpoint='about'),
+                Rule('/help', endpoint='help')
+            ])
+        ])
+
+    All the rules except for the ``'#select_language'`` endpoint will now
+    listen on a two letter long subdomain that holds the language code
+    for the current request.
+    """
+
+    def __init__(self, subdomain, rules):
+        self.subdomain = subdomain
+        self.rules = rules
+
+    def get_rules(self, map):
+        for rulefactory in self.rules:
+            for rule in rulefactory.get_rules(map):
+                rule = rule.empty()
+                rule.subdomain = self.subdomain
+                yield rule
+
+
+class Submount(RuleFactory):
+    """Like `Subdomain` but prefixes the URL rule with a given string::
+
+        url_map = Map([
+            Rule('/', endpoint='index'),
+            Submount('/blog', [
+                Rule('/', endpoint='blog/index'),
+                Rule('/entry/<entry_slug>', endpoint='blog/show')
+            ])
+        ])
+
+    Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
+    """
+
+    def __init__(self, path, rules):
+        self.path = path.rstrip("/")
+        self.rules = rules
+
+    def get_rules(self, map):
+        for rulefactory in self.rules:
+            for rule in rulefactory.get_rules(map):
+                rule = rule.empty()
+                rule.rule = self.path + rule.rule
+                yield rule
+
+
+class EndpointPrefix(RuleFactory):
+    """Prefixes all endpoints (which must be strings for this factory) with
+    another string. This can be useful for sub applications::
+
+        url_map = Map([
+            Rule('/', endpoint='index'),
+            EndpointPrefix('blog/', [Submount('/blog', [
+                Rule('/', endpoint='index'),
+                Rule('/entry/<entry_slug>', endpoint='show')
+            ])])
+        ])
+    """
+
+    def __init__(self, prefix, rules):
+        self.prefix = prefix
+        self.rules = rules
+
+    def get_rules(self, map):
+        for rulefactory in self.rules:
+            for rule in rulefactory.get_rules(map):
+                rule = rule.empty()
+                rule.endpoint = self.prefix + rule.endpoint
+                yield rule
+
+
+class RuleTemplate(object):
+    """Returns copies of the rules wrapped and expands string templates in
+    the endpoint, rule, defaults or subdomain sections.
+
+    Here a small example for such a rule template::
+
+        from werkzeug.routing import Map, Rule, RuleTemplate
+
+        resource = RuleTemplate([
+            Rule('/$name/', endpoint='$name.list'),
+            Rule('/$name/<int:id>', endpoint='$name.show')
+        ])
+
+        url_map = Map([resource(name='user'), resource(name='page')])
+
+    When a rule template is called the keyword arguments are used to
+    replace the placeholders in all the string parameters.
+    """
+
+    def __init__(self, rules):
+        self.rules = list(rules)
+
+    def __call__(self, *args, **kwargs):
+        return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
+
+
+class RuleTemplateFactory(RuleFactory):
+    """A factory that fills in template variables into rules.  Used by
+    `RuleTemplate` internally.
+
+    :internal:
+    """
+
+    def __init__(self, rules, context):
+        self.rules = rules
+        self.context = context
+
+    def get_rules(self, map):
+        for rulefactory in self.rules:
+            for rule in rulefactory.get_rules(map):
+                new_defaults = subdomain = None
+                if rule.defaults:
+                    new_defaults = {}
+                    for key, value in iteritems(rule.defaults):
+                        if isinstance(value, string_types):
+                            value = format_string(value, self.context)
+                        new_defaults[key] = value
+                if rule.subdomain is not None:
+                    subdomain = format_string(rule.subdomain, self.context)
+                new_endpoint = rule.endpoint
+                if isinstance(new_endpoint, string_types):
+                    new_endpoint = format_string(new_endpoint, self.context)
+                yield Rule(
+                    format_string(rule.rule, self.context),
+                    new_defaults,
+                    subdomain,
+                    rule.methods,
+                    rule.build_only,
+                    new_endpoint,
+                    rule.strict_slashes,
+                )
+
+
+@implements_to_string
+class Rule(RuleFactory):
+    """A Rule represents one URL pattern.  There are some options for `Rule`
+    that change the way it behaves and are passed to the `Rule` constructor.
+    Note that besides the rule-string all arguments *must* be keyword arguments
+    in order to not break the application on Werkzeug upgrades.
+
+    `string`
+        Rule strings basically are just normal URL paths with placeholders in
+        the format ``<converter(arguments):name>`` where the converter and the
+        arguments are optional.  If no converter is defined the `default`
+        converter is used which means `string` in the normal configuration.
+
+        URL rules that end with a slash are branch URLs, others are leaves.
+        If you have `strict_slashes` enabled (which is the default), all
+        branch URLs that are matched without a trailing slash will trigger a
+        redirect to the same URL with the missing slash appended.
+
+        The converters are defined on the `Map`.
+
+    `endpoint`
+        The endpoint for this rule. This can be anything. A reference to a
+        function, a string, a number etc.  The preferred way is using a string
+        because the endpoint is used for URL generation.
+
+    `defaults`
+        An optional dict with defaults for other rules with the same endpoint.
+        This is a bit tricky but useful if you want to have unique URLs::
+
+            url_map = Map([
+                Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
+                Rule('/all/page/<int:page>', endpoint='all_entries')
+            ])
+
+        If a user now visits ``http://example.com/all/page/1`` they will be
+        redirected to ``http://example.com/all/``.  If `redirect_defaults` is
+        disabled on the `Map` instance this will only affect the URL
+        generation.
+
+    `subdomain`
+        The subdomain rule string for this rule. If not specified the rule
+        only matches for the `default_subdomain` of the map.  If the map is
+        not bound to a subdomain this feature is disabled.
+
+        Can be useful if you want to have user profiles on different subdomains
+        and all subdomains are forwarded to your application::
+
+            url_map = Map([
+                Rule('/', subdomain='<username>', endpoint='user/homepage'),
+                Rule('/stats', subdomain='<username>', endpoint='user/stats')
+            ])
+
+    `methods`
+        A sequence of http methods this rule applies to.  If not specified, all
+        methods are allowed. For example this can be useful if you want different
+        endpoints for `POST` and `GET`.  If methods are defined and the path
+        matches but the method matched against is not in this list or in the
+        list of another rule for that path the error raised is of the type
+        `MethodNotAllowed` rather than `NotFound`.  If `GET` is present in the
+        list of methods and `HEAD` is not, `HEAD` is added automatically.
+
+        .. versionchanged:: 0.6.1
+           `HEAD` is now automatically added to the methods if `GET` is
+           present.  The reason for this is that existing code often did not
+           work properly in servers not rewriting `HEAD` to `GET`
+           automatically and it was not documented how `HEAD` should be
+           treated.  This was considered a bug in Werkzeug because of that.
+
+    `strict_slashes`
+        Override the `Map` setting for `strict_slashes` only for this rule. If
+        not specified the `Map` setting is used.
+
+    `build_only`
+        Set this to True and the rule will never match but will create a URL
+        that can be built. This is useful if you have resources on a
+        subdomain or in a folder that are not handled by the WSGI
+        application (like static data).
+
+    `redirect_to`
+        If given this must be either a string or callable.  In case of a
+        callable it's called with the url adapter that triggered the match and
+        the values of the URL as keyword arguments and has to return the target
+        for the redirect, otherwise it has to be a string with placeholders in
+        rule syntax::
+
+            def foo_with_slug(adapter, id):
+                # ask the database for the slug for the old id.  this of
+                # course has nothing to do with werkzeug.
+                return 'foo/' + Foo.get_slug_for_id(id)
+
+            url_map = Map([
+                Rule('/foo/<slug>', endpoint='foo'),
+                Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
+                Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
+            ])
+
+        When the rule is matched the routing system will raise a
+        `RequestRedirect` exception with the target for the redirect.
+
+        Keep in mind that the URL will be joined against the URL root of the
+        script so don't use a leading slash on the target URL unless you
+        really mean root of that domain.
+
+    `alias`
+        If enabled this rule serves as an alias for another rule with the same
+        endpoint and arguments.
+
+    `host`
+        If provided and the URL map has host matching enabled this can be
+        used to provide a match rule for the whole host.  This also means
+        that the subdomain feature is disabled.
+
+    .. versionadded:: 0.7
+       The `alias` and `host` parameters were added.
+    """
+
+    def __init__(
+        self,
+        string,
+        defaults=None,
+        subdomain=None,
+        methods=None,
+        build_only=False,
+        endpoint=None,
+        strict_slashes=None,
+        redirect_to=None,
+        alias=False,
+        host=None,
+    ):
+        if not string.startswith("/"):
+            raise ValueError("urls must start with a leading slash")
+        self.rule = string
+        self.is_leaf = not string.endswith("/")
+
+        self.map = None
+        self.strict_slashes = strict_slashes
+        self.subdomain = subdomain
+        self.host = host
+        self.defaults = defaults
+        self.build_only = build_only
+        self.alias = alias
+        if methods is None:
+            self.methods = None
+        else:
+            if isinstance(methods, str):
+                raise TypeError("param `methods` should be `Iterable[str]`, not `str`")
+            self.methods = set([x.upper() for x in methods])
+            if "HEAD" not in self.methods and "GET" in self.methods:
+                self.methods.add("HEAD")
+        self.endpoint = endpoint
+        self.redirect_to = redirect_to
+
+        if defaults:
+            self.arguments = set(map(str, defaults))
+        else:
+            self.arguments = set()
+        self._trace = self._converters = self._regex = self._argument_weights = None
+
+    def empty(self):
+        """
+        Return an unbound copy of this rule.
+
+        This can be useful if you want to reuse an already bound URL for
+        another map.  See ``get_empty_kwargs`` to override what keyword
+        arguments are provided to the new copy.
+        """
+        return type(self)(self.rule, **self.get_empty_kwargs())
+
+    def get_empty_kwargs(self):
+        """
+        Provides kwargs for instantiating an empty copy with ``empty()``.
+
+        Use this method to provide custom keyword arguments to the subclass of
+        ``Rule`` when calling ``some_rule.empty()``.  Helpful when the subclass
+        has custom keyword arguments that are needed at instantiation.
+
+        Must return a ``dict`` that will be provided as kwargs to the new
+        instance of ``Rule``, following the initial ``self.rule`` value which
+        is always provided as the first, required positional argument.
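+
+        A subclass with an extra constructor argument might override it
+        like this (a sketch; ``weight`` is a hypothetical argument)::
+
+            class WeightedRule(Rule):
+                def __init__(self, string, weight=0, **kwargs):
+                    super(WeightedRule, self).__init__(string, **kwargs)
+                    self.weight = weight
+
+                def get_empty_kwargs(self):
+                    kwargs = super(WeightedRule, self).get_empty_kwargs()
+                    kwargs['weight'] = self.weight
+                    return kwargs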
+        """
+        defaults = None
+        if self.defaults:
+            defaults = dict(self.defaults)
+        return dict(
+            defaults=defaults,
+            subdomain=self.subdomain,
+            methods=self.methods,
+            build_only=self.build_only,
+            endpoint=self.endpoint,
+            strict_slashes=self.strict_slashes,
+            redirect_to=self.redirect_to,
+            alias=self.alias,
+            host=self.host,
+        )
+
+    def get_rules(self, map):
+        yield self
+
+    def refresh(self):
+        """Rebinds and refreshes the URL.  Call this if you modified the
+        rule in place.
+
+        :internal:
+        """
+        self.bind(self.map, rebind=True)
+
+    def bind(self, map, rebind=False):
+        """Bind the url to a map and create a regular expression based on
+        the information from the rule itself and the defaults from the map.
+
+        :internal:
+        """
+        if self.map is not None and not rebind:
+            raise RuntimeError("url rule %r already bound to map %r" % (self, self.map))
+        self.map = map
+        if self.strict_slashes is None:
+            self.strict_slashes = map.strict_slashes
+        if self.subdomain is None:
+            self.subdomain = map.default_subdomain
+        self.compile()
+
+    def get_converter(self, variable_name, converter_name, args, kwargs):
+        """Looks up the converter for the given parameter.
+
+        .. versionadded:: 0.9
+        """
+        if converter_name not in self.map.converters:
+            raise LookupError("the converter %r does not exist" % converter_name)
+        return self.map.converters[converter_name](self.map, *args, **kwargs)
+
+    def compile(self):
+        """Compiles the regular expression and stores it."""
+        assert self.map is not None, "rule not bound"
+
+        if self.map.host_matching:
+            domain_rule = self.host or ""
+        else:
+            domain_rule = self.subdomain or ""
+
+        self._trace = []
+        self._converters = {}
+        self._static_weights = []
+        self._argument_weights = []
+        regex_parts = []
+
+        def _build_regex(rule):
+            index = 0
+            for converter, arguments, variable in parse_rule(rule):
+                if converter is None:
+                    regex_parts.append(re.escape(variable))
+                    self._trace.append((False, variable))
+                    for part in variable.split("/"):
+                        if part:
+                            self._static_weights.append((index, -len(part)))
+                else:
+                    if arguments:
+                        c_args, c_kwargs = parse_converter_args(arguments)
+                    else:
+                        c_args = ()
+                        c_kwargs = {}
+                    convobj = self.get_converter(variable, converter, c_args, c_kwargs)
+                    regex_parts.append("(?P<%s>%s)" % (variable, convobj.regex))
+                    self._converters[variable] = convobj
+                    self._trace.append((True, variable))
+                    self._argument_weights.append(convobj.weight)
+                    self.arguments.add(str(variable))
+                index = index + 1
+
+        _build_regex(domain_rule)
+        regex_parts.append("\\|")
+        self._trace.append((False, "|"))
+        _build_regex(self.rule if self.is_leaf else self.rule.rstrip("/"))
+        if not self.is_leaf:
+            self._trace.append((False, "/"))
+
+        self._build = self._compile_builder(False)
+        self._build_unknown = self._compile_builder(True)
+
+        if self.build_only:
+            return
+        regex = r"^%s%s$" % (
+            u"".join(regex_parts),
+            (not self.is_leaf or not self.strict_slashes)
+            and "(?<!/)(?P<__suffix__>/?)"
+            or "",
+        )
+        self._regex = re.compile(regex, re.UNICODE)
+
+    def match(self, path, method=None):
+        """Check if the rule matches a given path. Path is a string in the
+        form ``"subdomain|/path"`` and is assembled by the map.  If
+        the map is doing host matching the subdomain part will be the host
+        instead.
+
+        If the rule matches a dict with the converted values is returned,
+        otherwise the return value is `None`.
+
+        :internal:
+        """
+        if not self.build_only:
+            m = self._regex.search(path)
+            if m is not None:
+                groups = m.groupdict()
+                # we have a folder-like part of the URL without a trailing
+                # slash and strict slashes enabled. raise an exception that
+                # tells the map to redirect to the same URL but with a
+                # trailing slash
+                if (
+                    self.strict_slashes
+                    and not self.is_leaf
+                    and not groups.pop("__suffix__")
+                    and (
+                        method is None or self.methods is None or method in self.methods
+                    )
+                ):
+                    raise RequestSlash()
+                # if we are not in strict slashes mode we have to remove
+                # a __suffix__
+                elif not self.strict_slashes:
+                    del groups["__suffix__"]
+
+                result = {}
+                for name, value in iteritems(groups):
+                    try:
+                        value = self._converters[name].to_python(value)
+                    except ValidationError:
+                        return
+                    result[str(name)] = value
+                if self.defaults:
+                    result.update(self.defaults)
+
+                if self.alias and self.map.redirect_defaults:
+                    raise RequestAliasRedirect(result)
+
+                return result
+
+    class BuilderCompiler(object):
+        JOIN_EMPTY = "".join
+        if sys.version_info >= (3, 6):
+            OPARG_SIZE = 256
+            OPARG_VARI = False
+        else:
+            OPARG_SIZE = 65536
+            OPARG_VARI = True
+
+        def __init__(self, rule):
+            self.rule = rule
+            self.consts = []
+            self.const_table = {}
+            self.var = []
+            self.var_table = {}
+            self.argdefs = ()
+            self.defaults = dict(self.rule.defaults or {})
+
+        def get_const(self, x):
+            """Return a constant ID for an object, adding it to the pool
+            if not already present.
+            """
+            if x not in self.const_table:
+                self.const_table[x] = len(self.consts)
+                self.consts.append(x)
+            return self.const_table[x]
+
+        def get_var(self, x):
+            """Return a local variable ID for a name, adding it to the
+            pool if not already present.
+
+            Our only use for local variables is as function arguments:
+            any variable name that exists before the call to
+            ``add_defaults()`` will become one.
+            """
+            x = str(x)
+            if x not in self.var_table:
+                self.var_table[x] = len(self.var)
+                self.var.append(x)
+            return self.var_table[x]
+
+        def add_defaults(self):
+            """A rule builder is allowed to receive any of its defaults
+            as arguments. We don't bother to check that they match
+            anywhere, since ``suitable_for()`` should have already done
+            that, but we do need them to be optional arguments. Since
+            their values are known at compile-time, the builder will
+            never refer to these arguments.
+            """
+            # ensure every default exists
+            for k in self.defaults.keys():
+                self.get_var(k)
+            # reorder to put anything with a default at the end
+            req = []
+            opt = []
+            defs = []
+            for k in self.var:
+                if k in self.defaults:
+                    opt.append(k)
+                    defs.append(self.defaults[k])
+                else:
+                    req.append(k)
+            self.var = req + opt
+            self.argdefs = tuple(defs)
+            for i, k in enumerate(self.var):
+                self.var_table[k] = i
+
+        def collapse_constants(self, opl):
+            """Given a list of build operations, spit out a new list
+            with runs of constant elements joined."""
+            new = []
+            for op, elem in opl:
+                if op is not None:
+                    new.append((op, elem))
+                    continue
+                if elem == "":
+                    continue
+                if not new or new[-1][0] is not None:
+                    new.append((op, elem))
+                    continue
+                new[-1] = (None, new[-1][1] + elem)
+            if not new:
+                new.append((None, ""))
+            return new
+
+        def build_op(self, op, arg=None):
+            """Return a byte representation of a Python instruction."""
+            if isinstance(op, str):
+                op = dis.opmap[op]
+            if arg is None and op >= dis.HAVE_ARGUMENT:
+                raise ValueError("Operation requires an argument: %s" % dis.opname[op])
+            if arg is not None and op < dis.HAVE_ARGUMENT:
+                raise ValueError("Operation takes no argument: %s" % dis.opname[op])
+            if arg is None:
+                arg = 0
+            # Python 3.6 shrank opcode arguments to 8 bits, so overflowing
+            # into EXTENDED_ARG is a practical concern there
+            if arg >= self.OPARG_SIZE:
+                return self.build_op(
+                    "EXTENDED_ARG", arg // self.OPARG_SIZE
+                ) + self.build_op(op, arg % self.OPARG_SIZE)
+            if not self.OPARG_VARI:
+                return bytearray((op, arg))
+            elif op >= dis.HAVE_ARGUMENT:
+                return bytearray((op, arg % 256, arg // 256))
+            else:
+                return bytearray((op,))
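+
+        # Illustrative sketch: on CPython 3.6/3.7 (``OPARG_SIZE == 256``,
+        # EXTENDED_ARG is opcode 144, LOAD_CONST is 100) an argument of 300
+        # overflows into EXTENDED_ARG because 300 == 1 * 256 + 44
+        # (``bc`` stands for any compiler instance):
+        #
+        #   >>> bytes(bc.build_op("LOAD_CONST", 300))
+        #   b'\x90\x01d,'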
+
+        def build_string(self, n):
+            """Return the correct opcode(s) for building a string from
+            ``n`` elements. If the ``''.join`` crutch is needed, it must
+            already be immediately below the string elements on the
+            stack.
+            """
+            if "BUILD_STRING" in dis.opmap:
+                return self.build_op("BUILD_STRING", n)
+            else:
+                return self.build_op("BUILD_TUPLE", n) + self.build_op(
+                    "CALL_FUNCTION", 1
+                )
+
+        def emit_build(
+            self, ind, opl, append_unknown=False, encode_query_vars=None, kwargs=None
+        ):
+            ops = b""
+            n = len(opl)
+            stack = 0
+            stack_overhead = 0
+
+            for op, elem in opl:
+                if op is None:
+                    ops += self.build_op("LOAD_CONST", self.get_const(elem))
+                    stack_overhead = 0
+                    continue
+                ops += self.build_op("LOAD_CONST", self.get_const(op))
+                ops += self.build_op("LOAD_FAST", self.get_var(elem))
+                ops += self.build_op("CALL_FUNCTION", 1)
+                stack_overhead = 2
+
+            stack += len(opl)
+            peak_stack = stack + stack_overhead
+            dont_build_string = False
+            needs_build_string = "BUILD_STRING" not in dis.opmap
+
+            if n <= 1:
+                dont_build_string = True
+                needs_build_string = False
+
+            if append_unknown:
+                if "BUILD_STRING" not in dis.opmap:
+                    needs_build_string = True
+                    ops = (
+                        self.build_op("LOAD_CONST", self.get_const(self.JOIN_EMPTY))
+                        + ops
+                    )
+                ops += self.build_op("LOAD_FAST", kwargs)
+
+                # assemble this in its own buffers because we need to
+                # jump over it
+                uops = bytearray()  # run if kwargs. TOS=kwargs
+                uops += self.build_op("LOAD_CONST", self.get_const(encode_query_vars))
+                uops += self.build_op("ROT_TWO")
+                uops += self.build_op("CALL_FUNCTION", 1)
+                uops += self.build_op("LOAD_CONST", self.get_const("?"))
+                uops += self.build_op("ROT_TWO")
+                if dont_build_string:
+                    uops += self.build_string(n + 2)
+
+                nops = bytearray()  # otherwise
+                if not dont_build_string:
+                    # if we're going to build a string, we need to pad out to
+                    # a constant length
+                    nops += self.build_op("LOAD_CONST", self.get_const(""))
+                    nops += self.build_op("DUP_TOP")
+                elif needs_build_string:
+                    # we inserted the ''.join reference at the bottom of the
+                    # stack, but we don't want to call it: throw it away
+                    nops += self.build_op("ROT_TWO")
+                    nops += self.build_op("POP_TOP")
+                nops += self.build_op("JUMP_FORWARD", len(uops))
+
+                # this jump needs to take its own length into account. the
+                # simple way to do that is to compute a minimal guess for the
+                # length of the jump instruction, and keep revising it upward
+                jump_op = self.build_op("JUMP_IF_TRUE_OR_POP", 0)
+                while True:
+                    jump_len = len(jump_op)
+                    jump_target = ind + len(ops) + jump_len + len(nops)
+                    jump_op = self.build_op("JUMP_IF_TRUE_OR_POP", jump_target)
+                    assert len(jump_op) >= jump_len
+                    if len(jump_op) == jump_len:
+                        break
+
+                ops += jump_op
+                ops += nops
+                ops += uops
+                stack += 1
+                n += 2
+                peak_stack = max(peak_stack, stack + 2)
+            elif needs_build_string:
+                ops = self.build_op("LOAD_CONST", self.get_const(self.JOIN_EMPTY)) + ops
+                peak_stack += 1
+            if not dont_build_string:
+                ops += self.build_string(n)
+            return peak_stack, ops
+
+        def compile(self, append_unknown=True):
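+            # 0x08 is CO_VARKEYWORDS: the generated builder accepts
+            # ``**kwargs``, collected under the reserved local name
+            # ``.keyword_arguments`` below.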
+            flags = 0x08
+            dom_ops = []
+            url_ops = []
+            opl = dom_ops
+            if append_unknown:
+                encode_query_vars = partial(
+                    url_encode,
+                    charset=self.rule.map.charset,
+                    sort=self.rule.map.sort_parameters,
+                    key=self.rule.map.sort_key,
+                )
+            for is_dynamic, data in self.rule._trace:
+                if data == "|" and opl is dom_ops:
+                    opl = url_ops
+                    continue
+                # this seems like a silly case to ever come up but:
+                # if a default is given for a value that appears in the rule,
+                # resolve it to a constant ahead of time
+                if is_dynamic and data in self.defaults:
+                    data = self.rule._converters[data].to_url(self.defaults[data])
+                    is_dynamic = False
+                if not is_dynamic:
+                    opl.append(
+                        (
+                            None,
+                            url_quote(
+                                to_bytes(data, self.rule.map.charset), safe="/:|+"
+                            ),
+                        )
+                    )
+                    continue
+                opl.append((self.rule._converters[data].to_url, data))
+            dom_ops = self.collapse_constants(dom_ops)
+            url_ops = self.collapse_constants(url_ops)
+            for op, elem in dom_ops + url_ops:
+                if op is not None:
+                    self.get_var(elem)
+            self.add_defaults()
+            argcount = len(self.var)
+            # invalid name for paranoia reasons
+            self.get_var(".keyword_arguments")
+            stack = 0
+            peak_stack = 0
+            ops = b""
+            if (
+                not append_unknown
+                and len(dom_ops) == len(url_ops) == 1
+                and dom_ops[0][0] is url_ops[0][0] is None
+            ):
+                # shortcut: just return the constant
+                stack = peak_stack = 1
+                constant_value = (dom_ops[0][1], url_ops[0][1])
+                ops += self.build_op("LOAD_CONST", self.get_const(constant_value))
+            else:
+                ps, rv = self.emit_build(len(ops), dom_ops)
+                ops += rv
+                peak_stack = max(stack + ps, peak_stack)
+                stack += 1
+                if append_unknown:
+                    ps, rv = self.emit_build(
+                        len(ops), url_ops, append_unknown, encode_query_vars, argcount
+                    )
+                else:
+                    ps, rv = self.emit_build(len(ops), url_ops)
+                ops += rv
+                peak_stack = max(stack + ps, peak_stack)
+                ops += self.build_op("BUILD_TUPLE", 2)
+            ops += self.build_op("RETURN_VALUE")
+            code_args = [  # CodeType doesn't take keywords
+                argcount,  # argcount
+                len(self.var),  # nlocals
+                peak_stack + len(self.var),  # stacksize
+                flags,  # flags
+                bytes(ops),  # codestring
+                tuple(self.consts),  # constants
+                (),  # names
+                tuple(self.var),  # varnames
+                "<werkzeug routing>",  # filename, coverage ignores "<"
+                "<builder:%r>" % self.rule.rule,  # name
+                1,  # firstlineno
+                b"",  # lnotab
+            ]
+            if not PY2:
+                code_args[1:1] = [0]  # kwonlyargcount
+            co = types.CodeType(*code_args)
+            fn = types.FunctionType(co, {}, None, self.argdefs)
+            return fn
+
+    def _compile_builder(self, append_unknown=True):
+        """Generate a function that builds this rule.
+
+        :internal:
+        """
+        return self.BuilderCompiler(self).compile(append_unknown)
+
+    def build(self, values, append_unknown=True):
+        """Assembles the relative url for that rule and the subdomain.
+        If building doesn't work for some reasons `None` is returned.
+
+        :internal:
+        """
+        try:
+            if append_unknown:
+                return self._build_unknown(**values)
+            else:
+                return self._build(**values)
+        except ValidationError:
+            return None
+
+    def provides_defaults_for(self, rule):
+        """Check if this rule has defaults for a given rule.
+
+        :internal:
+        """
+        return (
+            not self.build_only
+            and self.defaults
+            and self.endpoint == rule.endpoint
+            and self != rule
+            and self.arguments == rule.arguments
+        )
+
+    def suitable_for(self, values, method=None):
+        """Check if the dict of values has enough data for url generation.
+
+        :internal:
+        """
+        # if a method was given explicitly and that method is not supported
+        # by this rule, this rule is not suitable.
+        if (
+            method is not None
+            and self.methods is not None
+            and method not in self.methods
+        ):
+            return False
+
+        defaults = self.defaults or ()
+
+        # all arguments required must be either in the defaults dict or
+        # the value dictionary otherwise it's not suitable
+        for key in self.arguments:
+            if key not in defaults and key not in values:
+                return False
+
+        # in case defaults are given we ensure that either the value was
+        # skipped or the value is the same as the default value.
+        if defaults:
+            for key, value in iteritems(defaults):
+                if key in values and value != values[key]:
+                    return False
+
+        return True
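+
+    # Illustrative sketch: for a rule ``/page/<int:page>`` with
+    # ``defaults={'page': 1}``, ``suitable_for({})`` and
+    # ``suitable_for({'page': 1})`` are true because the default covers
+    # the argument, while ``suitable_for({'page': 2})`` is false because
+    # the value conflicts with the default.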
+
+    def match_compare_key(self):
+        """The match compare key for sorting.
+
+        Current implementation:
+
+        1.  Rules without any arguments come first for performance
+            reasons only, as we expect them to match faster and some
+            common ones usually don't have any arguments (index pages etc.)
+        2.  Rules with more static parts come first, so the second argument
+            is the negated number of static weights.
+        3.  We order by the static weights, which are a combination of
+            index and length.
+        4.  More complex rules come first, so the next argument is the
+            negated number of argument weights.
+        5.  Lastly we order by the actual argument weights.
+
+        :internal:
+        """
+        return (
+            bool(self.arguments),
+            -len(self._static_weights),
+            self._static_weights,
+            -len(self._argument_weights),
+            self._argument_weights,
+        )
+
+    def build_compare_key(self):
+        """The build compare key for sorting.
+
+        :internal:
+        """
+        return 1 if self.alias else 0, -len(self.arguments), -len(self.defaults or ())
+
+    def __eq__(self, other):
+        return self.__class__ is other.__class__ and self._trace == other._trace
+
+    __hash__ = None
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __str__(self):
+        return self.rule
+
+    @native_string_result
+    def __repr__(self):
+        if self.map is None:
+            return u"<%s (unbound)>" % self.__class__.__name__
+        tmp = []
+        for is_dynamic, data in self._trace:
+            if is_dynamic:
+                tmp.append(u"<%s>" % data)
+            else:
+                tmp.append(data)
+        return u"<%s %s%s -> %s>" % (
+            self.__class__.__name__,
+            repr((u"".join(tmp)).lstrip(u"|")).lstrip(u"u"),
+            self.methods is not None and u" (%s)" % u", ".join(self.methods) or u"",
+            self.endpoint,
+        )
+
+
+class BaseConverter(object):
+    """Base class for all converters."""
+
+    regex = "[^/]+"
+    weight = 100
+
+    def __init__(self, map):
+        self.map = map
+
+    def to_python(self, value):
+        return value
+
+    def to_url(self, value):
+        return _fast_url_quote(text_type(value).encode(self.map.charset))
+
+
+class UnicodeConverter(BaseConverter):
+    """This converter is the default converter and accepts any string but
+    only one path segment.  Thus the string can not include a slash.
+
+    This is the default validator.
+
+    Example::
+
+        Rule('/pages/<page>'),
+        Rule('/<string(length=2):lang_code>')
+
+    :param map: the :class:`Map`.
+    :param minlength: the minimum length of the string.  Must be greater
+                      than or equal to 1.
+    :param maxlength: the maximum length of the string.
+    :param length: the exact length of the string.
+    """
+
+    def __init__(self, map, minlength=1, maxlength=None, length=None):
+        BaseConverter.__init__(self, map)
+        if length is not None:
+            length = "{%d}" % int(length)
+        else:
+            if maxlength is None:
+                maxlength = ""
+            else:
+                maxlength = int(maxlength)
+            length = "{%s,%s}" % (int(minlength), maxlength)
+        self.regex = "[^/]" + length
+
+
+class AnyConverter(BaseConverter):
+    """Matches one of the items provided.  Items can either be Python
+    identifiers or strings::
+
+        Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')
+
+    :param map: the :class:`Map`.
+    :param items: this function accepts the possible items as positional
+                  arguments.
+    """
+
+    def __init__(self, map, *items):
+        BaseConverter.__init__(self, map)
+        self.regex = "(?:%s)" % "|".join([re.escape(x) for x in items])
+
+
+class PathConverter(BaseConverter):
+    """Like the default :class:`UnicodeConverter`, but it also matches
+    slashes.  This is useful for wikis and similar applications::
+
+        Rule('/<path:wikipage>')
+        Rule('/<path:wikipage>/edit')
+
+    :param map: the :class:`Map`.
+    """
+
+    regex = "[^/].*?"
+    weight = 200
+
+
+class NumberConverter(BaseConverter):
+    """Baseclass for `IntegerConverter` and `FloatConverter`.
+
+    :internal:
+    """
+
+    weight = 50
+
+    def __init__(self, map, fixed_digits=0, min=None, max=None, signed=False):
+        if signed:
+            self.regex = self.signed_regex
+        BaseConverter.__init__(self, map)
+        self.fixed_digits = fixed_digits
+        self.min = min
+        self.max = max
+        self.signed = signed
+
+    def to_python(self, value):
+        if self.fixed_digits and len(value) != self.fixed_digits:
+            raise ValidationError()
+        value = self.num_convert(value)
+        if (self.min is not None and value < self.min) or (
+            self.max is not None and value > self.max
+        ):
+            raise ValidationError()
+        return value
+
+    def to_url(self, value):
+        value = self.num_convert(value)
+        if self.fixed_digits:
+            value = ("%%0%sd" % self.fixed_digits) % value
+        return str(value)
+
+    @property
+    def signed_regex(self):
+        return r"-?" + self.regex
+
+
+class IntegerConverter(NumberConverter):
+    """This converter only accepts integer values::
+
+        Rule("/page/<int:page>")
+
+    By default it only accepts unsigned, positive values. The ``signed``
+    parameter will enable signed, negative values. ::
+
+        Rule("/page/<int(signed=True):page>")
+
+    :param map: The :class:`Map`.
+    :param fixed_digits: The number of fixed digits in the URL. If you
+        set this to ``4`` for example, the rule will only match if the
+        URL looks like ``/0001/``. The default is variable length.
+    :param min: The minimal value.
+    :param max: The maximal value.
+    :param signed: Allow signed (negative) values.
+
+    .. versionadded:: 0.15
+        The ``signed`` parameter.
+    """
+
+    regex = r"\d+"
+    num_convert = int
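+
+# Illustrative sketch: the ``signed`` flag simply prepends ``-?`` to the
+# class pattern.
+#
+#   >>> IntegerConverter(None, signed=True).regex == r"-?\d+"
+#   True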
+
+
+class FloatConverter(NumberConverter):
+    """This converter only accepts floating point values::
+
+        Rule("/probability/<float:probability>")
+
+    By default it only accepts unsigned, positive values. The ``signed``
+    parameter will enable signed, negative values. ::
+
+        Rule("/offset/<float(signed=True):offset>")
+
+    :param map: The :class:`Map`.
+    :param min: The minimal value.
+    :param max: The maximal value.
+    :param signed: Allow signed (negative) values.
+
+    .. versionadded:: 0.15
+        The ``signed`` parameter.
+    """
+
+    regex = r"\d+\.\d+"
+    num_convert = float
+
+    def __init__(self, map, min=None, max=None, signed=False):
+        NumberConverter.__init__(self, map, min=min, max=max, signed=signed)
+
+
+class UUIDConverter(BaseConverter):
+    """This converter only accepts UUID strings::
+
+        Rule('/object/<uuid:identifier>')
+
+    .. versionadded:: 0.10
+
+    :param map: the :class:`Map`.
+    """
+
+    regex = (
+        r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-"
+        r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"
+    )
+
+    def to_python(self, value):
+        return uuid.UUID(value)
+
+    def to_url(self, value):
+        return str(value)
+
+
+#: the default converter mapping for the map.
+DEFAULT_CONVERTERS = {
+    "default": UnicodeConverter,
+    "string": UnicodeConverter,
+    "any": AnyConverter,
+    "path": PathConverter,
+    "int": IntegerConverter,
+    "float": FloatConverter,
+    "uuid": UUIDConverter,
+}
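+
+# Illustrative sketch (not part of werkzeug): additional converters can be
+# registered per map via the ``converters`` argument of :class:`Map` below.
+# ``BooleanConverter`` is a hypothetical example.
+#
+#   >>> class BooleanConverter(BaseConverter):
+#   ...     regex = "(?:yes|no)"
+#   ...     def to_python(self, value):
+#   ...         return value == "yes"
+#   ...     def to_url(self, value):
+#   ...         return "yes" if value else "no"
+#   >>> m = Map([Rule("/answer/<bool:flag>", endpoint="answer")],
+#   ...         converters={"bool": BooleanConverter})
+#   >>> m.bind("example.com").match("/answer/yes")
+#   ('answer', {'flag': True})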
+
+
+class Map(object):
+    """The map class stores all the URL rules and some configuration
+    parameters.  Some of the configuration values are only stored on the
+    `Map` instance since those affect all rules, others are just defaults
+    and can be overridden for each rule.  Note that you have to specify all
+    arguments besides the `rules` as keyword arguments!
+
+    :param rules: sequence of url rules for this map.
+    :param default_subdomain: The default subdomain for rules without a
+                              subdomain defined.
+    :param charset: charset of the URL.  Defaults to ``"utf-8"``.
+    :param strict_slashes: Take care of trailing slashes.
+    :param redirect_defaults: This will redirect to the default rule if it
+                              wasn't visited that way. This helps create
+                              unique URLs.
+    :param converters: A dict of converters added to the default
+                       converters.  Redefining an existing converter
+                       overrides the original one.
+    :param sort_parameters: If set to `True` the url parameters are sorted.
+                            See `url_encode` for more details.
+    :param sort_key: The sort key function for `url_encode`.
+    :param encoding_errors: the error method to use for decoding
+    :param host_matching: if set to `True` it enables the host matching
+                          feature and disables the subdomain one.  If
+                          enabled the `host` parameter to rules is used
+                          instead of the `subdomain` one.
+
+    .. versionadded:: 0.5
+        `sort_parameters` and `sort_key` were added.
+
+    .. versionadded:: 0.7
+        `encoding_errors` and `host_matching` were added.
+    """
+
+    #: A dict of default converters to be used.
+    default_converters = ImmutableDict(DEFAULT_CONVERTERS)
+
+    def __init__(
+        self,
+        rules=None,
+        default_subdomain="",
+        charset="utf-8",
+        strict_slashes=True,
+        redirect_defaults=True,
+        converters=None,
+        sort_parameters=False,
+        sort_key=None,
+        encoding_errors="replace",
+        host_matching=False,
+    ):
+        self._rules = []
+        self._rules_by_endpoint = {}
+        self._remap = True
+        self._remap_lock = Lock()
+
+        self.default_subdomain = default_subdomain
+        self.charset = charset
+        self.encoding_errors = encoding_errors
+        self.strict_slashes = strict_slashes
+        self.redirect_defaults = redirect_defaults
+        self.host_matching = host_matching
+
+        self.converters = self.default_converters.copy()
+        if converters:
+            self.converters.update(converters)
+
+        self.sort_parameters = sort_parameters
+        self.sort_key = sort_key
+
+        for rulefactory in rules or ():
+            self.add(rulefactory)
+
+    def is_endpoint_expecting(self, endpoint, *arguments):
+        """Iterate over all rules and check if the endpoint expects
+        the arguments provided.  This is for example useful if you have
+        some URLs that expect a language code and others that do not and
+        you want to wrap the builder a bit so that the current language
+        code is automatically added if not provided but endpoints expect
+        it.
+
+        :param endpoint: the endpoint to check.
+        :param arguments: this function accepts one or more arguments
+                          as positional arguments.  Each one of them is
+                          checked.
+        """
+        self.update()
+        arguments = set(arguments)
+        for rule in self._rules_by_endpoint[endpoint]:
+            if arguments.issubset(rule.arguments):
+                return True
+        return False
+
+    def iter_rules(self, endpoint=None):
+        """Iterate over all rules or the rules of an endpoint.
+
+        :param endpoint: if provided only the rules for that endpoint
+                         are returned.
+        :return: an iterator
+        """
+        self.update()
+        if endpoint is not None:
+            return iter(self._rules_by_endpoint[endpoint])
+        return iter(self._rules)
+
+    def add(self, rulefactory):
+        """Add a new rule or factory to the map and bind it.  Requires that the
+        rule is not bound to another map.
+
+        :param rulefactory: a :class:`Rule` or :class:`RuleFactory`
+        """
+        for rule in rulefactory.get_rules(self):
+            rule.bind(self)
+            self._rules.append(rule)
+            self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
+        self._remap = True
+
+    def bind(
+        self,
+        server_name,
+        script_name=None,
+        subdomain=None,
+        url_scheme="http",
+        default_method="GET",
+        path_info=None,
+        query_args=None,
+    ):
+        """Return a new :class:`MapAdapter` with the details specified to the
+        call.  Note that `script_name` will default to ``'/'`` if not further
+        specified or `None`.  The `server_name` at least is a requirement
+        because the HTTP RFC requires absolute URLs for redirects and so all
+        redirect exceptions raised by Werkzeug will contain the full canonical
+        URL.
+
+        If no path_info is passed to :meth:`match` it will use the default path
+        info passed to bind.  While this doesn't really make sense for
+        manual bind calls, it's useful if you bind a map to a WSGI
+        environment which already contains the path info.
+
+        `subdomain` will default to the `default_subdomain` for this map if
+        not defined. If there is no `default_subdomain` you cannot use the
+        subdomain feature.
+
+        .. versionadded:: 0.7
+           `query_args` added
+
+        .. versionadded:: 0.8
+           `query_args` can now also be a string.
+
+        .. versionchanged:: 0.15
+            ``path_info`` defaults to ``'/'`` if ``None``.
+        """
+        server_name = server_name.lower()
+        if self.host_matching:
+            if subdomain is not None:
+                raise RuntimeError("host matching enabled and a subdomain was provided")
+        elif subdomain is None:
+            subdomain = self.default_subdomain
+        if script_name is None:
+            script_name = "/"
+        if path_info is None:
+            path_info = "/"
+        try:
+            server_name = _encode_idna(server_name)
+        except UnicodeError:
+            raise BadHost()
+        return MapAdapter(
+            self,
+            server_name,
+            script_name,
+            subdomain,
+            url_scheme,
+            path_info,
+            default_method,
+            query_args,
+        )
+
+    def bind_to_environ(self, environ, server_name=None, subdomain=None):
+        """Like :meth:`bind` but you can pass it an WSGI environment and it
+        will fetch the information from that dictionary.  Note that because of
+        limitations in the protocol there is no way to get the current
+        subdomain and real `server_name` from the environment.  If you don't
+        provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
+        `HTTP_HOST` if provided) as used `server_name` with disabled subdomain
+        feature.
+
+        If `subdomain` is `None` but an environment and a server name are
+        provided, it will calculate the current subdomain automatically.
+        For example, if `server_name` is ``'example.com'`` and the
+        `SERVER_NAME` in the WSGI `environ` is ``'staging.dev.example.com'``,
+        the calculated subdomain will be ``'staging.dev'``.
+
+        If the object passed as environ has an environ attribute, the value of
+        this attribute is used instead.  This allows you to pass request
+        objects.  Additionally, `PATH_INFO` is added as a default of the
+        :class:`MapAdapter` so that you don't have to pass the path info to
+        the match method.
+
+        .. versionchanged:: 0.5
+            previously this method accepted a bogus `calculate_subdomain`
+            parameter that did not have any effect.  It was removed because
+            of that.
+
+        .. versionchanged:: 0.8
+           This will no longer raise a ValueError when an unexpected server
+           name was passed.
+
+        :param environ: a WSGI environment.
+        :param server_name: an optional server name hint (see above).
+        :param subdomain: optionally the current subdomain (see above).
+        """
+        environ = _get_environ(environ)
+
+        wsgi_server_name = get_host(environ).lower()
+
+        if server_name is None:
+            server_name = wsgi_server_name
+        else:
+            server_name = server_name.lower()
+
+        if subdomain is None and not self.host_matching:
+            cur_server_name = wsgi_server_name.split(".")
+            real_server_name = server_name.split(".")
+            offset = -len(real_server_name)
+            if cur_server_name[offset:] != real_server_name:
+                # This can happen even with valid configs if the server was
+                # accessed directly by IP address in some situations.
+                # Instead of raising an exception like in Werkzeug 0.7 or
+                # earlier we go by an invalid subdomain which will result
+                # in a 404 error on matching.
+                subdomain = "<invalid>"
+            else:
+                subdomain = ".".join(filter(None, cur_server_name[:offset]))
+
+        def _get_wsgi_string(name):
+            val = environ.get(name)
+            if val is not None:
+                return wsgi_decoding_dance(val, self.charset)
+
+        script_name = _get_wsgi_string("SCRIPT_NAME")
+        path_info = _get_wsgi_string("PATH_INFO")
+        query_args = _get_wsgi_string("QUERY_STRING")
+        return Map.bind(
+            self,
+            server_name,
+            script_name,
+            subdomain,
+            environ["wsgi.url_scheme"],
+            environ["REQUEST_METHOD"],
+            path_info,
+            query_args=query_args,
+        )
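+
+    # Illustrative sketch of the subdomain calculation described above
+    # (``m`` stands for any :class:`Map` and ``env`` for a full WSGI
+    # environ whose ``HTTP_HOST`` is ``staging.dev.example.com``):
+    #
+    #   >>> adapter = m.bind_to_environ(env, server_name="example.com")
+    #   >>> adapter.subdomain == "staging.dev"
+    #   True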
+
+    def update(self):
+        """Called before matching and building to keep the compiled rules
+        in the correct order after things changed.
+        """
+        if not self._remap:
+            return
+
+        with self._remap_lock:
+            if not self._remap:
+                return
+
+            self._rules.sort(key=lambda x: x.match_compare_key())
+            for rules in itervalues(self._rules_by_endpoint):
+                rules.sort(key=lambda x: x.build_compare_key())
+            self._remap = False
+
+    def __repr__(self):
+        rules = self.iter_rules()
+        return "%s(%s)" % (self.__class__.__name__, pformat(list(rules)))
+
+
+class MapAdapter(object):
+
+    """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
+    the URL matching and building based on runtime information.
+    """
+
+    def __init__(
+        self,
+        map,
+        server_name,
+        script_name,
+        subdomain,
+        url_scheme,
+        path_info,
+        default_method,
+        query_args=None,
+    ):
+        self.map = map
+        self.server_name = to_unicode(server_name)
+        script_name = to_unicode(script_name)
+        if not script_name.endswith(u"/"):
+            script_name += u"/"
+        self.script_name = script_name
+        self.subdomain = to_unicode(subdomain)
+        self.url_scheme = to_unicode(url_scheme)
+        self.path_info = to_unicode(path_info)
+        self.default_method = to_unicode(default_method)
+        self.query_args = query_args
+
+    def dispatch(
+        self, view_func, path_info=None, method=None, catch_http_exceptions=False
+    ):
+        """Does the complete dispatching process.  `view_func` is called with
+        the endpoint and a dict with the values for the view.  It should
+        look up the view function, call it, and return a response object
+        or WSGI application.  http exceptions are not caught by default
+        so that applications can display nicer error messages by just
+        catching them by hand.  If you want to stick with the default
+        error messages you can pass it ``catch_http_exceptions=True`` and
+        it will catch the http exceptions.
+
+        Here a small example for the dispatch usage::
+
+            from werkzeug.wrappers import Request, Response
+            from werkzeug.wsgi import responder
+            from werkzeug.routing import Map, Rule
+
+            def on_index(request):
+                return Response('Hello from the index')
+
+            url_map = Map([Rule('/', endpoint='index')])
+            views = {'index': on_index}
+
+            @responder
+            def application(environ, start_response):
+                request = Request(environ)
+                urls = url_map.bind_to_environ(environ)
+                return urls.dispatch(lambda e, v: views[e](request, **v),
+                                     catch_http_exceptions=True)
+
+        Keep in mind that this method might return exception objects, too, so
+        use :class:`Response.force_type` to get a response object.
+
+        :param view_func: a function that is called with the endpoint as
+                          first argument and the value dict as second.  Has
+                          to dispatch to the actual view function with this
+                          information.  (see above)
+        :param path_info: the path info to use for matching.  Overrides the
+                          path info specified on binding.
+        :param method: the HTTP method used for matching.  Overrides the
+                       method specified on binding.
+        :param catch_http_exceptions: set to `True` to catch any of the
+                                      werkzeug :class:`HTTPException`\\s.
+        """
+        try:
+            try:
+                endpoint, args = self.match(path_info, method)
+            except RequestRedirect as e:
+                return e
+            return view_func(endpoint, args)
+        except HTTPException as e:
+            if catch_http_exceptions:
+                return e
+            raise
+
+    def match(self, path_info=None, method=None, return_rule=False, query_args=None):
+        """The usage is simple: you just pass the match method the current
+        path info as well as the method (which defaults to `GET`).  The
+        following things can then happen:
+
+        - you receive a `NotFound` exception that indicates that no URL
+          matched.  A `NotFound` exception is also a WSGI application you
+          can call to get a default "page not found" page (it happens to
+          be the same object as `werkzeug.exceptions.NotFound`)
+
+        - you receive a `MethodNotAllowed` exception that indicates that there
+          is a match for this URL but not for the current request method.
+          This is useful for RESTful applications.
+
+        - you receive a `RequestRedirect` exception with a `new_url`
+          attribute.  This exception is used to notify you about a redirect
+          that Werkzeug wants your WSGI application to perform, for example
+          if you request ``/foo`` although the correct URL is ``/foo/``.
+          You can use the `RequestRedirect` instance as a response-like
+          object, similar to all other subclasses of `HTTPException`.
+
+        - you get a tuple in the form ``(endpoint, arguments)`` if there is
+          a match (unless `return_rule` is True, in which case you get a tuple
+          in the form ``(rule, arguments)``)
+
+        If the path info is not passed to the match method the default path
+        info of the map is used (defaults to the root URL if not defined
+        explicitly).
+
+        All of the exceptions raised are subclasses of `HTTPException` so they
+        can be used as WSGI responses. They will all render generic error or
+        redirect pages.
+
+        Here is a small example of matching:
+
+        >>> m = Map([
+        ...     Rule('/', endpoint='index'),
+        ...     Rule('/downloads/', endpoint='downloads/index'),
+        ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
+        ... ])
+        >>> urls = m.bind("example.com", "/")
+        >>> urls.match("/", "GET")
+        ('index', {})
+        >>> urls.match("/downloads/42")
+        ('downloads/show', {'id': 42})
+
+        And here is what happens on redirect and missing URLs:
+
+        >>> urls.match("/downloads")
+        Traceback (most recent call last):
+          ...
+        RequestRedirect: http://example.com/downloads/
+        >>> urls.match("/missing")
+        Traceback (most recent call last):
+          ...
+        NotFound: 404 Not Found
+
+        :param path_info: the path info to use for matching.  Overrides the
+                          path info specified on binding.
+        :param method: the HTTP method used for matching.  Overrides the
+                       method specified on binding.
+        :param return_rule: return the rule that matched instead of just the
+                            endpoint (defaults to `False`).
+        :param query_args: optional query arguments that are used for
+                           automatic redirects, as a string or dictionary.
+                           It's currently not possible to use the query
+                           arguments for URL matching.
+
+        .. versionadded:: 0.6
+           `return_rule` was added.
+
+        .. versionadded:: 0.7
+           `query_args` was added.
+
+        .. versionchanged:: 0.8
+           `query_args` can now also be a string.
+        """
+        self.map.update()
+        if path_info is None:
+            path_info = self.path_info
+        else:
+            path_info = to_unicode(path_info, self.map.charset)
+        if query_args is None:
+            query_args = self.query_args
+        method = (method or self.default_method).upper()
+
+        path = u"%s|%s" % (
+            self.map.host_matching and self.server_name or self.subdomain,
+            path_info and "/%s" % path_info.lstrip("/"),
+        )
+
+        have_match_for = set()
+        for rule in self.map._rules:
+            try:
+                rv = rule.match(path, method)
+            except RequestSlash:
+                raise RequestRedirect(
+                    self.make_redirect_url(
+                        url_quote(path_info, self.map.charset, safe="/:|+") + "/",
+                        query_args,
+                    )
+                )
+            except RequestAliasRedirect as e:
+                raise RequestRedirect(
+                    self.make_alias_redirect_url(
+                        path, rule.endpoint, e.matched_values, method, query_args
+                    )
+                )
+            if rv is None:
+                continue
+            if rule.methods is not None and method not in rule.methods:
+                have_match_for.update(rule.methods)
+                continue
+
+            if self.map.redirect_defaults:
+                redirect_url = self.get_default_redirect(rule, method, rv, query_args)
+                if redirect_url is not None:
+                    raise RequestRedirect(redirect_url)
+
+            if rule.redirect_to is not None:
+                if isinstance(rule.redirect_to, string_types):
+
+                    def _handle_match(match):
+                        value = rv[match.group(1)]
+                        return rule._converters[match.group(1)].to_url(value)
+
+                    redirect_url = _simple_rule_re.sub(_handle_match, rule.redirect_to)
+                else:
+                    redirect_url = rule.redirect_to(self, **rv)
+                raise RequestRedirect(
+                    str(
+                        url_join(
+                            "%s://%s%s%s"
+                            % (
+                                self.url_scheme or "http",
+                                self.subdomain + "." if self.subdomain else "",
+                                self.server_name,
+                                self.script_name,
+                            ),
+                            redirect_url,
+                        )
+                    )
+                )
+
+            if return_rule:
+                return rule, rv
+            else:
+                return rule.endpoint, rv
+
+        if have_match_for:
+            raise MethodNotAllowed(valid_methods=list(have_match_for))
+        raise NotFound()
+
+    def test(self, path_info=None, method=None):
+        """Test if a rule would match.  Works like `match` but returns `True`
+        if the URL matches, or `False` if it does not exist.
+
+        :param path_info: the path info to use for matching.  Overrides the
+                          path info specified on binding.
+        :param method: the HTTP method used for matching.  Overrides the
+                       method specified on binding.
+        """
+        try:
+            self.match(path_info, method)
+        except RequestRedirect:
+            pass
+        except HTTPException:
+            return False
+        return True
+
+    def allowed_methods(self, path_info=None):
+        """Returns the valid methods that match for a given path.
+
+        .. versionadded:: 0.7
+        """
+        try:
+            self.match(path_info, method="--")
+        except MethodNotAllowed as e:
+            return e.valid_methods
+        except HTTPException:
+            pass
+        return []
+
+    def get_host(self, domain_part):
+        """Figures out the full host name for the given domain part.  The
+        domain part is a subdomain in case host matching is disabled or
+        a full host name.
+        """
+        if self.map.host_matching:
+            if domain_part is None:
+                return self.server_name
+            return to_unicode(domain_part, "ascii")
+        subdomain = domain_part
+        if subdomain is None:
+            subdomain = self.subdomain
+        else:
+            subdomain = to_unicode(subdomain, "ascii")
+        return (subdomain + u"." if subdomain else u"") + self.server_name
+
+    def get_default_redirect(self, rule, method, values, query_args):
+        """A helper that returns the URL to redirect to if it finds one.
+        This is used for default redirecting only.
+
+        :internal:
+        """
+        assert self.map.redirect_defaults
+        for r in self.map._rules_by_endpoint[rule.endpoint]:
+            # every rule that comes after this one, including this rule
+            # itself, has a lower priority for the defaults.  The ones
+            # with the highest priority are ordered first for building.
+            if r is rule:
+                break
+            if r.provides_defaults_for(rule) and r.suitable_for(values, method):
+                values.update(r.defaults)
+                domain_part, path = r.build(values)
+                return self.make_redirect_url(path, query_args, domain_part=domain_part)
+
+    def encode_query_args(self, query_args):
+        if not isinstance(query_args, string_types):
+            query_args = url_encode(query_args, self.map.charset)
+        return query_args
+
+    def make_redirect_url(self, path_info, query_args=None, domain_part=None):
+        """Creates a redirect URL.
+
+        :internal:
+        """
+        suffix = ""
+        if query_args:
+            suffix = "?" + self.encode_query_args(query_args)
+        return str(
+            "%s://%s/%s%s"
+            % (
+                self.url_scheme or "http",
+                self.get_host(domain_part),
+                posixpath.join(
+                    self.script_name[:-1].lstrip("/"), path_info.lstrip("/")
+                ),
+                suffix,
+            )
+        )
+
+    def make_alias_redirect_url(self, path, endpoint, values, method, query_args):
+        """Internally called to make an alias redirect URL."""
+        url = self.build(
+            endpoint, values, method, append_unknown=False, force_external=True
+        )
+        if query_args:
+            url += "?" + self.encode_query_args(query_args)
+        assert url != path, "detected invalid alias setting. No canonical URL found"
+        return url
+
+    def _partial_build(self, endpoint, values, method, append_unknown):
+        """Helper for :meth:`build`.  Returns subdomain and path for the
+        rule that accepts this endpoint, values and method.
+
+        :internal:
+        """
+        # in case the method is none, try with the default method first
+        if method is None:
+            rv = self._partial_build(
+                endpoint, values, self.default_method, append_unknown
+            )
+            if rv is not None:
+                return rv
+
+        # default method did not match or a specific method is passed,
+        # check all and go with first result.
+        for rule in self.map._rules_by_endpoint.get(endpoint, ()):
+            if rule.suitable_for(values, method):
+                rv = rule.build(values, append_unknown)
+                if rv is not None:
+                    return rv
+
+    def build(
+        self,
+        endpoint,
+        values=None,
+        method=None,
+        force_external=False,
+        append_unknown=True,
+    ):
+        """Building URLs works pretty much the other way round.  Instead of
+        `match` you call `build` and pass it the endpoint and a dict of
+        arguments for the placeholders.
+
+        The `build` function also accepts an argument called `force_external`
+        which, if set to `True`, will force external URLs.  By default,
+        external URLs (which include the server name) will only be used if
+        the target URL is on a different subdomain.
+
+        >>> m = Map([
+        ...     Rule('/', endpoint='index'),
+        ...     Rule('/downloads/', endpoint='downloads/index'),
+        ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
+        ... ])
+        >>> urls = m.bind("example.com", "/")
+        >>> urls.build("index", {})
+        '/'
+        >>> urls.build("downloads/show", {'id': 42})
+        '/downloads/42'
+        >>> urls.build("downloads/show", {'id': 42}, force_external=True)
+        'http://example.com/downloads/42'
+
+        Because URLs cannot contain non-ASCII data you will always get
+        bytestrings back.  Non-ASCII characters are URL-encoded with the
+        charset defined on the map instance.
+
+        Additional values are converted to unicode and appended to the URL as
+        URL querystring parameters:
+
+        >>> urls.build("index", {'q': 'My Searchstring'})
+        '/?q=My+Searchstring'
+
+        When processing those additional values, lists are furthermore
+        interpreted as multiple values (as per
+        :py:class:`werkzeug.datastructures.MultiDict`):
+
+        >>> urls.build("index", {'q': ['a', 'b', 'c']})
+        '/?q=a&q=b&q=c'
+
+        Passing a ``MultiDict`` will also add multiple values:
+
+        >>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
+        '/?p=z&q=a&q=b'
+
+        If no rule matches when building, a `BuildError` exception is
+        raised.
+
+        The build method accepts an argument called `method` which allows you
+        to specify the method you want to have a URL built for if you have
+        different methods registered for the same endpoint.
+
+        .. versionadded:: 0.6
+           the `append_unknown` parameter was added.
+
+        :param endpoint: the endpoint of the URL to build.
+        :param values: the values for the URL to build.  Unhandled values are
+                       appended to the URL as query parameters.
+        :param method: the HTTP method for the rule if there are different
+                       URLs for different methods on the same endpoint.
+        :param force_external: enforce full canonical external URLs. If the URL
+                               scheme is not provided, this will generate
+                               a protocol-relative URL.
+        :param append_unknown: unknown parameters are appended to the generated
+                               URL as query string argument.  Disable this
+                               if you want the builder to ignore those.
+        """
+        self.map.update()
+
+        if values:
+            if isinstance(values, MultiDict):
+                temp_values = {}
+                # iteritems(dict, values) uses the plain ``dict.items``
+                # view of the MultiDict, yielding its internal value
+                # lists like `values.lists()` but without the `list()`
+                # coercion overhead.
+                for key, value in iteritems(dict, values):
+                    if not value:
+                        continue
+                    if len(value) == 1:  # flatten single item lists
+                        value = value[0]
+                        if value is None:  # drop None
+                            continue
+                    temp_values[key] = value
+                values = temp_values
+            else:
+                # drop None
+                values = dict(i for i in iteritems(values) if i[1] is not None)
+        else:
+            values = {}
+
+        rv = self._partial_build(endpoint, values, method, append_unknown)
+        if rv is None:
+            raise BuildError(endpoint, values, method, self)
+        domain_part, path = rv
+
+        host = self.get_host(domain_part)
+
+        # shortcut this.
+        if not force_external and (
+            (self.map.host_matching and host == self.server_name)
+            or (not self.map.host_matching and domain_part == self.subdomain)
+        ):
+            return "%s/%s" % (self.script_name.rstrip("/"), path.lstrip("/"))
+        return str(
+            "%s//%s%s/%s"
+            % (
+                self.url_scheme + ":" if self.url_scheme else "",
+                host,
+                self.script_name[:-1],
+                path.lstrip("/"),
+            )
+        )
diff --git a/lib/python3.7/site-packages/werkzeug/security.py b/lib/python3.7/site-packages/werkzeug/security.py
new file mode 100644
index 0000000000000000000000000000000000000000..1842afd0a1974ffa12c4bb75ee056eec02ea3069
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/security.py
@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.security
+    ~~~~~~~~~~~~~~~~~
+
+    Security related helpers such as secure password hashing tools.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import codecs
+import hashlib
+import hmac
+import os
+import posixpath
+from random import SystemRandom
+from struct import Struct
+
+from ._compat import izip
+from ._compat import PY2
+from ._compat import range_type
+from ._compat import text_type
+from ._compat import to_bytes
+from ._compat import to_native
+
+SALT_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+DEFAULT_PBKDF2_ITERATIONS = 150000
+
+_pack_int = Struct(">I").pack
+_builtin_safe_str_cmp = getattr(hmac, "compare_digest", None)
+_sys_rng = SystemRandom()
+_os_alt_seps = list(
+    sep for sep in [os.path.sep, os.path.altsep] if sep not in (None, "/")
+)
+
+
+def pbkdf2_hex(
+    data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None
+):
+    """Like :func:`pbkdf2_bin`, but returns a hex-encoded string.
+
+    .. versionadded:: 0.9
+
+    :param data: the data to derive.
+    :param salt: the salt for the derivation.
+    :param iterations: the number of iterations.
+    :param keylen: the length of the resulting key.  If not provided,
+                   the digest size will be used.
+    :param hashfunc: the hash function to use.  This can either be the
+                     string name of a known hash function, or a function
+                     from the hashlib module.  Defaults to sha256.
+    """
+    rv = pbkdf2_bin(data, salt, iterations, keylen, hashfunc)
+    return to_native(codecs.encode(rv, "hex_codec"))
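+
+# Illustrative check against the PBKDF2-HMAC-SHA1 test vector from
+# RFC 6070 (``keylen`` defaults to the digest size, 20 bytes for SHA-1):
+#
+#   >>> pbkdf2_hex("password", "salt", iterations=1, hashfunc="sha1")
+#   '0c60c80f961f0e71f3a9b524af6012062fe037a6'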
+
+
+def pbkdf2_bin(
+    data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None
+):
+    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
+    with the given `salt`. It iterates `iterations` times and produces a
+    key of `keylen` bytes. By default, SHA-256 is used as hash function;
+    a different hashlib `hashfunc` can be provided.
+
+    .. versionadded:: 0.9
+
+    :param data: the data to derive.
+    :param salt: the salt for the derivation.
+    :param iterations: the number of iterations.
+    :param keylen: the length of the resulting key.  If not provided
+                   the digest size will be used.
+    :param hashfunc: the hash function to use.  This can either be the
+                     string name of a known hash function or a function
+                     from the hashlib module.  Defaults to sha256.
+    """
+    if not hashfunc:
+        hashfunc = "sha256"
+
+    data = to_bytes(data)
+    salt = to_bytes(salt)
+
+    if callable(hashfunc):
+        _test_hash = hashfunc()
+        hash_name = getattr(_test_hash, "name", None)
+    else:
+        hash_name = hashfunc
+    return hashlib.pbkdf2_hmac(hash_name, data, salt, iterations, keylen)
+
+
+def safe_str_cmp(a, b):
+    """This function compares strings in somewhat constant time.  This
+    requires that the length of at least one string is known in advance.
+
+    Returns `True` if the two strings are equal, or `False` if they are not.
+
+    .. versionadded:: 0.7
+    """
+    if isinstance(a, text_type):
+        a = a.encode("utf-8")
+    if isinstance(b, text_type):
+        b = b.encode("utf-8")
+
+    if _builtin_safe_str_cmp is not None:
+        return _builtin_safe_str_cmp(a, b)
+
+    if len(a) != len(b):
+        return False
+
+    rv = 0
+    if PY2:
+        for x, y in izip(a, b):
+            rv |= ord(x) ^ ord(y)
+    else:
+        for x, y in izip(a, b):
+            rv |= x ^ y
+
+    return rv == 0
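+
+# Illustrative sketch: the result only reveals *whether* the strings
+# match, not where they first differ.
+#
+#   >>> safe_str_cmp("secret", "secret"), safe_str_cmp("secret", "Secret")
+#   (True, False)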
+
+
+def gen_salt(length):
+    """Generate a random string of SALT_CHARS with specified ``length``."""
+    if length <= 0:
+        raise ValueError("Salt length must be positive")
+    return "".join(_sys_rng.choice(SALT_CHARS) for _ in range_type(length))
+
+
+def _hash_internal(method, salt, password):
+    """Internal password hash helper.  Supports plaintext without salt,
+    unsalted and salted passwords.  In case salted passwords are used
+    hmac is used.
+    """
+    if method == "plain":
+        return password, method
+
+    if isinstance(password, text_type):
+        password = password.encode("utf-8")
+
+    if method.startswith("pbkdf2:"):
+        args = method[7:].split(":")
+        if len(args) not in (1, 2):
+            raise ValueError("Invalid number of arguments for PBKDF2")
+        method = args.pop(0)
+        iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS
+        is_pbkdf2 = True
+        actual_method = "pbkdf2:%s:%d" % (method, iterations)
+    else:
+        is_pbkdf2 = False
+        actual_method = method
+
+    if is_pbkdf2:
+        if not salt:
+            raise ValueError("Salt is required for PBKDF2")
+        rv = pbkdf2_hex(password, salt, iterations, hashfunc=method)
+    elif salt:
+        if isinstance(salt, text_type):
+            salt = salt.encode("utf-8")
+        mac = _create_mac(salt, password, method)
+        rv = mac.hexdigest()
+    else:
+        rv = hashlib.new(method, password).hexdigest()
+    return rv, actual_method
+
+
+def _create_mac(key, msg, method):
+    if callable(method):
+        return hmac.HMAC(key, msg, method)
+
+    def hashfunc(d=b""):
+        return hashlib.new(method, d)
+
+    # Python 2.7 used ``hasattr(digestmod, '__call__')``
+    # to detect if hashfunc is callable
+    hashfunc.__call__ = hashfunc
+    return hmac.HMAC(key, msg, hashfunc)
+
+
+def generate_password_hash(password, method="pbkdf2:sha256", salt_length=8):
+    """Hash a password with the given method and salt with a string of
+    the given length. The format of the string returned includes the method
+    that was used so that :func:`check_password_hash` can check the hash.
+
+    The format for the hashed string looks like this::
+
+        method$salt$hash
+
+    This method can **not** generate unsalted password hashes, but it is
+    possible to set ``method='plain'`` in order to enforce plaintext
+    passwords.  If a salt is used, HMAC is used internally to salt the
+    password.
+
+    PBKDF2 can be enabled by setting the method to
+    ``pbkdf2:method:iterations``, where iterations is optional::
+
+        pbkdf2:sha256:80000$salt$hash
+        pbkdf2:sha256$salt$hash
+
+    :param password: the password to hash.
+    :param method: the hash method to use (one that hashlib supports). Can
+                   optionally be in the format ``pbkdf2:<method>[:iterations]``
+                   to enable PBKDF2.
+    :param salt_length: the length of the salt in letters.
+    """
+    salt = gen_salt(salt_length) if method != "plain" else ""
+    h, actual_method = _hash_internal(method, salt, password)
+    return "%s$%s$%s" % (actual_method, salt, h)
+
+
+def check_password_hash(pwhash, password):
+    """check a password against a given salted and hashed password value.
+    In order to support unsalted legacy passwords this method supports
+    plain text passwords, md5 and sha1 hashes (both salted and unsalted).
+
+    Returns `True` if the password matched, `False` otherwise.
+
+    :param pwhash: a hashed string like returned by
+                   :func:`generate_password_hash`.
+    :param password: the plaintext password to compare against the hash.
+    """
+    if pwhash.count("$") < 2:
+        return False
+    method, salt, hashval = pwhash.split("$", 2)
+    return safe_str_cmp(_hash_internal(method, salt, password)[0], hashval)
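+
+
+# Usage sketch (illustrative only, not part of Werkzeug): round-tripping a
+# password through the two helpers above.  The hash differs on every call
+# because a fresh salt is generated each time.
+#
+#     pw_hash = generate_password_hash("secret")
+#     # -> e.g. "pbkdf2:sha256:<iterations>$<salt>$<hexdigest>"
+#     check_password_hash(pw_hash, "secret")  # -> True
+#     check_password_hash(pw_hash, "wrong")   # -> False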
+
+
+def safe_join(directory, *pathnames):
+    """Safely join `directory` and one or more untrusted `pathnames`.  If this
+    cannot be done, this function returns ``None``.
+
+    :param directory: the base directory.
+    :param pathnames: the untrusted pathnames relative to that directory.
+    """
+    parts = [directory]
+    for filename in pathnames:
+        if filename != "":
+            filename = posixpath.normpath(filename)
+        for sep in _os_alt_seps:
+            if sep in filename:
+                return None
+        if os.path.isabs(filename) or filename == ".." or filename.startswith("../"):
+            return None
+        parts.append(filename)
+    return posixpath.join(*parts)
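+
+
+# Usage sketch (illustrative only, not part of Werkzeug):
+#
+#     safe_join("/var/www", "index.html")     # -> "/var/www/index.html"
+#     safe_join("/var/www", "../etc/passwd")  # -> None (traversal rejected)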
diff --git a/lib/python3.7/site-packages/werkzeug/serving.py b/lib/python3.7/site-packages/werkzeug/serving.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a179b3e5b945001b345b68f997cb2351d0985ce
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/serving.py
@@ -0,0 +1,1055 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.serving
+    ~~~~~~~~~~~~~~~~
+
+    There are many ways to serve a WSGI application.  While you're developing
+    it you usually don't want a full blown webserver like Apache but a simple
+    standalone one.  From Python 2.5 onwards there is the `wsgiref`_ server in
+    the standard library.  If you're using older versions of Python you can
+    download the package from the cheeseshop.
+
+    However there are some caveats.  Source code won't reload itself when
+    changed, and each time you kill the server using ``^C`` you get a
+    `KeyboardInterrupt` error.  While the latter is easy to solve, the
+    first one can be a pain in the ass in some situations.
+
+    The easiest way is creating a small ``start-myproject.py`` that runs the
+    application::
+
+        #!/usr/bin/env python
+        # -*- coding: utf-8 -*-
+        from myproject import make_app
+        from werkzeug.serving import run_simple
+
+        app = make_app(...)
+        run_simple('localhost', 8080, app, use_reloader=True)
+
+    You can also pass it an `extra_files` keyword argument with a list of
+    additional files (like configuration files) you want to observe.
+
+    For bigger applications you should consider using `click`
+    (http://click.pocoo.org) instead of a simple start file.
+
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import io
+import os
+import signal
+import socket
+import sys
+
+import werkzeug
+from ._compat import PY2
+from ._compat import reraise
+from ._compat import WIN
+from ._compat import wsgi_encoding_dance
+from ._internal import _log
+from .exceptions import InternalServerError
+from .urls import uri_to_iri
+from .urls import url_parse
+from .urls import url_unquote
+
+try:
+    import socketserver
+    from http.server import BaseHTTPRequestHandler
+    from http.server import HTTPServer
+except ImportError:
+    import SocketServer as socketserver
+    from BaseHTTPServer import HTTPServer
+    from BaseHTTPServer import BaseHTTPRequestHandler
+
+try:
+    import ssl
+except ImportError:
+
+    class _SslDummy(object):
+        def __getattr__(self, name):
+            raise RuntimeError("SSL support unavailable")
+
+    ssl = _SslDummy()
+
+try:
+    import termcolor
+except ImportError:
+    termcolor = None
+
+
+def _get_openssl_crypto_module():
+    try:
+        from OpenSSL import crypto
+    except ImportError:
+        raise TypeError("Using ad-hoc certificates requires the pyOpenSSL library.")
+    else:
+        return crypto
+
+
+ThreadingMixIn = socketserver.ThreadingMixIn
+can_fork = hasattr(os, "fork")
+
+if can_fork:
+    ForkingMixIn = socketserver.ForkingMixIn
+else:
+
+    class ForkingMixIn(object):
+        pass
+
+
+try:
+    af_unix = socket.AF_UNIX
+except AttributeError:
+    af_unix = None
+
+
+LISTEN_QUEUE = 128
+can_open_by_fd = not WIN and hasattr(socket, "fromfd")
+
+# On Python 3, ConnectionError represents the same errnos as
+# socket.error from Python 2, while socket.error is an alias for the
+# more generic OSError.
+if PY2:
+    _ConnectionError = socket.error
+else:
+    _ConnectionError = ConnectionError
+
+
+class DechunkedInput(io.RawIOBase):
+    """An input stream that handles Transfer-Encoding 'chunked'"""
+
+    def __init__(self, rfile):
+        self._rfile = rfile
+        self._done = False
+        self._len = 0
+
+    def readable(self):
+        return True
+
+    def read_chunk_len(self):
+        try:
+            line = self._rfile.readline().decode("latin1")
+            _len = int(line.strip(), 16)
+        except ValueError:
+            raise IOError("Invalid chunk header")
+        if _len < 0:
+            raise IOError("Negative chunk length not allowed")
+        return _len
+
+    def readinto(self, buf):
+        read = 0
+        while not self._done and read < len(buf):
+            if self._len == 0:
+                # This is the first chunk or we fully consumed the previous
+                # one. Read the next length of the next chunk
+                self._len = self.read_chunk_len()
+
+            if self._len == 0:
+                # Found the final chunk of size 0. The stream is now exhausted,
+                # but there is still a final newline that should be consumed
+                self._done = True
+
+            if self._len > 0:
+                # There is data (left) in this chunk, so append it to the
+                # buffer. If this operation fully consumes the chunk, this will
+                # reset self._len to 0.
+                n = min(len(buf), self._len)
+                buf[read : read + n] = self._rfile.read(n)
+                self._len -= n
+                read += n
+
+            if self._len == 0:
+                # Skip the terminating newline of a chunk that has been fully
+                # consumed. This also applies to the 0-sized final chunk
+                terminator = self._rfile.readline()
+                if terminator not in (b"\n", b"\r\n", b"\r"):
+                    raise IOError("Missing chunk terminating newline")
+
+        return read
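+
+
+# Decoding sketch (illustrative only, not part of Werkzeug): wrapping a raw
+# chunked body.  ``io.RawIOBase.read()`` drives ``readinto`` until the final
+# 0-sized chunk has been consumed.
+#
+#     raw = io.BytesIO(b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n")
+#     DechunkedInput(raw).read()  # -> b"Wikipedia"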
+
+
+class WSGIRequestHandler(BaseHTTPRequestHandler, object):
+
+    """A request handler that implements WSGI dispatching."""
+
+    @property
+    def server_version(self):
+        return "Werkzeug/" + werkzeug.__version__
+
+    def make_environ(self):
+        request_url = url_parse(self.path)
+
+        def shutdown_server():
+            self.server.shutdown_signal = True
+
+        url_scheme = "http" if self.server.ssl_context is None else "https"
+        if not self.client_address:
+            self.client_address = "<local>"
+        if isinstance(self.client_address, str):
+            self.client_address = (self.client_address, 0)
+        path_info = url_unquote(request_url.path)
+
+        environ = {
+            "wsgi.version": (1, 0),
+            "wsgi.url_scheme": url_scheme,
+            "wsgi.input": self.rfile,
+            "wsgi.errors": sys.stderr,
+            "wsgi.multithread": self.server.multithread,
+            "wsgi.multiprocess": self.server.multiprocess,
+            "wsgi.run_once": False,
+            "werkzeug.server.shutdown": shutdown_server,
+            "SERVER_SOFTWARE": self.server_version,
+            "REQUEST_METHOD": self.command,
+            "SCRIPT_NAME": "",
+            "PATH_INFO": wsgi_encoding_dance(path_info),
+            "QUERY_STRING": wsgi_encoding_dance(request_url.query),
+            # Non-standard, added by mod_wsgi, uWSGI
+            "REQUEST_URI": wsgi_encoding_dance(self.path),
+            # Non-standard, added by gunicorn
+            "RAW_URI": wsgi_encoding_dance(self.path),
+            "REMOTE_ADDR": self.address_string(),
+            "REMOTE_PORT": self.port_integer(),
+            "SERVER_NAME": self.server.server_address[0],
+            "SERVER_PORT": str(self.server.server_address[1]),
+            "SERVER_PROTOCOL": self.request_version,
+        }
+
+        for key, value in self.get_header_items():
+            key = key.upper().replace("-", "_")
+            if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"):
+                key = "HTTP_" + key
+                if key in environ:
+                    value = "{},{}".format(environ[key], value)
+            environ[key] = value
+
+        if environ.get("HTTP_TRANSFER_ENCODING", "").strip().lower() == "chunked":
+            environ["wsgi.input_terminated"] = True
+            environ["wsgi.input"] = DechunkedInput(environ["wsgi.input"])
+
+        if request_url.scheme and request_url.netloc:
+            environ["HTTP_HOST"] = request_url.netloc
+
+        return environ
+
+    def run_wsgi(self):
+        if self.headers.get("Expect", "").lower().strip() == "100-continue":
+            self.wfile.write(b"HTTP/1.1 100 Continue\r\n\r\n")
+
+        self.environ = environ = self.make_environ()
+        headers_set = []
+        headers_sent = []
+
+        def write(data):
+            assert headers_set, "write() before start_response"
+            if not headers_sent:
+                status, response_headers = headers_sent[:] = headers_set
+                try:
+                    code, msg = status.split(None, 1)
+                except ValueError:
+                    code, msg = status, ""
+                code = int(code)
+                self.send_response(code, msg)
+                header_keys = set()
+                for key, value in response_headers:
+                    self.send_header(key, value)
+                    key = key.lower()
+                    header_keys.add(key)
+                if not (
+                    "content-length" in header_keys
+                    or environ["REQUEST_METHOD"] == "HEAD"
+                    or code < 200
+                    or code in (204, 304)
+                ):
+                    self.close_connection = True
+                    self.send_header("Connection", "close")
+                if "server" not in header_keys:
+                    self.send_header("Server", self.version_string())
+                if "date" not in header_keys:
+                    self.send_header("Date", self.date_time_string())
+                self.end_headers()
+
+            assert isinstance(data, bytes), "applications must write bytes"
+            self.wfile.write(data)
+            self.wfile.flush()
+
+        def start_response(status, response_headers, exc_info=None):
+            if exc_info:
+                try:
+                    if headers_sent:
+                        reraise(*exc_info)
+                finally:
+                    exc_info = None
+            elif headers_set:
+                raise AssertionError("Headers already set")
+            headers_set[:] = [status, response_headers]
+            return write
+
+        def execute(app):
+            application_iter = app(environ, start_response)
+            try:
+                for data in application_iter:
+                    write(data)
+                if not headers_sent:
+                    write(b"")
+            finally:
+                if hasattr(application_iter, "close"):
+                    application_iter.close()
+                application_iter = None
+
+        try:
+            execute(self.server.app)
+        except (_ConnectionError, socket.timeout) as e:
+            self.connection_dropped(e, environ)
+        except Exception:
+            if self.server.passthrough_errors:
+                raise
+            from .debug.tbtools import get_current_traceback
+
+            traceback = get_current_traceback(ignore_system_exceptions=True)
+            try:
+                # if we haven't yet sent the headers but they are set
+                # we roll back to be able to set them again.
+                if not headers_sent:
+                    del headers_set[:]
+                execute(InternalServerError())
+            except Exception:
+                pass
+            self.server.log("error", "Error on request:\n%s", traceback.plaintext)
+
+    def handle(self):
+        """Handles a request ignoring dropped connections."""
+        rv = None
+        try:
+            rv = BaseHTTPRequestHandler.handle(self)
+        except (_ConnectionError, socket.timeout) as e:
+            self.connection_dropped(e)
+        except Exception as e:
+            if self.server.ssl_context is None or not is_ssl_error(e):
+                raise
+        if self.server.shutdown_signal:
+            self.initiate_shutdown()
+        return rv
+
+    def initiate_shutdown(self):
+        """A horrible, horrible way to kill the server for Python 2.6 and
+        later.  It's the best we can do.
+        """
+        # Windows does not provide SIGKILL, go with SIGTERM then.
+        sig = getattr(signal, "SIGKILL", signal.SIGTERM)
+        # reloader active
+        if is_running_from_reloader():
+            os.kill(os.getpid(), sig)
+        # python 2.7
+        self.server._BaseServer__shutdown_request = True
+        # python 2.6
+        self.server._BaseServer__serving = False
+
+    def connection_dropped(self, error, environ=None):
+        """Called if the connection was closed by the client.  By default
+        nothing happens.
+        """
+
+    def handle_one_request(self):
+        """Handle a single HTTP request."""
+        self.raw_requestline = self.rfile.readline()
+        if not self.raw_requestline:
+            self.close_connection = 1
+        elif self.parse_request():
+            return self.run_wsgi()
+
+    def send_response(self, code, message=None):
+        """Send the response header and log the response code."""
+        self.log_request(code)
+        if message is None:
+            message = code in self.responses and self.responses[code][0] or ""
+        if self.request_version != "HTTP/0.9":
+            hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
+            self.wfile.write(hdr.encode("ascii"))
+
+    def version_string(self):
+        return BaseHTTPRequestHandler.version_string(self).strip()
+
+    def address_string(self):
+        if getattr(self, "environ", None):
+            return self.environ["REMOTE_ADDR"]
+        elif not self.client_address:
+            return "<local>"
+        elif isinstance(self.client_address, str):
+            return self.client_address
+        else:
+            return self.client_address[0]
+
+    def port_integer(self):
+        return self.client_address[1]
+
+    def log_request(self, code="-", size="-"):
+        try:
+            path = uri_to_iri(self.path)
+            msg = "%s %s %s" % (self.command, path, self.request_version)
+        except AttributeError:
+            # path isn't set if the requestline was bad
+            msg = self.requestline
+
+        code = str(code)
+
+        if termcolor:
+            color = termcolor.colored
+
+            if code[0] == "1":  # 1xx - Informational
+                msg = color(msg, attrs=["bold"])
+            elif code[0] == "2":  # 2xx - Success
+                msg = color(msg, color="white")
+            elif code == "304":  # 304 - Resource Not Modified
+                msg = color(msg, color="cyan")
+            elif code[0] == "3":  # 3xx - Redirection
+                msg = color(msg, color="green")
+            elif code == "404":  # 404 - Resource Not Found
+                msg = color(msg, color="yellow")
+            elif code[0] == "4":  # 4xx - Client Error
+                msg = color(msg, color="red", attrs=["bold"])
+            else:  # 5xx, or any other response
+                msg = color(msg, color="magenta", attrs=["bold"])
+
+        self.log("info", '"%s" %s %s', msg, code, size)
+
+    def log_error(self, *args):
+        self.log("error", *args)
+
+    def log_message(self, format, *args):
+        self.log("info", format, *args)
+
+    def log(self, type, message, *args):
+        _log(
+            type,
+            "%s - - [%s] %s\n"
+            % (self.address_string(), self.log_date_time_string(), message % args),
+        )
+
+    def get_header_items(self):
+        """
+        Get an iterable list of key/value pairs representing headers.
+
+        This function provides Python 2/3 compatibility as related to the
+        parsing of request headers. Python 2.7 is not compliant with
+        RFC 3875 Section 4.1.18, which requires multiple values for headers
+        to be provided. This function will return a matching list regardless
+        of Python version. It can be removed once Python 2.7 support
+        is dropped.
+
+        :return: List of tuples containing header key/value pairs
+        """
+        if PY2:
+            # For Python 2, process the headers manually according to
+            # RFC 2616 Section 4.2.
+            items = []
+            for header in self.headers.headers:
+                # Remove "\n\r" from the header and split on ":" to get
+                # the field name and value.
+                key, value = header[0:-2].split(":", 1)
+                # Add the key and the value once stripped of leading
+                # white space. The specification allows for stripping
+                # trailing white space but the Python 3 code does not
+                # strip trailing white space. Therefore, trailing space
+                # will be left as is to match the Python 3 behavior.
+                items.append((key, value.lstrip()))
+        else:
+            items = self.headers.items()
+
+        return items
+
+
+#: backwards compatible name if someone is subclassing it
+BaseRequestHandler = WSGIRequestHandler
+
+
+def generate_adhoc_ssl_pair(cn=None):
+    from random import random
+
+    crypto = _get_openssl_crypto_module()
+
+    # pretty damn sure that this is not actually accepted by anyone
+    if cn is None:
+        cn = "*"
+
+    cert = crypto.X509()
+    cert.set_serial_number(int(random() * sys.maxsize))
+    cert.gmtime_adj_notBefore(0)
+    cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
+
+    subject = cert.get_subject()
+    subject.CN = cn
+    subject.O = "Dummy Certificate"  # noqa: E741
+
+    issuer = cert.get_issuer()
+    issuer.CN = subject.CN
+    issuer.O = subject.O  # noqa: E741
+
+    pkey = crypto.PKey()
+    pkey.generate_key(crypto.TYPE_RSA, 2048)
+    cert.set_pubkey(pkey)
+    cert.sign(pkey, "sha256")
+
+    return cert, pkey
+
+
+def make_ssl_devcert(base_path, host=None, cn=None):
+    """Creates an SSL key for development.  This should be used instead of
+    the ``'adhoc'`` key which generates a new cert on each server start.
+    It accepts a path for where it should store the key and cert and
+    either a host or CN.  If a host is given it will use the CN
+    ``*.host/CN=host``.
+
+    For more information see :func:`run_simple`.
+
+    .. versionadded:: 0.9
+
+    :param base_path: the path to the certificate and key.  The extension
+                      ``.crt`` is added for the certificate, ``.key`` is
+                      added for the key.
+    :param host: the name of the host.  This can be used as an alternative
+                 for the `cn`.
+    :param cn: the `CN` to use.
+    """
+    from OpenSSL import crypto
+
+    if host is not None:
+        cn = "*.%s/CN=%s" % (host, host)
+    cert, pkey = generate_adhoc_ssl_pair(cn=cn)
+
+    cert_file = base_path + ".crt"
+    pkey_file = base_path + ".key"
+
+    with open(cert_file, "wb") as f:
+        f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
+    with open(pkey_file, "wb") as f:
+        f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
+
+    return cert_file, pkey_file
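+
+
+# Usage sketch (illustrative only; requires pyOpenSSL): create a reusable
+# development certificate once and pass the file pair as ``ssl_context``.
+#
+#     cert_file, pkey_file = make_ssl_devcert("/tmp/devcert", host="localhost")
+#     # -> ("/tmp/devcert.crt", "/tmp/devcert.key")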
+
+
+def generate_adhoc_ssl_context():
+    """Generates an adhoc SSL context for the development server."""
+    crypto = _get_openssl_crypto_module()
+    import tempfile
+    import atexit
+
+    cert, pkey = generate_adhoc_ssl_pair()
+    cert_handle, cert_file = tempfile.mkstemp()
+    pkey_handle, pkey_file = tempfile.mkstemp()
+    atexit.register(os.remove, pkey_file)
+    atexit.register(os.remove, cert_file)
+
+    os.write(cert_handle, crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
+    os.write(pkey_handle, crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
+    os.close(cert_handle)
+    os.close(pkey_handle)
+    ctx = load_ssl_context(cert_file, pkey_file)
+    return ctx
+
+
+def load_ssl_context(cert_file, pkey_file=None, protocol=None):
+    """Loads SSL context from cert/private key files and optional protocol.
+    Many parameters are directly taken from the API of
+    :py:class:`ssl.SSLContext`.
+
+    :param cert_file: Path of the certificate to use.
+    :param pkey_file: Path of the private key to use. If not given, the key
+                      will be obtained from the certificate file.
+    :param protocol: One of the ``PROTOCOL_*`` constants in the stdlib ``ssl``
+                     module. Defaults to ``PROTOCOL_SSLv23``.
+    """
+    if protocol is None:
+        protocol = ssl.PROTOCOL_SSLv23
+    ctx = _SSLContext(protocol)
+    ctx.load_cert_chain(cert_file, pkey_file)
+    return ctx
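+
+
+# Usage sketch (illustrative only): ``BaseWSGIServer`` below calls this
+# helper itself when ``ssl_context`` is given as a ``(cert_file, pkey_file)``
+# tuple; the file names here are placeholders.
+#
+#     ctx = load_ssl_context("cert.pem", "key.pem")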
+
+
+class _SSLContext(object):
+
+    """A dummy class with a small subset of Python3's ``ssl.SSLContext``, only
+    intended to be used with and by Werkzeug."""
+
+    def __init__(self, protocol):
+        self._protocol = protocol
+        self._certfile = None
+        self._keyfile = None
+        self._password = None
+
+    def load_cert_chain(self, certfile, keyfile=None, password=None):
+        self._certfile = certfile
+        self._keyfile = keyfile or certfile
+        self._password = password
+
+    def wrap_socket(self, sock, **kwargs):
+        return ssl.wrap_socket(
+            sock,
+            keyfile=self._keyfile,
+            certfile=self._certfile,
+            ssl_version=self._protocol,
+            **kwargs
+        )
+
+
+def is_ssl_error(error=None):
+    """Checks if the given error (or the current one) is an SSL error."""
+    exc_types = (ssl.SSLError,)
+    try:
+        from OpenSSL.SSL import Error
+
+        exc_types += (Error,)
+    except ImportError:
+        pass
+
+    if error is None:
+        error = sys.exc_info()[1]
+    return isinstance(error, exc_types)
+
+
+def select_address_family(host, port):
+    """Return ``AF_INET4``, ``AF_INET6``, or ``AF_UNIX`` depending on
+    the host and port."""
+    # disabled due to problems with current ipv6 implementations
+    # and various operating systems.  Probably this code also is
+    # not supposed to work, but I can't come up with any other
+    # ways to implement this.
+    # try:
+    #     info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
+    #                               socket.SOCK_STREAM, 0,
+    #                               socket.AI_PASSIVE)
+    #     if info:
+    #         return info[0][0]
+    # except socket.gaierror:
+    #     pass
+    if host.startswith("unix://"):
+        return socket.AF_UNIX
+    elif ":" in host and hasattr(socket, "AF_INET6"):
+        return socket.AF_INET6
+    return socket.AF_INET
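+
+
+# Behaviour sketch (illustrative only):
+#
+#     select_address_family("unix:///tmp/app.sock", 0)  # -> socket.AF_UNIX
+#     select_address_family("::1", 8080)                # -> socket.AF_INET6
+#     select_address_family("127.0.0.1", 8080)          # -> socket.AF_INET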
+
+
+def get_sockaddr(host, port, family):
+    """Return a fully qualified socket address that can be passed to
+    :func:`socket.bind`."""
+    if family == af_unix:
+        return host.split("://", 1)[1]
+    try:
+        res = socket.getaddrinfo(
+            host, port, family, socket.SOCK_STREAM, socket.IPPROTO_TCP
+        )
+    except socket.gaierror:
+        return host, port
+    return res[0][4]
+
+
+class BaseWSGIServer(HTTPServer, object):
+
+    """Simple single-threaded, single-process WSGI server."""
+
+    multithread = False
+    multiprocess = False
+    request_queue_size = LISTEN_QUEUE
+
+    def __init__(
+        self,
+        host,
+        port,
+        app,
+        handler=None,
+        passthrough_errors=False,
+        ssl_context=None,
+        fd=None,
+    ):
+        if handler is None:
+            handler = WSGIRequestHandler
+
+        self.address_family = select_address_family(host, port)
+
+        if fd is not None:
+            real_sock = socket.fromfd(fd, self.address_family, socket.SOCK_STREAM)
+            port = 0
+
+        server_address = get_sockaddr(host, int(port), self.address_family)
+
+        # remove socket file if it already exists
+        if self.address_family == af_unix and os.path.exists(server_address):
+            os.unlink(server_address)
+        HTTPServer.__init__(self, server_address, handler)
+
+        self.app = app
+        self.passthrough_errors = passthrough_errors
+        self.shutdown_signal = False
+        self.host = host
+        self.port = self.socket.getsockname()[1]
+
+        # Patch in the original socket.
+        if fd is not None:
+            self.socket.close()
+            self.socket = real_sock
+            self.server_address = self.socket.getsockname()
+
+        if ssl_context is not None:
+            if isinstance(ssl_context, tuple):
+                ssl_context = load_ssl_context(*ssl_context)
+            if ssl_context == "adhoc":
+                ssl_context = generate_adhoc_ssl_context()
+            # If we are on Python 2 the return value from socket.fromfd
+            # is an internal socket object but what we need for ssl wrap
+            # is the wrapper around it :(
+            sock = self.socket
+            if PY2 and not isinstance(sock, socket.socket):
+                sock = socket.socket(sock.family, sock.type, sock.proto, sock)
+            self.socket = ssl_context.wrap_socket(sock, server_side=True)
+            self.ssl_context = ssl_context
+        else:
+            self.ssl_context = None
+
+    def log(self, type, message, *args):
+        _log(type, message, *args)
+
+    def serve_forever(self):
+        self.shutdown_signal = False
+        try:
+            HTTPServer.serve_forever(self)
+        except KeyboardInterrupt:
+            pass
+        finally:
+            self.server_close()
+
+    def handle_error(self, request, client_address):
+        if self.passthrough_errors:
+            raise
+        # Python 2 still causes a socket.error after the earlier
+        # handling, so silence it here.
+        if isinstance(sys.exc_info()[1], _ConnectionError):
+            return
+        return HTTPServer.handle_error(self, request, client_address)
+
+    def get_request(self):
+        con, info = self.socket.accept()
+        return con, info
+
+
+class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
+
+    """A WSGI server that does threading."""
+
+    multithread = True
+    daemon_threads = True
+
+
+class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
+
+    """A WSGI server that does forking."""
+
+    multiprocess = True
+
+    def __init__(
+        self,
+        host,
+        port,
+        app,
+        processes=40,
+        handler=None,
+        passthrough_errors=False,
+        ssl_context=None,
+        fd=None,
+    ):
+        if not can_fork:
+            raise ValueError("Your platform does not support forking.")
+        BaseWSGIServer.__init__(
+            self, host, port, app, handler, passthrough_errors, ssl_context, fd
+        )
+        self.max_children = processes
+
+
+def make_server(
+    host=None,
+    port=None,
+    app=None,
+    threaded=False,
+    processes=1,
+    request_handler=None,
+    passthrough_errors=False,
+    ssl_context=None,
+    fd=None,
+):
+    """Create a new server instance that is either threaded, or forks
+    or just processes one request after another.
+    """
+    if threaded and processes > 1:
+        raise ValueError("cannot have a multithreaded and multi process server.")
+    elif threaded:
+        return ThreadedWSGIServer(
+            host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
+        )
+    elif processes > 1:
+        return ForkingWSGIServer(
+            host,
+            port,
+            app,
+            processes,
+            request_handler,
+            passthrough_errors,
+            ssl_context,
+            fd=fd,
+        )
+    else:
+        return BaseWSGIServer(
+            host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
+        )
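+
+
+# Usage sketch (illustrative only): driving a threaded server directly
+# instead of going through ``run_simple``; ``app`` stands for any WSGI
+# callable.
+#
+#     srv = make_server("localhost", 8080, app, threaded=True)
+#     srv.serve_forever()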
+
+
+def is_running_from_reloader():
+    """Checks if the application is running from within the Werkzeug
+    reloader subprocess.
+
+    .. versionadded:: 0.10
+    """
+    return os.environ.get("WERKZEUG_RUN_MAIN") == "true"
+
+
+def run_simple(
+    hostname,
+    port,
+    application,
+    use_reloader=False,
+    use_debugger=False,
+    use_evalex=True,
+    extra_files=None,
+    reloader_interval=1,
+    reloader_type="auto",
+    threaded=False,
+    processes=1,
+    request_handler=None,
+    static_files=None,
+    passthrough_errors=False,
+    ssl_context=None,
+):
+    """Start a WSGI application. Optional features include a reloader,
+    multithreading and fork support.
+
+    This function has a command-line interface too::
+
+        python -m werkzeug.serving --help
+
+    .. versionadded:: 0.5
+       `static_files` was added to simplify serving of static files as well
+       as `passthrough_errors`.
+
+    .. versionadded:: 0.6
+       support for SSL was added.
+
+    .. versionadded:: 0.8
+       Added support for automatically loading a SSL context from certificate
+       file and private key.
+
+    .. versionadded:: 0.9
+       Added command-line interface.
+
+    .. versionadded:: 0.10
+       Improved the reloader and added support for changing the backend
+       through the `reloader_type` parameter.  See :ref:`reloader`
+       for more information.
+
+    .. versionchanged:: 0.15
+        Bind to a Unix socket by passing a path that starts with
+        ``unix://`` as the ``hostname``.
+
+    :param hostname: The host to bind to, for example ``'localhost'``.
+        If the value is a path that starts with ``unix://`` it will bind
+        to a Unix socket instead of a TCP socket.
+    :param port: The port for the server, e.g. ``8080``.
+    :param application: the WSGI application to execute
+    :param use_reloader: should the server automatically restart the python
+                         process if modules were changed?
+    :param use_debugger: should the werkzeug debugging system be used?
+    :param use_evalex: should the exception evaluation feature be enabled?
+    :param extra_files: a list of files the reloader should watch
+                        additionally to the modules.  For example configuration
+                        files.
+    :param reloader_interval: the interval for the reloader in seconds.
+    :param reloader_type: the type of reloader to use.  The default is
+                          auto detection.  Valid values are ``'stat'`` and
+                          ``'watchdog'``. See :ref:`reloader` for more
+                          information.
+    :param threaded: should the process handle each request in a separate
+                     thread?
+    :param processes: if greater than 1 then handle each request in a new process
+                      up to this maximum number of concurrent processes.
+    :param request_handler: optional parameter that can be used to replace
+                            the default one.  You can use this to replace it
+                            with a different
+                            :class:`~BaseHTTPServer.BaseHTTPRequestHandler`
+                            subclass.
+    :param static_files: a list or dict of paths for static files.  This works
+                         exactly like :class:`SharedDataMiddleware`, it's actually
+                         just wrapping the application in that middleware before
+                         serving.
+    :param passthrough_errors: set this to `True` to disable the error catching.
+                               This means that the server will die on errors but
+                               it can be useful to hook debuggers in (pdb etc.)
+    :param ssl_context: an SSL context for the connection. Either an
+                        :class:`ssl.SSLContext`, a tuple in the form
+                        ``(cert_file, pkey_file)``, the string ``'adhoc'`` if
+                        the server should automatically create one, or ``None``
+                        to disable SSL (which is the default).
+    """
+    if not isinstance(port, int):
+        raise TypeError("port must be an integer")
+    if use_debugger:
+        from .debug import DebuggedApplication
+
+        application = DebuggedApplication(application, use_evalex)
+    if static_files:
+        from .middleware.shared_data import SharedDataMiddleware
+
+        application = SharedDataMiddleware(application, static_files)
+
+    def log_startup(sock):
+        display_hostname = hostname if hostname not in ("", "*") else "localhost"
+        quit_msg = "(Press CTRL+C to quit)"
+        if sock.family == af_unix:
+            _log("info", " * Running on %s %s", display_hostname, quit_msg)
+        else:
+            if ":" in display_hostname:
+                display_hostname = "[%s]" % display_hostname
+            port = sock.getsockname()[1]
+            _log(
+                "info",
+                " * Running on %s://%s:%d/ %s",
+                "http" if ssl_context is None else "https",
+                display_hostname,
+                port,
+                quit_msg,
+            )
+
+    def inner():
+        try:
+            fd = int(os.environ["WERKZEUG_SERVER_FD"])
+        except (LookupError, ValueError):
+            fd = None
+        srv = make_server(
+            hostname,
+            port,
+            application,
+            threaded,
+            processes,
+            request_handler,
+            passthrough_errors,
+            ssl_context,
+            fd=fd,
+        )
+        if fd is None:
+            log_startup(srv.socket)
+        srv.serve_forever()
+
+    if use_reloader:
+        # If we're not running already in the subprocess that is the
+        # reloader we want to open up a socket early to make sure the
+        # port is actually available.
+        if not is_running_from_reloader():
+            if port == 0 and not can_open_by_fd:
+                raise ValueError(
+                    "Cannot bind to a random port with enabled "
+                    "reloader if the Python interpreter does "
+                    "not support socket opening by fd."
+                )
+
+            # Create and destroy a socket so that any exceptions are
+            # raised before we spawn a separate Python interpreter and
+            # lose this ability.
+            address_family = select_address_family(hostname, port)
+            server_address = get_sockaddr(hostname, port, address_family)
+            s = socket.socket(address_family, socket.SOCK_STREAM)
+            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+            s.bind(server_address)
+            if hasattr(s, "set_inheritable"):
+                s.set_inheritable(True)
+
+            # If we can open the socket by file descriptor, then we can just
+            # reuse this one and our socket will survive the restarts.
+            if can_open_by_fd:
+                os.environ["WERKZEUG_SERVER_FD"] = str(s.fileno())
+                s.listen(LISTEN_QUEUE)
+                log_startup(s)
+            else:
+                s.close()
+                if address_family == af_unix:
+                    _log("info", "Unlinking %s" % server_address)
+                    os.unlink(server_address)
+
+        # Do not use relative imports, otherwise "python -m werkzeug.serving"
+        # breaks.
+        from ._reloader import run_with_reloader
+
+        run_with_reloader(inner, extra_files, reloader_interval, reloader_type)
+    else:
+        inner()
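+
+
+# Usage sketch (illustrative only): serve a WSGI callable ``app`` with the
+# reloader, the debugger, a static file mapping, and an ad-hoc TLS
+# certificate (``'adhoc'`` requires pyOpenSSL).
+#
+#     run_simple(
+#         "localhost",
+#         5000,
+#         app,
+#         use_reloader=True,
+#         use_debugger=True,
+#         static_files={"/static": "/path/to/static"},
+#         ssl_context="adhoc",
+#     )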
+
+
+def run_with_reloader(*args, **kwargs):
+    # People keep using undocumented APIs.  Please do not use this
+    # function; we do not guarantee that it will continue working.
+    from ._reloader import run_with_reloader
+
+    return run_with_reloader(*args, **kwargs)
+
+
+def main():
+    """A simple command-line interface for :py:func:`run_simple`."""
+
+    # in contrast to argparse, this works at least under Python < 2.7
+    import optparse
+    from .utils import import_string
+
+    parser = optparse.OptionParser(usage="Usage: %prog [options] app_module:app_object")
+    parser.add_option(
+        "-b",
+        "--bind",
+        dest="address",
+        help="The hostname:port the app should listen on.",
+    )
+    parser.add_option(
+        "-d",
+        "--debug",
+        dest="use_debugger",
+        action="store_true",
+        default=False,
+        help="Use Werkzeug's debugger.",
+    )
+    parser.add_option(
+        "-r",
+        "--reload",
+        dest="use_reloader",
+        action="store_true",
+        default=False,
+        help="Reload Python process if modules change.",
+    )
+    options, args = parser.parse_args()
+
+    hostname, port = None, None
+    if options.address:
+        address = options.address.split(":")
+        hostname = address[0]
+        if len(address) > 1:
+            port = address[1]
+
+    if len(args) != 1:
+        sys.stdout.write("No application supplied, or too much. See --help\n")
+        sys.exit(1)
+    app = import_string(args[0])
+
+    run_simple(
+        hostname=(hostname or "127.0.0.1"),
+        port=int(port or 5000),
+        application=app,
+        use_reloader=options.use_reloader,
+        use_debugger=options.use_debugger,
+    )
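+
+
+# CLI sketch (illustrative only): serve ``app`` from a hypothetical
+# ``myproject`` module on all interfaces, with reloader and debugger.
+#
+#     python -m werkzeug.serving -b 0.0.0.0:8000 -r -d myproject:app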
+
+
+if __name__ == "__main__":
+    main()
diff --git a/lib/python3.7/site-packages/werkzeug/test.py b/lib/python3.7/site-packages/werkzeug/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c9fae617e640c44e5b84795ad5d5438a8c02a89
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/test.py
@@ -0,0 +1,1146 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.test
+    ~~~~~~~~~~~~~
+
+    This module implements a client to WSGI applications for testing.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import mimetypes
+import sys
+from io import BytesIO
+from itertools import chain
+from random import random
+from tempfile import TemporaryFile
+from time import time
+
+from ._compat import iteritems
+from ._compat import iterlists
+from ._compat import itervalues
+from ._compat import make_literal_wrapper
+from ._compat import reraise
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import to_bytes
+from ._compat import wsgi_encoding_dance
+from ._internal import _get_environ
+from .datastructures import CallbackDict
+from .datastructures import CombinedMultiDict
+from .datastructures import EnvironHeaders
+from .datastructures import FileMultiDict
+from .datastructures import FileStorage
+from .datastructures import Headers
+from .datastructures import MultiDict
+from .http import dump_cookie
+from .http import dump_options_header
+from .http import parse_options_header
+from .urls import iri_to_uri
+from .urls import url_encode
+from .urls import url_fix
+from .urls import url_parse
+from .urls import url_unparse
+from .urls import url_unquote
+from .utils import get_content_type
+from .wrappers import BaseRequest
+from .wsgi import ClosingIterator
+from .wsgi import get_current_url
+
+try:
+    from urllib.request import Request as U2Request
+except ImportError:
+    from urllib2 import Request as U2Request
+
+try:
+    from http.cookiejar import CookieJar
+except ImportError:
+    from cookielib import CookieJar
+
+
+def stream_encode_multipart(
+    values, use_tempfile=True, threshold=1024 * 500, boundary=None, charset="utf-8"
+):
+    """Encode a dict of values (either strings or file descriptors or
+    :class:`FileStorage` objects.) into a multipart encoded string stored
+    in a file descriptor.
+    """
+    if boundary is None:
+        boundary = "---------------WerkzeugFormPart_%s%s" % (time(), random())
+    _closure = [BytesIO(), 0, False]
+
+    if use_tempfile:
+
+        def write_binary(string):
+            stream, total_length, on_disk = _closure
+            if on_disk:
+                stream.write(string)
+            else:
+                length = len(string)
+                if length + _closure[1] <= threshold:
+                    stream.write(string)
+                else:
+                    new_stream = TemporaryFile("wb+")
+                    new_stream.write(stream.getvalue())
+                    new_stream.write(string)
+                    _closure[0] = new_stream
+                    _closure[2] = True
+                _closure[1] = total_length + length
+
+    else:
+        write_binary = _closure[0].write
+
+    def write(string):
+        write_binary(string.encode(charset))
+
+    if not isinstance(values, MultiDict):
+        values = MultiDict(values)
+
+    for key, values in iterlists(values):
+        for value in values:
+            write('--%s\r\nContent-Disposition: form-data; name="%s"' % (boundary, key))
+            reader = getattr(value, "read", None)
+            if reader is not None:
+                filename = getattr(value, "filename", getattr(value, "name", None))
+                content_type = getattr(value, "content_type", None)
+                if content_type is None:
+                    content_type = (
+                        filename
+                        and mimetypes.guess_type(filename)[0]
+                        or "application/octet-stream"
+                    )
+                if filename is not None:
+                    write('; filename="%s"\r\n' % filename)
+                else:
+                    write("\r\n")
+                write("Content-Type: %s\r\n\r\n" % content_type)
+                while 1:
+                    chunk = reader(16384)
+                    if not chunk:
+                        break
+                    write_binary(chunk)
+            else:
+                if not isinstance(value, string_types):
+                    value = str(value)
+
+                value = to_bytes(value, charset)
+                write("\r\n\r\n")
+                write_binary(value)
+            write("\r\n")
+    write("--%s--\r\n" % boundary)
+
+    length = int(_closure[0].tell())
+    _closure[0].seek(0)
+    return _closure[0], length, boundary
+
+
+def encode_multipart(values, boundary=None, charset="utf-8"):
+    """Like `stream_encode_multipart` but returns a tuple in the form
+    (``boundary``, ``data``) where data is a bytestring.
+    """
+    stream, length, boundary = stream_encode_multipart(
+        values, use_tempfile=False, boundary=boundary, charset=charset
+    )
+    return boundary, stream.read()
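+
+
+# Usage sketch (illustrative only): encoding a form field plus a file
+# upload into a single multipart/form-data payload.
+#
+#     boundary, data = encode_multipart({
+#         "name": "value",
+#         "file": FileStorage(BytesIO(b"contents"), filename="test.txt"),
+#     })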
+
+
+def File(fd, filename=None, mimetype=None):
+    """Backwards compat.
+
+    .. deprecated:: 0.5
+    """
+    from warnings import warn
+
+    warn(
+        "'werkzeug.test.File' is deprecated as of version 0.5 and will"
+        " be removed in version 1.0. Use 'EnvironBuilder' or"
+        " 'FileStorage' instead.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return FileStorage(fd, filename=filename, content_type=mimetype)
+
+
+class _TestCookieHeaders(object):
+
+    """A headers adapter for cookielib
+    """
+
+    def __init__(self, headers):
+        self.headers = headers
+
+    def getheaders(self, name):
+        headers = []
+        name = name.lower()
+        for k, v in self.headers:
+            if k.lower() == name:
+                headers.append(v)
+        return headers
+
+    def get_all(self, name, default=None):
+        rv = []
+        for k, v in self.headers:
+            if k.lower() == name.lower():
+                rv.append(v)
+        return rv or default or []
+
+
+class _TestCookieResponse(object):
+
+    """Something that looks like a httplib.HTTPResponse, but is actually just an
+    adapter for our test responses to make them available for cookielib.
+    """
+
+    def __init__(self, headers):
+        self.headers = _TestCookieHeaders(headers)
+
+    def info(self):
+        return self.headers
+
+
+class _TestCookieJar(CookieJar):
+
+    """A cookielib.CookieJar modified to inject and read cookie headers from
+    and to wsgi environments, and wsgi application responses.
+    """
+
+    def inject_wsgi(self, environ):
+        """Inject the cookies as client headers into the server's wsgi
+        environment.
+        """
+        cvals = ["%s=%s" % (c.name, c.value) for c in self]
+
+        if cvals:
+            environ["HTTP_COOKIE"] = "; ".join(cvals)
+        else:
+            environ.pop("HTTP_COOKIE", None)
+
+    def extract_wsgi(self, environ, headers):
+        """Extract the server's set-cookie headers as cookies into the
+        cookie jar.
+        """
+        self.extract_cookies(
+            _TestCookieResponse(headers), U2Request(get_current_url(environ))
+        )
+
+
+def _iter_data(data):
+    """Iterates over a `dict` or :class:`MultiDict` yielding all keys and
+    values.
+    This is used to iterate over the data passed to the
+    :class:`EnvironBuilder`.
+    """
+    if isinstance(data, MultiDict):
+        for key, values in iterlists(data):
+            for value in values:
+                yield key, value
+    else:
+        for key, values in iteritems(data):
+            if isinstance(values, list):
+                for value in values:
+                    yield key, value
+            else:
+                yield key, values
+
+
+class EnvironBuilder(object):
+    """This class can be used to conveniently create a WSGI environment
+    for testing purposes.  It can be used to quickly create WSGI environments
+    or request objects from arbitrary data.
+
+    The signature of this class is also used in some other places as of
+    Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
+    :meth:`Client.open`).  Because of this most of the functionality is
+    available through the constructor alone.
+
+    Files and regular form data can be manipulated independently of each
+    other with the :attr:`form` and :attr:`files` attributes, but are
+    passed with the same argument to the constructor: `data`.
+
+    `data` can be any of these values:
+
+    -   a `str` or `bytes` object: The object is converted into an
+        :attr:`input_stream`, the :attr:`content_length` is set and you have to
+        provide a :attr:`content_type`.
+    -   a `dict` or :class:`MultiDict`: The keys have to be strings. The values
+        have to be either any of the following objects, or a list of any of the
+        following objects:
+
+        -   a :class:`file`-like object:  These are converted into
+            :class:`FileStorage` objects automatically.
+        -   a `tuple`:  The :meth:`~FileMultiDict.add_file` method is called
+            with the key and the unpacked `tuple` items as positional
+            arguments.
+        -   a `str`:  The string is set as form data for the associated key.
+    -   a file-like object: The object content is loaded in memory and then
+        handled like a regular `str` or a `bytes`.
+
+    :param path: the path of the request.  In the WSGI environment this will
+                 end up as `PATH_INFO`.  If the `query_string` is not defined
+                 and there is a question mark in the `path`, everything after
+                 it is used as the query string.
+    :param base_url: the base URL is a URL that is used to extract the WSGI
+                     URL scheme, host (server name + server port) and the
+                     script root (`SCRIPT_NAME`).
+    :param query_string: an optional string or dict with URL parameters.
+    :param method: the HTTP method to use, defaults to `GET`.
+    :param input_stream: an optional input stream.  Do not specify this and
+                         `data`.  As soon as an input stream is set you can't
+                         modify :attr:`args` and :attr:`files` unless you
+                         set the :attr:`input_stream` to `None` again.
+    :param content_type: The content type for the request.  As of 0.5 you
+                         don't have to provide this when specifying files
+                         and form data via `data`.
+    :param content_length: The content length for the request.  You don't
+                           have to specify this when providing data via
+                           `data`.
+    :param errors_stream: an optional error stream that is used for
+                          `wsgi.errors`.  Defaults to :data:`stderr`.
+    :param multithread: controls `wsgi.multithread`.  Defaults to `False`.
+    :param multiprocess: controls `wsgi.multiprocess`.  Defaults to `False`.
+    :param run_once: controls `wsgi.run_once`.  Defaults to `False`.
+    :param headers: an optional list or :class:`Headers` object of headers.
+    :param data: a string or dict of form data or a file-object.
+                 See explanation above.
+    :param json: An object to be serialized and assigned to ``data``.
+        Defaults the content type to ``"application/json"``.
+        Serialized with the function assigned to :attr:`json_dumps`.
+    :param environ_base: an optional dict of environment defaults.
+    :param environ_overrides: an optional dict of environment overrides.
+    :param charset: the charset used to encode unicode data.
+
+    .. versionadded:: 0.15
+        The ``json`` param and :meth:`json_dumps` method.
+
+    .. versionadded:: 0.15
+        The environ has keys ``REQUEST_URI`` and ``RAW_URI`` containing
+        the path before percent-decoding. This is not part of the WSGI
+        PEP, but many WSGI servers include it.
+
+    .. versionchanged:: 0.6
+       ``path`` and ``base_url`` can now be unicode strings that are
+       encoded with :func:`iri_to_uri`.
+    """
+
+    #: the server protocol to use.  defaults to HTTP/1.1
+    server_protocol = "HTTP/1.1"
+
+    #: the wsgi version to use.  defaults to (1, 0)
+    wsgi_version = (1, 0)
+
+    #: the default request class for :meth:`get_request`
+    request_class = BaseRequest
+
+    import json
+
+    #: The serialization function used when ``json`` is passed.
+    json_dumps = staticmethod(json.dumps)
+    del json
+
+    def __init__(
+        self,
+        path="/",
+        base_url=None,
+        query_string=None,
+        method="GET",
+        input_stream=None,
+        content_type=None,
+        content_length=None,
+        errors_stream=None,
+        multithread=False,
+        multiprocess=False,
+        run_once=False,
+        headers=None,
+        data=None,
+        environ_base=None,
+        environ_overrides=None,
+        charset="utf-8",
+        mimetype=None,
+        json=None,
+    ):
+        path_s = make_literal_wrapper(path)
+        if query_string is not None and path_s("?") in path:
+            raise ValueError("Query string is defined in the path and as an argument")
+        if query_string is None and path_s("?") in path:
+            path, query_string = path.split(path_s("?"), 1)
+        self.charset = charset
+        self.path = iri_to_uri(path)
+        if base_url is not None:
+            base_url = url_fix(iri_to_uri(base_url, charset), charset)
+        self.base_url = base_url
+        if isinstance(query_string, (bytes, text_type)):
+            self.query_string = query_string
+        else:
+            if query_string is None:
+                query_string = MultiDict()
+            elif not isinstance(query_string, MultiDict):
+                query_string = MultiDict(query_string)
+            self.args = query_string
+        self.method = method
+        if headers is None:
+            headers = Headers()
+        elif not isinstance(headers, Headers):
+            headers = Headers(headers)
+        self.headers = headers
+        if content_type is not None:
+            self.content_type = content_type
+        if errors_stream is None:
+            errors_stream = sys.stderr
+        self.errors_stream = errors_stream
+        self.multithread = multithread
+        self.multiprocess = multiprocess
+        self.run_once = run_once
+        self.environ_base = environ_base
+        self.environ_overrides = environ_overrides
+        self.input_stream = input_stream
+        self.content_length = content_length
+        self.closed = False
+
+        if json is not None:
+            if data is not None:
+                raise TypeError("can't provide both json and data")
+
+            data = self.json_dumps(json)
+
+            if self.content_type is None:
+                self.content_type = "application/json"
+
+        if data:
+            if input_stream is not None:
+                raise TypeError("can't provide input stream and data")
+            if hasattr(data, "read"):
+                data = data.read()
+            if isinstance(data, text_type):
+                data = data.encode(self.charset)
+            if isinstance(data, bytes):
+                self.input_stream = BytesIO(data)
+                if self.content_length is None:
+                    self.content_length = len(data)
+            else:
+                for key, value in _iter_data(data):
+                    if isinstance(value, (tuple, dict)) or hasattr(value, "read"):
+                        self._add_file_from_data(key, value)
+                    else:
+                        self.form.setlistdefault(key).append(value)
+
+        if mimetype is not None:
+            self.mimetype = mimetype
+
+    @classmethod
+    def from_environ(cls, environ, **kwargs):
+        """Turn an environ dict back into a builder. Any extra kwargs
+        override the args extracted from the environ.
+
+        .. versionadded:: 0.15
+        """
+        headers = Headers(EnvironHeaders(environ))
+        out = {
+            "path": environ["PATH_INFO"],
+            "base_url": cls._make_base_url(
+                environ["wsgi.url_scheme"], headers.pop("Host"), environ["SCRIPT_NAME"]
+            ),
+            "query_string": environ["QUERY_STRING"],
+            "method": environ["REQUEST_METHOD"],
+            "input_stream": environ["wsgi.input"],
+            "content_type": headers.pop("Content-Type", None),
+            "content_length": headers.pop("Content-Length", None),
+            "errors_stream": environ["wsgi.errors"],
+            "multithread": environ["wsgi.multithread"],
+            "multiprocess": environ["wsgi.multiprocess"],
+            "run_once": environ["wsgi.run_once"],
+            "headers": headers,
+        }
+        out.update(kwargs)
+        return cls(**out)
+
+    def _add_file_from_data(self, key, value):
+        """Called in the EnvironBuilder to add files from the data dict."""
+        if isinstance(value, tuple):
+            self.files.add_file(key, *value)
+        elif isinstance(value, dict):
+            from warnings import warn
+
+            warn(
+                "Passing a dict as file data is deprecated as of"
+                " version 0.5 and will be removed in version 1.0. Use"
+                " a tuple or 'FileStorage' object instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+            value = dict(value)
+            mimetype = value.pop("mimetype", None)
+            if mimetype is not None:
+                value["content_type"] = mimetype
+            self.files.add_file(key, **value)
+        else:
+            self.files.add_file(key, value)
+
+    @staticmethod
+    def _make_base_url(scheme, host, script_root):
+        return url_unparse((scheme, host, script_root, "", "")).rstrip("/") + "/"
+
+    @property
+    def base_url(self):
+        """The base URL is used to extract the URL scheme, host name,
+        port, and root path.
+        """
+        return self._make_base_url(self.url_scheme, self.host, self.script_root)
+
+    @base_url.setter
+    def base_url(self, value):
+        if value is None:
+            scheme = "http"
+            netloc = "localhost"
+            script_root = ""
+        else:
+            scheme, netloc, script_root, qs, anchor = url_parse(value)
+            if qs or anchor:
+                raise ValueError("base url must not contain a query string or fragment")
+        self.script_root = script_root.rstrip("/")
+        self.host = netloc
+        self.url_scheme = scheme
+
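+    # Illustrative sketch (not part of Werkzeug): assigning ``base_url``
+    # splits it into its parts; the values below are hypothetical.
+    #
+    #   builder = EnvironBuilder(base_url="https://example.org/app/")
+    #   assert builder.url_scheme == "https"
+    #   assert builder.host == "example.org"
+    #   assert builder.script_root == "/app"
+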
+    def _get_content_type(self):
+        ct = self.headers.get("Content-Type")
+        if ct is None and not self._input_stream:
+            if self._files:
+                return "multipart/form-data"
+            elif self._form:
+                return "application/x-www-form-urlencoded"
+            return None
+        return ct
+
+    def _set_content_type(self, value):
+        if value is None:
+            self.headers.pop("Content-Type", None)
+        else:
+            self.headers["Content-Type"] = value
+
+    content_type = property(
+        _get_content_type,
+        _set_content_type,
+        doc="""The content type for the request.  Reflected from and to
+        the :attr:`headers`.  Do not set if you set :attr:`files` or
+        :attr:`form` for auto detection.""",
+    )
+    del _get_content_type, _set_content_type
+
+    def _get_content_length(self):
+        return self.headers.get("Content-Length", type=int)
+
+    def _get_mimetype(self):
+        ct = self.content_type
+        if ct:
+            return ct.split(";")[0].strip()
+
+    def _set_mimetype(self, value):
+        self.content_type = get_content_type(value, self.charset)
+
+    def _get_mimetype_params(self):
+        def on_update(d):
+            self.headers["Content-Type"] = dump_options_header(self.mimetype, d)
+
+        d = parse_options_header(self.headers.get("content-type", ""))[1]
+        return CallbackDict(d, on_update)
+
+    mimetype = property(
+        _get_mimetype,
+        _set_mimetype,
+        doc="""The mimetype (content type without charset etc.)
+
+        .. versionadded:: 0.14
+    """,
+    )
+    mimetype_params = property(
+        _get_mimetype_params,
+        doc=""" The mimetype parameters as dict.  For example if the
+        content type is ``text/html; charset=utf-8`` the params would be
+        ``{'charset': 'utf-8'}``.
+
+        .. versionadded:: 0.14
+        """,
+    )
+    del _get_mimetype, _set_mimetype, _get_mimetype_params
+
+    def _set_content_length(self, value):
+        if value is None:
+            self.headers.pop("Content-Length", None)
+        else:
+            self.headers["Content-Length"] = str(value)
+
+    content_length = property(
+        _get_content_length,
+        _set_content_length,
+        doc="""The content length as integer.  Reflected from and to the
+        :attr:`headers`.  Do not set if you set :attr:`files` or
+        :attr:`form` for auto detection.""",
+    )
+    del _get_content_length, _set_content_length
+
+    def form_property(name, storage, doc):  # noqa: B902
+        key = "_" + name
+
+        def getter(self):
+            if self._input_stream is not None:
+                raise AttributeError("an input stream is defined")
+            rv = getattr(self, key)
+            if rv is None:
+                rv = storage()
+                setattr(self, key, rv)
+
+            return rv
+
+        def setter(self, value):
+            self._input_stream = None
+            setattr(self, key, value)
+
+        return property(getter, setter, doc=doc)
+
+    form = form_property("form", MultiDict, doc="A :class:`MultiDict` of form values.")
+    files = form_property(
+        "files",
+        FileMultiDict,
+        doc="""A :class:`FileMultiDict` of uploaded files.  You can use
+        the :meth:`~FileMultiDict.add_file` method to add new files to
+        the dict.""",
+    )
+    del form_property
+
+    def _get_input_stream(self):
+        return self._input_stream
+
+    def _set_input_stream(self, value):
+        self._input_stream = value
+        self._form = self._files = None
+
+    input_stream = property(
+        _get_input_stream,
+        _set_input_stream,
+        doc="""An optional input stream.  If you set this it will clear
+        :attr:`form` and :attr:`files`.""",
+    )
+    del _get_input_stream, _set_input_stream
+
+    def _get_query_string(self):
+        if self._query_string is None:
+            if self._args is not None:
+                return url_encode(self._args, charset=self.charset)
+            return ""
+        return self._query_string
+
+    def _set_query_string(self, value):
+        self._query_string = value
+        self._args = None
+
+    query_string = property(
+        _get_query_string,
+        _set_query_string,
+        doc="""The query string.  If you set this to a string
+        :attr:`args` will no longer be available.""",
+    )
+    del _get_query_string, _set_query_string
+
+    def _get_args(self):
+        if self._query_string is not None:
+            raise AttributeError("a query string is defined")
+        if self._args is None:
+            self._args = MultiDict()
+        return self._args
+
+    def _set_args(self, value):
+        self._query_string = None
+        self._args = value
+
+    args = property(
+        _get_args, _set_args, doc="The URL arguments as :class:`MultiDict`."
+    )
+    del _get_args, _set_args
+
+    @property
+    def server_name(self):
+        """The server name (read-only, use :attr:`host` to set)"""
+        return self.host.split(":", 1)[0]
+
+    @property
+    def server_port(self):
+        """The server port as integer (read-only, use :attr:`host` to set)"""
+        pieces = self.host.split(":", 1)
+        if len(pieces) == 2 and pieces[1].isdigit():
+            return int(pieces[1])
+        elif self.url_scheme == "https":
+            return 443
+        return 80
+
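+    # Illustrative sketch (not part of Werkzeug): both values derive from
+    # ``host``, with the port falling back to the scheme's default.
+    #
+    #   builder = EnvironBuilder(base_url="https://example.org/")
+    #   assert builder.server_name == "example.org"
+    #   assert builder.server_port == 443
+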
+    def __del__(self):
+        try:
+            self.close()
+        except Exception:
+            pass
+
+    def close(self):
+        """Closes all files.  If you put real :class:`file` objects into the
+        :attr:`files` dict you can call this method to automatically close
+        them all in one go.
+        """
+        if self.closed:
+            return
+        try:
+            files = itervalues(self.files)
+        except AttributeError:
+            files = ()
+        for f in files:
+            try:
+                f.close()
+            except Exception:
+                pass
+        self.closed = True
+
+    def get_environ(self):
+        """Return the built environ.
+
+        .. versionchanged:: 0.15
+            The content type and length headers are set based on
+            input stream detection. Previously this only set the WSGI
+            keys.
+        """
+        input_stream = self.input_stream
+        content_length = self.content_length
+
+        mimetype = self.mimetype
+        content_type = self.content_type
+
+        if input_stream is not None:
+            start_pos = input_stream.tell()
+            input_stream.seek(0, 2)
+            end_pos = input_stream.tell()
+            input_stream.seek(start_pos)
+            content_length = end_pos - start_pos
+        elif mimetype == "multipart/form-data":
+            values = CombinedMultiDict([self.form, self.files])
+            input_stream, content_length, boundary = stream_encode_multipart(
+                values, charset=self.charset
+            )
+            content_type = mimetype + '; boundary="%s"' % boundary
+        elif mimetype == "application/x-www-form-urlencoded":
+            # XXX: py2v3 review
+            values = url_encode(self.form, charset=self.charset)
+            values = values.encode("ascii")
+            content_length = len(values)
+            input_stream = BytesIO(values)
+        else:
+            input_stream = BytesIO()
+
+        result = {}
+        if self.environ_base:
+            result.update(self.environ_base)
+
+        def _path_encode(x):
+            return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)
+
+        qs = wsgi_encoding_dance(self.query_string)
+
+        result.update(
+            {
+                "REQUEST_METHOD": self.method,
+                "SCRIPT_NAME": _path_encode(self.script_root),
+                "PATH_INFO": _path_encode(self.path),
+                "QUERY_STRING": qs,
+                # Non-standard, added by mod_wsgi, uWSGI
+                "REQUEST_URI": wsgi_encoding_dance(self.path),
+                # Non-standard, added by gunicorn
+                "RAW_URI": wsgi_encoding_dance(self.path),
+                "SERVER_NAME": self.server_name,
+                "SERVER_PORT": str(self.server_port),
+                "HTTP_HOST": self.host,
+                "SERVER_PROTOCOL": self.server_protocol,
+                "wsgi.version": self.wsgi_version,
+                "wsgi.url_scheme": self.url_scheme,
+                "wsgi.input": input_stream,
+                "wsgi.errors": self.errors_stream,
+                "wsgi.multithread": self.multithread,
+                "wsgi.multiprocess": self.multiprocess,
+                "wsgi.run_once": self.run_once,
+            }
+        )
+
+        headers = self.headers.copy()
+
+        if content_type is not None:
+            result["CONTENT_TYPE"] = content_type
+            headers.set("Content-Type", content_type)
+
+        if content_length is not None:
+            result["CONTENT_LENGTH"] = str(content_length)
+            headers.set("Content-Length", content_length)
+
+        for key, value in headers.to_wsgi_list():
+            result["HTTP_%s" % key.upper().replace("-", "_")] = value
+
+        if self.environ_overrides:
+            result.update(self.environ_overrides)
+
+        return result
+
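+    # Illustrative sketch (not part of Werkzeug): for a simple form post the
+    # built environ carries matching WSGI keys and headers.
+    #
+    #   builder = EnvironBuilder(method="POST", data={"name": "value"})
+    #   env = builder.get_environ()
+    #   assert env["CONTENT_TYPE"] == "application/x-www-form-urlencoded"
+    #   assert env["CONTENT_LENGTH"] == "10"
+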
+    def get_request(self, cls=None):
+        """Returns a request with the data.  If the request class is not
+        specified :attr:`request_class` is used.
+
+        :param cls: The request wrapper to use.
+        """
+        if cls is None:
+            cls = self.request_class
+        return cls(self.get_environ())
+
+
+class ClientRedirectError(Exception):
+    """If a redirect loop is detected when using follow_redirects=True with
+    the :class:`Client`, then this exception is raised.
+    """
+
+
+class Client(object):
+    """This class allows to send requests to a wrapped application.
+
+    The response wrapper can be a class or factory function that takes
+    three arguments: app_iter, status and headers.  The default response
+    wrapper just returns a tuple.
+
+    Example::
+
+        class ClientResponse(BaseResponse):
+            ...
+
+        client = Client(MyApplication(), response_wrapper=ClientResponse)
+
+    The use_cookies parameter indicates whether cookies should be stored and
+    sent for subsequent requests. This is True by default, but passing False
+    will disable this behaviour.
+
+    If you want to request some subdomain of your application you may set
+    `allow_subdomain_redirects` to `True`; otherwise no external redirects
+    are allowed.
+
+    .. versionadded:: 0.5
+       `use_cookies` is new in this version.  Older versions did not provide
+       builtin cookie support.
+
+    .. versionadded:: 0.14
+       The `mimetype` parameter was added.
+
+    .. versionadded:: 0.15
+        The ``json`` parameter.
+    """
+
+    def __init__(
+        self,
+        application,
+        response_wrapper=None,
+        use_cookies=True,
+        allow_subdomain_redirects=False,
+    ):
+        self.application = application
+        self.response_wrapper = response_wrapper
+        if use_cookies:
+            self.cookie_jar = _TestCookieJar()
+        else:
+            self.cookie_jar = None
+        self.allow_subdomain_redirects = allow_subdomain_redirects
+
+    def set_cookie(
+        self,
+        server_name,
+        key,
+        value="",
+        max_age=None,
+        expires=None,
+        path="/",
+        domain=None,
+        secure=None,
+        httponly=False,
+        charset="utf-8",
+    ):
+        """Sets a cookie in the client's cookie jar.  The server name
+        is required and has to match the one that is also passed to
+        the open call.
+        """
+        assert self.cookie_jar is not None, "cookies disabled"
+        header = dump_cookie(
+            key, value, max_age, expires, path, domain, secure, httponly, charset
+        )
+        environ = create_environ(path, base_url="http://" + server_name)
+        headers = [("Set-Cookie", header)]
+        self.cookie_jar.extract_wsgi(environ, headers)
+
+    def delete_cookie(self, server_name, key, path="/", domain=None):
+        """Deletes a cookie in the test client."""
+        self.set_cookie(
+            server_name, key, expires=0, max_age=0, path=path, domain=domain
+        )
+
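+    # Illustrative sketch (not part of Werkzeug): cookies placed in the jar
+    # are sent with later requests to the same server name.  ``app`` is a
+    # hypothetical WSGI application.
+    #
+    #   client = Client(app, BaseResponse)
+    #   client.set_cookie("localhost", "session", "abc123")
+    #   client.get("/")  # carries ``Cookie: session=abc123``
+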
+    def run_wsgi_app(self, environ, buffered=False):
+        """Runs the wrapped WSGI app with the given environment."""
+        if self.cookie_jar is not None:
+            self.cookie_jar.inject_wsgi(environ)
+        rv = run_wsgi_app(self.application, environ, buffered=buffered)
+        if self.cookie_jar is not None:
+            self.cookie_jar.extract_wsgi(environ, rv[2])
+        return rv
+
+    def resolve_redirect(self, response, new_location, environ, buffered=False):
+        """Perform a new request to the location given by the redirect
+        response to the previous request.
+        """
+        scheme, netloc, path, qs, anchor = url_parse(new_location)
+        builder = EnvironBuilder.from_environ(environ, query_string=qs)
+
+        to_name_parts = netloc.split(":", 1)[0].split(".")
+        from_name_parts = builder.server_name.split(".")
+
+        if to_name_parts != [""]:
+            # The new location has a host, use it for the base URL.
+            builder.url_scheme = scheme
+            builder.host = netloc
+        else:
+            # A local redirect with autocorrect_location_header=False
+            # doesn't have a host, so use the request's host.
+            to_name_parts = from_name_parts
+
+        # Explain why a redirect to a different server name won't be followed.
+        if to_name_parts != from_name_parts:
+            if to_name_parts[-len(from_name_parts) :] == from_name_parts:
+                if not self.allow_subdomain_redirects:
+                    raise RuntimeError("Following subdomain redirects is not enabled.")
+            else:
+                raise RuntimeError("Following external redirects is not supported.")
+
+        path_parts = path.split("/")
+        root_parts = builder.script_root.split("/")
+
+        if path_parts[: len(root_parts)] == root_parts:
+            # Strip the script root from the path.
+            builder.path = path[len(builder.script_root) :]
+        else:
+            # The new location is not under the script root, so use the
+            # whole path and clear the previous root.
+            builder.path = path
+            builder.script_root = ""
+
+        status_code = int(response[1].split(None, 1)[0])
+
+        # Only 307 and 308 preserve all of the original request.
+        if status_code not in {307, 308}:
+            # HEAD is preserved, everything else becomes GET.
+            if builder.method != "HEAD":
+                builder.method = "GET"
+
+            # Clear the body and the headers that describe it.
+            builder.input_stream = None
+            builder.content_type = None
+            builder.content_length = None
+            builder.headers.pop("Transfer-Encoding", None)
+
+        # Disable the response wrapper while handling redirects. Not
+        # thread safe, but the client should not be shared anyway.
+        old_response_wrapper = self.response_wrapper
+        self.response_wrapper = None
+
+        try:
+            return self.open(builder, as_tuple=True, buffered=buffered)
+        finally:
+            self.response_wrapper = old_response_wrapper
+
+    def open(self, *args, **kwargs):
+        """Takes the same arguments as the :class:`EnvironBuilder` class with
+        some additions:  You can provide a :class:`EnvironBuilder` or a WSGI
+        environment as only argument instead of the :class:`EnvironBuilder`
+        arguments and two optional keyword arguments (`as_tuple`, `buffered`)
+        that change the type of the return value or the way the application is
+        executed.
+
+        .. versionchanged:: 0.5
+           If a dict is provided as a file in the `data` parameter the
+           content type has to be called `content_type` now instead of
+           `mimetype`.  This change was made for consistency with
+           :class:`werkzeug.FileWrapper`.
+
+           The `follow_redirects` parameter was added to :func:`open`.
+
+        Additional parameters:
+
+        :param as_tuple: Returns a tuple in the form ``(environ, result)``
+        :param buffered: Set this to True to buffer the application run.
+                         This will automatically close the application for
+                         you as well.
+        :param follow_redirects: Set this to True if the `Client` should
+                                 follow HTTP redirects.
+        """
+        as_tuple = kwargs.pop("as_tuple", False)
+        buffered = kwargs.pop("buffered", False)
+        follow_redirects = kwargs.pop("follow_redirects", False)
+        environ = None
+        if not kwargs and len(args) == 1:
+            if isinstance(args[0], EnvironBuilder):
+                environ = args[0].get_environ()
+            elif isinstance(args[0], dict):
+                environ = args[0]
+        if environ is None:
+            builder = EnvironBuilder(*args, **kwargs)
+            try:
+                environ = builder.get_environ()
+            finally:
+                builder.close()
+
+        response = self.run_wsgi_app(environ.copy(), buffered=buffered)
+
+        # handle redirects
+        redirect_chain = []
+        while 1:
+            status_code = int(response[1].split(None, 1)[0])
+            if (
+                status_code not in {301, 302, 303, 305, 307, 308}
+                or not follow_redirects
+            ):
+                break
+
+            # Exhaust intermediate response bodies to ensure middleware
+            # that returns an iterator runs any cleanup code.
+            if not buffered:
+                for _ in response[0]:
+                    pass
+
+            new_location = response[2]["location"]
+            new_redirect_entry = (new_location, status_code)
+            if new_redirect_entry in redirect_chain:
+                raise ClientRedirectError("loop detected")
+            redirect_chain.append(new_redirect_entry)
+            environ, response = self.resolve_redirect(
+                response, new_location, environ, buffered=buffered
+            )
+
+        if self.response_wrapper is not None:
+            response = self.response_wrapper(*response)
+        if as_tuple:
+            return environ, response
+        return response
+
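+    # Illustrative sketch (not part of Werkzeug): ``open`` accepts the
+    # EnvironBuilder arguments directly.  ``app`` and the form values are
+    # hypothetical, and the assert assumes the app answers with 200.
+    #
+    #   client = Client(app, BaseResponse)
+    #   resp = client.open("/login", method="POST",
+    #                      data={"user": "alice"}, follow_redirects=True)
+    #   assert resp.status_code == 200
+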
+    def get(self, *args, **kw):
+        """Like open but method is enforced to GET."""
+        kw["method"] = "GET"
+        return self.open(*args, **kw)
+
+    def patch(self, *args, **kw):
+        """Like open but method is enforced to PATCH."""
+        kw["method"] = "PATCH"
+        return self.open(*args, **kw)
+
+    def post(self, *args, **kw):
+        """Like open but method is enforced to POST."""
+        kw["method"] = "POST"
+        return self.open(*args, **kw)
+
+    def head(self, *args, **kw):
+        """Like open but method is enforced to HEAD."""
+        kw["method"] = "HEAD"
+        return self.open(*args, **kw)
+
+    def put(self, *args, **kw):
+        """Like open but method is enforced to PUT."""
+        kw["method"] = "PUT"
+        return self.open(*args, **kw)
+
+    def delete(self, *args, **kw):
+        """Like open but method is enforced to DELETE."""
+        kw["method"] = "DELETE"
+        return self.open(*args, **kw)
+
+    def options(self, *args, **kw):
+        """Like open but method is enforced to OPTIONS."""
+        kw["method"] = "OPTIONS"
+        return self.open(*args, **kw)
+
+    def trace(self, *args, **kw):
+        """Like open but method is enforced to TRACE."""
+        kw["method"] = "TRACE"
+        return self.open(*args, **kw)
+
+    def __repr__(self):
+        return "<%s %r>" % (self.__class__.__name__, self.application)
+
+
+def create_environ(*args, **kwargs):
+    """Create a new WSGI environ dict based on the values passed.  The first
+    parameter should be the path of the request which defaults to '/'.  The
+    second one can either be an absolute path (in that case the host is
+    localhost:80) or a full path to the request with scheme, netloc, port and
+    the path to the script.
+
+    This accepts the same arguments as the :class:`EnvironBuilder`
+    constructor.
+
+    .. versionchanged:: 0.5
+       This function is now a thin wrapper over :class:`EnvironBuilder` which
+       was added in 0.5.  The `headers`, `environ_base`, `environ_overrides`
+       and `charset` parameters were added.
+    """
+    builder = EnvironBuilder(*args, **kwargs)
+    try:
+        return builder.get_environ()
+    finally:
+        builder.close()
+
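+# Illustrative sketch (not part of Werkzeug): ``create_environ`` splits a
+# query string off the path and fills in the WSGI keys.
+#
+#   >>> env = create_environ("/foo?bar=baz", "http://example.org/")
+#   >>> env["PATH_INFO"], env["QUERY_STRING"]
+#   ('/foo', 'bar=baz')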
+
+def run_wsgi_app(app, environ, buffered=False):
+    """Return a tuple in the form (app_iter, status, headers) of the
+    application output.  This works best if you pass it an application that
+    returns an iterator all the time.
+
+    Sometimes applications may use the `write()` callable returned
+    by the `start_response` function.  This tries to resolve such edge
+    cases automatically.  But if you don't get the expected output you
+    should set `buffered` to `True` which enforces buffering.
+
+    If passed an invalid WSGI application the behavior of this function is
+    undefined.  Never pass non-conforming WSGI applications to this function.
+
+    :param app: the application to execute.
+    :param buffered: set to `True` to enforce buffering.
+    :return: tuple in the form ``(app_iter, status, headers)``
+    """
+    environ = _get_environ(environ)
+    response = []
+    buffer = []
+
+    def start_response(status, headers, exc_info=None):
+        if exc_info is not None:
+            reraise(*exc_info)
+        response[:] = [status, headers]
+        return buffer.append
+
+    app_rv = app(environ, start_response)
+    close_func = getattr(app_rv, "close", None)
+    app_iter = iter(app_rv)
+
+    # when buffering we emit the close call early and convert the
+    # application iterator into a regular list
+    if buffered:
+        try:
+            app_iter = list(app_iter)
+        finally:
+            if close_func is not None:
+                close_func()
+
+    # otherwise we iterate the application iter until we have a response, chain
+    # the already received data with the already collected data and wrap it in
+    # a new `ClosingIterator` if we need to restore a `close` callable from the
+    # original return value.
+    else:
+        for item in app_iter:
+            buffer.append(item)
+            if response:
+                break
+        if buffer:
+            app_iter = chain(buffer, app_iter)
+        if close_func is not None and app_iter is not app_rv:
+            app_iter = ClosingIterator(app_iter, close_func)
+
+    return app_iter, response[0], Headers(response[1])
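+
+
+# Illustrative sketch (not part of Werkzeug): ``run_wsgi_app`` can drive any
+# WSGI callable directly; ``demo_app`` below is hypothetical.
+#
+#   def demo_app(environ, start_response):
+#       start_response("200 OK", [("Content-Type", "text/plain")])
+#       return [b"hello"]
+#
+#   app_iter, status, headers = run_wsgi_app(demo_app, create_environ("/"))
+#   assert status == "200 OK" and b"".join(app_iter) == b"hello"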
diff --git a/lib/python3.7/site-packages/werkzeug/testapp.py b/lib/python3.7/site-packages/werkzeug/testapp.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ea23bee13a295a4f2a5eb2dd048fc4b87a644c0
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/testapp.py
@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.testapp
+    ~~~~~~~~~~~~~~~~
+
+    Provide a small test application that can be used to test a WSGI server
+    and check it for WSGI compliance.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import base64
+import os
+import sys
+from textwrap import wrap
+
+import werkzeug
+from .utils import escape
+from .wrappers import BaseRequest as Request
+from .wrappers import BaseResponse as Response
+
+logo = Response(
+    base64.b64decode(
+        """
+R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
+//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
+nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
+7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
+ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
+m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
+p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
+SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
+78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
+ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
+tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
+w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
+lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
+Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
+yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
+dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
+idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
+EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
+ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
+gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
+JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
+Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
+YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
+c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
+qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
+cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
+cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
+KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
+EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
+UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
+Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
+aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
+kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
+="""
+    ),
+    mimetype="image/png",
+)
+
+
+TEMPLATE = u"""\
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+  "http://www.w3.org/TR/html4/loose.dtd">
+<title>WSGI Information</title>
+<style type="text/css">
+  @import url(https://fonts.googleapis.com/css?family=Ubuntu);
+
+  body       { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
+               'Verdana', sans-serif; background-color: white; color: #000;
+               font-size: 15px; text-align: center; }
+  #logo      { float: right; padding: 0 0 10px 10px; }
+  div.box    { text-align: left; width: 45em; margin: auto; padding: 50px 0;
+               background-color: white; }
+  h1, h2     { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
+               'Geneva', 'Verdana', sans-serif; font-weight: normal; }
+  h1         { margin: 0 0 30px 0; }
+  h2         { font-size: 1.4em; margin: 1em 0 0.5em 0; }
+  table      { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
+  table th   { background-color: #AFC1C4; color: white; font-size: 0.72em;
+               font-weight: normal; width: 18em; vertical-align: top;
+               padding: 0.5em 0 0.1em 0.5em; }
+  table td   { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
+  code       { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
+               monospace; font-size: 0.7em; }
+  ul li      { line-height: 1.5em; }
+  ul.path    { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
+               list-style: none; background: #E8EFF0; }
+  ul.path li { line-height: 1.6em; }
+  li.virtual { color: #999; text-decoration: underline; }
+  li.exp     { background: white; }
+</style>
+<div class="box">
+  <img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
+  <h1>WSGI Information</h1>
+  <p>
+    This page displays all available information about the WSGI server and
+    the underlying Python interpreter.
+  <h2 id="python-interpreter">Python Interpreter</h2>
+  <table>
+    <tr>
+      <th>Python Version
+      <td>%(python_version)s
+    <tr>
+      <th>Platform
+      <td>%(platform)s [%(os)s]
+    <tr>
+      <th>API Version
+      <td>%(api_version)s
+    <tr>
+      <th>Byteorder
+      <td>%(byteorder)s
+    <tr>
+      <th>Werkzeug Version
+      <td>%(werkzeug_version)s
+  </table>
+  <h2 id="wsgi-environment">WSGI Environment</h2>
+  <table>%(wsgi_env)s</table>
+  <h2 id="installed-eggs">Installed Eggs</h2>
+  <p>
+    The following python packages were installed on the system as
+    Python eggs:
+  <ul>%(python_eggs)s</ul>
+  <h2 id="sys-path">System Path</h2>
+  <p>
+    The following paths are the current contents of the load path.  These
+    entries are searched for Python packages.  Note that not
+    all items in this path are folders.  Gray and underlined items are
+    entries pointing to invalid resources or used by custom import hooks
+    such as the zip importer.
+  <p>
+    Items with a bright background were expanded for display from a relative
+    path.  If you encounter such paths in the output you might want to check
+    your setup as relative paths are usually problematic in multithreaded
+    environments.
+  <ul class="path">%(sys_path)s</ul>
+</div>
+"""
+
+
+def iter_sys_path():
+    if os.name == "posix":
+
+        def strip(x):
+            prefix = os.path.expanduser("~")
+            if x.startswith(prefix):
+                x = "~" + x[len(prefix) :]
+            return x
+
+    else:
+
+        def strip(x):
+            return x
+
+    cwd = os.path.abspath(os.getcwd())
+    for item in sys.path:
+        path = os.path.join(cwd, item or os.path.curdir)
+        yield strip(os.path.normpath(path)), not os.path.isdir(path), path != item
+
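+# Illustrative sketch (not part of Werkzeug): each entry yields the display
+# path, whether it is missing on disk ("virtual"), and whether it was
+# expanded from a relative path; the values are machine-dependent.
+#
+#   >>> next(iter_sys_path())  # doctest: +SKIP
+#   ('~/project', False, True)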
+
+def render_testapp(req):
+    try:
+        import pkg_resources
+    except ImportError:
+        eggs = ()
+    else:
+        eggs = sorted(pkg_resources.working_set, key=lambda x: x.project_name.lower())
+    python_eggs = []
+    for egg in eggs:
+        try:
+            version = egg.version
+        except (ValueError, AttributeError):
+            version = "unknown"
+        python_eggs.append(
+            "<li>%s <small>[%s]</small>" % (escape(egg.project_name), escape(version))
+        )
+
+    wsgi_env = []
+    sorted_environ = sorted(req.environ.items(), key=lambda x: repr(x[0]).lower())
+    for key, value in sorted_environ:
+        wsgi_env.append(
+            "<tr><th>%s<td><code>%s</code>"
+            % (escape(str(key)), " ".join(wrap(escape(repr(value)))))
+        )
+
+    sys_path = []
+    for item, virtual, expanded in iter_sys_path():
+        class_ = []
+        if virtual:
+            class_.append("virtual")
+        if expanded:
+            class_.append("exp")
+        sys_path.append(
+            "<li%s>%s"
+            % (' class="%s"' % " ".join(class_) if class_ else "", escape(item))
+        )
+
+    return (
+        TEMPLATE
+        % {
+            "python_version": "<br>".join(escape(sys.version).splitlines()),
+            "platform": escape(sys.platform),
+            "os": escape(os.name),
+            "api_version": sys.api_version,
+            "byteorder": sys.byteorder,
+            "werkzeug_version": werkzeug.__version__,
+            "python_eggs": "\n".join(python_eggs),
+            "wsgi_env": "\n".join(wsgi_env),
+            "sys_path": "\n".join(sys_path),
+        }
+    ).encode("utf-8")
+
+
+def test_app(environ, start_response):
+    """Simple test application that dumps the environment.  You can use
+    it to check if Werkzeug is working properly:
+
+    .. sourcecode:: pycon
+
+        >>> from werkzeug.serving import run_simple
+        >>> from werkzeug.testapp import test_app
+        >>> run_simple('localhost', 3000, test_app)
+         * Running on http://localhost:3000/
+
+    The application displays important information from the WSGI environment,
+    the Python interpreter and the installed libraries.
+    """
+    req = Request(environ, populate_request=False)
+    if req.args.get("resource") == "logo":
+        response = logo
+    else:
+        response = Response(render_testapp(req), mimetype="text/html")
+    return response(environ, start_response)
+
+
+if __name__ == "__main__":
+    from .serving import run_simple
+
+    run_simple("localhost", 5000, test_app, use_reloader=True)
diff --git a/lib/python3.7/site-packages/werkzeug/urls.py b/lib/python3.7/site-packages/werkzeug/urls.py
new file mode 100644
index 0000000000000000000000000000000000000000..38e9e5adf22b4d369fb0b70bc186863047a5043e
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/urls.py
@@ -0,0 +1,1134 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.urls
+    ~~~~~~~~~~~~~
+
+    ``werkzeug.urls`` used to provide several wrapper functions for Python 2
+    urlparse, whose main purpose were to work around the behavior of the Py2
+    stdlib and its lack of unicode support. While this was already a somewhat
+    inconvenient situation, it got even more complicated because Python 3's
+    ``urllib.parse`` actually does handle unicode properly. In other words,
+    this module would wrap two libraries with completely different behavior. So
+    now this module contains a 2-and-3-compatible backport of Python 3's
+    ``urllib.parse``, which is mostly API-compatible.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import codecs
+import os
+import re
+from collections import namedtuple
+
+from ._compat import fix_tuple_repr
+from ._compat import implements_to_string
+from ._compat import make_literal_wrapper
+from ._compat import normalize_string_tuple
+from ._compat import PY2
+from ._compat import text_type
+from ._compat import to_native
+from ._compat import to_unicode
+from ._compat import try_coerce_native
+from ._internal import _decode_idna
+from ._internal import _encode_idna
+from .datastructures import iter_multi_items
+from .datastructures import MultiDict
+
+# A regular expression for what a valid scheme looks like
+_scheme_re = re.compile(r"^[a-zA-Z0-9+-.]+$")
+
+# Characters that are safe in any part of a URL.
+_always_safe = frozenset(
+    bytearray(
+        b"abcdefghijklmnopqrstuvwxyz"
+        b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+        b"0123456789"
+        b"-._~"
+    )
+)
+
+_hexdigits = "0123456789ABCDEFabcdef"
+_hextobyte = dict(
+    ((a + b).encode(), int(a + b, 16)) for a in _hexdigits for b in _hexdigits
+)
+_bytetohex = [("%%%02X" % char).encode("ascii") for char in range(256)]
+
+
+_URLTuple = fix_tuple_repr(
+    namedtuple("_URLTuple", ["scheme", "netloc", "path", "query", "fragment"])
+)
+
+
+class BaseURL(_URLTuple):
+    """Superclass of :py:class:`URL` and :py:class:`BytesURL`."""
+
+    __slots__ = ()
+
+    def replace(self, **kwargs):
+        """Return an URL with the same values, except for those parameters
+        given new values by whichever keyword arguments are specified."""
+        return self._replace(**kwargs)
+
+    @property
+    def host(self):
+        """The host part of the URL if available, otherwise `None`.  The
+        host is either the hostname or the IP address mentioned in the
+        URL.  It will not contain the port.
+        """
+        return self._split_host()[0]
+
+    @property
+    def ascii_host(self):
+        """Works exactly like :attr:`host` but will return a result that
+        is restricted to ASCII.  If it finds a netloc that is not ASCII
+        it will attempt to idna-encode it.  This is useful for socket
+        operations when the URL might include internationalized characters.
+        """
+        rv = self.host
+        if rv is not None and isinstance(rv, text_type):
+            try:
+                rv = _encode_idna(rv)
+            except UnicodeError:
+                rv = rv.encode("ascii", "ignore")
+        return to_native(rv, "ascii", "ignore")
+
+    @property
+    def port(self):
+        """The port in the URL as an integer if it was present, `None`
+        otherwise.  This does not fill in default ports.
+        """
+        try:
+            rv = int(to_native(self._split_host()[1]))
+            if 0 <= rv <= 65535:
+                return rv
+        except (ValueError, TypeError):
+            pass
+
+    @property
+    def auth(self):
+        """The authentication part in the URL if available, `None`
+        otherwise.
+        """
+        return self._split_netloc()[0]
+
+    @property
+    def username(self):
+        """The username if it was part of the URL, `None` otherwise.
+        This undergoes URL decoding and will always be a unicode string.
+        """
+        rv = self._split_auth()[0]
+        if rv is not None:
+            return _url_unquote_legacy(rv)
+
+    @property
+    def raw_username(self):
+        """The username if it was part of the URL, `None` otherwise.
+        Unlike :attr:`username` this one is not being decoded.
+        """
+        return self._split_auth()[0]
+
+    @property
+    def password(self):
+        """The password if it was part of the URL, `None` otherwise.
+        This undergoes URL decoding and will always be a unicode string.
+        """
+        rv = self._split_auth()[1]
+        if rv is not None:
+            return _url_unquote_legacy(rv)
+
+    @property
+    def raw_password(self):
+        """The password if it was part of the URL, `None` otherwise.
+        Unlike :attr:`password` this one is not being decoded.
+        """
+        return self._split_auth()[1]
+
+    def decode_query(self, *args, **kwargs):
+        """Decodes the query part of the URL.  Ths is a shortcut for
+        calling :func:`url_decode` on the query argument.  The arguments and
+        keyword arguments are forwarded to :func:`url_decode` unchanged.
+        """
+        return url_decode(self.query, *args, **kwargs)
+
+    def join(self, *args, **kwargs):
+        """Joins this URL with another one.  This is just a convenience
+        function for calling into :meth:`url_join` and then parsing the
+        return value again.
+        """
+        return url_parse(url_join(self, *args, **kwargs))
+
+    def to_url(self):
+        """Returns a URL string or bytes depending on the type of the
+        information stored.  This is just a convenience function
+        for calling :meth:`url_unparse` for this URL.
+        """
+        return url_unparse(self)
+
+    def decode_netloc(self):
+        """Decodes the netloc part into a string."""
+        rv = _decode_idna(self.host or "")
+
+        if ":" in rv:
+            rv = "[%s]" % rv
+        port = self.port
+        if port is not None:
+            rv = "%s:%d" % (rv, port)
+        auth = ":".join(
+            filter(
+                None,
+                [
+                    _url_unquote_legacy(self.raw_username or "", "/:%@"),
+                    _url_unquote_legacy(self.raw_password or "", "/:%@"),
+                ],
+            )
+        )
+        if auth:
+            rv = "%s@%s" % (auth, rv)
+        return rv
+
+    def to_uri_tuple(self):
+        """Returns a :class:`BytesURL` tuple that holds a URI.  This will
+        encode all the information in the URL properly to ASCII using the
+        rules a web browser would follow.
+
+        It's usually more interesting to directly call :meth:`iri_to_uri` which
+        will return a string.
+        """
+        return url_parse(iri_to_uri(self).encode("ascii"))
+
+    def to_iri_tuple(self):
+        """Returns a :class:`URL` tuple that holds a IRI.  This will try
+        to decode as much information as possible in the URL without
+        losing information similar to how a web browser does it for the
+        URL bar.
+
+        It's usually more interesting to directly call :meth:`uri_to_iri` which
+        will return a string.
+        """
+        return url_parse(uri_to_iri(self))
+
+    def get_file_location(self, pathformat=None):
+        """Returns a tuple with the location of the file in the form
+        ``(server, location)``.  If the netloc is empty in the URL or
+        points to localhost, it's represented as ``None``.
+
+        By default the `pathformat` is autodetected, but it needs to be set
+        when working with URLs of a specific system.  The supported values
+        are ``'windows'`` when working with Windows or DOS paths and
+        ``'posix'`` when working with posix paths.
+
+        If the URL does not point to a local file, the server and location
+        are both represented as ``None``.
+
+        :param pathformat: The expected format of the path component.
+                           Currently ``'windows'`` and ``'posix'`` are
+                           supported.  Defaults to ``None`` which is
+                           autodetect.
+        """
+        if self.scheme != "file":
+            return None, None
+
+        path = url_unquote(self.path)
+        host = self.netloc or None
+
+        if pathformat is None:
+            if os.name == "nt":
+                pathformat = "windows"
+            else:
+                pathformat = "posix"
+
+        if pathformat == "windows":
+            if path[:1] == "/" and path[1:2].isalpha() and path[2:3] in "|:":
+                path = path[1:2] + ":" + path[3:]
+            windows_share = path[:3] in ("\\" * 3, "/" * 3)
+            import ntpath
+
+            path = ntpath.normpath(path)
+            # Windows shared drives are represented as ``\\host\\directory``.
+            # That results in a URL like ``file://///host/directory``, and a
+            # path like ``///host/directory``. We need to special-case this
+            # because the path contains the hostname.
+            if windows_share and host is None:
+                parts = path.lstrip("\\").split("\\", 1)
+                if len(parts) == 2:
+                    host, path = parts
+                else:
+                    host = parts[0]
+                    path = ""
+        elif pathformat == "posix":
+            import posixpath
+
+            path = posixpath.normpath(path)
+        else:
+            raise TypeError("Invalid path format %s" % repr(pathformat))
+
+        if host in ("127.0.0.1", "::1", "localhost"):
+            host = None
+
+        return host, path
+
+    def _split_netloc(self):
+        if self._at in self.netloc:
+            return self.netloc.split(self._at, 1)
+        return None, self.netloc
+
+    def _split_auth(self):
+        auth = self._split_netloc()[0]
+        if not auth:
+            return None, None
+        if self._colon not in auth:
+            return auth, None
+        return auth.split(self._colon, 1)
+
+    def _split_host(self):
+        rv = self._split_netloc()[1]
+        if not rv:
+            return None, None
+
+        if not rv.startswith(self._lbracket):
+            if self._colon in rv:
+                return rv.split(self._colon, 1)
+            return rv, None
+
+        idx = rv.find(self._rbracket)
+        if idx < 0:
+            return rv, None
+
+        host = rv[1:idx]
+        rest = rv[idx + 1 :]
+        if rest.startswith(self._colon):
+            return host, rest[1:]
+        return host, None
+
+
+@implements_to_string
+class URL(BaseURL):
+    """Represents a parsed URL.  This behaves like a regular tuple but
+    also has some extra attributes that give further insight into the
+    URL.
+    """
+
+    __slots__ = ()
+    _at = "@"
+    _colon = ":"
+    _lbracket = "["
+    _rbracket = "]"
+
+    def __str__(self):
+        return self.to_url()
+
+    def encode_netloc(self):
+        """Encodes the netloc part to an ASCII safe URL as bytes."""
+        rv = self.ascii_host or ""
+        if ":" in rv:
+            rv = "[%s]" % rv
+        port = self.port
+        if port is not None:
+            rv = "%s:%d" % (rv, port)
+        auth = ":".join(
+            filter(
+                None,
+                [
+                    url_quote(self.raw_username or "", "utf-8", "strict", "/:%"),
+                    url_quote(self.raw_password or "", "utf-8", "strict", "/:%"),
+                ],
+            )
+        )
+        if auth:
+            rv = "%s@%s" % (auth, rv)
+        return to_native(rv)
+
+    def encode(self, charset="utf-8", errors="replace"):
+        """Encodes the URL to a tuple made out of bytes.  The charset is
+        only used for the path, query and fragment.
+        """
+        return BytesURL(
+            self.scheme.encode("ascii"),
+            self.encode_netloc(),
+            self.path.encode(charset, errors),
+            self.query.encode(charset, errors),
+            self.fragment.encode(charset, errors),
+        )
+
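+# Illustrative sketch (not part of Werkzeug): ``URL.encode`` and
+# ``BytesURL.decode`` convert between the text and bytes tuple forms.
+#
+#   >>> bytes_url = url_parse(u"http://example.org/path").encode()
+#   >>> bytes_url.path
+#   b'/path'
+#   >>> bytes_url.decode().path
+#   '/path'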
+
+class BytesURL(BaseURL):
+    """Represents a parsed URL in bytes."""
+
+    __slots__ = ()
+    _at = b"@"
+    _colon = b":"
+    _lbracket = b"["
+    _rbracket = b"]"
+
+    def __str__(self):
+        return self.to_url().decode("utf-8", "replace")
+
+    def encode_netloc(self):
+        """Returns the netloc unchanged as bytes."""
+        return self.netloc
+
+    def decode(self, charset="utf-8", errors="replace"):
+        """Decodes the URL to a tuple made out of strings.  The charset is
+        only used for the path, query and fragment.
+        """
+        return URL(
+            self.scheme.decode("ascii"),
+            self.decode_netloc(),
+            self.path.decode(charset, errors),
+            self.query.decode(charset, errors),
+            self.fragment.decode(charset, errors),
+        )
+
+
+_unquote_maps = {frozenset(): _hextobyte}
+
+
+def _unquote_to_bytes(string, unsafe=""):
+    if isinstance(string, text_type):
+        string = string.encode("utf-8")
+
+    if isinstance(unsafe, text_type):
+        unsafe = unsafe.encode("utf-8")
+
+    unsafe = frozenset(bytearray(unsafe))
+    groups = iter(string.split(b"%"))
+    result = bytearray(next(groups, b""))
+
+    try:
+        hex_to_byte = _unquote_maps[unsafe]
+    except KeyError:
+        hex_to_byte = _unquote_maps[unsafe] = {
+            h: b for h, b in _hextobyte.items() if b not in unsafe
+        }
+
+    for group in groups:
+        code = group[:2]
+
+        if code in hex_to_byte:
+            result.append(hex_to_byte[code])
+            result.extend(group[2:])
+        else:
+            result.append(37)  # %
+            result.extend(group)
+
+    return bytes(result)
+
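+# Illustrative sketch (not part of Werkzeug): percent escapes decode to raw
+# bytes, while escapes for ``unsafe`` characters stay quoted.
+#
+#   >>> _unquote_to_bytes("a%20b%2Fc", unsafe="/")
+#   b'a b%2Fc'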
+
+def _url_encode_impl(obj, charset, encode_keys, sort, key):
+    iterable = iter_multi_items(obj)
+    if sort:
+        iterable = sorted(iterable, key=key)
+    for key, value in iterable:
+        if value is None:
+            continue
+        if not isinstance(key, bytes):
+            key = text_type(key).encode(charset)
+        if not isinstance(value, bytes):
+            value = text_type(value).encode(charset)
+        yield _fast_url_quote_plus(key) + "=" + _fast_url_quote_plus(value)
+
+
+def _url_unquote_legacy(value, unsafe=""):
+    try:
+        return url_unquote(value, charset="utf-8", errors="strict", unsafe=unsafe)
+    except UnicodeError:
+        return url_unquote(value, charset="latin1", unsafe=unsafe)
+
+
+def url_parse(url, scheme=None, allow_fragments=True):
+    """Parses a URL from a string into a :class:`URL` tuple.  If the URL
+    is lacking a scheme it can be provided as the second argument. Otherwise,
+    it is ignored.  Optionally fragments can be stripped from the URL
+    by setting `allow_fragments` to `False`.
+
+    The inverse of this function is :func:`url_unparse`.
+
+    :param url: the URL to parse.
+    :param scheme: the default scheme to use if the URL has no scheme.
+    :param allow_fragments: if set to `False` a fragment will be removed
+                            from the URL.
+    """
+    s = make_literal_wrapper(url)
+    is_text_based = isinstance(url, text_type)
+
+    if scheme is None:
+        scheme = s("")
+    netloc = query = fragment = s("")
+    i = url.find(s(":"))
+    if i > 0 and _scheme_re.match(to_native(url[:i], errors="replace")):
+        # make sure "iri" is not actually a port number (in which case
+        # "scheme" is really part of the path)
+        rest = url[i + 1 :]
+        if not rest or any(c not in s("0123456789") for c in rest):
+            # not a port number
+            scheme, url = url[:i].lower(), rest
+
+    if url[:2] == s("//"):
+        delim = len(url)
+        for c in s("/?#"):
+            wdelim = url.find(c, 2)
+            if wdelim >= 0:
+                delim = min(delim, wdelim)
+        netloc, url = url[2:delim], url[delim:]
+        if (s("[") in netloc and s("]") not in netloc) or (
+            s("]") in netloc and s("[") not in netloc
+        ):
+            raise ValueError("Invalid IPv6 URL")
+
+    if allow_fragments and s("#") in url:
+        url, fragment = url.split(s("#"), 1)
+    if s("?") in url:
+        url, query = url.split(s("?"), 1)
+
+    result_type = URL if is_text_based else BytesURL
+    return result_type(scheme, netloc, url, query, fragment)
+
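+# Illustrative sketch (not part of Werkzeug): the returned tuple exposes its
+# parts by name; ``example.org`` is hypothetical.
+#
+#   >>> url = url_parse(u"http://user@example.org:8080/path?q=1#frag")
+#   >>> url.scheme, url.host, url.port, url.path
+#   ('http', 'example.org', 8080, '/path')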
+
+def _make_fast_url_quote(charset="utf-8", errors="strict", safe="/:", unsafe=""):
+    """Precompile the translation table for a URL encoding function.
+
+    Unlike :func:`url_quote`, the generated function only takes the
+    string to quote.
+
+    :param charset: The charset to encode the result with.
+    :param errors: How to handle encoding errors.
+    :param safe: An optional sequence of safe characters to never encode.
+    :param unsafe: An optional sequence of unsafe characters to always encode.
+    """
+    if isinstance(safe, text_type):
+        safe = safe.encode(charset, errors)
+
+    if isinstance(unsafe, text_type):
+        unsafe = unsafe.encode(charset, errors)
+
+    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
+    table = [chr(c) if c in safe else "%%%02X" % c for c in range(256)]
+
+    if not PY2:
+
+        def quote(string):
+            return "".join([table[c] for c in string])
+
+    else:
+
+        def quote(string):
+            return "".join([table[c] for c in bytearray(string)])
+
+    return quote
+
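+# Illustrative sketch (not part of Werkzeug): precompiling the table lets hot
+# paths quote bytes without re-checking arguments on every call.
+#
+#   >>> quote = _make_fast_url_quote(safe="/")
+#   >>> quote(b"/a b")
+#   '/a%20b'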
+
+_fast_url_quote = _make_fast_url_quote()
+_fast_quote_plus = _make_fast_url_quote(safe=" ", unsafe="+")
+
+
+def _fast_url_quote_plus(string):
+    return _fast_quote_plus(string).replace(" ", "+")
+
+
+def url_quote(string, charset="utf-8", errors="strict", safe="/:", unsafe=""):
+    """URL encode a single string with a given encoding.
+
+    :param s: the string to quote.
+    :param charset: the charset to be used.
+    :param safe: an optional sequence of safe characters.
+    :param unsafe: an optional sequence of unsafe characters.
+
+    .. versionadded:: 0.9.2
+       The `unsafe` parameter was added.
+    """
+    if not isinstance(string, (text_type, bytes, bytearray)):
+        string = text_type(string)
+    if isinstance(string, text_type):
+        string = string.encode(charset, errors)
+    if isinstance(safe, text_type):
+        safe = safe.encode(charset, errors)
+    if isinstance(unsafe, text_type):
+        unsafe = unsafe.encode(charset, errors)
+    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
+    rv = bytearray()
+    for char in bytearray(string):
+        if char in safe:
+            rv.append(char)
+        else:
+            rv.extend(_bytetohex[char])
+    return to_native(bytes(rv))
+
+
+def url_quote_plus(string, charset="utf-8", errors="strict", safe=""):
+    """URL encode a single string with the given encoding and convert
+    whitespace to "+".
+
+    :param s: The string to quote.
+    :param charset: The charset to be used.
+    :param safe: An optional sequence of safe characters.
+    """
+    return url_quote(string, charset, errors, safe + " ", "+").replace(" ", "+")
+
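+# Illustrative sketch (not part of Werkzeug): quoting honours the ``safe``
+# and ``unsafe`` arguments, and the ``_plus`` variant maps spaces to "+".
+#
+#   >>> url_quote(u"a b/c", safe="/")
+#   'a%20b/c'
+#   >>> url_quote_plus(u"a b&c")
+#   'a+b%26c'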
+
+def url_unparse(components):
+    """The reverse operation to :meth:`url_parse`.  This accepts arbitrary
+    as well as :class:`URL` tuples and returns a URL as a string.
+
+    :param components: the parsed URL as tuple which should be converted
+                       into a URL string.
+    """
+    scheme, netloc, path, query, fragment = normalize_string_tuple(components)
+    s = make_literal_wrapper(scheme)
+    url = s("")
+
+    # We generally treat file:///x and file:/x the same which is also
+    # what browsers seem to do.  This also allows us to ignore a scheme
+    # registry for netloc utilization and avoids having to differentiate
+    # between an empty and a missing netloc.
+    if netloc or (scheme and path.startswith(s("/"))):
+        if path and path[:1] != s("/"):
+            path = s("/") + path
+        url = s("//") + (netloc or s("")) + path
+    elif path:
+        url += path
+    if scheme:
+        url = scheme + s(":") + url
+    if query:
+        url = url + s("?") + query
+    if fragment:
+        url = url + s("#") + fragment
+    return url
+
+
+def url_unquote(string, charset="utf-8", errors="replace", unsafe=""):
+    """URL decode a single string with a given encoding.  If the charset
+    is set to `None` no unicode decoding is performed and raw bytes
+    are returned.
+
+    :param s: the string to unquote.
+    :param charset: the charset of the query string.  If set to `None`
+                    no unicode decoding will take place.
+    :param errors: the error handling for the charset decoding.
+    """
+    rv = _unquote_to_bytes(string, unsafe)
+    if charset is not None:
+        rv = rv.decode(charset, errors)
+    return rv
+
+
+def url_unquote_plus(s, charset="utf-8", errors="replace"):
+    """URL decode a single string with the given `charset` and decode "+" to
+    whitespace.
+
+    By default encoding errors are replaced.  If you want a different
+    behavior you can set `errors` to ``'ignore'`` or ``'strict'``.  In
+    strict mode a :exc:`UnicodeError` is raised.
+
+    :param s: The string to unquote.
+    :param charset: the charset of the query string.  If set to `None`
+                    no unicode decoding will take place.
+    :param errors: The error handling for the `charset` decoding.
+    """
+    if isinstance(s, text_type):
+        s = s.replace(u"+", u" ")
+    else:
+        s = s.replace(b"+", b" ")
+    return url_unquote(s, charset, errors)
+
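+# Illustrative sketch (not part of Werkzeug): unquoting reverses the above
+# and decodes "+" back to a space.
+#
+#   >>> url_unquote_plus("a+b%26c")
+#   'a b&c'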
+
+def url_fix(s, charset="utf-8"):
+    r"""Sometimes you get an URL by a user that just isn't a real URL because
+    it contains unsafe characters like ' ' and so on. This function can fix
+    some of the problems in a similar way browsers handle data entered by the
+    user:
+
+    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
+    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'
+
+    :param s: the string with the URL to fix.
+    :param charset: The target charset for the URL if the url was given as
+                    unicode string.
+    """
+    # First step is to switch to unicode processing and to convert
+    # backslashes (which are invalid in URLs anyways) to slashes.  This is
+    # consistent with what Chrome does.
+    s = to_unicode(s, charset, "replace").replace("\\", "/")
+
+    # For the specific case that we look like a malformed windows URL
+    # we want to fix this up manually:
+    if s.startswith("file://") and s[7:8].isalpha() and s[8:10] in (":/", "|/"):
+        s = "file:///" + s[7:]
+
+    url = url_parse(s)
+    path = url_quote(url.path, charset, safe="/%+$!*'(),")
+    qs = url_quote_plus(url.query, charset, safe=":&%=+$!*'(),")
+    anchor = url_quote_plus(url.fragment, charset, safe=":&%=+$!*'(),")
+    return to_native(url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor)))
+
+
+# not-unreserved characters remain quoted when unquoting to IRI
+_to_iri_unsafe = "".join([chr(c) for c in range(128) if c not in _always_safe])
+
+
+def _codec_error_url_quote(e):
+    """Used in :func:`uri_to_iri` after unquoting to re-quote any
+    invalid bytes.
+    """
+    out = _fast_url_quote(e.object[e.start : e.end])
+
+    if PY2:
+        out = out.decode("utf-8")
+
+    return out, e.end
+
+
+codecs.register_error("werkzeug.url_quote", _codec_error_url_quote)
+
+
+def uri_to_iri(uri, charset="utf-8", errors="werkzeug.url_quote"):
+    """Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
+    leaving all reserved and invalid characters quoted. If the URL has
+    a domain, it is decoded from Punycode.
+
+    >>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF")
+    'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF'
+
+    :param uri: The URI to convert.
+    :param charset: The encoding to encode unquoted bytes with.
+    :param errors: Error handler to use during ``bytes.encode``. By
+        default, invalid bytes are left quoted.
+
+    .. versionchanged:: 0.15
+        All reserved and invalid characters remain quoted. Previously,
+        only some reserved characters were preserved, and invalid bytes
+        were replaced instead of left quoted.
+
+    .. versionadded:: 0.6
+    """
+    if isinstance(uri, tuple):
+        uri = url_unparse(uri)
+
+    uri = url_parse(to_unicode(uri, charset))
+    path = url_unquote(uri.path, charset, errors, _to_iri_unsafe)
+    query = url_unquote(uri.query, charset, errors, _to_iri_unsafe)
+    fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe)
+    return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment))
+
+
+# reserved characters remain unquoted when quoting to URI
+_to_uri_safe = ":/?#[]@!$&'()*+,;=%"
+
+
+def iri_to_uri(iri, charset="utf-8", errors="strict", safe_conversion=False):
+    """Convert an IRI to a URI. All non-ASCII and unsafe characters are
+    quoted. If the URL has a domain, it is encoded to Punycode.
+
+    >>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF')
+    'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'
+
+    :param iri: The IRI to convert.
+    :param charset: The encoding of the IRI.
+    :param errors: Error handler to use during ``bytes.encode``.
+    :param safe_conversion: Return the URL unchanged if it only contains
+        ASCII characters and no whitespace. See the explanation below.
+
+    There is a general problem with IRI conversion with some protocols
+    that are in violation of the URI specification. Consider the
+    following two IRIs::
+
+        magnet:?xt=uri:whatever
+        itms-services://?action=download-manifest
+
+    After parsing, we don't know if the scheme requires the ``//``,
+    which is dropped if empty, but conveys different meanings in the
+    final URL if it's present or not. In this case, you can use
+    ``safe_conversion``, which will return the URL unchanged if it only
+    contains ASCII characters and no whitespace. This can result in a
+    URI with unquoted characters if it was not already quoted correctly,
+    but preserves the URL's semantics. Werkzeug uses this for the
+    ``Location`` header for redirects.
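+
+    A sketch of what ``safe_conversion`` preserves:
+
+    >>> iri_to_uri('itms-services://?action=download-manifest',
+    ...            safe_conversion=True)
+    'itms-services://?action=download-manifest'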
+
+    .. versionchanged:: 0.15
+        All reserved characters remain unquoted. Previously, only some
+        reserved characters were left unquoted.
+
+    .. versionchanged:: 0.9.6
+       The ``safe_conversion`` parameter was added.
+
+    .. versionadded:: 0.6
+    """
+    if isinstance(iri, tuple):
+        iri = url_unparse(iri)
+
+    if safe_conversion:
+        # If we're not sure if it's safe to convert the URL, and it only
+        # contains ASCII characters, return it unconverted.
+        try:
+            native_iri = to_native(iri)
+            ascii_iri = native_iri.encode("ascii")
+
+            # Only return if it doesn't have whitespace: whitespace
+            # would have to be percent-encoded, so its presence means
+            # the URL is not already safely quoted.
+            if len(ascii_iri.split()) == 1:
+                return native_iri
+        except UnicodeError:
+            pass
+
+    iri = url_parse(to_unicode(iri, charset, errors))
+    path = url_quote(iri.path, charset, errors, _to_uri_safe)
+    query = url_quote(iri.query, charset, errors, _to_uri_safe)
+    fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe)
+    return to_native(
+        url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment))
+    )
+
+
+def url_decode(
+    s,
+    charset="utf-8",
+    decode_keys=False,
+    include_empty=True,
+    errors="replace",
+    separator="&",
+    cls=None,
+):
+    """
+    Parse a querystring and return it as :class:`MultiDict`.  There is a
+    difference in key decoding on different Python versions.  On Python 3
+    keys will always be fully decoded whereas on Python 2, keys will
+    remain bytestrings if they fit into ASCII.  On 2.x keys can be forced
+    to be unicode by setting `decode_keys` to `True`.
+
+    If the charset is set to `None` no unicode decoding will happen and
+    raw bytes will be returned.
+
+    Per default a key without a value is included with an empty string as
+    its value.  If you don't want that behavior you can set `include_empty`
+    to `False`.
+
+    Per default encoding errors are replaced.  If you want a different behavior
+    you can set `errors` to ``'ignore'`` or ``'strict'``.  In strict mode a
+    `UnicodeDecodeError` is raised.
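+
+    A short sketch of the result (output shown as on Python 3):
+
+    >>> d = url_decode('a=1&a=2&b=3')
+    >>> d.getlist('a')
+    ['1', '2']
+    >>> d['b']
+    '3'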
+
+    .. versionchanged:: 0.5
+       In previous versions ";" and "&" could be used for url decoding.
+       This changed in 0.5 where only "&" is supported.  If you want to
+       use ";" instead a different `separator` can be provided.
+
+       The `cls` parameter was added.
+
+    :param s: a string with the query string to decode.
+    :param charset: the charset of the query string.  If set to `None`
+                    no unicode decoding will take place.
+    :param decode_keys: Used on Python 2.x to control whether keys should
+                        be forced to be unicode objects.  If set to `True`
+                        then keys will be unicode in all cases. Otherwise,
+                        they remain `str` if they fit into ASCII.
+    :param include_empty: Set to `False` if you don't want empty values to
+                          appear in the dict.
+    :param errors: the decoding error behavior.
+    :param separator: the pair separator to be used, defaults to ``&``
+    :param cls: an optional dict class to use.  If this is not specified
+                       or `None` the default :class:`MultiDict` is used.
+    """
+    if cls is None:
+        cls = MultiDict
+    if isinstance(s, text_type) and not isinstance(separator, text_type):
+        separator = separator.decode(charset or "ascii")
+    elif isinstance(s, bytes) and not isinstance(separator, bytes):
+        separator = separator.encode(charset or "ascii")
+    return cls(
+        _url_decode_impl(
+            s.split(separator), charset, decode_keys, include_empty, errors
+        )
+    )
+
+
+def url_decode_stream(
+    stream,
+    charset="utf-8",
+    decode_keys=False,
+    include_empty=True,
+    errors="replace",
+    separator="&",
+    cls=None,
+    limit=None,
+    return_iterator=False,
+):
+    """Works like :func:`url_decode` but decodes a stream.  The behavior
+    of stream and limit follows functions like
+    :func:`~werkzeug.wsgi.make_line_iter`.  The generator of pairs is
+    directly fed to the `cls` so you can consume the data while it's
+    parsed.
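+
+    A usage sketch (output shown as on Python 3):
+
+    >>> from io import BytesIO
+    >>> url_decode_stream(BytesIO(b'a=1&b=2'), limit=7)['a']
+    '1'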
+
+    .. versionadded:: 0.8
+
+    :param stream: a stream with the encoded querystring
+    :param charset: the charset of the query string.  If set to `None`
+                    no unicode decoding will take place.
+    :param decode_keys: Used on Python 2.x to control whether keys should
+                        be forced to be unicode objects.  If set to `True`,
+                        keys will be unicode in all cases. Otherwise, they
+                        remain `str` if they fit into ASCII.
+    :param include_empty: Set to `False` if you don't want empty values to
+                          appear in the dict.
+    :param errors: the decoding error behavior.
+    :param separator: the pair separator to be used, defaults to ``&``
+    :param cls: an optional dict class to use.  If this is not specified
+                       or `None` the default :class:`MultiDict` is used.
+    :param limit: the content length of the URL data.  Not necessary if
+                  a limited stream is provided.
+    :param return_iterator: if set to `True` the `cls` argument is ignored
+                            and an iterator over all decoded pairs is
+                            returned
+    """
+    from .wsgi import make_chunk_iter
+
+    pair_iter = make_chunk_iter(stream, separator, limit)
+    decoder = _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors)
+
+    if return_iterator:
+        return decoder
+
+    if cls is None:
+        cls = MultiDict
+
+    return cls(decoder)
+
+
+def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors):
+    for pair in pair_iter:
+        if not pair:
+            continue
+        s = make_literal_wrapper(pair)
+        equal = s("=")
+        if equal in pair:
+            key, value = pair.split(equal, 1)
+        else:
+            if not include_empty:
+                continue
+            key = pair
+            value = s("")
+        key = url_unquote_plus(key, charset, errors)
+        if charset is not None and PY2 and not decode_keys:
+            key = try_coerce_native(key)
+        yield key, url_unquote_plus(value, charset, errors)
+
+
+def url_encode(
+    obj, charset="utf-8", encode_keys=False, sort=False, key=None, separator=b"&"
+):
+    """URL encode a dict/`MultiDict`.  If a value is `None` it will not appear
+    in the result string.  Per default only values are encoded into the target
+    charset strings.  If `encode_keys` is set to ``True`` unicode keys are
+    supported too.
+
+    If `sort` is set to `True` the items are sorted by `key` or the default
+    sorting algorithm.
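+
+    For example (a sketch):
+
+    >>> url_encode({'foo': 'bar', 'num': 1}, sort=True)
+    'foo=bar&num=1'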
+
+    .. versionadded:: 0.5
+        `sort`, `key`, and `separator` were added.
+
+    :param obj: the object to encode into a query string.
+    :param charset: the charset of the query string.
+    :param encode_keys: set to `True` if you have unicode keys. (Ignored on
+                        Python 3.x)
+    :param sort: set to `True` if you want parameters to be sorted by `key`.
+    :param separator: the separator to be used for the pairs.
+    :param key: an optional function to be used for sorting.  For more details
+                check out the :func:`sorted` documentation.
+    """
+    separator = to_native(separator, "ascii")
+    return separator.join(_url_encode_impl(obj, charset, encode_keys, sort, key))
+
+
+def url_encode_stream(
+    obj,
+    stream=None,
+    charset="utf-8",
+    encode_keys=False,
+    sort=False,
+    key=None,
+    separator=b"&",
+):
+    """Like :meth:`url_encode` but writes the results to a stream
+    object.  If the stream is `None` a generator over all encoded
+    pairs is returned.
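+
+    A usage sketch with an in-memory stream (Python 3 shown):
+
+    >>> from io import StringIO
+    >>> buf = StringIO()
+    >>> url_encode_stream({'a': '1', 'b': '2'}, buf, sort=True)
+    >>> buf.getvalue()
+    'a=1&b=2'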
+
+    .. versionadded:: 0.8
+
+    :param obj: the object to encode into a query string.
+    :param stream: a stream to write the encoded object into or `None` if
+                   an iterator over the encoded pairs should be returned.  In
+                   that case the separator argument is ignored.
+    :param charset: the charset of the query string.
+    :param encode_keys: set to `True` if you have unicode keys. (Ignored on
+                        Python 3.x)
+    :param sort: set to `True` if you want parameters to be sorted by `key`.
+    :param separator: the separator to be used for the pairs.
+    :param key: an optional function to be used for sorting.  For more details
+                check out the :func:`sorted` documentation.
+    """
+    separator = to_native(separator, "ascii")
+    gen = _url_encode_impl(obj, charset, encode_keys, sort, key)
+    if stream is None:
+        return gen
+    for idx, chunk in enumerate(gen):
+        if idx:
+            stream.write(separator)
+        stream.write(chunk)
+
+
+def url_join(base, url, allow_fragments=True):
+    """Join a base URL and a possibly relative URL to form an absolute
+    interpretation of the latter.
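+
+    For example (a sketch):
+
+    >>> url_join('http://example.com/a/b/', '../c')
+    'http://example.com/a/c'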
+
+    :param base: the base URL for the join operation.
+    :param url: the URL to join.
+    :param allow_fragments: indicates whether fragments should be allowed.
+    """
+    if isinstance(base, tuple):
+        base = url_unparse(base)
+    if isinstance(url, tuple):
+        url = url_unparse(url)
+
+    base, url = normalize_string_tuple((base, url))
+    s = make_literal_wrapper(base)
+
+    if not base:
+        return url
+    if not url:
+        return base
+
+    bscheme, bnetloc, bpath, bquery, bfragment = url_parse(
+        base, allow_fragments=allow_fragments
+    )
+    scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)
+    if scheme != bscheme:
+        return url
+    if netloc:
+        return url_unparse((scheme, netloc, path, query, fragment))
+    netloc = bnetloc
+
+    if path[:1] == s("/"):
+        segments = path.split(s("/"))
+    elif not path:
+        segments = bpath.split(s("/"))
+        if not query:
+            query = bquery
+    else:
+        segments = bpath.split(s("/"))[:-1] + path.split(s("/"))
+
+    # If the rightmost part is "./" we want to keep the slash but
+    # remove the dot.
+    if segments[-1] == s("."):
+        segments[-1] = s("")
+
+    # Resolve ".." and "."
+    segments = [segment for segment in segments if segment != s(".")]
+    while 1:
+        i = 1
+        n = len(segments) - 1
+        while i < n:
+            if segments[i] == s("..") and segments[i - 1] not in (s(""), s("..")):
+                del segments[i - 1 : i + 1]
+                break
+            i += 1
+        else:
+            break
+
+    # Remove leading ".." segments if the URL is absolute
+    unwanted_marker = [s(""), s("..")]
+    while segments[:2] == unwanted_marker:
+        del segments[1]
+
+    path = s("/").join(segments)
+    return url_unparse((scheme, netloc, path, query, fragment))
+
+
+class Href(object):
+    """Implements a callable that constructs URLs with the given base. The
+    function can be called with any number of positional and keyword
+    arguments which than are used to assemble the URL.  Works with URLs
+    and posix paths.
+
+    Positional arguments are appended as individual segments to
+    the path of the URL:
+
+    >>> href = Href('/foo')
+    >>> href('bar', 23)
+    '/foo/bar/23'
+    >>> href('foo', bar=23)
+    '/foo/foo?bar=23'
+
+    If any of the arguments (positional or keyword) evaluates to `None` it
+    will be skipped.  If no keyword arguments are given the last argument
+    can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
+    otherwise the keyword arguments are used for the query parameters, cutting
+    off the first trailing underscore of the parameter name:
+
+    >>> href(is_=42)
+    '/foo?is=42'
+    >>> href({'foo': 'bar'})
+    '/foo?foo=bar'
+
+    Combining both methods is not allowed:
+
+    >>> href({'foo': 'bar'}, bar=42)
+    Traceback (most recent call last):
+      ...
+    TypeError: keyword arguments and query-dicts can't be combined
+
+    Accessing attributes on the href object creates a new href object with
+    the attribute name as prefix:
+
+    >>> bar_href = href.bar
+    >>> bar_href("blub")
+    '/foo/bar/blub'
+
+    If `sort` is set to `True` the items are sorted by `key` or the default
+    sorting algorithm:
+
+    >>> href = Href("/", sort=True)
+    >>> href(a=1, b=2, c=3)
+    '/?a=1&b=2&c=3'
+
+    .. versionadded:: 0.5
+        `sort` and `key` were added.
+    """
+
+    def __init__(self, base="./", charset="utf-8", sort=False, key=None):
+        if not base:
+            base = "./"
+        self.base = base
+        self.charset = charset
+        self.sort = sort
+        self.key = key
+
+    def __getattr__(self, name):
+        if name[:2] == "__":
+            raise AttributeError(name)
+        base = self.base
+        if base[-1:] != "/":
+            base += "/"
+        return Href(url_join(base, name), self.charset, self.sort, self.key)
+
+    def __call__(self, *path, **query):
+        if path and isinstance(path[-1], dict):
+            if query:
+                raise TypeError("keyword arguments and query-dicts can't be combined")
+            query, path = path[-1], path[:-1]
+        elif query:
+            query = dict(
+                [(k.endswith("_") and k[:-1] or k, v) for k, v in query.items()]
+            )
+        path = "/".join(
+            [
+                to_unicode(url_quote(x, self.charset), "ascii")
+                for x in path
+                if x is not None
+            ]
+        ).lstrip("/")
+        rv = self.base
+        if path:
+            if not rv.endswith("/"):
+                rv += "/"
+            rv = url_join(rv, "./" + path)
+        if query:
+            rv += "?" + to_unicode(
+                url_encode(query, self.charset, sort=self.sort, key=self.key), "ascii"
+            )
+        return to_native(rv)
diff --git a/lib/python3.7/site-packages/werkzeug/useragents.py b/lib/python3.7/site-packages/werkzeug/useragents.py
new file mode 100644
index 0000000000000000000000000000000000000000..e265e0939e51024834f5a1e1316cae261d6df042
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/useragents.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.useragents
+    ~~~~~~~~~~~~~~~~~~~
+
+    This module provides a helper to inspect user agent strings.  It is
+    far from complete but should work for most of the currently available
+    browsers.
+
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import re
+import warnings
+
+
+class UserAgentParser(object):
+    """A simple user agent parser.  Used by the `UserAgent`."""
+
+    platforms = (
+        ("cros", "chromeos"),
+        ("iphone|ios", "iphone"),
+        ("ipad", "ipad"),
+        (r"darwin|mac|os\s*x", "macos"),
+        ("win", "windows"),
+        (r"android", "android"),
+        ("netbsd", "netbsd"),
+        ("openbsd", "openbsd"),
+        ("freebsd", "freebsd"),
+        ("dragonfly", "dragonflybsd"),
+        ("(sun|i86)os", "solaris"),
+        (r"x11|lin(\b|ux)?", "linux"),
+        (r"nintendo\s+wii", "wii"),
+        ("irix", "irix"),
+        ("hp-?ux", "hpux"),
+        ("aix", "aix"),
+        ("sco|unix_sv", "sco"),
+        ("bsd", "bsd"),
+        ("amiga", "amiga"),
+        ("blackberry|playbook", "blackberry"),
+        ("symbian", "symbian"),
+    )
+    browsers = (
+        ("googlebot", "google"),
+        ("msnbot", "msn"),
+        ("yahoo", "yahoo"),
+        ("ask jeeves", "ask"),
+        (r"aol|america\s+online\s+browser", "aol"),
+        ("opera", "opera"),
+        ("edge", "edge"),
+        ("chrome|crios", "chrome"),
+        ("seamonkey", "seamonkey"),
+        ("firefox|firebird|phoenix|iceweasel", "firefox"),
+        ("galeon", "galeon"),
+        ("safari|version", "safari"),
+        ("webkit", "webkit"),
+        ("camino", "camino"),
+        ("konqueror", "konqueror"),
+        ("k-meleon", "kmeleon"),
+        ("netscape", "netscape"),
+        (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
+        ("lynx", "lynx"),
+        ("links", "links"),
+        ("Baiduspider", "baidu"),
+        ("bingbot", "bing"),
+        ("mozilla", "mozilla"),
+    )
+
+    _browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?"
+    _language_re = re.compile(
+        r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
+        r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
+    )
+
+    def __init__(self):
+        self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
+        self.browsers = [
+            (b, re.compile(self._browser_version_re % a, re.I))
+            for a, b in self.browsers
+        ]
+
+    def __call__(self, user_agent):
+        for platform, regex in self.platforms:  # noqa: B007
+            match = regex.search(user_agent)
+            if match is not None:
+                break
+        else:
+            platform = None
+        for browser, regex in self.browsers:  # noqa: B007
+            match = regex.search(user_agent)
+            if match is not None:
+                version = match.group(1)
+                break
+        else:
+            browser = version = None
+        match = self._language_re.search(user_agent)
+        if match is not None:
+            language = match.group(1) or match.group(2)
+        else:
+            language = None
+        return platform, browser, version, language
+
+
+class UserAgent(object):
+    """Represents a user agent.  Pass it a WSGI environment or a user agent
+    string and you can inspect some of the details from the user agent
+    string via the attributes.  The following attributes exist:
+
+    .. attribute:: string
+
+       the raw user agent string
+
+    .. attribute:: platform
+
+       the browser platform.  The following platforms are currently
+       recognized:
+
+       -   `aix`
+       -   `amiga`
+       -   `android`
+       -   `blackberry`
+       -   `bsd`
+       -   `chromeos`
+       -   `dragonflybsd`
+       -   `freebsd`
+       -   `hpux`
+       -   `ipad`
+       -   `iphone`
+       -   `irix`
+       -   `linux`
+       -   `macos`
+       -   `netbsd`
+       -   `openbsd`
+       -   `sco`
+       -   `solaris`
+       -   `symbian`
+       -   `wii`
+       -   `windows`
+
+    .. attribute:: browser
+
+        the name of the browser.  The following browsers are currently
+        recognized:
+
+        -   `aol` *
+        -   `ask` *
+        -   `baidu` *
+        -   `bing` *
+        -   `camino`
+        -   `chrome`
+        -   `edge`
+        -   `firefox`
+        -   `galeon`
+        -   `google` *
+        -   `kmeleon`
+        -   `konqueror`
+        -   `links`
+        -   `lynx`
+        -   `mozilla`
+        -   `msie`
+        -   `msn`
+        -   `netscape`
+        -   `opera`
+        -   `safari`
+        -   `seamonkey`
+        -   `webkit`
+        -   `yahoo` *
+
+        (Browsers marked with a star (``*``) are crawlers.)
+
+    .. attribute:: version
+
+        the version of the browser
+
+    .. attribute:: language
+
+        the language of the browser
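+
+    A usage sketch (the user agent string is illustrative):
+
+    >>> ua = UserAgent(
+    ...     'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
+    ...     ' (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
+    ... )
+    >>> ua.platform
+    'windows'
+    >>> ua.browser
+    'chrome'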
+    """
+
+    _parser = UserAgentParser()
+
+    def __init__(self, environ_or_string):
+        if isinstance(environ_or_string, dict):
+            environ_or_string = environ_or_string.get("HTTP_USER_AGENT", "")
+        self.string = environ_or_string
+        self.platform, self.browser, self.version, self.language = self._parser(
+            environ_or_string
+        )
+
+    def to_header(self):
+        return self.string
+
+    def __str__(self):
+        return self.string
+
+    def __nonzero__(self):
+        return bool(self.browser)
+
+    __bool__ = __nonzero__
+
+    def __repr__(self):
+        return "<%s %r/%s>" % (self.__class__.__name__, self.browser, self.version)
+
+
+# DEPRECATED
+from .wrappers import UserAgentMixin as _UserAgentMixin
+
+
+class UserAgentMixin(_UserAgentMixin):
+    @property
+    def user_agent(self):
+        warnings.warn(
+            "'werkzeug.useragents.UserAgentMixin' should be imported"
+            " from 'werkzeug.wrappers.UserAgentMixin'. This old import"
+            " will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return super(_UserAgentMixin, self).user_agent
diff --git a/lib/python3.7/site-packages/werkzeug/utils.py b/lib/python3.7/site-packages/werkzeug/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..20620572c25f9959a2181f89dc71810098a10b25
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/utils.py
@@ -0,0 +1,836 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.utils
+    ~~~~~~~~~~~~~~
+
+    This module implements various utilities for WSGI applications.  Most of
+    them are used by the request and response wrappers but especially for
+    middleware development it makes sense to use them without the wrappers.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import codecs
+import os
+import pkgutil
+import re
+import sys
+import warnings
+
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import reraise
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import unichr
+from ._internal import _DictAccessorProperty
+from ._internal import _missing
+from ._internal import _parse_signature
+
+try:
+    from html.entities import name2codepoint
+except ImportError:
+    from htmlentitydefs import name2codepoint
+
+
+_format_re = re.compile(r"\$(?:(%s)|\{(%s)\})" % (("[a-zA-Z_][a-zA-Z0-9_]*",) * 2))
+_entity_re = re.compile(r"&([^;]+);")
+_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]")
+_windows_device_files = (
+    "CON",
+    "AUX",
+    "COM1",
+    "COM2",
+    "COM3",
+    "COM4",
+    "LPT1",
+    "LPT2",
+    "LPT3",
+    "PRN",
+    "NUL",
+)
+
+
+class cached_property(property):
+    """A decorator that converts a function into a lazy property.  The
+    function wrapped is called the first time to retrieve the result
+    and then that calculated result is used the next time you access
+    the value::
+
+        class Foo(object):
+
+            @cached_property
+            def foo(self):
+                # calculate something important here
+                return 42
+
+    The class has to have a `__dict__` in order for this property to
+    work.
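+
+    A small usage sketch (the ``print`` call makes the caching visible):
+
+    >>> class Foo(object):
+    ...     @cached_property
+    ...     def foo(self):
+    ...         print("computing")
+    ...         return 42
+    >>> f = Foo()
+    >>> f.foo
+    computing
+    42
+    >>> f.foo
+    42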
+    """
+
+    # implementation detail: A subclass of python's builtin property
+    # decorator, we override __get__ to check for a cached value. If one
+    # chooses to invoke __get__ by hand the property will still work as
+    # expected because the lookup logic is replicated in __get__ for
+    # manual invocation.
+
+    def __init__(self, func, name=None, doc=None):
+        self.__name__ = name or func.__name__
+        self.__module__ = func.__module__
+        self.__doc__ = doc or func.__doc__
+        self.func = func
+
+    def __set__(self, obj, value):
+        obj.__dict__[self.__name__] = value
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        value = obj.__dict__.get(self.__name__, _missing)
+        if value is _missing:
+            value = self.func(obj)
+            obj.__dict__[self.__name__] = value
+        return value
+
+
+class environ_property(_DictAccessorProperty):
+    """Maps request attributes to environment variables. This works not only
+    for the Werzeug request object, but also any other class with an
+    environ attribute:
+
+    >>> class Test(object):
+    ...     environ = {'key': 'value'}
+    ...     test = environ_property('key')
+    >>> var = Test()
+    >>> var.test
+    'value'
+
+    If you pass it a second value it's used as default if the key does not
+    exist.  The third one can be a converter that takes a value and converts
+    it.  If it raises :exc:`ValueError` or :exc:`TypeError` the default value
+    is used. If no default value is provided `None` is used.
+
+    Per default the property is read only.  You have to explicitly enable it
+    by passing ``read_only=False`` to the constructor.
+    """
+
+    read_only = True
+
+    def lookup(self, obj):
+        return obj.environ
+
+
+class header_property(_DictAccessorProperty):
+    """Like `environ_property` but for headers."""
+
+    def lookup(self, obj):
+        return obj.headers
+
+
+class HTMLBuilder(object):
+    """Helper object for HTML generation.
+
+    Per default there are two instances of this class: the `html` one, and
+    the `xhtml` one for those two dialects.  The class uses keyword parameters
+    and positional parameters to generate small snippets of HTML.
+
+    Keyword parameters are converted to XML/SGML attributes, positional
+    arguments are used as children.  Because Python accepts positional
+    arguments before keyword arguments it's a good idea to use a list with the
+    star-syntax for some children:
+
+    >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
+    ...                        html.a('bar', href='bar.html')])
+    u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'
+
+    This class works around some browser limitations and cannot be used for
+    arbitrary SGML/XML generation.  For that purpose lxml and similar
+    libraries exist.
+
+    Calling the builder escapes the string passed:
+
+    >>> html.p(html("<foo>"))
+    u'<p>&lt;foo&gt;</p>'
+    """
+
+    _entity_re = re.compile(r"&([^;]+);")
+    _entities = name2codepoint.copy()
+    _entities["apos"] = 39
+    _empty_elements = {
+        "area",
+        "base",
+        "basefont",
+        "br",
+        "col",
+        "command",
+        "embed",
+        "frame",
+        "hr",
+        "img",
+        "input",
+        "keygen",
+        "isindex",
+        "link",
+        "meta",
+        "param",
+        "source",
+        "wbr",
+    }
+    _boolean_attributes = {
+        "selected",
+        "checked",
+        "compact",
+        "declare",
+        "defer",
+        "disabled",
+        "ismap",
+        "multiple",
+        "nohref",
+        "noresize",
+        "noshade",
+        "nowrap",
+    }
+    _plaintext_elements = {"textarea"}
+    _c_like_cdata = {"script", "style"}
+
+    def __init__(self, dialect):
+        self._dialect = dialect
+
+    def __call__(self, s):
+        return escape(s)
+
+    def __getattr__(self, tag):
+        if tag[:2] == "__":
+            raise AttributeError(tag)
+
+        def proxy(*children, **arguments):
+            buffer = "<" + tag
+            for key, value in iteritems(arguments):
+                if value is None:
+                    continue
+                if key[-1] == "_":
+                    key = key[:-1]
+                if key in self._boolean_attributes:
+                    if not value:
+                        continue
+                    if self._dialect == "xhtml":
+                        value = '="' + key + '"'
+                    else:
+                        value = ""
+                else:
+                    value = '="' + escape(value) + '"'
+                buffer += " " + key + value
+            if not children and tag in self._empty_elements:
+                if self._dialect == "xhtml":
+                    buffer += " />"
+                else:
+                    buffer += ">"
+                return buffer
+            buffer += ">"
+
+            children_as_string = "".join(
+                [text_type(x) for x in children if x is not None]
+            )
+
+            if children_as_string:
+                if tag in self._plaintext_elements:
+                    children_as_string = escape(children_as_string)
+                elif tag in self._c_like_cdata and self._dialect == "xhtml":
+                    children_as_string = (
+                        "/*<![CDATA[*/" + children_as_string + "/*]]>*/"
+                    )
+            buffer += children_as_string + "</" + tag + ">"
+            return buffer
+
+        return proxy
+
+    def __repr__(self):
+        return "<%s for %r>" % (self.__class__.__name__, self._dialect)
+
+
+html = HTMLBuilder("html")
+xhtml = HTMLBuilder("xhtml")
+
+# https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in
+# https://www.iana.org/assignments/media-types/media-types.xhtml
+# Types listed in the XDG mime info that have a charset in the IANA registration.
+_charset_mimetypes = {
+    "application/ecmascript",
+    "application/javascript",
+    "application/sql",
+    "application/xml",
+    "application/xml-dtd",
+    "application/xml-external-parsed-entity",
+}
+
+
+def get_content_type(mimetype, charset):
+    """Returns the full content type string with charset for a mimetype.
+
+    If the mimetype represents text, the charset parameter will be
+    appended, otherwise the mimetype is returned unchanged.
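+
+    For example (a sketch):
+
+    >>> get_content_type('text/html', 'utf-8')
+    'text/html; charset=utf-8'
+    >>> get_content_type('application/octet-stream', 'utf-8')
+    'application/octet-stream'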
+
+    :param mimetype: The mimetype to be used as content type.
+    :param charset: The charset to be appended for text mimetypes.
+    :return: The content type.
+
+    .. versionchanged:: 0.15
+        Any type that ends with ``+xml`` gets a charset, not just those
+        that start with ``application/``. Known text types such as
+        ``application/javascript`` are also given charsets.
+    """
+    if (
+        mimetype.startswith("text/")
+        or mimetype in _charset_mimetypes
+        or mimetype.endswith("+xml")
+    ):
+        mimetype += "; charset=" + charset
+
+    return mimetype
+
+
+def detect_utf_encoding(data):
+    """Detect which UTF encoding was used to encode the given bytes.
+
+    The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is
+    accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big
+    or little endian. Some editors or libraries may prepend a BOM.
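+
+    For example (a sketch):
+
+    >>> detect_utf_encoding(b'{"a": 1}')
+    'utf-8'
+    >>> detect_utf_encoding('{"a": 1}'.encode('utf-16-le'))
+    'utf-16-le'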
+
+    :internal:
+
+    :param data: Bytes in unknown UTF encoding.
+    :return: UTF encoding name
+
+    .. versionadded:: 0.15
+    """
+    head = data[:4]
+
+    if head[:3] == codecs.BOM_UTF8:
+        return "utf-8-sig"
+
+    if b"\x00" not in head:
+        return "utf-8"
+
+    if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE):
+        return "utf-32"
+
+    if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE):
+        return "utf-16"
+
+    if len(head) == 4:
+        if head[:3] == b"\x00\x00\x00":
+            return "utf-32-be"
+
+        if head[::2] == b"\x00\x00":
+            return "utf-16-be"
+
+        if head[1:] == b"\x00\x00\x00":
+            return "utf-32-le"
+
+        if head[1::2] == b"\x00\x00":
+            return "utf-16-le"
+
+    if len(head) == 2:
+        return "utf-16-be" if head.startswith(b"\x00") else "utf-16-le"
+
+    return "utf-8"
+
+
+def format_string(string, context):
+    """String-template format a string:
+
+    >>> format_string('$foo and ${foo}s', dict(foo=42))
+    '42 and 42s'
+
+    This does not do any attribute lookup etc.  For more advanced string
+    formattings have a look at the `werkzeug.template` module.
+
+    :param string: the format string.
+    :param context: a dict with the variables to insert.
+    """
+
+    def lookup_arg(match):
+        x = context[match.group(1) or match.group(2)]
+        if not isinstance(x, string_types):
+            x = type(string)(x)
+        return x
+
+    return _format_re.sub(lookup_arg, string)
+
+
+def secure_filename(filename):
+    r"""Pass it a filename and it will return a secure version of it.  This
+    filename can then safely be stored on a regular file system and passed
+    to :func:`os.path.join`.  The filename returned is an ASCII only string
+    for maximum portability.
+
+    On Windows systems the function also makes sure that the file is not
+    named after one of the special device files.
+
+    >>> secure_filename("My cool movie.mov")
+    'My_cool_movie.mov'
+    >>> secure_filename("../../../etc/passwd")
+    'etc_passwd'
+    >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
+    'i_contain_cool_umlauts.txt'
+
+    The function might return an empty filename.  It's your responsibility
+    to ensure that the filename is unique and that you abort or
+    generate a random filename if the function returned an empty one.
+
+    .. versionadded:: 0.5
+
+    :param filename: the filename to secure
+    """
+    if isinstance(filename, text_type):
+        from unicodedata import normalize
+
+        filename = normalize("NFKD", filename).encode("ascii", "ignore")
+        if not PY2:
+            filename = filename.decode("ascii")
+    for sep in os.path.sep, os.path.altsep:
+        if sep:
+            filename = filename.replace(sep, " ")
+    filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip(
+        "._"
+    )
+
+    # on nt a couple of special files are present in each folder.  We
+    # have to ensure that the target file is not such a filename.  In
+    # this case we prepend an underline
+    if (
+        os.name == "nt"
+        and filename
+        and filename.split(".")[0].upper() in _windows_device_files
+    ):
+        filename = "_" + filename
+
+    return filename
+
+
+def escape(s, quote=None):
+    """Replace special characters "&", "<", ">" and (") to HTML-safe sequences.
+
+    There is a special handling for `None` which escapes to an empty string.
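+
+    For example (a sketch):
+
+    >>> escape('<a href="x">&</a>')
+    '&lt;a href=&quot;x&quot;&gt;&amp;&lt;/a&gt;'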
+
+    .. versionchanged:: 0.9
+       `quote` is now implicitly on.
+
+    :param s: the string to escape.
+    :param quote: ignored.
+    """
+    if s is None:
+        return ""
+    elif hasattr(s, "__html__"):
+        return text_type(s.__html__())
+    elif not isinstance(s, string_types):
+        s = text_type(s)
+    if quote is not None:
+        from warnings import warn
+
+        warn(
+            "The 'quote' parameter is no longer used as of version 0.9"
+            " and will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+    s = (
+        s.replace("&", "&amp;")
+        .replace("<", "&lt;")
+        .replace(">", "&gt;")
+        .replace('"', "&quot;")
+    )
+    return s
+
+
+def unescape(s):
+    """The reverse function of `escape`.  This unescapes all the HTML
+    entities, not only the XML entities inserted by `escape`.
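+
+    For example (a sketch; output shown as on Python 3):
+
+    >>> unescape('&lt;p&gt; &amp; &#62;')
+    '<p> & >'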
+
+    :param s: the string to unescape.
+    """
+
+    def handle_match(m):
+        name = m.group(1)
+        if name in HTMLBuilder._entities:
+            return unichr(HTMLBuilder._entities[name])
+        try:
+            if name[:2] in ("#x", "#X"):
+                return unichr(int(name[2:], 16))
+            elif name.startswith("#"):
+                return unichr(int(name[1:]))
+        except ValueError:
+            pass
+        return u""
+
+    return _entity_re.sub(handle_match, s)
+
+
+def redirect(location, code=302, Response=None):
+    """Returns a response object (a WSGI application) that, if called,
+    redirects the client to the target location. Supported codes are
+    301, 302, 303, 305, 307, and 308. 300 is not supported because
+    it's not a real redirect and 304 because it's the answer for a
+    request with defined If-Modified-Since headers.
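+
+    A quick sketch of the result:
+
+    >>> response = redirect('http://example.com/', code=301)
+    >>> response.status_code
+    301
+    >>> response.headers['Location']
+    'http://example.com/'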
+
+    .. versionadded:: 0.6
+       The location can now be a unicode string that is encoded using
+       the :func:`iri_to_uri` function.
+
+    .. versionadded:: 0.10
+        The class used for the Response object can now be passed in.
+
+    :param location: the location the response should redirect to.
+    :param code: the redirect status code. defaults to 302.
+    :param class Response: a Response class to use when instantiating a
+        response. The default is :class:`werkzeug.wrappers.Response` if
+        unspecified.
+    """
+    if Response is None:
+        from .wrappers import Response
+
+    display_location = escape(location)
+    if isinstance(location, text_type):
+        # Safe conversion is necessary here as we might redirect
+        # to a broken URI scheme (for instance itms-services).
+        from .urls import iri_to_uri
+
+        location = iri_to_uri(location, safe_conversion=True)
+    response = Response(
+        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
+        "<title>Redirecting...</title>\n"
+        "<h1>Redirecting...</h1>\n"
+        "<p>You should be redirected automatically to target URL: "
+        '<a href="%s">%s</a>.  If not click the link.'
+        % (escape(location), display_location),
+        code,
+        mimetype="text/html",
+    )
+    response.headers["Location"] = location
+    return response
+
+
+def append_slash_redirect(environ, code=301):
+    """Redirects to the same URL but with a slash appended.  The behavior
+    of this function is undefined if the path ends with a slash already.
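+
+    A minimal sketch (the environ values are illustrative):
+
+    >>> environ = {'PATH_INFO': '/about', 'QUERY_STRING': 'x=1'}
+    >>> append_slash_redirect(environ).headers['Location']
+    'about/?x=1'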
+
+    :param environ: the WSGI environment for the request that triggers
+                    the redirect.
+    :param code: the status code for the redirect.
+    """
+    new_path = environ["PATH_INFO"].strip("/") + "/"
+    query_string = environ.get("QUERY_STRING")
+    if query_string:
+        new_path += "?" + query_string
+    return redirect(new_path, code)
+
+
+def import_string(import_name, silent=False):
+    """Imports an object based on a string.  This is useful if you want to
+    use import paths as endpoints or something similar.  An import path can
+    be specified either in dotted notation (``xml.sax.saxutils.escape``)
+    or with a colon as object delimiter (``xml.sax.saxutils:escape``).
+
+    If `silent` is True the return value will be `None` if the import fails.
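+
+    For example (a sketch):
+
+    >>> import_string('os.path:join') is os.path.join
+    True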
+
+    :param import_name: the dotted name for the object to import.
+    :param silent: if set to `True` import errors are ignored and
+                   `None` is returned instead.
+    :return: imported object
+    """
+    # force the import name to automatically convert to strings
+    # __import__ is not able to handle unicode strings in the fromlist
+    # if the module is a package
+    import_name = str(import_name).replace(":", ".")
+    try:
+        try:
+            __import__(import_name)
+        except ImportError:
+            if "." not in import_name:
+                raise
+        else:
+            return sys.modules[import_name]
+
+        module_name, obj_name = import_name.rsplit(".", 1)
+        module = __import__(module_name, globals(), locals(), [obj_name])
+        try:
+            return getattr(module, obj_name)
+        except AttributeError as e:
+            raise ImportError(e)
+
+    except ImportError as e:
+        if not silent:
+            reraise(
+                ImportStringError, ImportStringError(import_name, e), sys.exc_info()[2]
+            )
+
+
+def find_modules(import_path, include_packages=False, recursive=False):
+    """Finds all the modules below a package.  This can be useful to
+    automatically import all views / controllers so that their metaclasses /
+    function decorators have a chance to register themselves on the
+    application.
+
+    Packages are not returned unless `include_packages` is `True`.  This can
+    also recursively list modules but in that case it will import all the
+    packages to get the correct load path of those modules.
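+
+    A usage sketch (``myapp.views`` is a hypothetical package):
+
+    >>> for name in find_modules('myapp.views'):  # doctest: +SKIP
+    ...     __import__(name)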
+
+    :param import_path: the dotted name for the package to find child modules.
+    :param include_packages: set to `True` if packages should be returned, too.
+    :param recursive: set to `True` if recursion should happen.
+    :return: generator
+    """
+    module = import_string(import_path)
+    path = getattr(module, "__path__", None)
+    if path is None:
+        raise ValueError("%r is not a package" % import_path)
+    basename = module.__name__ + "."
+    for _importer, modname, ispkg in pkgutil.iter_modules(path):
+        modname = basename + modname
+        if ispkg:
+            if include_packages:
+                yield modname
+            if recursive:
+                for item in find_modules(modname, include_packages, True):
+                    yield item
+        else:
+            yield modname
+
+
+def validate_arguments(func, args, kwargs, drop_extra=True):
+    """Checks if the function accepts the arguments and keyword arguments.
+    Returns a new ``(args, kwargs)`` tuple that can safely be passed to
+    the function without causing a `TypeError` because the function signature
+    is incompatible.  If `drop_extra` is set to `True` (which is the default)
+    any extra positional or keyword arguments are dropped automatically.
+
+    The exception raised provides three attributes:
+
+    `missing`
+        A set of argument names that the function expected but were
+        missing.
+
+    `extra`
+        A dict of keyword arguments that the function cannot handle but
+        were provided.
+
+    `extra_positional`
+        A list of values that were given as positional arguments but that
+        the function cannot accept.
+
+    This can be useful for decorators that forward user submitted data to
+    a view function::
+
+        from werkzeug.utils import ArgumentValidationError, validate_arguments
+
+        def sanitize(f):
+            def proxy(request):
+                data = request.values.to_dict()
+                try:
+                    args, kwargs = validate_arguments(f, (request,), data)
+                except ArgumentValidationError:
+                    raise BadRequest('The browser failed to transmit all '
+                                     'the data expected.')
+                return f(*args, **kwargs)
+            return proxy
+
+    :param func: the function the validation is performed against.
+    :param args: a tuple of positional arguments.
+    :param kwargs: a dict of keyword arguments.
+    :param drop_extra: set to `False` if you don't want extra arguments
+                       to be silently dropped.
+    :return: tuple in the form ``(args, kwargs)``.
+    """
+    parser = _parse_signature(func)
+    args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
+    if missing:
+        raise ArgumentValidationError(tuple(missing))
+    elif (extra or extra_positional) and not drop_extra:
+        raise ArgumentValidationError(None, extra, extra_positional)
+    return tuple(args), kwargs
+
+
+def bind_arguments(func, args, kwargs):
+    """Bind the arguments provided into a dict.  When passed a function,
+    a tuple of arguments and a dict of keyword arguments `bind_arguments`
+    returns a dict of names as the function would see it.  This can be useful
+    to implement a cache decorator that uses the function arguments to build
+    the cache key based on the values of the arguments.
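+
+    For example (a sketch):
+
+    >>> def f(a, b):
+    ...     pass
+    >>> sorted(bind_arguments(f, (1,), {'b': 2}).items())
+    [('a', 1), ('b', 2)]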
+
+    :param func: the function the arguments should be bound for.
+    :param args: tuple of positional arguments.
+    :param kwargs: a dict of keyword arguments.
+    :return: a :class:`dict` of bound keyword arguments.
+    """
+    (
+        args,
+        kwargs,
+        missing,
+        extra,
+        extra_positional,
+        arg_spec,
+        vararg_var,
+        kwarg_var,
+    ) = _parse_signature(func)(args, kwargs)
+    values = {}
+    for (name, _has_default, _default), value in zip(arg_spec, args):
+        values[name] = value
+    if vararg_var is not None:
+        values[vararg_var] = tuple(extra_positional)
+    elif extra_positional:
+        raise TypeError("too many positional arguments")
+    if kwarg_var is not None:
+        multikw = set(extra) & set([x[0] for x in arg_spec])
+        if multikw:
+            raise TypeError(
+                "got multiple values for keyword argument " + repr(next(iter(multikw)))
+            )
+        values[kwarg_var] = extra
+    elif extra:
+        raise TypeError("got unexpected keyword argument " + repr(next(iter(extra))))
+    return values
+
+
+class ArgumentValidationError(ValueError):
+
+    """Raised if :func:`validate_arguments` fails to validate"""
+
+    def __init__(self, missing=None, extra=None, extra_positional=None):
+        self.missing = set(missing or ())
+        self.extra = extra or {}
+        self.extra_positional = extra_positional or []
+        ValueError.__init__(
+            self,
+            "function arguments invalid. (%d missing, %d additional)"
+            % (len(self.missing), len(self.extra) + len(self.extra_positional)),
+        )
+
+
+class ImportStringError(ImportError):
+    """Provides information about a failed :func:`import_string` attempt."""
+
+    #: String in dotted notation that failed to be imported.
+    import_name = None
+    #: Wrapped exception.
+    exception = None
+
+    def __init__(self, import_name, exception):
+        self.import_name = import_name
+        self.exception = exception
+
+        msg = (
+            "import_string() failed for %r. Possible reasons are:\n\n"
+            "- missing __init__.py in a package;\n"
+            "- package or module path not included in sys.path;\n"
+            "- duplicated package or module name taking precedence in "
+            "sys.path;\n"
+            "- missing module, class, function or variable;\n\n"
+            "Debugged import:\n\n%s\n\n"
+            "Original exception:\n\n%s: %s"
+        )
+
+        name = ""
+        tracked = []
+        for part in import_name.replace(":", ".").split("."):
+            name += (name and ".") + part
+            imported = import_string(name, silent=True)
+            if imported:
+                tracked.append((name, getattr(imported, "__file__", None)))
+            else:
+                track = ["- %r found in %r." % (n, i) for n, i in tracked]
+                track.append("- %r not found." % name)
+                msg = msg % (
+                    import_name,
+                    "\n".join(track),
+                    exception.__class__.__name__,
+                    str(exception),
+                )
+                break
+
+        ImportError.__init__(self, msg)
+
+    def __repr__(self):
+        return "<%s(%r, %r)>" % (
+            self.__class__.__name__,
+            self.import_name,
+            self.exception,
+        )
+
+
+# DEPRECATED
+from .datastructures import CombinedMultiDict as _CombinedMultiDict
+from .datastructures import EnvironHeaders as _EnvironHeaders
+from .datastructures import Headers as _Headers
+from .datastructures import MultiDict as _MultiDict
+from .http import dump_cookie as _dump_cookie
+from .http import parse_cookie as _parse_cookie
+
+
+class MultiDict(_MultiDict):
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'werkzeug.utils.MultiDict' has moved to 'werkzeug"
+            ".datastructures.MultiDict' as of version 0.5. This old"
+            " import will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(MultiDict, self).__init__(*args, **kwargs)
+
+
+class CombinedMultiDict(_CombinedMultiDict):
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'werkzeug.utils.CombinedMultiDict' has moved to 'werkzeug"
+            ".datastructures.CombinedMultiDict' as of version 0.5. This"
+            " old import will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(CombinedMultiDict, self).__init__(*args, **kwargs)
+
+
+class Headers(_Headers):
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'werkzeug.utils.Headers' has moved to 'werkzeug"
+            ".datastructures.Headers' as of version 0.5. This old"
+            " import will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(Headers, self).__init__(*args, **kwargs)
+
+
+class EnvironHeaders(_EnvironHeaders):
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'werkzeug.utils.EnvironHeaders' has moved to 'werkzeug"
+            ".datastructures.EnvironHeaders' as of version 0.5. This"
+            " old import will be removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(EnvironHeaders, self).__init__(*args, **kwargs)
+
+
+def parse_cookie(*args, **kwargs):
+    warnings.warn(
+        "'werkzeug.utils.parse_cookie' as moved to 'werkzeug.http"
+        ".parse_cookie' as of version 0.5. This old import will be"
+        " removed in version 1.0.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return _parse_cookie(*args, **kwargs)
+
+
+def dump_cookie(*args, **kwargs):
+    warnings.warn(
+        "'werkzeug.utils.dump_cookie' as moved to 'werkzeug.http"
+        ".dump_cookie' as of version 0.5. This old import will be"
+        " removed in version 1.0.",
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return _dump_cookie(*args, **kwargs)
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/__init__.py b/lib/python3.7/site-packages/werkzeug/wrappers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..56c764abb073527fd160f4ff3b46cf2d73311aa0
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/wrappers/__init__.py
@@ -0,0 +1,36 @@
+"""
+werkzeug.wrappers
+~~~~~~~~~~~~~~~~~
+
+The wrappers are simple request and response objects which you can
+subclass to do whatever you want them to do.  The request object contains
+the information transmitted by the client (web browser) and the response
+object contains all the information sent back to the browser.
+
+An important detail is that the request object is created with the WSGI
+environ and acts as a high-level proxy whereas the response object is an
+actual WSGI application.
+
+Like everything else in Werkzeug these objects will work correctly with
+unicode data.  Incoming form data parsed by the request object will be
+decoded into a unicode object if possible and if it makes sense.
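+
+A minimal usage sketch (names are illustrative)::
+
+    from werkzeug.wrappers import Request, Response
+
+    def application(environ, start_response):
+        request = Request(environ)
+        response = Response(u'Hello %s!' % request.args.get('name', 'World'))
+        return response(environ, start_response)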
+
+:copyright: 2007 Pallets
+:license: BSD-3-Clause
+"""
+from .accept import AcceptMixin
+from .auth import AuthorizationMixin
+from .auth import WWWAuthenticateMixin
+from .base_request import BaseRequest
+from .base_response import BaseResponse
+from .common_descriptors import CommonRequestDescriptorsMixin
+from .common_descriptors import CommonResponseDescriptorsMixin
+from .etag import ETagRequestMixin
+from .etag import ETagResponseMixin
+from .request import PlainRequest
+from .request import Request
+from .request import StreamOnlyMixin
+from .response import Response
+from .response import ResponseStream
+from .response import ResponseStreamMixin
+from .user_agent import UserAgentMixin
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce19bd215723960bec84ad20311ef2bca4a5dc7a
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/accept.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/accept.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0901bd92ca562914b2047840a1b90f7ef0523ccb
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/accept.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/auth.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/auth.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6028e63547af91327c46c24c4bb47f52e1e7d502
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/auth.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/base_request.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/base_request.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc0e29fcd1e11e355d44ef3e6b08beca25f80190
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/base_request.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/base_response.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/base_response.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b823311c7cd86e40b08cec67247d72ffb8ec3ec
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/base_response.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/common_descriptors.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/common_descriptors.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c8e0f40a41df39b334f7fd75244a53c81cf65f5
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/common_descriptors.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/etag.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/etag.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f9ff92adedc7aba51a5feb19ba8eaa070a41c94
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/etag.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/json.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/json.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a7abb11e45e2a44bee14c4b94e0ebef4119decff
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/json.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/request.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/request.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b712826333bdf568cc8d3e97fa705c4405892b6
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/request.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/response.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/response.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94169aa49e97a93a43e9da7f9f1f1d055d2d5c39
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/response.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/user_agent.cpython-37.pyc b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/user_agent.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e1e5bc3b09c0758acb12652c7adc312c6aae1ac
Binary files /dev/null and b/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/user_agent.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/accept.py b/lib/python3.7/site-packages/werkzeug/wrappers/accept.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0620a0a55c25e22b1e510b2150b80f92320e104
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/wrappers/accept.py
@@ -0,0 +1,50 @@
+from ..datastructures import CharsetAccept
+from ..datastructures import LanguageAccept
+from ..datastructures import MIMEAccept
+from ..http import parse_accept_header
+from ..utils import cached_property
+
+
+class AcceptMixin(object):
+    """A mixin for classes with an :attr:`~BaseResponse.environ` attribute
+    to get all the HTTP accept headers as
+    :class:`~werkzeug.datastructures.Accept` objects (or subclasses
+    thereof).
+    """
+
+    @cached_property
+    def accept_mimetypes(self):
+        """List of mimetypes this client supports as
+        :class:`~werkzeug.datastructures.MIMEAccept` object.
+        """
+        return parse_accept_header(self.environ.get("HTTP_ACCEPT"), MIMEAccept)
+
+    @cached_property
+    def accept_charsets(self):
+        """List of charsets this client supports as
+        :class:`~werkzeug.datastructures.CharsetAccept` object.
+        """
+        return parse_accept_header(
+            self.environ.get("HTTP_ACCEPT_CHARSET"), CharsetAccept
+        )
+
+    @cached_property
+    def accept_encodings(self):
+        """List of encodings this client accepts.  Encodings in a HTTP term
+        are compression encodings such as gzip.  For charsets have a look at
+        :attr:`accept_charset`.
+        """
+        return parse_accept_header(self.environ.get("HTTP_ACCEPT_ENCODING"))
+
+    @cached_property
+    def accept_languages(self):
+        """List of languages this client accepts as
+        :class:`~werkzeug.datastructures.LanguageAccept` object.
+
+        .. versionchanged:: 0.5
+           In previous versions this was a regular
+           :class:`~werkzeug.datastructures.Accept` object.
+        """
+        return parse_accept_header(
+            self.environ.get("HTTP_ACCEPT_LANGUAGE"), LanguageAccept
+        )
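In practice these properties are most useful through `best_match` on the parsed `Accept` objects. A minimal sketch of content negotiation built on `AcceptMixin` (the offered mimetypes and response bodies are illustrative):

```python
from werkzeug.wrappers import Request, Response

@Request.application
def app(request):
    # best_match picks the client-preferred type out of the ones we offer.
    best = request.accept_mimetypes.best_match(
        ["application/json", "text/html"], default="text/html"
    )
    if best == "application/json":
        return Response('{"hello": "world"}', mimetype="application/json")
    return Response("<p>hello world</p>", mimetype="text/html")
```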
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/auth.py b/lib/python3.7/site-packages/werkzeug/wrappers/auth.py
new file mode 100644
index 0000000000000000000000000000000000000000..714f75546c32fef4f398f7ac7814ebcc73398d00
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/wrappers/auth.py
@@ -0,0 +1,33 @@
+from ..http import parse_authorization_header
+from ..http import parse_www_authenticate_header
+from ..utils import cached_property
+
+
+class AuthorizationMixin(object):
+    """Adds an :attr:`authorization` property that represents the parsed
+    value of the `Authorization` header as
+    :class:`~werkzeug.datastructures.Authorization` object.
+    """
+
+    @cached_property
+    def authorization(self):
+        """The `Authorization` object in parsed form."""
+        header = self.environ.get("HTTP_AUTHORIZATION")
+        return parse_authorization_header(header)
+
+
+class WWWAuthenticateMixin(object):
+    """Adds a :attr:`www_authenticate` property to a response object."""
+
+    @property
+    def www_authenticate(self):
+        """The `WWW-Authenticate` header in a parsed form."""
+
+        def on_update(www_auth):
+            if not www_auth and "www-authenticate" in self.headers:
+                del self.headers["www-authenticate"]
+            elif www_auth:
+                self.headers["WWW-Authenticate"] = www_auth.to_header()
+
+        header = self.headers.get("www-authenticate")
+        return parse_www_authenticate_header(header, on_update)
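Note that `authorization` is `None` when no `Authorization` header was sent, so callers should guard for that. A small sketch of HTTP Basic checking that pairs both mixins (the credentials and realm are placeholders):

```python
from werkzeug.wrappers import Request, Response

@Request.application
def app(request):
    auth = request.authorization  # None if no Authorization header was sent
    if auth is None or not (auth.username == "admin" and auth.password == "secret"):
        return Response(
            "Authentication required", status=401,
            headers={"WWW-Authenticate": 'Basic realm="demo"'},
        )
    return Response("Welcome, %s" % auth.username)
```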
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/base_request.py b/lib/python3.7/site-packages/werkzeug/wrappers/base_request.py
new file mode 100644
index 0000000000000000000000000000000000000000..41e8aadb3e8c9ab7fbf5a41a76e8e5084561d93b
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/wrappers/base_request.py
@@ -0,0 +1,693 @@
+import warnings
+from functools import update_wrapper
+from io import BytesIO
+
+from .._compat import to_native
+from .._compat import to_unicode
+from .._compat import wsgi_decoding_dance
+from .._compat import wsgi_get_bytes
+from ..datastructures import CombinedMultiDict
+from ..datastructures import EnvironHeaders
+from ..datastructures import ImmutableList
+from ..datastructures import ImmutableMultiDict
+from ..datastructures import ImmutableTypeConversionDict
+from ..datastructures import iter_multi_items
+from ..datastructures import MultiDict
+from ..formparser import default_stream_factory
+from ..formparser import FormDataParser
+from ..http import parse_cookie
+from ..http import parse_options_header
+from ..urls import url_decode
+from ..utils import cached_property
+from ..utils import environ_property
+from ..wsgi import get_content_length
+from ..wsgi import get_current_url
+from ..wsgi import get_host
+from ..wsgi import get_input_stream
+
+
+class BaseRequest(object):
+    """Very basic request object.  This does not implement advanced stuff like
+    entity tag parsing or cache controls.  The request object is created with
+    the WSGI environment as first argument and will add itself to the WSGI
+    environment as ``'werkzeug.request'`` unless it's created with
+    `populate_request` set to False.
+
+    There are a couple of mixins available that add additional functionality
+    to the request object, there is also a class called `Request` which
+    subclasses `BaseRequest` and all the important mixins.
+
+    It's a good idea to create a custom subclass of the :class:`BaseRequest`
+    and add missing functionality either via mixins or direct implementation.
+    Here is an example of such a subclass::
+
+        from werkzeug.wrappers import BaseRequest, ETagRequestMixin
+
+        class Request(BaseRequest, ETagRequestMixin):
+            pass
+
+    Request objects are **read only**.  As of 0.5 modifications are not
+    allowed in any place.  Unlike the lower level parsing functions the
+    request object will use immutable objects everywhere possible.
+
+    By default the request object will assume all the text data is `utf-8`
+    encoded.  Please refer to :doc:`the unicode chapter </unicode>` for more
+    details about customizing the behavior.
+
+    By default the request object will be added to the WSGI
+    environment as `werkzeug.request` to support the debugging system.
+    If you don't want that, set `populate_request` to `False`.
+
+    If `shallow` is `True` the environment is initialized as a shallow
+    object around the environ.  Every operation that would modify the
+    environ in any way (such as consuming form data) raises an exception
+    unless the `shallow` attribute is explicitly set to `False`.  This
+    is useful for middlewares where you don't want to consume the form
+    data by accident.  A shallow request is not added to the WSGI
+    environment.
+
+    .. versionchanged:: 0.5
+       read-only mode was enforced by using immutable classes for all
+       data.
+    """
+
+    #: the charset for the request, defaults to utf-8
+    charset = "utf-8"
+
+    #: the error handling procedure for errors, defaults to 'replace'
+    encoding_errors = "replace"
+
+    #: the maximum content length.  This is forwarded to the form data
+    #: parsing function (:func:`parse_form_data`).  When set and the
+    #: :attr:`form` or :attr:`files` attribute is accessed and the
+    #: parsing fails because more than the specified value is transmitted
+    #: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
+    #:
+    #: Have a look at :ref:`dealing-with-request-data` for more details.
+    #:
+    #: .. versionadded:: 0.5
+    max_content_length = None
+
+    #: the maximum form field size.  This is forwarded to the form data
+    #: parsing function (:func:`parse_form_data`).  When set and the
+    #: :attr:`form` or :attr:`files` attribute is accessed and the
+    #: data in memory for post data is longer than the specified value a
+    #: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
+    #:
+    #: Have a look at :ref:`dealing-with-request-data` for more details.
+    #:
+    #: .. versionadded:: 0.5
+    max_form_memory_size = None
+
+    #: the class to use for `args` and `form`.  The default is an
+    #: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
+    #: multiple values per key.  Alternatively it makes sense to use an
+    #: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
+    #: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
+    #: which is the fastest but only remembers the last key.  It is also
+    #: possible to use mutable structures, but this is not recommended.
+    #:
+    #: .. versionadded:: 0.6
+    parameter_storage_class = ImmutableMultiDict
+
+    #: the type to be used for list values from the incoming WSGI environment.
+    #: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
+    #: (for example for :attr:`access_list`).
+    #:
+    #: .. versionadded:: 0.6
+    list_storage_class = ImmutableList
+
+    #: the type to be used for dict values from the incoming WSGI environment.
+    #: By default an
+    #: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used
+    #: (for example for :attr:`cookies`).
+    #:
+    #: .. versionadded:: 0.6
+    dict_storage_class = ImmutableTypeConversionDict
+
+    #: The form data parser that should be used.  Can be replaced to customize
+    #: the form data parsing.
+    form_data_parser_class = FormDataParser
+
+    #: Optionally a list of hosts trusted by this request.  By default all
+    #: hosts are trusted, which means that whatever host the client sends
+    #: will be accepted.
+    #:
+    #: Because `Host` and `X-Forwarded-Host` headers can be set to any value by
+    #: a malicious client, it is recommended to either set this property or
+    #: implement similar validation in the proxy (if application is being run
+    #: behind one).
+    #:
+    #: .. versionadded:: 0.9
+    trusted_hosts = None
+
+    #: Indicates whether the data descriptor should be allowed to read and
+    #: buffer up the input stream.  By default it's enabled.
+    #:
+    #: .. versionadded:: 0.9
+    disable_data_descriptor = False
+
+    def __init__(self, environ, populate_request=True, shallow=False):
+        self.environ = environ
+        if populate_request and not shallow:
+            self.environ["werkzeug.request"] = self
+        self.shallow = shallow
+
+    def __repr__(self):
+        # make sure the __repr__ even works if the request was created
+        # from an invalid WSGI environment.  If we display the request
+        # in a debug session we don't want the repr to blow up.
+        args = []
+        try:
+            args.append("'%s'" % to_native(self.url, self.url_charset))
+            args.append("[%s]" % self.method)
+        except Exception:
+            args.append("(invalid WSGI environ)")
+
+        return "<%s %s>" % (self.__class__.__name__, " ".join(args))
+
+    @property
+    def url_charset(self):
+        """The charset that is assumed for URLs.  Defaults to the value
+        of :attr:`charset`.
+
+        .. versionadded:: 0.6
+        """
+        return self.charset
+
+    @classmethod
+    def from_values(cls, *args, **kwargs):
+        """Create a new request object based on the values provided.  If
+        environ is given missing values are filled from there.  This method is
+        useful for small scripts when you need to simulate a request from an URL.
+        Do not use this method for unittesting, there is a full featured client
+        object (:class:`Client`) that allows to create multipart requests,
+        support for cookies etc.
+
+        This accepts the same options as the
+        :class:`~werkzeug.test.EnvironBuilder`.
+
+        .. versionchanged:: 0.5
+           This method now accepts the same arguments as
+           :class:`~werkzeug.test.EnvironBuilder`.  Because of this the
+           `environ` parameter is now called `environ_overrides`.
+
+        :return: request object
+        """
+        from ..test import EnvironBuilder
+
+        charset = kwargs.pop("charset", cls.charset)
+        kwargs["charset"] = charset
+        builder = EnvironBuilder(*args, **kwargs)
+        try:
+            return builder.get_request(cls)
+        finally:
+            builder.close()
+
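`from_values` is handy for quick experiments without a real server; a minimal sketch (the path and form values here are illustrative):

```python
from werkzeug.wrappers import Request

# Build a request without a WSGI server; the query string is parsed
# out of the path and the data dict becomes urlencoded form data.
req = Request.from_values(
    "/search?q=werkzeug", method="POST", data={"page": "2"}
)
assert req.path == "/search"
assert req.args["q"] == "werkzeug"
assert req.form["page"] == "2"
```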
+    @classmethod
+    def application(cls, f):
+        """Decorate a function as responder that accepts the request as first
+        argument.  This works like the :func:`responder` decorator but the
+        function is passed the request object as first argument and the
+        request object will be closed automatically::
+
+            @Request.application
+            def my_wsgi_app(request):
+                return Response('Hello World!')
+
+        As of Werkzeug 0.14 HTTP exceptions are automatically caught and
+        converted to responses instead of failing.
+
+        :param f: the WSGI callable to decorate
+        :return: a new WSGI callable
+        """
+        #: return a callable that wraps the second-to-last argument with
+        #: the request and calls the function with all the arguments up to
+        #: that one and the request.  The return value is then called with
+        #: the last two arguments.  This makes it possible to use this
+        #: decorator for both methods and standalone WSGI functions.
+        from ..exceptions import HTTPException
+
+        def application(*args):
+            request = cls(args[-2])
+            with request:
+                try:
+                    resp = f(*args[:-2] + (request,))
+                except HTTPException as e:
+                    resp = e.get_response(args[-2])
+                return resp(*args[-2:])
+
+        return update_wrapper(application, f)
+
+    def _get_file_stream(
+        self, total_content_length, content_type, filename=None, content_length=None
+    ):
+        """Called to get a stream for the file upload.
+
+        This must provide a file-like class with `read()`, `readline()`
+        and `seek()` methods that is both writeable and readable.
+
+        The default implementation returns a temporary file if the total
+        content length is higher than 500KB.  Because many browsers do not
+        provide a content length for the files, only the total content
+        length matters.
+
+        :param total_content_length: the total content length of all the
+                                     data in the request combined.  This value
+                                     is guaranteed to be there.
+        :param content_type: the mimetype of the uploaded file.
+        :param filename: the filename of the uploaded file.  May be `None`.
+        :param content_length: the length of this file.  This value is usually
+                               not provided because web browsers do not provide
+                               it.
+        """
+        return default_stream_factory(
+            total_content_length=total_content_length,
+            filename=filename,
+            content_type=content_type,
+            content_length=content_length,
+        )
+
+    @property
+    def want_form_data_parsed(self):
+        """Returns True if the request method carries content.  As of
+        Werkzeug 0.9 this will be the case if a content type is transmitted.
+
+        .. versionadded:: 0.8
+        """
+        return bool(self.environ.get("CONTENT_TYPE"))
+
+    def make_form_data_parser(self):
+        """Creates the form data parser. Instantiates the
+        :attr:`form_data_parser_class` with some parameters.
+
+        .. versionadded:: 0.8
+        """
+        return self.form_data_parser_class(
+            self._get_file_stream,
+            self.charset,
+            self.encoding_errors,
+            self.max_form_memory_size,
+            self.max_content_length,
+            self.parameter_storage_class,
+        )
+
+    def _load_form_data(self):
+        """Method used internally to retrieve submitted data.  After calling
+        this sets `form` and `files` on the request object to multi dicts
+        filled with the incoming form data.  As a matter of fact the input
+        stream will be empty afterwards.  You can also call this method to
+        force the parsing of the form data.
+
+        .. versionadded:: 0.8
+        """
+        # abort early if we have already consumed the stream
+        if "form" in self.__dict__:
+            return
+
+        _assert_not_shallow(self)
+
+        if self.want_form_data_parsed:
+            content_type = self.environ.get("CONTENT_TYPE", "")
+            content_length = get_content_length(self.environ)
+            mimetype, options = parse_options_header(content_type)
+            parser = self.make_form_data_parser()
+            data = parser.parse(
+                self._get_stream_for_parsing(), mimetype, content_length, options
+            )
+        else:
+            data = (
+                self.stream,
+                self.parameter_storage_class(),
+                self.parameter_storage_class(),
+            )
+
+        # inject the values into the instance dict so that we bypass
+        # our cached_property non-data descriptor.
+        d = self.__dict__
+        d["stream"], d["form"], d["files"] = data
+
+    def _get_stream_for_parsing(self):
+        """This is the same as accessing :attr:`stream` with the difference
+        that if it finds cached data from calling :meth:`get_data` first it
+        will create a new stream out of the cached data.
+
+        .. versionadded:: 0.9.3
+        """
+        cached_data = getattr(self, "_cached_data", None)
+        if cached_data is not None:
+            return BytesIO(cached_data)
+        return self.stream
+
+    def close(self):
+        """Closes associated resources of this request object.  This
+        closes all file handles explicitly.  You can also use the request
+        object in a with statement which will automatically close it.
+
+        .. versionadded:: 0.9
+        """
+        files = self.__dict__.get("files")
+        for _key, value in iter_multi_items(files or ()):
+            value.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self.close()
+
+    @cached_property
+    def stream(self):
+        """
+        If the incoming form data was not encoded with a known mimetype
+        the data is stored unmodified in this stream for consumption.  Most
+        of the time it is a better idea to use :attr:`data` which will give
+        you that data as a string.  The stream only returns the data once.
+
+        Unlike :attr:`input_stream` this stream is properly guarded so that
+        you can't accidentally read past the length of the input.  Werkzeug
+        will internally always refer to this stream to read data which makes
+        it possible to wrap this object with a stream that does filtering.
+
+        .. versionchanged:: 0.9
+           This stream is now always available but might be consumed by the
+           form parser later on.  Previously the stream was only set if no
+           parsing happened.
+        """
+        _assert_not_shallow(self)
+        return get_input_stream(self.environ)
+
+    input_stream = environ_property(
+        "wsgi.input",
+        """The WSGI input stream.
+
+        In general it's a bad idea to use this one because you can
+        easily read past the boundary.  Use the :attr:`stream`
+        instead.""",
+    )
+
+    @cached_property
+    def args(self):
+        """The parsed URL parameters (the part in the URL after the question
+        mark).
+
+        By default an
+        :class:`~werkzeug.datastructures.ImmutableMultiDict`
+        is returned from this function.  This can be changed by setting
+        :attr:`parameter_storage_class` to a different type.  This might
+        be necessary if the order of the form data is important.
+        """
+        return url_decode(
+            wsgi_get_bytes(self.environ.get("QUERY_STRING", "")),
+            self.url_charset,
+            errors=self.encoding_errors,
+            cls=self.parameter_storage_class,
+        )
+
+    @cached_property
+    def data(self):
+        """
+        Contains the incoming request data as a string in case it came with
+        a mimetype Werkzeug does not handle.
+        """
+
+        if self.disable_data_descriptor:
+            raise AttributeError("data descriptor is disabled")
+        # XXX: this should eventually be deprecated.
+
+        # We trigger form data parsing first which means that the descriptor
+        # will not cache the data that would otherwise be .form or .files
+        # data.  This restores the behavior that was there in Werkzeug
+        # before 0.9.  New code should use :meth:`get_data` explicitly as
+        # this will make behavior explicit.
+        return self.get_data(parse_form_data=True)
+
+    def get_data(self, cache=True, as_text=False, parse_form_data=False):
+        """This reads the buffered incoming data from the client into one
+        bytestring.  By default this is cached but that behavior can be
+        changed by setting `cache` to `False`.
+
+        Usually it's a bad idea to call this method without checking the
+        content length first as a client could send dozens of megabytes or more
+        to cause memory problems on the server.
+
+        Note that if the form data was already parsed this method will not
+        return anything as form data parsing does not cache the data like
+        this method does.  To implicitly invoke the form data parsing
+        function, set `parse_form_data` to `True`.  When this is done the
+        return value of this method will be an empty string if the form
+        parser handles the data.  This is generally not necessary as if the
+        whole data is cached (which is the default) the form parser will use
+        the cached data to parse the form data.  In any case, check the
+        content length before calling this method to avoid exhausting
+        server memory.
+
+        If `as_text` is set to `True` the return value will be a decoded
+        unicode string.
+
+        .. versionadded:: 0.9
+        """
+        rv = getattr(self, "_cached_data", None)
+        if rv is None:
+            if parse_form_data:
+                self._load_form_data()
+            rv = self.stream.read()
+            if cache:
+                self._cached_data = rv
+        if as_text:
+            rv = rv.decode(self.charset, self.encoding_errors)
+        return rv
+
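The docstring's warning about content length is worth making concrete. A minimal guard, assuming the full `Request` class (whose `CommonRequestDescriptorsMixin` provides `content_length`; `BaseRequest` alone does not) and an arbitrary example cap:

```python
from werkzeug.exceptions import RequestEntityTooLarge

MAX_BODY = 1 * 1024 * 1024  # 1 MiB cap; an illustrative value


def read_body_safely(request):
    # content_length comes from the full Request class, not BaseRequest.
    if request.content_length is not None and request.content_length > MAX_BODY:
        raise RequestEntityTooLarge()
    return request.get_data(as_text=True)
```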
+    @cached_property
+    def form(self):
+        """The form parameters.  By default an
+        :class:`~werkzeug.datastructures.ImmutableMultiDict`
+        is returned from this function.  This can be changed by setting
+        :attr:`parameter_storage_class` to a different type.  This might
+        be necessary if the order of the form data is important.
+
+        Please keep in mind that file uploads will not end up here, but instead
+        in the :attr:`files` attribute.
+
+        .. versionchanged:: 0.9
+
+            Previous to Werkzeug 0.9 this would only contain form data for POST
+            and PUT requests.
+        """
+        self._load_form_data()
+        return self.form
+
+    @cached_property
+    def values(self):
+        """A :class:`werkzeug.datastructures.CombinedMultiDict` that combines
+        :attr:`args` and :attr:`form`."""
+        args = []
+        for d in self.args, self.form:
+            if not isinstance(d, MultiDict):
+                d = MultiDict(d)
+            args.append(d)
+        return CombinedMultiDict(args)
+
+    @cached_property
+    def files(self):
+        """:class:`~werkzeug.datastructures.MultiDict` object containing
+        all uploaded files.  Each key in :attr:`files` is the name from the
+        ``<input type="file" name="">``.  Each value in :attr:`files` is a
+        Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.
+
+        It basically behaves like a standard file object you know from Python,
+        with the difference that it also has a
+        :meth:`~werkzeug.datastructures.FileStorage.save` function that can
+        store the file on the filesystem.
+
+        Note that :attr:`files` will only contain data if the request method was
+        POST, PUT or PATCH and the ``<form>`` that posted to the request had
+        ``enctype="multipart/form-data"``.  It will be empty otherwise.
+
+        See the :class:`~werkzeug.datastructures.MultiDict` /
+        :class:`~werkzeug.datastructures.FileStorage` documentation for
+        more details about the used data structure.
+        """
+        self._load_form_data()
+        return self.files
+
+    @cached_property
+    def cookies(self):
+        """A :class:`dict` with the contents of all cookies transmitted with
+        the request."""
+        return parse_cookie(
+            self.environ,
+            self.charset,
+            self.encoding_errors,
+            cls=self.dict_storage_class,
+        )
+
+    @cached_property
+    def headers(self):
+        """The headers from the WSGI environ as immutable
+        :class:`~werkzeug.datastructures.EnvironHeaders`.
+        """
+        return EnvironHeaders(self.environ)
+
+    @cached_property
+    def path(self):
+        """Requested path as unicode.  This works a bit like the regular path
+        info in the WSGI environment but will always include a leading slash,
+        even if the URL root is accessed.
+        """
+        raw_path = wsgi_decoding_dance(
+            self.environ.get("PATH_INFO") or "", self.charset, self.encoding_errors
+        )
+        return "/" + raw_path.lstrip("/")
+
+    @cached_property
+    def full_path(self):
+        """Requested path as unicode, including the query string."""
+        return self.path + u"?" + to_unicode(self.query_string, self.url_charset)
+
+    @cached_property
+    def script_root(self):
+        """The root path of the script without the trailing slash."""
+        raw_path = wsgi_decoding_dance(
+            self.environ.get("SCRIPT_NAME") or "", self.charset, self.encoding_errors
+        )
+        return raw_path.rstrip("/")
+
+    @cached_property
+    def url(self):
+        """The reconstructed current URL as IRI.
+        See also: :attr:`trusted_hosts`.
+        """
+        return get_current_url(self.environ, trusted_hosts=self.trusted_hosts)
+
+    @cached_property
+    def base_url(self):
+        """Like :attr:`url` but without the querystring
+        See also: :attr:`trusted_hosts`.
+        """
+        return get_current_url(
+            self.environ, strip_querystring=True, trusted_hosts=self.trusted_hosts
+        )
+
+    @cached_property
+    def url_root(self):
+        """The full URL root (with hostname), this is the application
+        root as IRI.
+        See also: :attr:`trusted_hosts`.
+        """
+        return get_current_url(self.environ, True, trusted_hosts=self.trusted_hosts)
+
+    @cached_property
+    def host_url(self):
+        """Just the host with scheme as IRI.
+        See also: :attr:`trusted_hosts`.
+        """
+        return get_current_url(
+            self.environ, host_only=True, trusted_hosts=self.trusted_hosts
+        )
+
+    @cached_property
+    def host(self):
+        """Just the host including the port if available.
+        See also: :attr:`trusted_hosts`.
+        """
+        return get_host(self.environ, trusted_hosts=self.trusted_hosts)
+
+    query_string = environ_property(
+        "QUERY_STRING",
+        "",
+        read_only=True,
+        load_func=wsgi_get_bytes,
+        doc="The URL parameters as raw bytestring.",
+    )
+    method = environ_property(
+        "REQUEST_METHOD",
+        "GET",
+        read_only=True,
+        load_func=lambda x: x.upper(),
+        doc="The request method. (For example ``'GET'`` or ``'POST'``).",
+    )
+
+    @cached_property
+    def access_route(self):
+        """If a forwarded header exists this is a list of all ip addresses
+        from the client ip to the last proxy server.
+        """
+        if "HTTP_X_FORWARDED_FOR" in self.environ:
+            addr = self.environ["HTTP_X_FORWARDED_FOR"].split(",")
+            return self.list_storage_class([x.strip() for x in addr])
+        elif "REMOTE_ADDR" in self.environ:
+            return self.list_storage_class([self.environ["REMOTE_ADDR"]])
+        return self.list_storage_class()
+
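Since `access_route` trusts `X-Forwarded-For` blindly, the usual pattern is to rewrite the environ with a proxy-aware middleware before the request object sees it. A sketch using werkzeug's `ProxyFix` (assuming exactly one trusted reverse proxy in front of the app):

```python
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.wrappers import Request, Response

@Request.application
def app(request):
    return Response("client: %s" % request.remote_addr)

# Trust exactly one X-Forwarded-For hop, set by our own reverse proxy.
app = ProxyFix(app, x_for=1)
```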
+    @property
+    def remote_addr(self):
+        """The remote address of the client."""
+        return self.environ.get("REMOTE_ADDR")
+
+    remote_user = environ_property(
+        "REMOTE_USER",
+        doc="""If the server supports user authentication, and the
+        script is protected, this attribute contains the username the
+        user has authenticated as.""",
+    )
+
+    scheme = environ_property(
+        "wsgi.url_scheme",
+        doc="""
+        URL scheme (http or https).
+
+        .. versionadded:: 0.7""",
+    )
+
+    @property
+    def is_xhr(self):
+        """True if the request was triggered via a JavaScript XMLHttpRequest.
+        This only works with libraries that support the ``X-Requested-With``
+        header and set it to "XMLHttpRequest".  Libraries that do that are
+        prototype, jQuery and Mochikit and probably some more.
+
+        .. deprecated:: 0.13
+            ``X-Requested-With`` is not standard and is unreliable. You
+            may be able to use :attr:`AcceptMixin.accept_mimetypes`
+            instead.
+        """
+        warnings.warn(
+            "'Request.is_xhr' is deprecated as of version 0.13 and will"
+            " be removed in version 1.0. The 'X-Requested-With' header"
+            " is not standard and is unreliable. You may be able to use"
+            " 'accept_mimetypes' instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        return self.environ.get("HTTP_X_REQUESTED_WITH", "").lower() == "xmlhttprequest"
+
+    is_secure = property(
+        lambda self: self.environ["wsgi.url_scheme"] == "https",
+        doc="`True` if the request is secure.",
+    )
+    is_multithread = environ_property(
+        "wsgi.multithread",
+        doc="""boolean that is `True` if the application is served by a
+        multithreaded WSGI server.""",
+    )
+    is_multiprocess = environ_property(
+        "wsgi.multiprocess",
+        doc="""boolean that is `True` if the application is served by a
+        WSGI server that spawns multiple processes.""",
+    )
+    is_run_once = environ_property(
+        "wsgi.run_once",
+        doc="""boolean that is `True` if the application will be
+        executed only once in a process lifetime.  This is the case for
+        CGI for example, but it's not guaranteed that the execution only
+        happens one time.""",
+    )
+
+
+def _assert_not_shallow(request):
+    if request.shallow:
+        raise RuntimeError(
+            "A shallow request tried to consume form data. If you really"
+            " want to do that, set `shallow` to False."
+        )
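A shallow request makes the "don't consume form data in middleware" rule enforceable at runtime; a minimal sketch of the failure mode `_assert_not_shallow` guards against (the form payload is illustrative):

```python
from werkzeug.test import create_environ
from werkzeug.wrappers import Request

environ = create_environ("/", method="POST", data={"a": "1"})
shallow = Request(environ, shallow=True)
try:
    shallow.form  # consuming form data is forbidden on shallow requests
except RuntimeError as exc:
    print(exc)  # explains how to opt out by setting `shallow` to False
```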
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/base_response.py b/lib/python3.7/site-packages/werkzeug/wrappers/base_response.py
new file mode 100644
index 0000000000000000000000000000000000000000..d944a7d22ad184b9affad712617e497d9e5fa3ab
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/wrappers/base_response.py
@@ -0,0 +1,702 @@
+import warnings
+
+from .._compat import integer_types
+from .._compat import string_types
+from .._compat import text_type
+from .._compat import to_bytes
+from .._compat import to_native
+from ..datastructures import Headers
+from ..http import dump_cookie
+from ..http import HTTP_STATUS_CODES
+from ..http import remove_entity_headers
+from ..urls import iri_to_uri
+from ..urls import url_join
+from ..utils import get_content_type
+from ..wsgi import ClosingIterator
+from ..wsgi import get_current_url
+
+
+def _run_wsgi_app(*args):
+    """This function replaces itself to ensure that the test module is not
+    imported unless required.  DO NOT USE!
+    """
+    global _run_wsgi_app
+    from ..test import run_wsgi_app as _run_wsgi_app
+
+    return _run_wsgi_app(*args)
+
+
+def _warn_if_string(iterable):
+    """Helper for the response objects to check if the iterable returned
+    to the WSGI server is not a string.
+    """
+    if isinstance(iterable, string_types):
+        warnings.warn(
+            "Response iterable was set to a string. This will appear to"
+            " work but means that the server will send the data to the"
+            " client one character at a time. This is almost never"
+            " intended behavior, use 'response.data' to assign strings"
+            " to the response object.",
+            stacklevel=2,
+        )
+
+
+def _iter_encoded(iterable, charset):
+    for item in iterable:
+        if isinstance(item, text_type):
+            yield item.encode(charset)
+        else:
+            yield item
+
+
+def _clean_accept_ranges(accept_ranges):
+    if accept_ranges is True:
+        return "bytes"
+    elif accept_ranges is False:
+        return "none"
+    elif isinstance(accept_ranges, text_type):
+        return to_native(accept_ranges)
+    raise ValueError("Invalid accept_ranges value")
+
+
+class BaseResponse(object):
+    """Base response class.  The most important fact about a response object
+    is that it's a regular WSGI application.  It's initialized with a couple
+    of response parameters (headers, body, status code etc.) and will start a
+    valid WSGI response when called with the environ and start response
+    callable.
+
+    Because it's a WSGI application itself, processing usually ends before
+    the actual response is sent to the server.  This helps debugging systems
+    because they can catch all the exceptions before responses are started.
+
+    Here is a small example WSGI application that takes advantage of the
+    response objects::
+
+        from werkzeug.wrappers import BaseResponse as Response
+
+        def index():
+            return Response('Index page')
+
+        def application(environ, start_response):
+            path = environ.get('PATH_INFO') or '/'
+            if path == '/':
+                response = index()
+            else:
+                response = Response('Not Found', status=404)
+            return response(environ, start_response)
+
+    Like :class:`BaseRequest`, this object lacks a lot of functionality
+    that is implemented in mixins.  This gives you better control over the
+    actual API of your response objects, so you can create subclasses and add
+    custom functionality.  A full featured response object is available as
+    :class:`Response` which implements a couple of useful mixins.
+
+    To enforce a new type on already existing responses you can use the
+    :meth:`force_type` method.  This is useful if you're working with different
+    subclasses of response objects and you want to post process them with a
+    known interface.
+
+    By default the response object will assume all the text data is `utf-8`
+    encoded.  Please refer to :doc:`the unicode chapter </unicode>` for more
+    details about customizing the behavior.
+
+    Response can be any kind of iterable or string.  If it's a string it's
+    considered to be an iterable with one item, which is the string passed.
+    Headers can be a list of tuples or a
+    :class:`~werkzeug.datastructures.Headers` object.
+
+    Special note for `mimetype` and `content_type`:  For most mime types
+    `mimetype` and `content_type` work the same, the difference affects
+    only 'text' mimetypes.  If the mimetype passed with `mimetype` is a
+    mimetype starting with `text/`, the charset parameter of the response
+    object is appended to it.  In contrast the `content_type` parameter is
+    always added as a header unmodified.
+
+    .. versionchanged:: 0.5
+       the `direct_passthrough` parameter was added.
+
+    :param response: a string or response iterable.
+    :param status: a string with a status or an integer with the status code.
+    :param headers: a list of headers or a
+                    :class:`~werkzeug.datastructures.Headers` object.
+    :param mimetype: the mimetype for the response.  See notice above.
+    :param content_type: the content type for the response.  See notice above.
+    :param direct_passthrough: if set to `True` :meth:`iter_encoded` is not
+                               called before iteration which makes it
+                               possible to pass special iterators through
+                               unchanged (see :func:`wrap_file` for more
+                               details.)
+    """
+
+    #: the charset of the response.
+    charset = "utf-8"
+
+    #: the default status if none is provided.
+    default_status = 200
+
+    #: the default mimetype if none is provided.
+    default_mimetype = "text/plain"
+
+    #: if set to `False` accessing properties on the response object will
+    #: not try to consume the response iterator and convert it into a list.
+    #:
+    #: .. versionadded:: 0.6.2
+    #:
+    #:    That attribute was previously called `implicit_seqence_conversion`.
+    #:    (Notice the typo).  If you did use this feature, you have to adapt
+    #:    your code to the name change.
+    implicit_sequence_conversion = True
+
+    #: Should this response object correct the location header to be RFC
+    #: conformant?  This is true by default.
+    #:
+    #: .. versionadded:: 0.8
+    autocorrect_location_header = True
+
+    #: Should this response object automatically set the content-length
+    #: header if possible?  This is true by default.
+    #:
+    #: .. versionadded:: 0.8
+    automatically_set_content_length = True
+
+    #: Warn if a cookie header exceeds this size. The default, 4093, should be
+    #: safely `supported by most browsers <cookie_>`_. A cookie larger than
+    #: this size will still be sent, but it may be ignored or handled
+    #: incorrectly by some browsers. Set to 0 to disable this check.
+    #:
+    #: .. versionadded:: 0.13
+    #:
+    #: .. _`cookie`: http://browsercookielimits.squawky.net/
+    max_cookie_size = 4093
+
+    def __init__(
+        self,
+        response=None,
+        status=None,
+        headers=None,
+        mimetype=None,
+        content_type=None,
+        direct_passthrough=False,
+    ):
+        if isinstance(headers, Headers):
+            self.headers = headers
+        elif not headers:
+            self.headers = Headers()
+        else:
+            self.headers = Headers(headers)
+
+        if content_type is None:
+            if mimetype is None and "content-type" not in self.headers:
+                mimetype = self.default_mimetype
+            if mimetype is not None:
+                mimetype = get_content_type(mimetype, self.charset)
+            content_type = mimetype
+        if content_type is not None:
+            self.headers["Content-Type"] = content_type
+        if status is None:
+            status = self.default_status
+        if isinstance(status, integer_types):
+            self.status_code = status
+        else:
+            self.status = status
+
+        self.direct_passthrough = direct_passthrough
+        self._on_close = []
+
+        # we set the response after the headers so that if a class changes
+        # the charset attribute, the data is set in the correct charset.
+        if response is None:
+            self.response = []
+        elif isinstance(response, (text_type, bytes, bytearray)):
+            self.set_data(response)
+        else:
+            self.response = response
+
+    def call_on_close(self, func):
+        """Adds a function to the internal list of functions that should
+        be called as part of closing down the response.  Since 0.7 this
+        function also returns the function that was passed so that this
+        can be used as a decorator.
+
+        .. versionadded:: 0.6
+        """
+        self._on_close.append(func)
+        return func
+
+    def __repr__(self):
+        if self.is_sequence:
+            body_info = "%d bytes" % sum(map(len, self.iter_encoded()))
+        else:
+            body_info = "streamed" if self.is_streamed else "likely-streamed"
+        return "<%s %s [%s]>" % (self.__class__.__name__, body_info, self.status)
+
+    @classmethod
+    def force_type(cls, response, environ=None):
+        """Enforce that the WSGI response is a response object of the current
+        type.  Werkzeug will use the :class:`BaseResponse` internally in many
+        situations like the exceptions.  If you call :meth:`get_response` on an
+        exception you will get back a regular :class:`BaseResponse` object, even
+        if you are using a custom subclass.
+
+        This method can enforce a given response type, and it will also
+        convert arbitrary WSGI callables into response objects if an environ
+        is provided::
+
+            # convert a Werkzeug response object into an instance of the
+            # MyResponseClass subclass.
+            response = MyResponseClass.force_type(response)
+
+            # convert any WSGI application into a response object
+            response = MyResponseClass.force_type(response, environ)
+
+        This is especially useful if you want to post-process responses in
+        the main dispatcher and use functionality provided by your subclass.
+
+        Keep in mind that this will modify response objects in place if
+        possible!
+
+        :param response: a response object or wsgi application.
+        :param environ: a WSGI environment object.
+        :return: a response object.
+        """
+        if not isinstance(response, BaseResponse):
+            if environ is None:
+                raise TypeError(
+                    "cannot convert WSGI application into response"
+                    " objects without an environ"
+                )
+            response = BaseResponse(*_run_wsgi_app(response, environ))
+        response.__class__ = cls
+        return response
+
+    @classmethod
+    def from_app(cls, app, environ, buffered=False):
+        """Create a new response object from an application output.  This
+        works best if you pass it an application that returns a generator all
+        the time.  Sometimes applications may use the `write()` callable
+        returned by the `start_response` function.  This tries to resolve such
+        edge cases automatically.  But if you don't get the expected output
+        you should set `buffered` to `True` which enforces buffering.
+
+        :param app: the WSGI application to execute.
+        :param environ: the WSGI environment to execute against.
+        :param buffered: set to `True` to enforce buffering.
+        :return: a response object.
+        """
+        return cls(*_run_wsgi_app(app, environ, buffered))
+
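`from_app` pairs nicely with `create_environ` for exercising a WSGI app without a server; a minimal sketch (the app and path are illustrative):

```python
from werkzeug.test import create_environ
from werkzeug.wrappers import BaseResponse

def hello(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]

resp = BaseResponse.from_app(hello, create_environ("/"))
assert resp.status_code == 200
assert resp.get_data() == b"hello"
```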
+    def _get_status_code(self):
+        return self._status_code
+
+    def _set_status_code(self, code):
+        self._status_code = code
+        try:
+            self._status = "%d %s" % (code, HTTP_STATUS_CODES[code].upper())
+        except KeyError:
+            self._status = "%d UNKNOWN" % code
+
+    status_code = property(
+        _get_status_code, _set_status_code, doc="The HTTP Status code as number"
+    )
+    del _get_status_code, _set_status_code
+
+    def _get_status(self):
+        return self._status
+
+    def _set_status(self, value):
+        try:
+            self._status = to_native(value)
+        except AttributeError:
+            raise TypeError("Invalid status argument")
+
+        try:
+            self._status_code = int(self._status.split(None, 1)[0])
+        except ValueError:
+            self._status_code = 0
+            self._status = "0 %s" % self._status
+        except IndexError:
+            raise ValueError("Empty status argument")
+
+    status = property(_get_status, _set_status, doc="The HTTP Status code")
+    del _get_status, _set_status
+
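The two properties above stay in sync in both directions: setting either updates the other, as a quick sketch shows (the custom status is illustrative):

```python
from werkzeug.wrappers import BaseResponse

r = BaseResponse("not here")
r.status_code = 404
assert r.status == "404 NOT FOUND"  # looked up in HTTP_STATUS_CODES

r.status = "299 CUSTOM"
assert r.status_code == 299  # parsed back out of the status string
```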
+    def get_data(self, as_text=False):
+        """The string representation of the request body.  Whenever you call
+        this property the request iterable is encoded and flattened.  This
+        can lead to unwanted behavior if you stream big data.
+
+        This behavior can be disabled by setting
+        :attr:`implicit_sequence_conversion` to `False`.
+
+        If `as_text` is set to `True` the return value will be a decoded
+        unicode string.
+
+        .. versionadded:: 0.9
+        """
+        self._ensure_sequence()
+        rv = b"".join(self.iter_encoded())
+        if as_text:
+            rv = rv.decode(self.charset)
+        return rv
+
+    def set_data(self, value):
+        """Sets a new string as response.  The value set must either by a
+        unicode or bytestring.  If a unicode string is set it's encoded
+        automatically to the charset of the response (utf-8 by default).
+
+        .. versionadded:: 0.9
+        """
+        # if a unicode string is set, it's encoded directly so that we
+        # can set the content length
+        if isinstance(value, text_type):
+            value = value.encode(self.charset)
+        else:
+            value = bytes(value)
+        self.response = [value]
+        if self.automatically_set_content_length:
+            self.headers["Content-Length"] = str(len(value))
+
+    data = property(
+        get_data,
+        set_data,
+        doc="A descriptor that calls :meth:`get_data` and :meth:`set_data`.",
+    )
+
+    def calculate_content_length(self):
+        """Returns the content length if available or `None` otherwise."""
+        try:
+            self._ensure_sequence()
+        except RuntimeError:
+            return None
+        return sum(len(x) for x in self.iter_encoded())
+
+    def _ensure_sequence(self, mutable=False):
+        """This method can be called by methods that need a sequence.  If
+        `mutable` is true, it will also ensure that the response sequence
+        is a standard Python list.
+
+        .. versionadded:: 0.6
+        """
+        if self.is_sequence:
+            # if we need a mutable object, we ensure it's a list.
+            if mutable and not isinstance(self.response, list):
+                self.response = list(self.response)
+            return
+        if self.direct_passthrough:
+            raise RuntimeError(
+                "Attempted implicit sequence conversion but the"
+                " response object is in direct passthrough mode."
+            )
+        if not self.implicit_sequence_conversion:
+            raise RuntimeError(
+                "The response object required the iterable to be a"
+                " sequence, but the implicit conversion was disabled."
+                " Call make_sequence() yourself."
+            )
+        self.make_sequence()
+
+    def make_sequence(self):
+        """Converts the response iterator in a list.  By default this happens
+        automatically if required.  If `implicit_sequence_conversion` is
+        disabled, this method is not automatically called and some properties
+        might raise exceptions.  This also encodes all the items.
+
+        .. versionadded:: 0.6
+        """
+        if not self.is_sequence:
+            # if we consume an iterable we have to ensure that the close
+            # method of the iterable is called if available when we tear
+            # down the response
+            close = getattr(self.response, "close", None)
+            self.response = list(self.iter_encoded())
+            if close is not None:
+                self.call_on_close(close)
+
+    def iter_encoded(self):
+        """Iter the response encoded with the encoding of the response.
+        If the response object is invoked as WSGI application the return
+        value of this method is used as application iterator unless
+        :attr:`direct_passthrough` was activated.
+        """
+        if __debug__:
+            _warn_if_string(self.response)
+        # Encode in a separate function so that self.response is fetched
+        # early.  This allows us to wrap the response with the return
+        # value from get_app_iter or iter_encoded.
+        return _iter_encoded(self.response, self.charset)
+
+    def set_cookie(
+        self,
+        key,
+        value="",
+        max_age=None,
+        expires=None,
+        path="/",
+        domain=None,
+        secure=False,
+        httponly=False,
+        samesite=None,
+    ):
+        """Sets a cookie. The parameters are the same as in the cookie `Morsel`
+        object in the Python standard library but it accepts unicode data, too.
+
+        A warning is raised if the size of the cookie header exceeds
+        :attr:`max_cookie_size`, but the header will still be set.
+
+        :param key: the key (name) of the cookie to be set.
+        :param value: the value of the cookie.
+        :param max_age: should be a number of seconds, or `None` (default) if
+                        the cookie should last only as long as the client's
+                        browser session.
+        :param expires: should be a `datetime` object or UNIX timestamp.
+        :param path: limits the cookie to a given path, per default it will
+                     span the whole domain.
+        :param domain: if you want to set a cross-domain cookie.  For example,
+                       ``domain=".example.com"`` will set a cookie that is
+                       readable by the domain ``www.example.com``,
+                       ``foo.example.com`` etc.  Otherwise, a cookie will only
+                       be readable by the domain that set it.
+        :param secure: If `True`, the cookie will only be available via HTTPS
+        :param httponly: disallow JavaScript to access the cookie.  This is an
+                         extension to the cookie standard and probably not
+                         supported by all browsers.
+        :param samesite: Limits the scope of the cookie such that it will only
+                         be attached to requests if those requests are
+                         "same-site".
+        """
+        self.headers.add(
+            "Set-Cookie",
+            dump_cookie(
+                key,
+                value=value,
+                max_age=max_age,
+                expires=expires,
+                path=path,
+                domain=domain,
+                secure=secure,
+                httponly=httponly,
+                charset=self.charset,
+                max_size=self.max_cookie_size,
+                samesite=samesite,
+            ),
+        )
+
+    def delete_cookie(self, key, path="/", domain=None):
+        """Delete a cookie.  Fails silently if key doesn't exist.
+
+        :param key: the key (name) of the cookie to be deleted.
+        :param path: if the cookie that should be deleted was limited to a
+                     path, the path has to be defined here.
+        :param domain: if the cookie that should be deleted was limited to a
+                       domain, that domain has to be defined here.
+        """
+        self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
+
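A typical `set_cookie` call, with the size warning above in mind (the cookie names and values are illustrative):

```python
from werkzeug.wrappers import BaseResponse

resp = BaseResponse("ok")
resp.set_cookie(
    "session", "opaque-token",
    max_age=3600,     # one hour
    secure=True,      # only sent over HTTPS
    httponly=True,    # hidden from JavaScript
    samesite="Lax",   # not attached to most cross-site requests
)
resp.delete_cookie("old_session")
```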
+    @property
+    def is_streamed(self):
+        """If the response is streamed (the response is not an iterable with
+        length information) this property is `True`.  In this case streamed
+        means that there is no information about the number of iterations.
+        This is usually `True` if a generator is passed to the response object.
+
+        This is useful for checking before applying some sort of post
+        filtering that should not take place for streamed responses.
+        """
+        try:
+            len(self.response)
+        except (TypeError, AttributeError):
+            return True
+        return False
+
+    @property
+    def is_sequence(self):
+        """If the iterator is buffered, this property will be `True`.  A
+        response object will consider an iterator to be buffered if the
+        response attribute is a list or tuple.
+
+        .. versionadded:: 0.6
+        """
+        return isinstance(self.response, (tuple, list))
+
+    def close(self):
+        """Close the wrapped response if possible.  You can also use the object
+        in a with statement which will automatically close it.
+
+        .. versionadded:: 0.9
+           Can now be used in a with statement.
+        """
+        if hasattr(self.response, "close"):
+            self.response.close()
+        for func in self._on_close:
+            func()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self.close()
+
+    def freeze(self):
+        """Call this method if you want to make your response object ready for
+        being pickled.  This buffers the generator if there is one.  It will
+        also set the `Content-Length` header to the length of the body.
+
+        .. versionchanged:: 0.6
+           The `Content-Length` header is now set.
+        """
+        # we explicitly set the length to a list of the *encoded* response
+        # iterator.  Even if the implicit sequence conversion is disabled.
+        self.response = list(self.iter_encoded())
+        self.headers["Content-Length"] = str(sum(map(len, self.response)))
+
+    def get_wsgi_headers(self, environ):
+        """This is automatically called right before the response is started
+        and returns headers modified for the given environment.  It returns a
+        copy of the headers from the response with some modifications applied
+        if necessary.
+
+        For example the location header (if present) is joined with the root
+        URL of the environment.  Also the content length is automatically set
+        to zero here for certain status codes.
+
+        .. versionchanged:: 0.6
+           Previously that function was called `fix_headers` and modified
+           the response object in place.  Also since 0.6, IRIs in location
+           and content-location headers are handled properly.
+
+           Also starting with 0.6, Werkzeug will attempt to set the content
+           length if it is able to figure it out on its own.  This is the
+           case if all the strings in the response iterable are already
+           encoded and the iterable is buffered.
+
+        :param environ: the WSGI environment of the request.
+        :return: returns a new :class:`~werkzeug.datastructures.Headers`
+                 object.
+        """
+        headers = Headers(self.headers)
+        location = None
+        content_location = None
+        content_length = None
+        status = self.status_code
+
+        # iterate over the headers to find all values in one go.  Because
+        # get_wsgi_headers is used for each response, this gives us a tiny
+        # speedup.
+        for key, value in headers:
+            ikey = key.lower()
+            if ikey == u"location":
+                location = value
+            elif ikey == u"content-location":
+                content_location = value
+            elif ikey == u"content-length":
+                content_length = value
+
+        # make sure the location header is an absolute URL
+        if location is not None:
+            old_location = location
+            if isinstance(location, text_type):
+                # Safe conversion is necessary here as we might redirect
+                # to a broken URI scheme (for instance itms-services).
+                location = iri_to_uri(location, safe_conversion=True)
+
+            if self.autocorrect_location_header:
+                current_url = get_current_url(environ, strip_querystring=True)
+                if isinstance(current_url, text_type):
+                    current_url = iri_to_uri(current_url)
+                location = url_join(current_url, location)
+            if location != old_location:
+                headers["Location"] = location
+
+        # make sure the content location is a URL
+        if content_location is not None and isinstance(content_location, text_type):
+            headers["Content-Location"] = iri_to_uri(content_location)
+
+        if 100 <= status < 200 or status == 204:
+            # Per section 3.3.2 of RFC 7230, "a server MUST NOT send a
+            # Content-Length header field in any response with a status
+            # code of 1xx (Informational) or 204 (No Content)."
+            headers.remove("Content-Length")
+        elif status == 304:
+            remove_entity_headers(headers)
+
+        # if we can determine the content length automatically, we
+        # should try to do that.  But only if this does not involve
+        # flattening the iterator or encoding of unicode strings in
+        # the response.  However, we should not do that if we have a 304
+        # response.
+        if (
+            self.automatically_set_content_length
+            and self.is_sequence
+            and content_length is None
+            and status not in (204, 304)
+            and not (100 <= status < 200)
+        ):
+            try:
+                content_length = sum(len(to_bytes(x, "ascii")) for x in self.response)
+            except UnicodeError:
+                # aha, something non-bytestringy in there, too bad, we
+                # can't safely figure out the length of the response.
+                pass
+            else:
+                headers["Content-Length"] = str(content_length)
+
+        return headers
+
+    def get_app_iter(self, environ):
+        """Returns the application iterator for the given environ.  Depending
+        on the request method and the current status code the return value
+        might be an empty iterable rather than the response body.
+
+        If the request method is `HEAD` or the status code is in a range
+        where the HTTP specification requires an empty response, an empty
+        iterable is returned.
+
+        .. versionadded:: 0.6
+
+        :param environ: the WSGI environment of the request.
+        :return: a response iterable.
+        """
+        status = self.status_code
+        if (
+            environ["REQUEST_METHOD"] == "HEAD"
+            or 100 <= status < 200
+            or status in (204, 304)
+        ):
+            iterable = ()
+        elif self.direct_passthrough:
+            if __debug__:
+                _warn_if_string(self.response)
+            return self.response
+        else:
+            iterable = self.iter_encoded()
+        return ClosingIterator(iterable, self.close)
+
+    def get_wsgi_response(self, environ):
+        """Returns the final WSGI response as tuple.  The first item in
+        the tuple is the application iterator, the second the status and
+        the third the list of headers.  The response returned is created
+        specially for the given environment.  For example if the request
+        method in the WSGI environment is ``'HEAD'`` the response will
+        be empty and only the headers and status code will be present.
+
+        .. versionadded:: 0.6
+
+        :param environ: the WSGI environment of the request.
+        :return: an ``(app_iter, status, headers)`` tuple.
+        """
+        headers = self.get_wsgi_headers(environ)
+        app_iter = self.get_app_iter(environ)
+        return app_iter, self.status, headers.to_wsgi_list()
+
+    def __call__(self, environ, start_response):
+        """Process this response as WSGI application.
+
+        :param environ: the WSGI environment.
+        :param start_response: the response callable provided by the WSGI
+                               server.
+        :return: an application iterator
+        """
+        app_iter, status, headers = self.get_wsgi_response(environ)
+        start_response(status, headers)
+        return app_iter
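+
+
+# A minimal, illustrative sketch (not part of Werkzeug itself): the response
+# object is a WSGI application in its own right, so calling it drives the
+# machinery above -- get_wsgi_headers(), get_app_iter() and start_response().
+if __name__ == "__main__":
+    from werkzeug.test import create_environ
+    from werkzeug.wrappers import Response
+
+    def start_response(status, headers):
+        print(status)          # "200 OK"
+        print(dict(headers))   # includes the computed Content-Length
+
+    resp = Response("Hello World!")
+    app_iter = resp(create_environ("/"), start_response)
+    print(b"".join(app_iter))  # b'Hello World!'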
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/common_descriptors.py b/lib/python3.7/site-packages/werkzeug/wrappers/common_descriptors.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4107ee01085f051526a70fa595c84bb5cf1de45
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/wrappers/common_descriptors.py
@@ -0,0 +1,322 @@
+from datetime import datetime
+from datetime import timedelta
+
+from .._compat import string_types
+from ..datastructures import CallbackDict
+from ..http import dump_age
+from ..http import dump_header
+from ..http import dump_options_header
+from ..http import http_date
+from ..http import parse_age
+from ..http import parse_date
+from ..http import parse_options_header
+from ..http import parse_set_header
+from ..utils import cached_property
+from ..utils import environ_property
+from ..utils import get_content_type
+from ..utils import header_property
+from ..wsgi import get_content_length
+
+
+class CommonRequestDescriptorsMixin(object):
+    """A mixin for :class:`BaseRequest` subclasses.  Request objects that
+    mix this class in will automatically get descriptors for a couple of
+    HTTP headers with automatic type conversion.
+
+    .. versionadded:: 0.5
+    """
+
+    content_type = environ_property(
+        "CONTENT_TYPE",
+        doc="""The Content-Type entity-header field indicates the media
+        type of the entity-body sent to the recipient or, in the case of
+        the HEAD method, the media type that would have been sent had
+        the request been a GET.""",
+    )
+
+    @cached_property
+    def content_length(self):
+        """The Content-Length entity-header field indicates the size of the
+        entity-body in bytes or, in the case of the HEAD method, the size of
+        the entity-body that would have been sent had the request been a
+        GET.
+        """
+        return get_content_length(self.environ)
+
+    content_encoding = environ_property(
+        "HTTP_CONTENT_ENCODING",
+        doc="""The Content-Encoding entity-header field is used as a
+        modifier to the media-type. When present, its value indicates
+        what additional content codings have been applied to the
+        entity-body, and thus what decoding mechanisms must be applied
+        in order to obtain the media-type referenced by the Content-Type
+        header field.
+
+        .. versionadded:: 0.9""",
+    )
+    content_md5 = environ_property(
+        "HTTP_CONTENT_MD5",
+        doc="""The Content-MD5 entity-header field, as defined in
+        RFC 1864, is an MD5 digest of the entity-body for the purpose of
+        providing an end-to-end message integrity check (MIC) of the
+        entity-body. (Note: a MIC is good for detecting accidental
+        modification of the entity-body in transit, but is not proof
+        against malicious attacks.)
+
+        .. versionadded:: 0.9""",
+    )
+    referrer = environ_property(
+        "HTTP_REFERER",
+        doc="""The Referer[sic] request-header field allows the client
+        to specify, for the server's benefit, the address (URI) of the
+        resource from which the Request-URI was obtained (the
+        "referrer", although the header field is misspelled).""",
+    )
+    date = environ_property(
+        "HTTP_DATE",
+        None,
+        parse_date,
+        doc="""The Date general-header field represents the date and
+        time at which the message was originated, having the same
+        semantics as orig-date in RFC 822.""",
+    )
+    max_forwards = environ_property(
+        "HTTP_MAX_FORWARDS",
+        None,
+        int,
+        doc="""The Max-Forwards request-header field provides a
+        mechanism with the TRACE and OPTIONS methods to limit the number
+        of proxies or gateways that can forward the request to the next
+        inbound server.""",
+    )
+
+    def _parse_content_type(self):
+        if not hasattr(self, "_parsed_content_type"):
+            self._parsed_content_type = parse_options_header(
+                self.environ.get("CONTENT_TYPE", "")
+            )
+
+    @property
+    def mimetype(self):
+        """Like :attr:`content_type`, but without parameters (eg, without
+        charset, type etc.) and always lowercase.  For example if the content
+        type is ``text/HTML; charset=utf-8`` the mimetype would be
+        ``'text/html'``.
+        """
+        self._parse_content_type()
+        return self._parsed_content_type[0].lower()
+
+    @property
+    def mimetype_params(self):
+        """The mimetype parameters as dict.  For example if the content
+        type is ``text/html; charset=utf-8`` the params would be
+        ``{'charset': 'utf-8'}``.
+        """
+        self._parse_content_type()
+        return self._parsed_content_type[1]
+
+    @cached_property
+    def pragma(self):
+        """The Pragma general-header field is used to include
+        implementation-specific directives that might apply to any recipient
+        along the request/response chain.  All pragma directives specify
+        optional behavior from the viewpoint of the protocol; however, some
+        systems MAY require that behavior be consistent with the directives.
+        """
+        return parse_set_header(self.environ.get("HTTP_PRAGMA", ""))
+
+
+class CommonResponseDescriptorsMixin(object):
+    """A mixin for :class:`BaseResponse` subclasses.  Response objects that
+    mix this class in will automatically get descriptors for a couple of
+    HTTP headers with automatic type conversion.
+    """
+
+    @property
+    def mimetype(self):
+        """The mimetype (content type without charset etc.)"""
+        ct = self.headers.get("content-type")
+        if ct:
+            return ct.split(";")[0].strip()
+
+    @mimetype.setter
+    def mimetype(self, value):
+        self.headers["Content-Type"] = get_content_type(value, self.charset)
+
+    @property
+    def mimetype_params(self):
+        """The mimetype parameters as dict. For example if the
+        content type is ``text/html; charset=utf-8`` the params would be
+        ``{'charset': 'utf-8'}``.
+
+        .. versionadded:: 0.5
+        """
+
+        def on_update(d):
+            self.headers["Content-Type"] = dump_options_header(self.mimetype, d)
+
+        d = parse_options_header(self.headers.get("content-type", ""))[1]
+        return CallbackDict(d, on_update)
+
+    location = header_property(
+        "Location",
+        doc="""The Location response-header field is used to redirect
+        the recipient to a location other than the Request-URI for
+        completion of the request or identification of a new
+        resource.""",
+    )
+    age = header_property(
+        "Age",
+        None,
+        parse_age,
+        dump_age,
+        doc="""The Age response-header field conveys the sender's
+        estimate of the amount of time since the response (or its
+        revalidation) was generated at the origin server.
+
+        Age values are non-negative decimal integers, representing time
+        in seconds.""",
+    )
+    content_type = header_property(
+        "Content-Type",
+        doc="""The Content-Type entity-header field indicates the media
+        type of the entity-body sent to the recipient or, in the case of
+        the HEAD method, the media type that would have been sent had
+        the request been a GET.""",
+    )
+    content_length = header_property(
+        "Content-Length",
+        None,
+        int,
+        str,
+        doc="""The Content-Length entity-header field indicates the size
+        of the entity-body, in decimal number of OCTETs, sent to the
+        recipient or, in the case of the HEAD method, the size of the
+        entity-body that would have been sent had the request been a
+        GET.""",
+    )
+    content_location = header_property(
+        "Content-Location",
+        doc="""The Content-Location entity-header field MAY be used to
+        supply the resource location for the entity enclosed in the
+        message when that entity is accessible from a location separate
+        from the requested resource's URI.""",
+    )
+    content_encoding = header_property(
+        "Content-Encoding",
+        doc="""The Content-Encoding entity-header field is used as a
+        modifier to the media-type. When present, its value indicates
+        what additional content codings have been applied to the
+        entity-body, and thus what decoding mechanisms must be applied
+        in order to obtain the media-type referenced by the Content-Type
+        header field.""",
+    )
+    content_md5 = header_property(
+        "Content-MD5",
+        doc="""The Content-MD5 entity-header field, as defined in
+        RFC 1864, is an MD5 digest of the entity-body for the purpose of
+        providing an end-to-end message integrity check (MIC) of the
+        entity-body. (Note: a MIC is good for detecting accidental
+        modification of the entity-body in transit, but is not proof
+        against malicious attacks.)""",
+    )
+    date = header_property(
+        "Date",
+        None,
+        parse_date,
+        http_date,
+        doc="""The Date general-header field represents the date and
+        time at which the message was originated, having the same
+        semantics as orig-date in RFC 822.""",
+    )
+    expires = header_property(
+        "Expires",
+        None,
+        parse_date,
+        http_date,
+        doc="""The Expires entity-header field gives the date/time after
+        which the response is considered stale. A stale cache entry may
+        not normally be returned by a cache.""",
+    )
+    last_modified = header_property(
+        "Last-Modified",
+        None,
+        parse_date,
+        http_date,
+        doc="""The Last-Modified entity-header field indicates the date
+        and time at which the origin server believes the variant was
+        last modified.""",
+    )
+
+    @property
+    def retry_after(self):
+        """The Retry-After response-header field can be used with a
+        503 (Service Unavailable) response to indicate how long the
+        service is expected to be unavailable to the requesting client.
+
+        Time in seconds until expiration or date.
+        """
+        value = self.headers.get("retry-after")
+        if value is None:
+            return
+        elif value.isdigit():
+            return datetime.utcnow() + timedelta(seconds=int(value))
+        return parse_date(value)
+
+    @retry_after.setter
+    def retry_after(self, value):
+        if value is None:
+            if "retry-after" in self.headers:
+                del self.headers["retry-after"]
+            return
+        elif isinstance(value, datetime):
+            value = http_date(value)
+        else:
+            value = str(value)
+        self.headers["Retry-After"] = value
+
+    def _set_property(name, doc=None):  # noqa: B902
+        def fget(self):
+            def on_update(header_set):
+                if not header_set and name in self.headers:
+                    del self.headers[name]
+                elif header_set:
+                    self.headers[name] = header_set.to_header()
+
+            return parse_set_header(self.headers.get(name), on_update)
+
+        def fset(self, value):
+            if not value:
+                del self.headers[name]
+            elif isinstance(value, string_types):
+                self.headers[name] = value
+            else:
+                self.headers[name] = dump_header(value)
+
+        return property(fget, fset, doc=doc)
+
+    vary = _set_property(
+        "Vary",
+        doc="""The Vary field value indicates the set of request-header
+        fields that fully determines, while the response is fresh,
+        whether a cache is permitted to use the response to reply to a
+        subsequent request without revalidation.""",
+    )
+    content_language = _set_property(
+        "Content-Language",
+        doc="""The Content-Language entity-header field describes the
+        natural language(s) of the intended audience for the enclosed
+        entity. Note that this might not be equivalent to all the
+        languages used within the entity-body.""",
+    )
+    allow = _set_property(
+        "Allow",
+        doc="""The Allow entity-header field lists the set of methods
+        supported by the resource identified by the Request-URI. The
+        purpose of this field is strictly to inform the recipient of
+        valid methods associated with the resource. An Allow header
+        field MUST be present in a 405 (Method Not Allowed)
+        response.""",
+    )
+
+    del _set_property
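+
+
+# A minimal, illustrative sketch (not part of the original module) of the
+# descriptors above: header_property and the set-style properties convert
+# between native Python values and raw header strings.
+if __name__ == "__main__":
+    from werkzeug.wrappers import Response
+
+    resp = Response("ok")
+    resp.content_type = "application/json"     # plain header_property
+    resp.retry_after = 120                     # non-datetime stored as delta-seconds
+    resp.vary.add("Accept-Encoding")           # HeaderSet written back via on_update
+    resp.mimetype_params["charset"] = "utf-8"  # CallbackDict rewrites Content-Type
+    print(resp.headers)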
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/etag.py b/lib/python3.7/site-packages/werkzeug/wrappers/etag.py
new file mode 100644
index 0000000000000000000000000000000000000000..0733506f11891ae89c2282a7d6b8d53c76e42a53
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/wrappers/etag.py
@@ -0,0 +1,304 @@
+from .._compat import string_types
+from .._internal import _get_environ
+from ..datastructures import ContentRange
+from ..datastructures import RequestCacheControl
+from ..datastructures import ResponseCacheControl
+from ..http import generate_etag
+from ..http import http_date
+from ..http import is_resource_modified
+from ..http import parse_cache_control_header
+from ..http import parse_content_range_header
+from ..http import parse_date
+from ..http import parse_etags
+from ..http import parse_if_range_header
+from ..http import parse_range_header
+from ..http import quote_etag
+from ..http import unquote_etag
+from ..utils import cached_property
+from ..utils import header_property
+from ..wrappers.base_response import _clean_accept_ranges
+from ..wsgi import _RangeWrapper
+
+
+class ETagRequestMixin(object):
+    """Add entity tag and cache descriptors to a request object or object with
+    a WSGI environment available as :attr:`~BaseRequest.environ`.  This not
+    only provides access to etags but also to the cache control header.
+    """
+
+    @cached_property
+    def cache_control(self):
+        """A :class:`~werkzeug.datastructures.RequestCacheControl` object
+        for the incoming cache control headers.
+        """
+        cache_control = self.environ.get("HTTP_CACHE_CONTROL")
+        return parse_cache_control_header(cache_control, None, RequestCacheControl)
+
+    @cached_property
+    def if_match(self):
+        """An object containing all the etags in the `If-Match` header.
+
+        :rtype: :class:`~werkzeug.datastructures.ETags`
+        """
+        return parse_etags(self.environ.get("HTTP_IF_MATCH"))
+
+    @cached_property
+    def if_none_match(self):
+        """An object containing all the etags in the `If-None-Match` header.
+
+        :rtype: :class:`~werkzeug.datastructures.ETags`
+        """
+        return parse_etags(self.environ.get("HTTP_IF_NONE_MATCH"))
+
+    @cached_property
+    def if_modified_since(self):
+        """The parsed `If-Modified-Since` header as datetime object."""
+        return parse_date(self.environ.get("HTTP_IF_MODIFIED_SINCE"))
+
+    @cached_property
+    def if_unmodified_since(self):
+        """The parsed `If-Unmodified-Since` header as datetime object."""
+        return parse_date(self.environ.get("HTTP_IF_UNMODIFIED_SINCE"))
+
+    @cached_property
+    def if_range(self):
+        """The parsed `If-Range` header.
+
+        .. versionadded:: 0.7
+
+        :rtype: :class:`~werkzeug.datastructures.IfRange`
+        """
+        return parse_if_range_header(self.environ.get("HTTP_IF_RANGE"))
+
+    @cached_property
+    def range(self):
+        """The parsed `Range` header.
+
+        .. versionadded:: 0.7
+
+        :rtype: :class:`~werkzeug.datastructures.Range`
+        """
+        return parse_range_header(self.environ.get("HTTP_RANGE"))
+
+
+class ETagResponseMixin(object):
+    """Adds extra functionality to a response object for etag and cache
+    handling.  This mixin requires an object with at least a `headers`
+    object that implements a dict-like interface similar to
+    :class:`~werkzeug.datastructures.Headers`.
+
+    If you want the :meth:`freeze` method to automatically add an etag, you
+    have to mix this class in before the response base class.  The default
+    response class does not do that.
+    """
+
+    @property
+    def cache_control(self):
+        """The Cache-Control general-header field is used to specify
+        directives that MUST be obeyed by all caching mechanisms along the
+        request/response chain.
+        """
+
+        def on_update(cache_control):
+            if not cache_control and "cache-control" in self.headers:
+                del self.headers["cache-control"]
+            elif cache_control:
+                self.headers["Cache-Control"] = cache_control.to_header()
+
+        return parse_cache_control_header(
+            self.headers.get("cache-control"), on_update, ResponseCacheControl
+        )
+
+    def _wrap_response(self, start, length):
+        """Wrap existing Response in case of Range Request context."""
+        if self.status_code == 206:
+            self.response = _RangeWrapper(self.response, start, length)
+
+    def _is_range_request_processable(self, environ):
+        """Return ``True`` if `Range` header is present and if underlying
+        resource is considered unchanged when compared with `If-Range` header.
+        """
+        return (
+            "HTTP_IF_RANGE" not in environ
+            or not is_resource_modified(
+                environ,
+                self.headers.get("etag"),
+                None,
+                self.headers.get("last-modified"),
+                ignore_if_range=False,
+            )
+        ) and "HTTP_RANGE" in environ
+
+    def _process_range_request(self, environ, complete_length=None, accept_ranges=None):
+        """Handle Range Request related headers (RFC7233).  If `Accept-Ranges`
+        header is valid, and Range Request is processable, we set the headers
+        as described by the RFC, and wrap the underlying response in a
+        RangeWrapper.
+
+        Returns ``True`` if Range Request can be fulfilled, ``False`` otherwise.
+
+        :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
+                 if `Range` header could not be parsed or satisfied.
+        """
+        from ..exceptions import RequestedRangeNotSatisfiable
+
+        if accept_ranges is None:
+            return False
+        self.headers["Accept-Ranges"] = accept_ranges
+        if not self._is_range_request_processable(environ) or complete_length is None:
+            return False
+        parsed_range = parse_range_header(environ.get("HTTP_RANGE"))
+        if parsed_range is None:
+            raise RequestedRangeNotSatisfiable(complete_length)
+        range_tuple = parsed_range.range_for_length(complete_length)
+        content_range_header = parsed_range.to_content_range_header(complete_length)
+        if range_tuple is None or content_range_header is None:
+            raise RequestedRangeNotSatisfiable(complete_length)
+        content_length = range_tuple[1] - range_tuple[0]
+        # Be sure not to send a 206 response
+        # if the requested range is the full content.
+        if content_length != complete_length:
+            self.headers["Content-Length"] = content_length
+            self.content_range = content_range_header
+            self.status_code = 206
+            self._wrap_response(range_tuple[0], content_length)
+            return True
+        return False
+
+    def make_conditional(
+        self, request_or_environ, accept_ranges=False, complete_length=None
+    ):
+        """Make the response conditional to the request.  This method works
+        best if an etag was defined for the response already.  The `add_etag`
+        method can be used to do that.  If called without etag just the date
+        header is set.
+
+        This does nothing if the request method in the request or environ is
+        anything but GET or HEAD.
+
+        For optimal performance when handling range requests, it's recommended
+        that your response data object implements `seekable`, `seek` and `tell`
+        methods as described by :py:class:`io.IOBase`.  Objects returned by
+        :meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods.
+
+        It does not remove the body of the response because that's something
+        the :meth:`__call__` function does for us automatically.
+
+        Returns self so that you can do ``return resp.make_conditional(req)``
+        but modifies the object in-place.
+
+        :param request_or_environ: a request object or WSGI environment to be
+                                   used to make the response conditional
+                                   against.
+        :param accept_ranges: This parameter dictates the value of
+                              `Accept-Ranges` header. If ``False`` (default),
+                              the header is not set. If ``True``, it will be set
+                              to ``"bytes"``. If ``None``, it will be set to
+                              ``"none"``. If it's a string, it will use this
+                              value.
+        :param complete_length: Will be used only in valid Range Requests.
+                                It will set `Content-Range` complete length
+                                value and compute `Content-Length` real value.
+                                This parameter is mandatory for successful
+                                Range Requests completion.
+        :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
+                 if `Range` header could not be parsed or satisfied.
+        """
+        environ = _get_environ(request_or_environ)
+        if environ["REQUEST_METHOD"] in ("GET", "HEAD"):
+            # if the date is not in the headers, add it now.  We however
+            # will not override an already existing header.  Unfortunately
+            # this header will be overridden by many WSGI servers including
+            # wsgiref.
+            if "date" not in self.headers:
+                self.headers["Date"] = http_date()
+            accept_ranges = _clean_accept_ranges(accept_ranges)
+            is206 = self._process_range_request(environ, complete_length, accept_ranges)
+            if not is206 and not is_resource_modified(
+                environ,
+                self.headers.get("etag"),
+                None,
+                self.headers.get("last-modified"),
+            ):
+                if parse_etags(environ.get("HTTP_IF_MATCH")):
+                    self.status_code = 412
+                else:
+                    self.status_code = 304
+            if (
+                self.automatically_set_content_length
+                and "content-length" not in self.headers
+            ):
+                length = self.calculate_content_length()
+                if length is not None:
+                    self.headers["Content-Length"] = length
+        return self
+
+    def add_etag(self, overwrite=False, weak=False):
+        """Add an etag for the current response if there is none yet."""
+        if overwrite or "etag" not in self.headers:
+            self.set_etag(generate_etag(self.get_data()), weak)
+
+    def set_etag(self, etag, weak=False):
+        """Set the etag, and override the old one if there was one."""
+        self.headers["ETag"] = quote_etag(etag, weak)
+
+    def get_etag(self):
+        """Return a tuple in the form ``(etag, is_weak)``.  If there is no
+        ETag the return value is ``(None, None)``.
+        """
+        return unquote_etag(self.headers.get("ETag"))
+
+    def freeze(self, no_etag=False):
+        """Call this method if you want to make your response object ready for
+        pickling.  This buffers the generator if there is one.  This also
+        sets the etag unless `no_etag` is set to `True`.
+        """
+        if not no_etag:
+            self.add_etag()
+        super(ETagResponseMixin, self).freeze()
+
+    accept_ranges = header_property(
+        "Accept-Ranges",
+        doc="""The `Accept-Ranges` header. Even though the name would
+        indicate that multiple values are supported, it must be one
+        string token only.
+
+        The values ``'bytes'`` and ``'none'`` are common.
+
+        .. versionadded:: 0.7""",
+    )
+
+    def _get_content_range(self):
+        def on_update(rng):
+            if not rng:
+                del self.headers["content-range"]
+            else:
+                self.headers["Content-Range"] = rng.to_header()
+
+        rv = parse_content_range_header(self.headers.get("content-range"), on_update)
+        # always provide a content range object to make the descriptor
+        # more user friendly.  It provides an unset() method that can be
+        # used to remove the header quickly.
+        if rv is None:
+            rv = ContentRange(None, None, None, on_update=on_update)
+        return rv
+
+    def _set_content_range(self, value):
+        if not value:
+            del self.headers["content-range"]
+        elif isinstance(value, string_types):
+            self.headers["Content-Range"] = value
+        else:
+            self.headers["Content-Range"] = value.to_header()
+
+    content_range = property(
+        _get_content_range,
+        _set_content_range,
+        doc="""The ``Content-Range`` header as
+        :class:`~werkzeug.datastructures.ContentRange` object. Even if
+        the header is not set it will provide such an object for easier
+        manipulation.
+
+        .. versionadded:: 0.7""",
+    )
+    del _get_content_range, _set_content_range
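+
+
+# A minimal, illustrative sketch (not part of the original module) of the
+# conditional machinery above: add_etag() hashes the body, and
+# make_conditional() downgrades the response to 304 when the client already
+# holds a matching etag.
+if __name__ == "__main__":
+    from werkzeug.test import create_environ
+    from werkzeug.wrappers import Response
+
+    resp = Response("cacheable body")
+    resp.add_etag()
+    etag, _weak = resp.get_etag()
+
+    environ = create_environ("/", headers={"If-None-Match": '"%s"' % etag})
+    resp.make_conditional(environ)
+    print(resp.status_code)  # 304 -- the client copy is still fresh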
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/json.py b/lib/python3.7/site-packages/werkzeug/wrappers/json.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d5dc33d4783157127e43ae9087da73541941545
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/wrappers/json.py
@@ -0,0 +1,145 @@
+from __future__ import absolute_import
+
+import datetime
+import uuid
+
+from .._compat import text_type
+from ..exceptions import BadRequest
+from ..utils import detect_utf_encoding
+
+try:
+    import simplejson as _json
+except ImportError:
+    import json as _json
+
+
+class _JSONModule(object):
+    @staticmethod
+    def _default(o):
+        if isinstance(o, datetime.date):
+            return o.isoformat()
+
+        if isinstance(o, uuid.UUID):
+            return str(o)
+
+        if hasattr(o, "__html__"):
+            return text_type(o.__html__())
+
+        raise TypeError()
+
+    @classmethod
+    def dumps(cls, obj, **kw):
+        kw.setdefault("separators", (",", ":"))
+        kw.setdefault("default", cls._default)
+        kw.setdefault("sort_keys", True)
+        return _json.dumps(obj, **kw)
+
+    @staticmethod
+    def loads(s, **kw):
+        if isinstance(s, bytes):
+            # Needed for Python < 3.6
+            encoding = detect_utf_encoding(s)
+            s = s.decode(encoding)
+
+        return _json.loads(s, **kw)
+
+
+class JSONMixin(object):
+    """Mixin to parse :attr:`data` as JSON. Can be mixed in for both
+    :class:`~werkzeug.wrappers.Request` and
+    :class:`~werkzeug.wrappers.Response` classes.
+
+    If `simplejson`_ is installed it is preferred over Python's built-in
+    :mod:`json` module.
+
+    .. _simplejson: https://simplejson.readthedocs.io/en/latest/
+    """
+
+    #: A module or other object that has ``dumps`` and ``loads``
+    #: functions that match the API of the built-in :mod:`json` module.
+    json_module = _JSONModule
+
+    @property
+    def json(self):
+        """The parsed JSON data if :attr:`mimetype` indicates JSON
+        (:mimetype:`application/json`, see :meth:`is_json`).
+
+        Calls :meth:`get_json` with default arguments.
+        """
+        return self.get_json()
+
+    @property
+    def is_json(self):
+        """Check if the mimetype indicates JSON data, either
+        :mimetype:`application/json` or :mimetype:`application/*+json`.
+        """
+        mt = self.mimetype
+        return (
+            mt == "application/json"
+            or mt.startswith("application/")
+            and mt.endswith("+json")
+        )
+
+    def _get_data_for_json(self, cache):
+        try:
+            return self.get_data(cache=cache)
+        except TypeError:
+            # Response doesn't have cache param.
+            return self.get_data()
+
+    # Cached values for ``(silent=False, silent=True)``. Initialized
+    # with sentinel values.
+    _cached_json = (Ellipsis, Ellipsis)
+
+    def get_json(self, force=False, silent=False, cache=True):
+        """Parse :attr:`data` as JSON.
+
+        If the mimetype does not indicate JSON
+        (:mimetype:`application/json`, see :meth:`is_json`), this
+        returns ``None``.
+
+        If parsing fails, :meth:`on_json_loading_failed` is called and
+        its return value is used as the return value.
+
+        :param force: Ignore the mimetype and always try to parse JSON.
+        :param silent: Silence parsing errors and return ``None``
+            instead.
+        :param cache: Store the parsed JSON to return for subsequent
+            calls.
+        """
+        if cache and self._cached_json[silent] is not Ellipsis:
+            return self._cached_json[silent]
+
+        if not (force or self.is_json):
+            return None
+
+        data = self._get_data_for_json(cache=cache)
+
+        try:
+            rv = self.json_module.loads(data)
+        except ValueError as e:
+            if silent:
+                rv = None
+
+                if cache:
+                    normal_rv, _ = self._cached_json
+                    self._cached_json = (normal_rv, rv)
+            else:
+                rv = self.on_json_loading_failed(e)
+
+                if cache:
+                    _, silent_rv = self._cached_json
+                    self._cached_json = (rv, silent_rv)
+        else:
+            if cache:
+                self._cached_json = (rv, rv)
+
+        return rv
+
+    def on_json_loading_failed(self, e):
+        """Called if :meth:`get_json` parsing fails and isn't silenced.
+        If this method returns a value, it is used as the return value
+        for :meth:`get_json`. The default implementation raises
+        :exc:`~werkzeug.exceptions.BadRequest`.
+        """
+        raise BadRequest("Failed to decode JSON object: {0}".format(e))
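+
+
+# A minimal, illustrative sketch: Werkzeug's stock Request does not mix in
+# JSONMixin, so a small subclass is built here purely for demonstration.
+if __name__ == "__main__":
+    from werkzeug.test import create_environ
+    from werkzeug.wrappers import Request
+    from werkzeug.wrappers.json import JSONMixin
+
+    class JSONRequest(JSONMixin, Request):
+        pass
+
+    environ = create_environ(
+        "/", data='{"answer": 42}', content_type="application/json"
+    )
+    req = JSONRequest(environ)
+    print(req.is_json)                # True -- mimetype is application/json
+    print(req.json)                   # {'answer': 42}, parsed and cached
+    print(req.get_json(silent=True))  # served from the cache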
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/request.py b/lib/python3.7/site-packages/werkzeug/wrappers/request.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1c71b64774bf3338af01d1f98cb47742c7c3245
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/wrappers/request.py
@@ -0,0 +1,44 @@
+from .accept import AcceptMixin
+from .auth import AuthorizationMixin
+from .base_request import BaseRequest
+from .common_descriptors import CommonRequestDescriptorsMixin
+from .etag import ETagRequestMixin
+from .user_agent import UserAgentMixin
+
+
+class Request(
+    BaseRequest,
+    AcceptMixin,
+    ETagRequestMixin,
+    UserAgentMixin,
+    AuthorizationMixin,
+    CommonRequestDescriptorsMixin,
+):
+    """Full featured request object implementing the following mixins:
+
+    - :class:`AcceptMixin` for accept header parsing
+    - :class:`ETagRequestMixin` for etag and cache control handling
+    - :class:`UserAgentMixin` for user agent introspection
+    - :class:`AuthorizationMixin` for http auth handling
+    - :class:`CommonRequestDescriptorsMixin` for common headers
+    """
+
+
+class StreamOnlyMixin(object):
+    """If mixed in before the request object this will change the bahavior
+    of it to disable handling of form parsing.  This disables the
+    :attr:`files`, :attr:`form` attributes and will just provide a
+    :attr:`stream` attribute that however is always available.
+
+    .. versionadded:: 0.9
+    """
+
+    disable_data_descriptor = True
+    want_form_data_parsed = False
+
+
+class PlainRequest(StreamOnlyMixin, Request):
+    """A request object without special form parsing capabilities.
+
+    .. versionadded:: 0.9
+    """
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/response.py b/lib/python3.7/site-packages/werkzeug/wrappers/response.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd86cacdb9401f435c54b98a13f59b31cd66462b
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/wrappers/response.py
@@ -0,0 +1,78 @@
+from ..utils import cached_property
+from .auth import WWWAuthenticateMixin
+from .base_response import BaseResponse
+from .common_descriptors import CommonResponseDescriptorsMixin
+from .etag import ETagResponseMixin
+
+
+class ResponseStream(object):
+    """A file descriptor like object used by the :class:`ResponseStreamMixin` to
+    represent the body of the stream.  It directly pushes into the response
+    iterable of the response object.
+    """
+
+    mode = "wb+"
+
+    def __init__(self, response):
+        self.response = response
+        self.closed = False
+
+    def write(self, value):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        self.response._ensure_sequence(mutable=True)
+        self.response.response.append(value)
+        self.response.headers.pop("Content-Length", None)
+        return len(value)
+
+    def writelines(self, seq):
+        for item in seq:
+            self.write(item)
+
+    def close(self):
+        self.closed = True
+
+    def flush(self):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+
+    def isatty(self):
+        if self.closed:
+            raise ValueError("I/O operation on closed file")
+        return False
+
+    def tell(self):
+        self.response._ensure_sequence()
+        return sum(map(len, self.response.response))
+
+    @property
+    def encoding(self):
+        return self.response.charset
+
+
+class ResponseStreamMixin(object):
+    """Mixin for :class:`BaseRequest` subclasses.  Classes that inherit from
+    this mixin will automatically get a :attr:`stream` property that provides
+    a write-only interface to the response iterable.
+    """
+
+    @cached_property
+    def stream(self):
+        """The response iterable as write-only stream."""
+        return ResponseStream(self)
+
+
+class Response(
+    BaseResponse,
+    ETagResponseMixin,
+    ResponseStreamMixin,
+    CommonResponseDescriptorsMixin,
+    WWWAuthenticateMixin,
+):
+    """Full featured response object implementing the following mixins:
+
+    - :class:`ETagResponseMixin` for etag and cache control handling
+    - :class:`ResponseStreamMixin` to add support for the `stream` property
+    - :class:`CommonResponseDescriptorsMixin` for various HTTP descriptors
+    - :class:`WWWAuthenticateMixin` for HTTP authentication support
+    """
diff --git a/lib/python3.7/site-packages/werkzeug/wrappers/user_agent.py b/lib/python3.7/site-packages/werkzeug/wrappers/user_agent.py
new file mode 100644
index 0000000000000000000000000000000000000000..72588dd94f1271a0326dc47d39b6dec878811685
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/wrappers/user_agent.py
@@ -0,0 +1,15 @@
+from ..utils import cached_property
+
+
+class UserAgentMixin(object):
+    """Adds a `user_agent` attribute to the request object which
+    contains the parsed user agent of the browser that triggered the
+    request as a :class:`~werkzeug.useragents.UserAgent` object.
+    """
+
+    @cached_property
+    def user_agent(self):
+        """The current user agent."""
+        from ..useragents import UserAgent
+
+        return UserAgent(self.environ)
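+
+
+# A minimal, illustrative sketch: werkzeug.useragents.UserAgent exposes
+# attributes such as .browser, .version and .platform parsed from the header.
+if __name__ == "__main__":
+    from werkzeug.test import create_environ
+    from werkzeug.wrappers import Request
+
+    environ = create_environ(
+        "/", headers={"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) Firefox/68.0"}
+    )
+    ua = Request(environ).user_agent
+    print(ua.browser, ua.version, ua.platform)  # e.g. firefox 68.0 linux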
diff --git a/lib/python3.7/site-packages/werkzeug/wsgi.py b/lib/python3.7/site-packages/werkzeug/wsgi.py
new file mode 100644
index 0000000000000000000000000000000000000000..f069f2d86356e2b0c41a546cc04541e4c4e252cf
--- /dev/null
+++ b/lib/python3.7/site-packages/werkzeug/wsgi.py
@@ -0,0 +1,1067 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.wsgi
+    ~~~~~~~~~~~~~
+
+    This module implements WSGI related helpers.
+
+    :copyright: 2007 Pallets
+    :license: BSD-3-Clause
+"""
+import io
+import re
+import warnings
+from functools import partial
+from functools import update_wrapper
+from itertools import chain
+
+from ._compat import BytesIO
+from ._compat import implements_iterator
+from ._compat import make_literal_wrapper
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import to_bytes
+from ._compat import to_unicode
+from ._compat import try_coerce_native
+from ._compat import wsgi_get_bytes
+from ._internal import _encode_idna
+from .urls import uri_to_iri
+from .urls import url_join
+from .urls import url_parse
+from .urls import url_quote
+
+
+def responder(f):
+    """Marks a function as responder.  Decorate a function with it and it
+    will automatically call the return value as WSGI application.
+
+    Example::
+
+        @responder
+        def application(environ, start_response):
+            return Response('Hello World!')
+    """
+    return update_wrapper(lambda *a: f(*a)(*a[-2:]), f)
+
+
+def get_current_url(
+    environ,
+    root_only=False,
+    strip_querystring=False,
+    host_only=False,
+    trusted_hosts=None,
+):
+    """A handy helper function that recreates the full URL as IRI for the
+    current request or parts of it.  Here's an example:
+
+    >>> from werkzeug.test import create_environ
+    >>> env = create_environ("/?param=foo", "http://localhost/script")
+    >>> get_current_url(env)
+    'http://localhost/script/?param=foo'
+    >>> get_current_url(env, root_only=True)
+    'http://localhost/script/'
+    >>> get_current_url(env, host_only=True)
+    'http://localhost/'
+    >>> get_current_url(env, strip_querystring=True)
+    'http://localhost/script/'
+
+    Optionally, it verifies that the host is in a list of trusted hosts.
+    If the host is not in there it will raise a
+    :exc:`~werkzeug.exceptions.SecurityError`.
+
+    Note that the string returned might contain unicode characters as the
+    representation is an IRI, not a URI.  If you need an ASCII-only
+    representation you can use the :func:`~werkzeug.urls.iri_to_uri`
+    function:
+
+    >>> from werkzeug.urls import iri_to_uri
+    >>> iri_to_uri(get_current_url(env))
+    'http://localhost/script/?param=foo'
+
+    :param environ: the WSGI environment to get the current URL from.
+    :param root_only: set `True` if you only want the root URL.
+    :param strip_querystring: set to `True` if you don't want the querystring.
+    :param host_only: set to `True` if the host URL should be returned.
+    :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
+                          for more information.
+    """
+    tmp = [environ["wsgi.url_scheme"], "://", get_host(environ, trusted_hosts)]
+    cat = tmp.append
+    if host_only:
+        return uri_to_iri("".join(tmp) + "/")
+    cat(url_quote(wsgi_get_bytes(environ.get("SCRIPT_NAME", ""))).rstrip("/"))
+    cat("/")
+    if not root_only:
+        cat(url_quote(wsgi_get_bytes(environ.get("PATH_INFO", "")).lstrip(b"/")))
+        if not strip_querystring:
+            qs = get_query_string(environ)
+            if qs:
+                cat("?" + qs)
+    return uri_to_iri("".join(tmp))
+
+
+def host_is_trusted(hostname, trusted_list):
+    """Checks if a host is trusted against a list.  This also takes care
+    of port normalization.
+
+    .. versionadded:: 0.9
+
+    :param hostname: the hostname to check
+    :param trusted_list: a list of hostnames to check against.  If a
+                         hostname starts with a dot it will match against
+                         all subdomains as well.
+    """
+    if not hostname:
+        return False
+
+    if isinstance(trusted_list, string_types):
+        trusted_list = [trusted_list]
+
+    def _normalize(hostname):
+        if ":" in hostname:
+            hostname = hostname.rsplit(":", 1)[0]
+        return _encode_idna(hostname)
+
+    try:
+        hostname = _normalize(hostname)
+    except UnicodeError:
+        return False
+    for ref in trusted_list:
+        if ref.startswith("."):
+            ref = ref[1:]
+            suffix_match = True
+        else:
+            suffix_match = False
+        try:
+            ref = _normalize(ref)
+        except UnicodeError:
+            return False
+        if ref == hostname:
+            return True
+        if suffix_match and hostname.endswith(b"." + ref):
+            return True
+    return False
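+
+
+# A minimal, illustrative sketch of the matching rules above: ports are
+# stripped before comparison, and a leading dot matches subdomains too.
+if __name__ == "__main__":
+    print(host_is_trusted("example.com:8080", ["example.com"]))  # True
+    print(host_is_trusted("api.example.com", [".example.com"]))  # True
+    print(host_is_trusted("evil.com", [".example.com"]))         # False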
+
+
+def get_host(environ, trusted_hosts=None):
+    """Return the host for the given WSGI environment. This first checks
+    the ``Host`` header. If it's not present, then ``SERVER_NAME`` and
+    ``SERVER_PORT`` are used. The host will only contain the port if it
+    is different than the standard port for the protocol.
+
+    Optionally, verify that the host is trusted using
+    :func:`host_is_trusted` and raise a
+    :exc:`~werkzeug.exceptions.SecurityError` if it is not.
+
+    :param environ: The WSGI environment to get the host from.
+    :param trusted_hosts: A list of trusted hosts.
+    :return: Host, with port if necessary.
+    :raise ~werkzeug.exceptions.SecurityError: If the host is not
+        trusted.
+    """
+    if "HTTP_HOST" in environ:
+        rv = environ["HTTP_HOST"]
+        if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
+            rv = rv[:-3]
+        elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
+            rv = rv[:-4]
+    else:
+        rv = environ["SERVER_NAME"]
+        if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
+            ("https", "443"),
+            ("http", "80"),
+        ):
+            rv += ":" + environ["SERVER_PORT"]
+    if trusted_hosts is not None:
+        if not host_is_trusted(rv, trusted_hosts):
+            from .exceptions import SecurityError
+
+            raise SecurityError('Host "%s" is not trusted' % rv)
+    return rv
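+
+
+# A minimal, illustrative sketch of the precedence described above: the Host
+# header wins, and the standard port for the scheme is stripped.
+if __name__ == "__main__":
+    environ = {
+        "HTTP_HOST": "example.com:443",
+        "SERVER_NAME": "ignored",
+        "SERVER_PORT": "8000",
+        "wsgi.url_scheme": "https",
+    }
+    print(get_host(environ))  # 'example.com'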
+
+
+def get_content_length(environ):
+    """Returns the content length from the WSGI environment as
+    integer. If it's not available or chunked transfer encoding is used,
+    ``None`` is returned.
+
+    .. versionadded:: 0.9
+
+    :param environ: the WSGI environ to fetch the content length from.
+    """
+    if environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked":
+        return None
+
+    content_length = environ.get("CONTENT_LENGTH")
+    if content_length is not None:
+        try:
+            return max(0, int(content_length))
+        except (ValueError, TypeError):
+            pass
+
+
+def get_input_stream(environ, safe_fallback=True):
+    """Returns the input stream from the WSGI environment and wraps it
+    in the most sensible way possible. The stream returned is not the
+    raw WSGI stream in most cases but one that is safe to read from
+    without taking into account the content length.
+
+    If content length is not set, the stream will be empty for safety reasons.
+    If the WSGI server supports chunked or infinite streams, it should set
+    the ``wsgi.input_terminated`` value in the WSGI environ to indicate that.
+
+    .. versionadded:: 0.9
+
+    :param environ: the WSGI environ to fetch the stream from.
+    :param safe_fallback: use an empty stream as a safe fallback when the
+        content length is not set. Disabling this allows infinite streams,
+        which can be a denial-of-service risk.
+    """
+    stream = environ["wsgi.input"]
+    content_length = get_content_length(environ)
+
+    # A wsgi extension that tells us if the input is terminated.  In
+    # that case we return the stream unchanged as we know we can safely
+    # read it until the end.
+    if environ.get("wsgi.input_terminated"):
+        return stream
+
+    # If the request doesn't specify a content length, returning the stream is
+    # potentially dangerous because it could be infinite, malicious or not. If
+    # safe_fallback is true, return an empty stream instead for safety.
+    if content_length is None:
+        return BytesIO() if safe_fallback else stream
+
+    # Otherwise limit the stream to the content length
+    return LimitedStream(stream, content_length)
+
+
+def get_query_string(environ):
+    """Returns the `QUERY_STRING` from the WSGI environment.  This also takes
+    care of the WSGI decoding dance on Python 3 environments, returning a
+    native string.  The string returned will be restricted to ASCII
+    characters.
+
+    .. versionadded:: 0.9
+
+    :param environ: the WSGI environment object to get the query string from.
+    """
+    qs = wsgi_get_bytes(environ.get("QUERY_STRING", ""))
+    # QUERY_STRING really should be ascii safe but some browsers
+    # will send us some unicode stuff (I am looking at you IE).
+    # In that case we want to urllib quote it badly.
+    return try_coerce_native(url_quote(qs, safe=":&%=+$!*'(),"))
+
+
+def get_path_info(environ, charset="utf-8", errors="replace"):
+    """Returns the `PATH_INFO` from the WSGI environment and properly
+    decodes it.  This also takes care of the WSGI decoding dance
+    on Python 3 environments.  If the `charset` is set to `None` a
+    bytestring is returned.
+
+    .. versionadded:: 0.9
+
+    :param environ: the WSGI environment object to get the path from.
+    :param charset: the charset for the path info, or `None` if no
+                    decoding should be performed.
+    :param errors: the decoding error handling.
+    """
+    path = wsgi_get_bytes(environ.get("PATH_INFO", ""))
+    return to_unicode(path, charset, errors, allow_none_charset=True)
+
+
+def get_script_name(environ, charset="utf-8", errors="replace"):
+    """Returns the `SCRIPT_NAME` from the WSGI environment and properly
+    decodes it.  This also takes care of the WSGI decoding dance
+    on Python 3 environments.  If the `charset` is set to `None` a
+    bytestring is returned.
+
+    .. versionadded:: 0.9
+
+    :param environ: the WSGI environment object to get the path from.
+    :param charset: the charset for the path, or `None` if no
+                    decoding should be performed.
+    :param errors: the decoding error handling.
+    """
+    path = wsgi_get_bytes(environ.get("SCRIPT_NAME", ""))
+    return to_unicode(path, charset, errors, allow_none_charset=True)
+
+
+def pop_path_info(environ, charset="utf-8", errors="replace"):
+    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
+    `SCRIPT_NAME`.  Returns `None` if there is nothing left on `PATH_INFO`.
+
+    If the `charset` is set to `None` a bytestring is returned.
+
+    If there are empty segments (``'/foo//bar'``) these are ignored but
+    properly pushed to the `SCRIPT_NAME`:
+
+    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
+    >>> pop_path_info(env)
+    'a'
+    >>> env['SCRIPT_NAME']
+    '/foo/a'
+    >>> pop_path_info(env)
+    'b'
+    >>> env['SCRIPT_NAME']
+    '/foo/a/b'
+
+    .. versionadded:: 0.5
+
+    .. versionchanged:: 0.9
+       The path is now decoded and a charset and encoding
+       parameter can be provided.
+
+    :param environ: the WSGI environment that is modified.
+    """
+    path = environ.get("PATH_INFO")
+    if not path:
+        return None
+
+    script_name = environ.get("SCRIPT_NAME", "")
+
+    # shift multiple leading slashes over
+    old_path = path
+    path = path.lstrip("/")
+    if path != old_path:
+        script_name += "/" * (len(old_path) - len(path))
+
+    if "/" not in path:
+        environ["PATH_INFO"] = ""
+        environ["SCRIPT_NAME"] = script_name + path
+        rv = wsgi_get_bytes(path)
+    else:
+        segment, path = path.split("/", 1)
+        environ["PATH_INFO"] = "/" + path
+        environ["SCRIPT_NAME"] = script_name + segment
+        rv = wsgi_get_bytes(segment)
+
+    return to_unicode(rv, charset, errors, allow_none_charset=True)
+
+
+def peek_path_info(environ, charset="utf-8", errors="replace"):
+    """Returns the next segment on the `PATH_INFO` or `None` if there
+    is none.  Works like :func:`pop_path_info` without modifying the
+    environment:
+
+    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
+    >>> peek_path_info(env)
+    'a'
+    >>> peek_path_info(env)
+    'a'
+
+    If the `charset` is set to `None` a bytestring is returned.
+
+    .. versionadded:: 0.5
+
+    .. versionchanged:: 0.9
+       The path is now decoded and a charset and encoding
+       parameter can be provided.
+
+    :param environ: the WSGI environment that is checked.
+    """
+    segments = environ.get("PATH_INFO", "").lstrip("/").split("/", 1)
+    if segments:
+        return to_unicode(
+            wsgi_get_bytes(segments[0]), charset, errors, allow_none_charset=True
+        )
+
+
+def extract_path_info(
+    environ_or_baseurl,
+    path_or_url,
+    charset="utf-8",
+    errors="werkzeug.url_quote",
+    collapse_http_schemes=True,
+):
+    """Extracts the path info from the given URL (or WSGI environment) and
+    path.  The path info returned is a unicode string, not a bytestring
+    suitable for a WSGI environment.  The URLs might also be IRIs.
+
+    If the path info could not be determined, `None` is returned.
+
+    Some examples:
+
+    >>> extract_path_info('http://example.com/app', '/app/hello')
+    u'/hello'
+    >>> extract_path_info('http://example.com/app',
+    ...                   'https://example.com/app/hello')
+    u'/hello'
+    >>> extract_path_info('http://example.com/app',
+    ...                   'https://example.com/app/hello',
+    ...                   collapse_http_schemes=False) is None
+    True
+
+    Instead of providing a base URL you can also pass a WSGI environment.
+
+    :param environ_or_baseurl: a WSGI environment dict, a base URL or
+                               base IRI.  This is the root of the
+                               application.
+    :param path_or_url: an absolute path from the server root, a
+                        relative path (in which case it's the path info)
+                        or a full URL.  Also accepts IRIs and unicode
+                        parameters.
+    :param charset: the charset for byte data in URLs
+    :param errors: the error handling on decode
+    :param collapse_http_schemes: if set to `False` the algorithm does
+                                  not assume that http and https on the
+                                  same server point to the same
+                                  resource.
+
+    .. versionchanged:: 0.15
+        The ``errors`` parameter defaults to leaving invalid bytes
+        quoted instead of replacing them.
+
+    .. versionadded:: 0.6
+    """
+
+    def _normalize_netloc(scheme, netloc):
+        parts = netloc.split(u"@", 1)[-1].split(u":", 1)
+        if len(parts) == 2:
+            netloc, port = parts
+            if (scheme == u"http" and port == u"80") or (
+                scheme == u"https" and port == u"443"
+            ):
+                port = None
+        else:
+            netloc = parts[0]
+            port = None
+        if port is not None:
+            netloc += u":" + port
+        return netloc
+
+    # make sure whatever we are working on is an IRI and parse it
+    path = uri_to_iri(path_or_url, charset, errors)
+    if isinstance(environ_or_baseurl, dict):
+        environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True)
+    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
+    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
+    cur_scheme, cur_netloc, cur_path = url_parse(url_join(base_iri, path))[:3]
+
+    # normalize the network location
+    base_netloc = _normalize_netloc(base_scheme, base_netloc)
+    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
+
+    # is that IRI even on a known HTTP scheme?
+    if collapse_http_schemes:
+        for scheme in base_scheme, cur_scheme:
+            if scheme not in (u"http", u"https"):
+                return None
+    else:
+        if not (base_scheme in (u"http", u"https") and base_scheme == cur_scheme):
+            return None
+
+    # are the netlocs compatible?
+    if base_netloc != cur_netloc:
+        return None
+
+    # are we below the application path?
+    base_path = base_path.rstrip(u"/")
+    if not cur_path.startswith(base_path):
+        return None
+
+    return u"/" + cur_path[len(base_path) :].lstrip(u"/")
+
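+
+# Illustrative sketch (assumption: hypothetical helper, not part of the
+# original module): passing a WSGI environ instead of a base URL; the
+# application root is derived from it.
+def _example_extract_from_environ():
+    environ = {
+        "wsgi.url_scheme": "http",
+        "HTTP_HOST": "example.com",
+        "SCRIPT_NAME": "/app",
+        "PATH_INFO": "/hello",
+    }
+    # get_current_url(environ, root_only=True) yields 'http://example.com/app/'
+    return extract_path_info(environ, "http://example.com/app/hello")  # u'/hello'
+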
+
+@implements_iterator
+class ClosingIterator(object):
+    """The WSGI specification requires that all middlewares and gateways
+    respect the `close` callback of the iterable returned by the application.
+    Because it is useful to add another close action to a returned iterable
+    and adding a custom iterable is a boring task this class can be used for
+    that::
+
+        return ClosingIterator(app(environ, start_response), [cleanup_session,
+                                                              cleanup_locals])
+
+    If there is just one close function it can be passed instead of the list.
+
+    A closing iterator is not needed if the application uses response objects
+    and finishes processing once the response is started::
+
+        try:
+            return response(environ, start_response)
+        finally:
+            cleanup_session()
+            cleanup_locals()
+    """
+
+    def __init__(self, iterable, callbacks=None):
+        iterator = iter(iterable)
+        self._next = partial(next, iterator)
+        if callbacks is None:
+            callbacks = []
+        elif callable(callbacks):
+            callbacks = [callbacks]
+        else:
+            callbacks = list(callbacks)
+        iterable_close = getattr(iterable, "close", None)
+        if iterable_close:
+            callbacks.insert(0, iterable_close)
+        self._callbacks = callbacks
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return self._next()
+
+    def close(self):
+        for callback in self._callbacks:
+            callback()
+
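+
+# Illustrative middleware sketch (assumption: `cleanup_session` and
+# `cleanup_locals` are hypothetical placeholder callbacks, not part of
+# this module).
+def _example_closing_middleware(app):
+    def middleware(environ, start_response):
+        def cleanup_session():
+            pass  # placeholder: tear down the request's session here
+
+        def cleanup_locals():
+            pass  # placeholder: release request-local state here
+
+        # The callbacks run when the server calls close() on the iterable.
+        return ClosingIterator(app(environ, start_response),
+                               [cleanup_session, cleanup_locals])
+
+    return middleware
+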
+
+def wrap_file(environ, file, buffer_size=8192):
+    """Wraps a file.  This uses the WSGI server's file wrapper if available
+    or otherwise the generic :class:`FileWrapper`.
+
+    .. versionadded:: 0.5
+
+    If the file wrapper from the WSGI server is used it's important not to
+    iterate over it from inside the application but to pass it through
+    unchanged.  If you want to pass out a file wrapper inside a response
+    object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
+
+    More information about file wrappers is available in :pep:`333`.
+
+    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
+    :param buffer_size: number of bytes for one iteration.
+    """
+    return environ.get("wsgi.file_wrapper", FileWrapper)(file, buffer_size)
+
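+
+# Illustrative sketch (assumption: hypothetical WSGI app and file path, not
+# part of this module): serving a file through the server's file wrapper
+# when one is available.
+def _example_file_app(environ, start_response):
+    f = open("/tmp/example.bin", "rb")  # hypothetical file path
+    start_response("200 OK", [("Content-Type", "application/octet-stream")])
+    # Uses wsgi.file_wrapper if the server provides it, FileWrapper otherwise.
+    return wrap_file(environ, f)
+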
+
+@implements_iterator
+class FileWrapper(object):
+    """This class can be used to convert a :class:`file`-like object into
+    an iterable.  It yields `buffer_size` blocks until the file is fully
+    read.
+
+    You should not use this class directly but rather use the
+    :func:`wrap_file` function that uses the WSGI server's file wrapper
+    support if it's available.
+
+    .. versionadded:: 0.5
+
+    If you're using this object together with a :class:`BaseResponse` you have
+    to use the `direct_passthrough` mode.
+
+    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
+    :param buffer_size: number of bytes for one iteration.
+    """
+
+    def __init__(self, file, buffer_size=8192):
+        self.file = file
+        self.buffer_size = buffer_size
+
+    def close(self):
+        if hasattr(self.file, "close"):
+            self.file.close()
+
+    def seekable(self):
+        if hasattr(self.file, "seekable"):
+            return self.file.seekable()
+        if hasattr(self.file, "seek"):
+            return True
+        return False
+
+    def seek(self, *args):
+        if hasattr(self.file, "seek"):
+            self.file.seek(*args)
+
+    def tell(self):
+        if hasattr(self.file, "tell"):
+            return self.file.tell()
+        return None
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        data = self.file.read(self.buffer_size)
+        if data:
+            return data
+        raise StopIteration()
+
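+
+# Illustrative sketch (assumption: hypothetical helper): iterating a file in
+# fixed-size blocks via the generic wrapper.
+def _example_iter_file(path, block_size=4096):
+    f = open(path, "rb")
+    wrapper = FileWrapper(f, buffer_size=block_size)
+    try:
+        for block in wrapper:
+            yield block
+    finally:
+        wrapper.close()  # also closes the underlying file
+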
+
+@implements_iterator
+class _RangeWrapper(object):
+    # private for now, but should we make it public in the future?
+
+    """This class can be used to convert an iterable object into
+    an iterable that will only yield a piece of the underlying content.
+    It yields blocks until the underlying stream range is fully read.
+    The yielded blocks will never be larger than the original iterator's
+    block size, but they can be smaller.
+
+    If you're using this object together with a :class:`BaseResponse` you have
+    to use the `direct_passthrough` mode.
+
+    :param iterable: an iterable object with a :meth:`__next__` method.
+    :param start_byte: byte from which read will start.
+    :param byte_range: how many bytes to read.
+    """
+
+    def __init__(self, iterable, start_byte=0, byte_range=None):
+        self.iterable = iter(iterable)
+        self.byte_range = byte_range
+        self.start_byte = start_byte
+        self.end_byte = None
+        if byte_range is not None:
+            self.end_byte = self.start_byte + self.byte_range
+        self.read_length = 0
+        self.seekable = hasattr(iterable, "seekable") and iterable.seekable()
+        self.end_reached = False
+
+    def __iter__(self):
+        return self
+
+    def _next_chunk(self):
+        try:
+            chunk = next(self.iterable)
+            self.read_length += len(chunk)
+            return chunk
+        except StopIteration:
+            self.end_reached = True
+            raise
+
+    def _first_iteration(self):
+        chunk = None
+        if self.seekable:
+            self.iterable.seek(self.start_byte)
+            self.read_length = self.iterable.tell()
+            contextual_read_length = self.read_length
+        else:
+            while self.read_length <= self.start_byte:
+                chunk = self._next_chunk()
+            if chunk is not None:
+                chunk = chunk[self.start_byte - self.read_length :]
+            contextual_read_length = self.start_byte
+        return chunk, contextual_read_length
+
+    def _next(self):
+        if self.end_reached:
+            raise StopIteration()
+        chunk = None
+        contextual_read_length = self.read_length
+        if self.read_length == 0:
+            chunk, contextual_read_length = self._first_iteration()
+        if chunk is None:
+            chunk = self._next_chunk()
+        if self.end_byte is not None and self.read_length >= self.end_byte:
+            self.end_reached = True
+            return chunk[: self.end_byte - contextual_read_length]
+        return chunk
+
+    def __next__(self):
+        chunk = self._next()
+        if chunk:
+            return chunk
+        self.end_reached = True
+        raise StopIteration()
+
+    def close(self):
+        if hasattr(self.iterable, "close"):
+            self.iterable.close()
+
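+
+# Illustrative sketch (assumption: hypothetical helper; _RangeWrapper is
+# private API): yielding only bytes 100-299 of a file, e.g. to answer an
+# HTTP Range request.
+def _example_range_response(path):
+    wrapper = FileWrapper(open(path, "rb"))
+    # Skips the first 100 bytes (seeking when possible) and stops after 200.
+    return _RangeWrapper(wrapper, start_byte=100, byte_range=200)
+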
+
+def _make_chunk_iter(stream, limit, buffer_size):
+    """Helper for the line and chunk iter functions."""
+    if isinstance(stream, (bytes, bytearray, text_type)):
+        raise TypeError(
+            "Passed a string or byte object instead of true iterator or stream."
+        )
+    if not hasattr(stream, "read"):
+        for item in stream:
+            if item:
+                yield item
+        return
+    if not isinstance(stream, LimitedStream) and limit is not None:
+        stream = LimitedStream(stream, limit)
+    _read = stream.read
+    while 1:
+        item = _read(buffer_size)
+        if not item:
+            break
+        yield item
+
+
+def make_line_iter(stream, limit=None, buffer_size=10 * 1024, cap_at_buffer=False):
+    """Safely iterates line-based over an input stream.  If the input stream
+    is not a :class:`LimitedStream` the `limit` parameter is mandatory.
+
+    This uses the stream's :meth:`~file.read` method internally as opposite
+    to the :meth:`~file.readline` method that is unsafe and can only be used
+    in violation of the WSGI specification.  The same problem applies to the
+    `__iter__` function of the input stream which calls :meth:`~file.readline`
+    without arguments.
+
+    If you need line-by-line processing it's strongly recommended to iterate
+    over the input stream using this helper function.
+
+    .. versionchanged:: 0.8
+       This function now ensures that the limit was reached.
+
+    .. versionadded:: 0.9
+       added support for iterators as input stream.
+
+    .. versionadded:: 0.11.10
+       added support for the `cap_at_buffer` parameter.
+
+    :param stream: the stream or iterable to iterate over.
+    :param limit: the limit in bytes for the stream.  (Usually the
+                  content length.  Not necessary if the `stream`
+                  is a :class:`LimitedStream`.)
+    :param buffer_size: The optional buffer size.
+    :param cap_at_buffer: if this is set, chunks are split if they are
+                          longer than the buffer size.  Internally this is
+                          implemented so that the buffer size might be
+                          exceeded by a factor of two, however.
+    """
+    _iter = _make_chunk_iter(stream, limit, buffer_size)
+
+    first_item = next(_iter, "")
+    if not first_item:
+        return
+
+    s = make_literal_wrapper(first_item)
+    empty = s("")
+    cr = s("\r")
+    lf = s("\n")
+    crlf = s("\r\n")
+
+    _iter = chain((first_item,), _iter)
+
+    def _iter_basic_lines():
+        _join = empty.join
+        buffer = []
+        while 1:
+            new_data = next(_iter, "")
+            if not new_data:
+                break
+            new_buf = []
+            buf_size = 0
+            for item in chain(buffer, new_data.splitlines(True)):
+                new_buf.append(item)
+                buf_size += len(item)
+                if item and item[-1:] in crlf:
+                    yield _join(new_buf)
+                    new_buf = []
+                elif cap_at_buffer and buf_size >= buffer_size:
+                    rv = _join(new_buf)
+                    while len(rv) >= buffer_size:
+                        yield rv[:buffer_size]
+                        rv = rv[buffer_size:]
+                    new_buf = [rv]
+            buffer = new_buf
+        if buffer:
+            yield _join(buffer)
+
+    # This hackery is necessary to merge 'foo\r' and '\n' into one item
+    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
+    previous = empty
+    for item in _iter_basic_lines():
+        if item == lf and previous[-1:] == cr:
+            previous += item
+            item = empty
+        if previous:
+            yield previous
+        previous = item
+    if previous:
+        yield previous
+
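+
+# Illustrative sketch (assumption: hypothetical helper): processing a request
+# body line by line without calling readline() on wsgi.input directly.
+def _example_lines(environ):
+    limit = int(environ.get("CONTENT_LENGTH") or 0)
+    for line in make_line_iter(environ["wsgi.input"], limit=limit):
+        yield line
+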
+
+def make_chunk_iter(
+    stream, separator, limit=None, buffer_size=10 * 1024, cap_at_buffer=False
+):
+    """Works like :func:`make_line_iter` but accepts a separator
+    which divides chunks.  If you want newline based processing
+    you should use :func:`make_line_iter` instead as it
+    supports arbitrary newline markers.
+
+    .. versionadded:: 0.8
+
+    .. versionadded:: 0.9
+       added support for iterators as input stream.
+
+    .. versionadded:: 0.11.10
+       added support for the `cap_at_buffer` parameter.
+
+    :param stream: the stream or iterable to iterate over.
+    :param separator: the separator that divides chunks.
+    :param limit: the limit in bytes for the stream.  (Usually the
+                  content length.  Not necessary if the `stream`
+                  is otherwise already limited).
+    :param buffer_size: The optional buffer size.
+    :param cap_at_buffer: if this is set, chunks are split if they are
+                          longer than the buffer size.  Internally this is
+                          implemented so that the buffer size might be
+                          exceeded by a factor of two, however.
+    """
+    _iter = _make_chunk_iter(stream, limit, buffer_size)
+
+    first_item = next(_iter, "")
+    if not first_item:
+        return
+
+    _iter = chain((first_item,), _iter)
+    if isinstance(first_item, text_type):
+        separator = to_unicode(separator)
+        _split = re.compile(r"(%s)" % re.escape(separator)).split
+        _join = u"".join
+    else:
+        separator = to_bytes(separator)
+        _split = re.compile(b"(" + re.escape(separator) + b")").split
+        _join = b"".join
+
+    buffer = []
+    while 1:
+        new_data = next(_iter, "")
+        if not new_data:
+            break
+        chunks = _split(new_data)
+        new_buf = []
+        buf_size = 0
+        for item in chain(buffer, chunks):
+            if item == separator:
+                yield _join(new_buf)
+                new_buf = []
+                buf_size = 0
+            else:
+                buf_size += len(item)
+                new_buf.append(item)
+
+                if cap_at_buffer and buf_size >= buffer_size:
+                    rv = _join(new_buf)
+                    while len(rv) >= buffer_size:
+                        yield rv[:buffer_size]
+                        rv = rv[buffer_size:]
+                    new_buf = [rv]
+                    buf_size = len(rv)
+
+        buffer = new_buf
+    if buffer:
+        yield _join(buffer)
+
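+
+# Illustrative sketch (assumption: hypothetical helper and separator):
+# splitting a request body on an arbitrary delimiter instead of newlines.
+def _example_chunks(environ, separator=b"\x00"):
+    limit = int(environ.get("CONTENT_LENGTH") or 0)
+    for chunk in make_chunk_iter(environ["wsgi.input"], separator, limit=limit):
+        yield chunk
+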
+
+@implements_iterator
+class LimitedStream(io.IOBase):
+    """Wraps a stream so that it doesn't read more than n bytes.  If the
+    stream is exhausted and the caller tries to get more bytes from it
+    :func:`on_exhausted` is called which by default returns an empty
+    string.  The return value of that function is forwarded
+    to the reader function.  So if it returns an empty string
+    :meth:`read` will return an empty string as well.
+
+    The limit however must never be higher than what the stream can
+    output.  Otherwise :meth:`readlines` will try to read past the
+    limit.
+
+    .. admonition:: Note on WSGI compliance
+
+       calls to :meth:`readline` and :meth:`readlines` are not
+       WSGI compliant because they pass a size argument to the
+       readline methods.  Unfortunately the WSGI PEP is not safely
+       implementable without a size argument to :meth:`readline`
+       because there is no EOF marker in the stream.  As a result
+       of that the use of :meth:`readline` is discouraged.
+
+       For the same reason iterating over the :class:`LimitedStream`
+       is not portable.  It internally calls :meth:`readline`.
+
+       We strongly suggest using :meth:`read` only, or using
+       :func:`make_line_iter`, which safely iterates line by line
+       over a WSGI input stream.
+
+    :param stream: the stream to wrap.
+    :param limit: the limit for the stream, must not be longer than
+                  what the stream can provide if the stream does not
+                  end with `EOF` (like `wsgi.input`)
+    """
+
+    def __init__(self, stream, limit):
+        self._read = stream.read
+        self._readline = stream.readline
+        self._pos = 0
+        self.limit = limit
+
+    def __iter__(self):
+        return self
+
+    @property
+    def is_exhausted(self):
+        """If the stream is exhausted this attribute is `True`."""
+        return self._pos >= self.limit
+
+    def on_exhausted(self):
+        """This is called when the stream tries to read past the limit.
+        The return value of this function is returned from the reading
+        function.
+        """
+        # Read zero bytes from the stream so that we get the
+        # correct end of stream marker.
+        return self._read(0)
+
+    def on_disconnect(self):
+        """What should happen if a disconnect is detected?  The return
+        value of this function is returned from read functions in case
+        the client went away.  By default a
+        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
+        """
+        from .exceptions import ClientDisconnected
+
+        raise ClientDisconnected()
+
+    def exhaust(self, chunk_size=1024 * 64):
+        """Exhaust the stream.  This consumes all the data left until the
+        limit is reached.
+
+        :param chunk_size: the size for a chunk.  It will read chunks
+                           of this size until the stream is exhausted
+                           and throw away the results.
+        """
+        to_read = self.limit - self._pos
+        chunk = chunk_size
+        while to_read > 0:
+            chunk = min(to_read, chunk)
+            self.read(chunk)
+            to_read -= chunk
+
+    def read(self, size=None):
+        """Read `size` bytes or if size is not provided everything is read.
+
+        :param size: the number of bytes read.
+        """
+        if self._pos >= self.limit:
+            return self.on_exhausted()
+        if size is None or size == -1:  # -1 is for consistency with file objects
+            size = self.limit
+        to_read = min(self.limit - self._pos, size)
+        try:
+            read = self._read(to_read)
+        except (IOError, ValueError):
+            return self.on_disconnect()
+        if to_read and len(read) != to_read:
+            return self.on_disconnect()
+        self._pos += len(read)
+        return read
+
+    def readline(self, size=None):
+        """Reads one line from the stream."""
+        if self._pos >= self.limit:
+            return self.on_exhausted()
+        if size is None:
+            size = self.limit - self._pos
+        else:
+            size = min(size, self.limit - self._pos)
+        try:
+            line = self._readline(size)
+        except (ValueError, IOError):
+            return self.on_disconnect()
+        if size and not line:
+            return self.on_disconnect()
+        self._pos += len(line)
+        return line
+
+    def readlines(self, size=None):
+        """Reads a file into a list of strings.  It calls :meth:`readline`
+        until the file is read to the end.  It does support the optional
+        `size` argument if the underlaying stream supports it for
+        `readline`.
+        """
+        last_pos = self._pos
+        result = []
+        if size is not None:
+            end = min(self.limit, last_pos + size)
+        else:
+            end = self.limit
+        while 1:
+            if size is not None:
+                size -= last_pos - self._pos
+            if self._pos >= end:
+                break
+            result.append(self.readline(size))
+            if size is not None:
+                last_pos = self._pos
+        return result
+
+    def tell(self):
+        """Returns the position of the stream.
+
+        .. versionadded:: 0.9
+        """
+        return self._pos
+
+    def __next__(self):
+        line = self.readline()
+        if not line:
+            raise StopIteration()
+        return line
+
+    def readable(self):
+        return True
+
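+
+# Illustrative sketch (assumption: hypothetical helper): bounded reading of a
+# request body, followed by draining the stream so keep-alive keeps working.
+def _example_read_body(environ, max_bytes=4096):
+    limit = int(environ.get("CONTENT_LENGTH") or 0)
+    stream = LimitedStream(environ["wsgi.input"], limit)
+    data = stream.read(max_bytes)  # returns b"" once the limit is reached
+    stream.exhaust()  # throw away any remaining bytes up to the limit
+    return data
+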
+
+# DEPRECATED
+from .middleware.dispatcher import DispatcherMiddleware as _DispatcherMiddleware
+from .middleware.http_proxy import ProxyMiddleware as _ProxyMiddleware
+from .middleware.shared_data import SharedDataMiddleware as _SharedDataMiddleware
+
+
+class ProxyMiddleware(_ProxyMiddleware):
+    """
+    .. deprecated:: 0.15
+        ``werkzeug.wsgi.ProxyMiddleware`` has moved to
+        :mod:`werkzeug.middleware.http_proxy`. This import will be
+        removed in 1.0.
+    """
+
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'werkzeug.wsgi.ProxyMiddleware' has moved to 'werkzeug"
+            ".middleware.http_proxy.ProxyMiddleware'. This import is"
+            " deprecated as of version 0.15 and will be removed in"
+            " version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(ProxyMiddleware, self).__init__(*args, **kwargs)
+
+
+class SharedDataMiddleware(_SharedDataMiddleware):
+    """
+    .. deprecated:: 0.15
+        ``werkzeug.wsgi.SharedDataMiddleware`` has moved to
+        :mod:`werkzeug.middleware.shared_data`. This import will be
+        removed in 1.0.
+    """
+
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'werkzeug.wsgi.SharedDataMiddleware' has moved to"
+            " 'werkzeug.middleware.shared_data.SharedDataMiddleware'."
+            " This import is deprecated as of version 0.15 and will be"
+            " removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(SharedDataMiddleware, self).__init__(*args, **kwargs)
+
+
+class DispatcherMiddleware(_DispatcherMiddleware):
+    """
+    .. deprecated:: 0.15
+        ``werkzeug.wsgi.DispatcherMiddleware`` has moved to
+        :mod:`werkzeug.middleware.dispatcher`. This import will be
+        removed in 1.0.
+    """
+
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            "'werkzeug.wsgi.DispatcherMiddleware' has moved to"
+            " 'werkzeug.middleware.dispatcher.DispatcherMiddleware'."
+            " This import is deprecated as of version 0.15 and will be"
+            " removed in version 1.0.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super(DispatcherMiddleware, self).__init__(*args, **kwargs)
diff --git a/lib/python3.7/site-packages/wheel-0.33.1.dist-info/INSTALLER b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/python3.7/site-packages/wheel-0.33.1.dist-info/LICENSE.txt b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c3441e6cc8443ced85b4f25f5171912926f6db6d
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/LICENSE.txt
@@ -0,0 +1,22 @@
+"wheel" copyright (c) 2012-2014 Daniel Holth <dholth@fastmail.fm> and
+contributors.
+
+The MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lib/python3.7/site-packages/wheel-0.33.1.dist-info/METADATA b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..2354dd4ec14468780fa06b61fa148a47bf44f8cf
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/METADATA
@@ -0,0 +1,60 @@
+Metadata-Version: 2.1
+Name: wheel
+Version: 0.33.1
+Summary: A built-package format for Python.
+Home-page: https://github.com/pypa/wheel
+Author: Daniel Holth
+Author-email: dholth@fastmail.fm
+Maintainer: Alex Grönholm
+Maintainer-email: alex.gronholm@nextday.fi
+License: MIT
+Project-URL: Issue Tracker, https://github.com/pypa/wheel/issues
+Project-URL: Documentation, https://wheel.readthedocs.io/
+Project-URL: Changelog, https://wheel.readthedocs.io/en/stable/news.html
+Keywords: wheel,packaging
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: System :: Archiving :: Packaging
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+Provides-Extra: test
+Requires-Dist: pytest (>=3.0.0) ; extra == 'test'
+Requires-Dist: pytest-cov ; extra == 'test'
+
+wheel
+=====
+
+This library is the reference implementation of the Python wheel packaging
+standard, as defined in `PEP 427`_.
+
+It has two different roles:
+
+#. A setuptools_ extension for building wheels that provides the
+   ``bdist_wheel`` setuptools command
+#. A command line tool for working with wheel files
+
+It should be noted that wheel is **not** intended to be used as a library, and
+as such there is no stable, public API.
+
+.. _PEP 427: https://www.python.org/dev/peps/pep-0427/
+.. _setuptools: https://pypi.org/project/setuptools/
+
+
+Code of Conduct
+---------------
+
+Everyone interacting in the wheel project's codebases, issue trackers, chat
+rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_.
+
+.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/
+
+
diff --git a/lib/python3.7/site-packages/wheel-0.33.1.dist-info/RECORD b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..8e3080c9fa57be97e424e082804ed8c8ec55d845
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/RECORD
@@ -0,0 +1,32 @@
+../../../bin/wheel,sha256=vKYLMszs1Ti36fbIV_nPl51KDK_gU2NlZfKIHUmtRmk,279
+wheel-0.33.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+wheel-0.33.1.dist-info/LICENSE.txt,sha256=zKniDGrx_Pv2lAjzd3aShsvuvN7TNhAMm0o_NfvmNeQ,1125
+wheel-0.33.1.dist-info/METADATA,sha256=ZKo4q8P_vXXg-OGjDZGlaIf4gafusgkeCd6jDxbrJLA,2082
+wheel-0.33.1.dist-info/RECORD,,
+wheel-0.33.1.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110
+wheel-0.33.1.dist-info/entry_points.txt,sha256=N8HbYFST3yrNQYeB2wXWBEPUhFsEtKNRPaCFGJPyqyc,108
+wheel-0.33.1.dist-info/top_level.txt,sha256=HxSBIbgEstMPe4eFawhA66Mq-QYHMopXVoAncfjb_1c,6
+wheel/__init__.py,sha256=OyOAcqlvJaBEASYRGijJyAkY_HL_5HR3alvjO5BEwKg,96
+wheel/__main__.py,sha256=lF-YLO4hdQmoWuh4eWZd8YL1U95RSdm76sNLBXa0vjE,417
+wheel/__pycache__/__init__.cpython-37.pyc,,
+wheel/__pycache__/__main__.cpython-37.pyc,,
+wheel/__pycache__/bdist_wheel.cpython-37.pyc,,
+wheel/__pycache__/metadata.cpython-37.pyc,,
+wheel/__pycache__/pep425tags.cpython-37.pyc,,
+wheel/__pycache__/pkginfo.cpython-37.pyc,,
+wheel/__pycache__/util.cpython-37.pyc,,
+wheel/__pycache__/wheelfile.cpython-37.pyc,,
+wheel/bdist_wheel.py,sha256=x2beC81u8AzTNVLwD36EPkVX5AxAYs2q70N09BxvMN0,14756
+wheel/cli/__init__.py,sha256=GWSoGUpRabTf8bk3FsNTPrc5Fsr8YOv2dX55iY2W7eY,2572
+wheel/cli/__pycache__/__init__.cpython-37.pyc,,
+wheel/cli/__pycache__/convert.cpython-37.pyc,,
+wheel/cli/__pycache__/pack.cpython-37.pyc,,
+wheel/cli/__pycache__/unpack.cpython-37.pyc,,
+wheel/cli/convert.py,sha256=me0l6G4gSw-EBVhzjSr7yWYWBp9spMz7mnXlyJTiXso,9497
+wheel/cli/pack.py,sha256=vkvZc4-rRZyWiwc6sHjpqIjzwDRMEF5u3JUNU9NY_jA,2263
+wheel/cli/unpack.py,sha256=0VWzT7U_xyenTPwEVavxqvdee93GPvAFHnR3Uu91aRc,673
+wheel/metadata.py,sha256=uBv2aOz4U2sERF834C8DeNo235drcsp3ypTzT7MTWEA,4699
+wheel/pep425tags.py,sha256=Jdjbnq17kqwPRKJCMb2E1VccNgnC3H6iQL7VGaxkPao,5908
+wheel/pkginfo.py,sha256=GR76kupQzn1x9sKDaXuE6B6FsZ4OkfRtG7pndlXPvQ4,1257
+wheel/util.py,sha256=zwVIk-9qWVQLRMgkgQTMp4TRE4HY03-tCUxLrtCpsfU,924
+wheel/wheelfile.py,sha256=U_13q1XpVt02704XXkFRzmUbz_0R9-GgNxMOZFP3tOs,7168
diff --git a/lib/python3.7/site-packages/wheel-0.33.1.dist-info/WHEEL b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..c8240f03e87fdc556c4cb9a1f721a73138177cc5
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/lib/python3.7/site-packages/wheel-0.33.1.dist-info/entry_points.txt b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b27acaddfde8258ee5b9eb70963ff108041bd998
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/entry_points.txt
@@ -0,0 +1,6 @@
+[console_scripts]
+wheel = wheel.cli:main
+
+[distutils.commands]
+bdist_wheel = wheel.bdist_wheel:bdist_wheel
+
diff --git a/lib/python3.7/site-packages/wheel-0.33.1.dist-info/top_level.txt b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2309722a93d261ba6840e2a1d18a96ac5b71f7b8
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel-0.33.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+wheel
diff --git a/lib/python3.7/site-packages/wheel/__init__.py b/lib/python3.7/site-packages/wheel/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..745cefc2eb32cd28c44b0a12fd6118a4a7c9faf7
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel/__init__.py
@@ -0,0 +1,2 @@
+# __variables__ with double-quoted values will be available in setup.py:
+__version__ = "0.33.1"
diff --git a/lib/python3.7/site-packages/wheel/__main__.py b/lib/python3.7/site-packages/wheel/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3773a20e08b985d4160b624fbfe1742f35472cd
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel/__main__.py
@@ -0,0 +1,19 @@
+"""
+Wheel command line tool (enables the ``python -m wheel`` syntax)
+"""
+
+import sys
+
+
+def main():  # needed for console script
+    if __package__ == '':
+        # To be able to run 'python wheel-0.9.whl/wheel':
+        import os.path
+        path = os.path.dirname(os.path.dirname(__file__))
+        sys.path[0:0] = [path]
+    import wheel.cli
+    sys.exit(wheel.cli.main())
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/lib/python3.7/site-packages/wheel/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/wheel/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8529b289149e48c810b09c72cdc498b062a9e4f8
Binary files /dev/null and b/lib/python3.7/site-packages/wheel/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/wheel/__pycache__/__main__.cpython-37.pyc b/lib/python3.7/site-packages/wheel/__pycache__/__main__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9172f983a3d5304e074c53787ef519c04bf71d4
Binary files /dev/null and b/lib/python3.7/site-packages/wheel/__pycache__/__main__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/wheel/__pycache__/bdist_wheel.cpython-37.pyc b/lib/python3.7/site-packages/wheel/__pycache__/bdist_wheel.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2a19702daa8bacb7a005ab8c0b8e3c7b9d4da11
Binary files /dev/null and b/lib/python3.7/site-packages/wheel/__pycache__/bdist_wheel.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/wheel/__pycache__/metadata.cpython-37.pyc b/lib/python3.7/site-packages/wheel/__pycache__/metadata.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e645b04de8014f08d0a9a9d831efb78e28f93306
Binary files /dev/null and b/lib/python3.7/site-packages/wheel/__pycache__/metadata.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/wheel/__pycache__/pep425tags.cpython-37.pyc b/lib/python3.7/site-packages/wheel/__pycache__/pep425tags.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce7498139c6bd895c0e2daef029dd1967d92cbb8
Binary files /dev/null and b/lib/python3.7/site-packages/wheel/__pycache__/pep425tags.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/wheel/__pycache__/pkginfo.cpython-37.pyc b/lib/python3.7/site-packages/wheel/__pycache__/pkginfo.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e86e071eb83fffa6af394899e152d2390fbbb683
Binary files /dev/null and b/lib/python3.7/site-packages/wheel/__pycache__/pkginfo.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/wheel/__pycache__/util.cpython-37.pyc b/lib/python3.7/site-packages/wheel/__pycache__/util.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..498b50bb1285b218a2457c129b50df57d2a8a19f
Binary files /dev/null and b/lib/python3.7/site-packages/wheel/__pycache__/util.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/wheel/__pycache__/wheelfile.cpython-37.pyc b/lib/python3.7/site-packages/wheel/__pycache__/wheelfile.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f3c592f02f3ea171b935c5559ef1151e3621a8e0
Binary files /dev/null and b/lib/python3.7/site-packages/wheel/__pycache__/wheelfile.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/wheel/bdist_wheel.py b/lib/python3.7/site-packages/wheel/bdist_wheel.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f06d36947313b1581be01eed98da37f8c7baf4f
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel/bdist_wheel.py
@@ -0,0 +1,372 @@
+"""
+Create a wheel (.whl) distribution.
+
+A wheel is a built archive format.
+"""
+
+import os
+import shutil
+import sys
+import re
+from email.generator import Generator
+from distutils.core import Command
+from distutils.sysconfig import get_python_version
+from distutils import log as logger
+from glob import iglob
+from shutil import rmtree
+from warnings import warn
+
+import pkg_resources
+
+from .pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag, get_platform
+from .pkginfo import write_pkg_info
+from .metadata import pkginfo_to_metadata
+from .wheelfile import WheelFile
+from . import pep425tags
+from . import __version__ as wheel_version
+
+
+safe_name = pkg_resources.safe_name
+safe_version = pkg_resources.safe_version
+
+PY_LIMITED_API_PATTERN = r'cp3\d'
+
+
+def safer_name(name):
+    return safe_name(name).replace('-', '_')
+
+
+def safer_version(version):
+    return safe_version(version).replace('-', '_')
+
+
+class bdist_wheel(Command):
+
+    description = 'create a wheel distribution'
+
+    user_options = [('bdist-dir=', 'b',
+                     "temporary directory for creating the distribution"),
+                    ('plat-name=', 'p',
+                     "platform name to embed in generated filenames "
+                     "(default: %s)" % get_platform()),
+                    ('keep-temp', 'k',
+                     "keep the pseudo-installation tree around after " +
+                     "creating the distribution archive"),
+                    ('dist-dir=', 'd',
+                     "directory to put final built distributions in"),
+                    ('skip-build', None,
+                     "skip rebuilding everything (for testing/debugging)"),
+                    ('relative', None,
+                     "build the archive using relative paths "
+                     "(default: false)"),
+                    ('owner=', 'u',
+                     "Owner name used when creating a tar file"
+                     " [default: current user]"),
+                    ('group=', 'g',
+                     "Group name used when creating a tar file"
+                     " [default: current group]"),
+                    ('universal', None,
+                     "make a universal wheel"
+                     " (default: false)"),
+                    ('python-tag=', None,
+                     "Python implementation compatibility tag"
+                     " (default: py%s)" % get_impl_ver()[0]),
+                    ('build-number=', None,
+                     "Build number for this particular version. "
+                     "As specified in PEP-0427, this must start with a digit. "
+                     "[default: None]"),
+                    ('py-limited-api=', None,
+                     "Python tag (cp32|cp33|cpNN) for abi3 wheel tag"
+                     " (default: false)"),
+                    ]
+
+    boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']
+
+    def initialize_options(self):
+        self.bdist_dir = None
+        self.data_dir = None
+        self.plat_name = None
+        self.plat_tag = None
+        self.format = 'zip'
+        self.keep_temp = False
+        self.dist_dir = None
+        self.egginfo_dir = None
+        self.root_is_pure = None
+        self.skip_build = None
+        self.relative = False
+        self.owner = None
+        self.group = None
+        self.universal = False
+        self.python_tag = 'py' + get_impl_ver()[0]
+        self.build_number = None
+        self.py_limited_api = False
+        self.plat_name_supplied = False
+
+    def finalize_options(self):
+        if self.bdist_dir is None:
+            bdist_base = self.get_finalized_command('bdist').bdist_base
+            self.bdist_dir = os.path.join(bdist_base, 'wheel')
+
+        self.data_dir = self.wheel_dist_name + '.data'
+        self.plat_name_supplied = self.plat_name is not None
+
+        need_options = ('dist_dir', 'plat_name', 'skip_build')
+
+        self.set_undefined_options('bdist',
+                                   *zip(need_options, need_options))
+
+        self.root_is_pure = not (self.distribution.has_ext_modules()
+                                 or self.distribution.has_c_libraries())
+
+        if self.py_limited_api and not re.match(PY_LIMITED_API_PATTERN, self.py_limited_api):
+            raise ValueError("py-limited-api must match '%s'" % PY_LIMITED_API_PATTERN)
+
+        # Support legacy [wheel] section for setting universal
+        wheel = self.distribution.get_option_dict('wheel')
+        if 'universal' in wheel:
+            # please don't define this in your global configs
+            logger.warn('The [wheel] section is deprecated. Use [bdist_wheel] instead.')
+            val = wheel['universal'][1].strip()
+            if val.lower() in ('1', 'true', 'yes'):
+                self.universal = True
+
+        if self.build_number is not None and not self.build_number[:1].isdigit():
+            raise ValueError("Build tag (build-number) must start with a digit.")
+
+    @property
+    def wheel_dist_name(self):
+        """Return distribution full name with - replaced with _"""
+        components = (safer_name(self.distribution.get_name()),
+                      safer_version(self.distribution.get_version()))
+        if self.build_number:
+            components += (self.build_number,)
+        return '-'.join(components)
+
+    def get_tag(self):
+        # bdist sets self.plat_name if unset, we should only use it for purepy
+        # wheels if the user supplied it.
+        if self.plat_name_supplied:
+            plat_name = self.plat_name
+        elif self.root_is_pure:
+            plat_name = 'any'
+        else:
+            plat_name = self.plat_name or get_platform()
+            if plat_name in ('linux-x86_64', 'linux_x86_64') and sys.maxsize == 2147483647:
+                plat_name = 'linux_i686'
+        plat_name = plat_name.replace('-', '_').replace('.', '_')
+
+        if self.root_is_pure:
+            if self.universal:
+                impl = 'py2.py3'
+            else:
+                impl = self.python_tag
+            tag = (impl, 'none', plat_name)
+        else:
+            impl_name = get_abbr_impl()
+            impl_ver = get_impl_ver()
+            impl = impl_name + impl_ver
+            # We don't work on CPython 3.1, 3.0.
+            if self.py_limited_api and (impl_name + impl_ver).startswith('cp3'):
+                impl = self.py_limited_api
+                abi_tag = 'abi3'
+            else:
+                abi_tag = str(get_abi_tag()).lower()
+            tag = (impl, abi_tag, plat_name)
+            supported_tags = pep425tags.get_supported(
+                supplied_platform=plat_name if self.plat_name_supplied else None)
+            # XXX switch to this alternate implementation for non-pure:
+            if not self.py_limited_api:
+                assert tag == supported_tags[0], "%s != %s" % (tag, supported_tags[0])
+            assert tag in supported_tags, "would build wheel with unsupported tag {}".format(tag)
+        return tag
+
+    def run(self):
+        build_scripts = self.reinitialize_command('build_scripts')
+        build_scripts.executable = 'python'
+        build_scripts.force = True
+
+        build_ext = self.reinitialize_command('build_ext')
+        build_ext.inplace = False
+
+        if not self.skip_build:
+            self.run_command('build')
+
+        install = self.reinitialize_command('install',
+                                            reinit_subcommands=True)
+        install.root = self.bdist_dir
+        install.compile = False
+        install.skip_build = self.skip_build
+        install.warn_dir = False
+
+        # A wheel without setuptools scripts is more cross-platform.
+        # Use the (undocumented) `no_ep` option to setuptools'
+        # install_scripts command to avoid creating entry point scripts.
+        install_scripts = self.reinitialize_command('install_scripts')
+        install_scripts.no_ep = True
+
+        # Use a custom scheme for the archive, because we have to decide
+        # at installation time which scheme to use.
+        for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
+            setattr(install,
+                    'install_' + key,
+                    os.path.join(self.data_dir, key))
+
+        basedir_observed = ''
+
+        if os.name == 'nt':
+            # win32 barfs if any of these are ''; could be '.'?
+            # (distutils.command.install:change_roots bug)
+            basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..'))
+            self.install_libbase = self.install_lib = basedir_observed
+
+        setattr(install,
+                'install_purelib' if self.root_is_pure else 'install_platlib',
+                basedir_observed)
+
+        logger.info("installing to %s", self.bdist_dir)
+
+        self.run_command('install')
+
+        impl_tag, abi_tag, plat_tag = self.get_tag()
+        archive_basename = "{}-{}-{}-{}".format(self.wheel_dist_name, impl_tag, abi_tag, plat_tag)
+        if not self.relative:
+            archive_root = self.bdist_dir
+        else:
+            archive_root = os.path.join(
+                self.bdist_dir,
+                self._ensure_relative(install.install_base))
+
+        self.set_undefined_options('install_egg_info', ('target', 'egginfo_dir'))
+        distinfo_dirname = '{}-{}.dist-info'.format(
+            safer_name(self.distribution.get_name()),
+            safer_version(self.distribution.get_version()))
+        distinfo_dir = os.path.join(self.bdist_dir, distinfo_dirname)
+        self.egg2dist(self.egginfo_dir, distinfo_dir)
+
+        self.write_wheelfile(distinfo_dir)
+
+        # Make the archive
+        if not os.path.exists(self.dist_dir):
+            os.makedirs(self.dist_dir)
+
+        wheel_path = os.path.join(self.dist_dir, archive_basename + '.whl')
+        with WheelFile(wheel_path, 'w') as wf:
+            wf.write_files(archive_root)
+
+        # Add to 'Distribution.dist_files' so that the "upload" command works
+        getattr(self.distribution, 'dist_files', []).append(
+            ('bdist_wheel', get_python_version(), wheel_path))
+
+        if not self.keep_temp:
+            logger.info('removing %s', self.bdist_dir)
+            if not self.dry_run:
+                rmtree(self.bdist_dir)
+
+    def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel_version + ')'):
+        from email.message import Message
+        msg = Message()
+        msg['Wheel-Version'] = '1.0'  # of the spec
+        msg['Generator'] = generator
+        msg['Root-Is-Purelib'] = str(self.root_is_pure).lower()
+        if self.build_number is not None:
+            msg['Build'] = self.build_number
+
+        # Doesn't work for bdist_wininst
+        impl_tag, abi_tag, plat_tag = self.get_tag()
+        for impl in impl_tag.split('.'):
+            for abi in abi_tag.split('.'):
+                for plat in plat_tag.split('.'):
+                    msg['Tag'] = '-'.join((impl, abi, plat))
+
+        wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
+        logger.info('creating %s', wheelfile_path)
+        with open(wheelfile_path, 'w') as f:
+            Generator(f, maxheaderlen=0).flatten(msg)
+
+    def _ensure_relative(self, path):
+        # copied from dir_util, deleted
+        drive, path = os.path.splitdrive(path)
+        if path[0:1] == os.sep:
+            path = drive + path[1:]
+        return path
+
+    @property
+    def license_paths(self):
+        metadata = self.distribution.get_option_dict('metadata')
+        files = set()
+        patterns = sorted({
+            option for option in metadata.get('license_files', ('', ''))[1].split()
+        })
+
+        if 'license_file' in metadata:
+            warn('The "license_file" option is deprecated. Use "license_files" instead.',
+                 DeprecationWarning)
+            files.add(metadata['license_file'][1])
+
+        if 'license_file' not in metadata and 'license_files' not in metadata:
+            patterns = ('LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*')
+
+        for pattern in patterns:
+            for path in iglob(pattern):
+                if path not in files and os.path.isfile(path):
+                    logger.info('adding license file "%s" (matched pattern "%s")', path, pattern)
+                    files.add(path)
+
+        return files
+
+    def egg2dist(self, egginfo_path, distinfo_path):
+        """Convert an .egg-info directory into a .dist-info directory"""
+        def adios(p):
+            """Appropriately delete directory, file or link."""
+            if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
+                shutil.rmtree(p)
+            elif os.path.exists(p):
+                os.unlink(p)
+
+        adios(distinfo_path)
+
+        if not os.path.exists(egginfo_path):
+            # There is no egg-info. This is probably because the egg-info
+            # file/directory is not named matching the distribution name used
+            # to name the archive file. Check for this case and report
+            # accordingly.
+            import glob
+            pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
+            possible = glob.glob(pat)
+            err = "Egg metadata expected at %s but not found" % (egginfo_path,)
+            if possible:
+                alt = os.path.basename(possible[0])
+                err += " (%s found - possible misnamed archive file?)" % (alt,)
+
+            raise ValueError(err)
+
+        if os.path.isfile(egginfo_path):
+            # .egg-info is a single file
+            pkginfo_path = egginfo_path
+            pkg_info = pkginfo_to_metadata(egginfo_path, egginfo_path)
+            os.mkdir(distinfo_path)
+        else:
+            # .egg-info is a directory
+            pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
+            pkg_info = pkginfo_to_metadata(egginfo_path, pkginfo_path)
+
+            # ignore common egg metadata that is useless to wheel
+            shutil.copytree(egginfo_path, distinfo_path,
+                            ignore=lambda x, y: {'PKG-INFO', 'requires.txt', 'SOURCES.txt',
+                                                 'not-zip-safe'}
+                            )
+
+            # delete dependency_links if it is only whitespace
+            dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
+            with open(dependency_links_path, 'r') as dependency_links_file:
+                dependency_links = dependency_links_file.read().strip()
+            if not dependency_links:
+                adios(dependency_links_path)
+
+        write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)
+
+        for license_path in self.license_paths:
+            filename = os.path.basename(license_path)
+            shutil.copy(license_path, os.path.join(distinfo_path, filename))
+
+        adios(egginfo_path)
diff --git a/lib/python3.7/site-packages/wheel/cli/__init__.py b/lib/python3.7/site-packages/wheel/cli/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..95740bfb650514f6b17005b4bc8220858ebdfdf4
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel/cli/__init__.py
@@ -0,0 +1,88 @@
+"""
+Wheel command-line utility.
+"""
+
+from __future__ import print_function
+
+import argparse
+import os
+import sys
+
+
+def require_pkgresources(name):
+    try:
+        import pkg_resources  # noqa: F401
+    except ImportError:
+        raise RuntimeError("'{0}' needs pkg_resources (part of setuptools).".format(name))
+
+
+class WheelError(Exception):
+    pass
+
+
+def unpack_f(args):
+    from .unpack import unpack
+    unpack(args.wheelfile, args.dest)
+
+
+def pack_f(args):
+    from .pack import pack
+    pack(args.directory, args.dest_dir, args.build_number)
+
+
+def convert_f(args):
+    from .convert import convert
+    convert(args.files, args.dest_dir, args.verbose)
+
+
+def version_f(args):
+    from .. import __version__
+    print("wheel %s" % __version__)
+
+
+def parser():
+    p = argparse.ArgumentParser()
+    s = p.add_subparsers(help="commands")
+
+    unpack_parser = s.add_parser('unpack', help='Unpack wheel')
+    unpack_parser.add_argument('--dest', '-d', help='Destination directory',
+                               default='.')
+    unpack_parser.add_argument('wheelfile', help='Wheel file')
+    unpack_parser.set_defaults(func=unpack_f)
+
+    repack_parser = s.add_parser('pack', help='Repack wheel')
+    repack_parser.add_argument('directory', help='Root directory of the unpacked wheel')
+    repack_parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
+                               help="Directory to store the wheel (default %(default)s)")
+    repack_parser.add_argument('--build-number', help="Build tag to use in the wheel name")
+    repack_parser.set_defaults(func=pack_f)
+
+    convert_parser = s.add_parser('convert', help='Convert egg or wininst to wheel')
+    convert_parser.add_argument('files', nargs='*', help='Files to convert')
+    convert_parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
+                                help="Directory to store wheels (default %(default)s)")
+    convert_parser.add_argument('--verbose', '-v', action='store_true')
+    convert_parser.set_defaults(func=convert_f)
+
+    version_parser = s.add_parser('version', help='Print version and exit')
+    version_parser.set_defaults(func=version_f)
+
+    help_parser = s.add_parser('help', help='Show this help')
+    help_parser.set_defaults(func=lambda args: p.print_help())
+
+    return p
+
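+
+# Illustrative command-line usage (assumption: hypothetical file names):
+#
+#   wheel unpack dist/foo-1.0-py3-none-any.whl --dest /tmp
+#   wheel pack /tmp/foo-1.0 --dest-dir dist --build-number 1
+#   wheel convert foo-1.0-py2.7.egg --dest-dir dist
+#   wheel version
+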
+
+def main():
+    p = parser()
+    args = p.parse_args()
+    if not hasattr(args, 'func'):
+        p.print_help()
+    else:
+        try:
+            args.func(args)
+            return 0
+        except WheelError as e:
+            print(e, file=sys.stderr)
+
+    return 1
diff --git a/lib/python3.7/site-packages/wheel/cli/__pycache__/__init__.cpython-37.pyc b/lib/python3.7/site-packages/wheel/cli/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..27e26c8559c58c195ad6733f5f53221b3a463424
Binary files /dev/null and b/lib/python3.7/site-packages/wheel/cli/__pycache__/__init__.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/wheel/cli/__pycache__/convert.cpython-37.pyc b/lib/python3.7/site-packages/wheel/cli/__pycache__/convert.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b8885d2f9e709b0d8f1f450df2c48620d078972
Binary files /dev/null and b/lib/python3.7/site-packages/wheel/cli/__pycache__/convert.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/wheel/cli/__pycache__/pack.cpython-37.pyc b/lib/python3.7/site-packages/wheel/cli/__pycache__/pack.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..523de31af1c87cc678525a006b6b65a1827add9f
Binary files /dev/null and b/lib/python3.7/site-packages/wheel/cli/__pycache__/pack.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/wheel/cli/__pycache__/unpack.cpython-37.pyc b/lib/python3.7/site-packages/wheel/cli/__pycache__/unpack.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c93151b3383ab5b96c6a2e8ac69d8397e552692
Binary files /dev/null and b/lib/python3.7/site-packages/wheel/cli/__pycache__/unpack.cpython-37.pyc differ
diff --git a/lib/python3.7/site-packages/wheel/cli/convert.py b/lib/python3.7/site-packages/wheel/cli/convert.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1a793a664adde6e36d4cab50a44380fa2a88eda
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel/cli/convert.py
@@ -0,0 +1,269 @@
+import os.path
+import re
+import shutil
+import sys
+import tempfile
+import zipfile
+from distutils import dist
+from glob import iglob
+
+from ..bdist_wheel import bdist_wheel
+from ..wheelfile import WheelFile
+from . import WheelError, require_pkgresources
+
+egg_info_re = re.compile(r'''
+    (?P<name>.+?)-(?P<ver>.+?)
+    (-(?P<pyver>py\d\.\d)
+     (-(?P<arch>.+?))?
+    )?.egg$''', re.VERBOSE)
+
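+
+# Illustrative sketch (assumption: hypothetical helper): how the regex above
+# decomposes a typical egg filename.
+def _example_egg_name():
+    match = egg_info_re.match('pycparser-2.19-py2.7.egg')
+    # -> {'name': 'pycparser', 'ver': '2.19', 'pyver': 'py2.7', 'arch': None}
+    return match.groupdict()
+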
+
+class _bdist_wheel_tag(bdist_wheel):
+    # Allow the client to override the default generated wheel tag.
+    # The default bdist_wheel implementation uses the python and abi tags
+    # of the running python process.  This is not suitable for
+    # generating/repackaging prebuilt binaries.
+
+    full_tag_supplied = False
+    full_tag = None  # None or a (pytag, soabitag, plattag) triple
+
+    def get_tag(self):
+        if self.full_tag_supplied and self.full_tag is not None:
+            return self.full_tag
+        else:
+            return bdist_wheel.get_tag(self)
+
+
+def egg2wheel(egg_path, dest_dir):
+    filename = os.path.basename(egg_path)
+    match = egg_info_re.match(filename)
+    if not match:
+        raise WheelError('Invalid egg file name: {}'.format(filename))
+
+    egg_info = match.groupdict()
+    dir = tempfile.mkdtemp(suffix="_e2w")
+    if os.path.isfile(egg_path):
+        # assume we have a bdist_egg otherwise
+        with zipfile.ZipFile(egg_path) as egg:
+            egg.extractall(dir)
+    else:
+        # support buildout-style installed eggs directories
+        for pth in os.listdir(egg_path):
+            src = os.path.join(egg_path, pth)
+            if os.path.isfile(src):
+                shutil.copy2(src, dir)
+            else:
+                shutil.copytree(src, os.path.join(dir, pth))
+
+    pyver = egg_info['pyver']
+    if pyver:
+        pyver = egg_info['pyver'] = pyver.replace('.', '')
+
+    arch = (egg_info['arch'] or 'any').replace('.', '_').replace('-', '_')
+
+    # assume all binary eggs are for CPython
+    abi = 'cp' + pyver[2:] if arch != 'any' else 'none'
+
+    root_is_purelib = egg_info['arch'] is None
+    if root_is_purelib:
+        bw = bdist_wheel(dist.Distribution())
+    else:
+        bw = _bdist_wheel_tag(dist.Distribution())
+
+    bw.root_is_pure = root_is_purelib
+    bw.python_tag = pyver
+    bw.plat_name_supplied = True
+    bw.plat_name = egg_info['arch'] or 'any'
+    if not root_is_purelib:
+        bw.full_tag_supplied = True
+        bw.full_tag = (pyver, abi, arch)
+
+    dist_info_dir = os.path.join(dir, '{name}-{ver}.dist-info'.format(**egg_info))
+    bw.egg2dist(os.path.join(dir, 'EGG-INFO'), dist_info_dir)
+    bw.write_wheelfile(dist_info_dir, generator='egg2wheel')
+    wheel_name = '{name}-{ver}-{pyver}-{}-{}.whl'.format(abi, arch, **egg_info)
+    with WheelFile(os.path.join(dest_dir, wheel_name), 'w') as wf:
+        wf.write_files(dir)
+
+    shutil.rmtree(dir)
+
+
+def parse_wininst_info(wininfo_name, egginfo_name):
+    """Extract metadata from filenames.
+
+    Extracts the four metadata items needed (name, version, pyversion, arch) from
+    the installer filename and the name of the egg-info directory embedded in
+    the zipfile (if any).
+
+    The egginfo filename has the format::
+
+        name-ver(-pyver)(-arch).egg-info
+
+    The installer filename has the format::
+
+        name-ver.arch(-pyver).exe
+
+    Some things to note:
+
+    1. The installer filename is not definitive. An installer can be renamed
+       and work perfectly well as an installer. So more reliable data should
+       be used whenever possible.
+    2. The egg-info data should be preferred for the name and version, because
+       these come straight from the distutils metadata, and are mandatory.
+    3. The pyver from the egg-info data should be ignored, as it is
+       constructed from the version of Python used to build the installer,
+       which is irrelevant - the installer filename is correct here (even to
+       the point that when it's not there, any version is implied).
+    4. The architecture must be taken from the installer filename, as it is
+       not included in the egg-info data.
+    5. Architecture-neutral installers still have an architecture because the
+       installer format itself (being executable) is architecture-specific. We
+       should therefore ignore the architecture if the content is pure-python.
+    """
+
+    egginfo = None
+    if egginfo_name:
+        egginfo = egg_info_re.search(egginfo_name)
+        if not egginfo:
+            raise ValueError("Egg info filename %s is not valid" % (egginfo_name,))
+
+    # Parse the wininst filename
+    # 1. Distribution name (up to the first '-')
+    w_name, sep, rest = wininfo_name.partition('-')
+    if not sep:
+        raise ValueError("Installer filename %s is not valid" % (wininfo_name,))
+
+    # Strip '.exe'
+    rest = rest[:-4]
+    # 2. Python version (from the last '-', must start with 'py')
+    rest2, sep, w_pyver = rest.rpartition('-')
+    if sep and w_pyver.startswith('py'):
+        rest = rest2
+        w_pyver = w_pyver.replace('.', '')
+    else:
+        # Not version specific - use py2.py3. While it is possible that
+        # pure-Python code is not compatible with both Python 2 and 3, there
+        # is no way of knowing from the wininst format, so we assume the best
+        # here (the user can always manually rename the wheel to be more
+        # restrictive if needed).
+        w_pyver = 'py2.py3'
+    # 3. Version and architecture
+    w_ver, sep, w_arch = rest.rpartition('.')
+    if not sep:
+        raise ValueError("Installer filename %s is not valid" % (wininfo_name,))
+
+    if egginfo:
+        w_name = egginfo.group('name')
+        w_ver = egginfo.group('ver')
+
+    return {'name': w_name, 'ver': w_ver, 'arch': w_arch, 'pyver': w_pyver}
+
+
+def wininst2wheel(path, dest_dir):
+    with zipfile.ZipFile(path) as bdw:
+        # Search for egg-info in the archive
+        egginfo_name = None
+        for filename in bdw.namelist():
+            if '.egg-info' in filename:
+                egginfo_name = filename
+                break
+
+        info = parse_wininst_info(os.path.basename(path), egginfo_name)
+
+        root_is_purelib = True
+        for zipinfo in bdw.infolist():
+            if zipinfo.filename.startswith('PLATLIB'):
+                root_is_purelib = False
+                break
+        if root_is_purelib:
+            paths = {'purelib': ''}
+        else:
+            paths = {'platlib': ''}
+
+        dist_info = "%(name)s-%(ver)s" % info
+        datadir = "%s.data/" % dist_info
+
+        # rewrite paths to trick ZipFile into extracting an egg
+        # XXX grab wininst .ini - between .exe, padding, and first zip file.
+        members = []
+        egginfo_name = ''
+        for zipinfo in bdw.infolist():
+            key, basename = zipinfo.filename.split('/', 1)
+            key = key.lower()
+            basepath = paths.get(key, None)
+            if basepath is None:
+                basepath = datadir + key.lower() + '/'
+            oldname = zipinfo.filename
+            newname = basepath + basename
+            zipinfo.filename = newname
+            del bdw.NameToInfo[oldname]
+            bdw.NameToInfo[newname] = zipinfo
+            # Collect member names, but omit '' (from an entry like "PLATLIB/")
+            if newname:
+                members.append(newname)
+            # Remember egg-info name for the egg2dist call below
+            if not egginfo_name:
+                if newname.endswith('.egg-info'):
+                    egginfo_name = newname
+                elif '.egg-info/' in newname:
+                    egginfo_name, sep, _ = newname.rpartition('/')
+        dir = tempfile.mkdtemp(suffix="_b2w")
+        bdw.extractall(dir, members)
+
+    # egg2wheel
+    abi = 'none'
+    pyver = info['pyver']
+    arch = (info['arch'] or 'any').replace('.', '_').replace('-', '_')
+    # Wininst installers always have arch even if they are not
+    # architecture-specific (because the format itself is).
+    # So, assume the content is architecture-neutral if root is purelib.
+    if root_is_purelib:
+        arch = 'any'
+    # If the installer is architecture-specific, it's almost certainly also
+    # CPython-specific.
+    if arch != 'any':
+        pyver = pyver.replace('py', 'cp')
+    wheel_name = '-'.join((dist_info, pyver, abi, arch))
+    if root_is_purelib:
+        bw = bdist_wheel(dist.Distribution())
+    else:
+        bw = _bdist_wheel_tag(dist.Distribution())
+
+    bw.root_is_pure = root_is_purelib
+    bw.python_tag = pyver
+    bw.plat_name_supplied = True
+    bw.plat_name = info['arch'] or 'any'
+
+    if not root_is_purelib:
+        bw.full_tag_supplied = True
+        bw.full_tag = (pyver, abi, arch)
+
+    dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info)
+    bw.egg2dist(os.path.join(dir, egginfo_name), dist_info_dir)
+    bw.write_wheelfile(dist_info_dir, generator='wininst2wheel')
+
+    wheel_path = os.path.join(dest_dir, wheel_name + '.whl')  # WheelFile requires the .whl suffix
+    with WheelFile(wheel_path, 'w') as wf:
+        wf.write_files(dir)
+
+    shutil.rmtree(dir)
+
+
+def convert(files, dest_dir, verbose):
+    # Only support wheel convert if pkg_resources is present
+    require_pkgresources('wheel convert')
+
+    for pat in files:
+        for installer in iglob(pat):
+            if os.path.splitext(installer)[1] == '.egg':
+                conv = egg2wheel
+            else:
+                conv = wininst2wheel
+
+            if verbose:
+                print("{}... ".format(installer))
+                sys.stdout.flush()
+
+            conv(installer, dest_dir)
+            if verbose:
+                print("OK")
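The egg filename parsing above is the crux of egg2wheel. A minimal sketch of what `egg_info_re` extracts, using a hypothetical bdist_egg filename:

```python
# Standalone sketch of convert.py's egg_info_re; the filename is hypothetical.
import re

egg_info_re = re.compile(r'''
    (?P<name>.+?)-(?P<ver>.+?)
    (-(?P<pyver>py\d\.\d)
     (-(?P<arch>.+?))?
    )?.egg$''', re.VERBOSE)

match = egg_info_re.match('demo-1.2.0-py3.7-linux-x86_64.egg')
print(match.groupdict())
# {'name': 'demo', 'ver': '1.2.0', 'pyver': 'py3.7', 'arch': 'linux-x86_64'}
```

For a pure-Python egg such as 'demo-1.2.0-py3.7.egg', 'arch' comes back None and the resulting wheel is tagged 'any'.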
diff --git a/lib/python3.7/site-packages/wheel/cli/pack.py b/lib/python3.7/site-packages/wheel/cli/pack.py
new file mode 100644
index 0000000000000000000000000000000000000000..af6e81c4e3dfd188fa51569cbd139ddc8f48871a
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel/cli/pack.py
@@ -0,0 +1,58 @@
+from __future__ import print_function
+
+import os.path
+import re
+import sys
+
+from wheel.cli import WheelError
+from wheel.wheelfile import WheelFile
+
+DIST_INFO_RE = re.compile(r"^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))\.dist-info$")
+
+
+def pack(directory, dest_dir, build_number):
+    """Repack a previously unpacked wheel directory into a new wheel file.
+
+    The .dist-info/WHEEL file must contain one or more tags so that the target
+    wheel file name can be determined.
+
+    :param directory: The unpacked wheel directory
+    :param dest_dir: Destination directory (defaults to the current directory)
+    :param build_number: Optional build number to embed in the wheel filename
+    """
+    # Find the .dist-info directory
+    dist_info_dirs = [fn for fn in os.listdir(directory)
+                      if os.path.isdir(os.path.join(directory, fn)) and DIST_INFO_RE.match(fn)]
+    if len(dist_info_dirs) > 1:
+        raise WheelError('Multiple .dist-info directories found in {}'.format(directory))
+    elif not dist_info_dirs:
+        raise WheelError('No .dist-info directories found in {}'.format(directory))
+
+    # Determine the target wheel filename
+    dist_info_dir = dist_info_dirs[0]
+    name_version = DIST_INFO_RE.match(dist_info_dir).group('namever')
+
+    # Add the build number if specified
+    if build_number:
+        name_version += '-' + build_number
+
+    # Read the tags from .dist-info/WHEEL
+    with open(os.path.join(directory, dist_info_dir, 'WHEEL')) as f:
+        tags = [line.split(' ')[1].rstrip() for line in f if line.startswith('Tag: ')]
+        if not tags:
+            raise WheelError('No tags present in {}/WHEEL; cannot determine target wheel filename'
+                             .format(dist_info_dir))
+
+    # Reassemble the tags for the wheel file
+    impls = sorted({tag.split('-')[0] for tag in tags})
+    abivers = sorted({tag.split('-')[1] for tag in tags})
+    platforms = sorted({tag.split('-')[2] for tag in tags})
+    tagline = '-'.join(['.'.join(impls), '.'.join(abivers), '.'.join(platforms)])
+
+    # Repack the wheel
+    wheel_path = os.path.join(dest_dir, '{}-{}.whl'.format(name_version, tagline))
+    with WheelFile(wheel_path, 'w') as wf:
+        print("Repacking wheel as {}...".format(wheel_path), end='')
+        sys.stdout.flush()
+        wf.write_files(directory)
+
+    print('OK')
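The tag-folding step in pack() is easiest to see with concrete values; a small sketch using hypothetical tags read from a WHEEL file:

```python
# Sketch of pack()'s tag folding: several "Tag:" lines collapse into one
# compressed pyver-abi-platform triple for the output filename.
tags = ['cp37-cp37m-manylinux1_x86_64', 'cp37-cp37m-manylinux1_i686']

impls = sorted({tag.split('-')[0] for tag in tags})
abivers = sorted({tag.split('-')[1] for tag in tags})
platforms = sorted({tag.split('-')[2] for tag in tags})
tagline = '-'.join(['.'.join(impls), '.'.join(abivers), '.'.join(platforms)])
print(tagline)  # cp37-cp37m-manylinux1_i686.manylinux1_x86_64
```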
diff --git a/lib/python3.7/site-packages/wheel/cli/unpack.py b/lib/python3.7/site-packages/wheel/cli/unpack.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e9857a35088ebec8d0a51c4be09caa66320435e
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel/cli/unpack.py
@@ -0,0 +1,25 @@
+from __future__ import print_function
+
+import os.path
+import sys
+
+from ..wheelfile import WheelFile
+
+
+def unpack(path, dest='.'):
+    """Unpack a wheel.
+
+    Wheel content will be unpacked to {dest}/{name}-{ver}, where {name}
+    is the package name and {ver} its version.
+
+    :param path: The path to the wheel.
+    :param dest: Destination directory (defaults to the current directory).
+    """
+    with WheelFile(path) as wf:
+        namever = wf.parsed_filename.group('namever')
+        destination = os.path.join(dest, namever)
+        print("Unpacking to: {}...".format(destination), end='')
+        sys.stdout.flush()
+        wf.extractall(destination)
+
+    print('OK')
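Usage is a one-liner; a hedged sketch, assuming a wheel named mypkg-1.0-py3-none-any.whl (a made-up filename) sits in the current directory:

```python
# Hypothetical usage of unpack(); the wheel filename is an assumption.
from wheel.cli.unpack import unpack

unpack('mypkg-1.0-py3-none-any.whl', dest='.')
# Prints "Unpacking to: ./mypkg-1.0...OK" and leaves the extracted tree there.
```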
diff --git a/lib/python3.7/site-packages/wheel/metadata.py b/lib/python3.7/site-packages/wheel/metadata.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab0c07e5b43475565e1c7e0353524ea22a287812
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel/metadata.py
@@ -0,0 +1,141 @@
+"""
+Tools for converting old- to new-style metadata.
+"""
+
+import os.path
+import re
+import textwrap
+
+import pkg_resources
+
+from .pkginfo import read_pkg_info
+
+# Wheel itself is probably the only program that uses non-extras markers
+# in METADATA/PKG-INFO. Support its syntax with the extra at the end only.
+EXTRA_RE = re.compile(
+    r"""^(?P<package>.*?)(;\s*(?P<condition>.*?)(extra == '(?P<extra>.*?)')?)$""")
+
+
+def requires_to_requires_dist(requirement):
+    """Return the version specifier for a requirement in PEP 345/566 fashion."""
+    if getattr(requirement, 'url', None):
+        return " @ " + requirement.url
+
+    requires_dist = []
+    for op, ver in requirement.specs:
+        requires_dist.append(op + ver)
+    if not requires_dist:
+        return ''
+    return " (%s)" % ','.join(sorted(requires_dist))
+
+
+def convert_requirements(requirements):
+    """Yield Requires-Dist: strings for parsed requirements strings."""
+    for req in requirements:
+        parsed_requirement = pkg_resources.Requirement.parse(req)
+        spec = requires_to_requires_dist(parsed_requirement)
+        extras = ",".join(sorted(parsed_requirement.extras))
+        if extras:
+            extras = "[%s]" % extras
+        yield (parsed_requirement.project_name + extras + spec)
+
+
+def generate_requirements(extras_require):
+    """
+    Convert requirements from a setup()-style dictionary to ('Requires-Dist', 'requirement')
+    and ('Provides-Extra', 'extra') tuples.
+
+    extras_require is a dictionary of {extra: [requirements]} as passed to setup(),
+    using the empty extra {'': [requirements]} to hold install_requires.
+    """
+    for extra, depends in extras_require.items():
+        condition = ''
+        extra = extra or ''
+        if ':' in extra:  # setuptools extra:condition syntax
+            extra, condition = extra.split(':', 1)
+
+        extra = pkg_resources.safe_extra(extra)
+        if extra:
+            yield 'Provides-Extra', extra
+            if condition:
+                condition = "(" + condition + ") and "
+            condition += "extra == '%s'" % extra
+
+        if condition:
+            condition = ' ; ' + condition
+
+        for new_req in convert_requirements(depends):
+            yield 'Requires-Dist', new_req + condition
+
+
+def pkginfo_to_metadata(egg_info_path, pkginfo_path):
+    """
+    Convert .egg-info directory with PKG-INFO to the Metadata 2.1 format
+    """
+    pkg_info = read_pkg_info(pkginfo_path)
+    pkg_info.replace_header('Metadata-Version', '2.1')
+    # Those will be regenerated from `requires.txt`.
+    del pkg_info['Provides-Extra']
+    del pkg_info['Requires-Dist']
+    requires_path = os.path.join(egg_info_path, 'requires.txt')
+    if os.path.exists(requires_path):
+        with open(requires_path) as requires_file:
+            requires = requires_file.read()
+
+        parsed_requirements = sorted(pkg_resources.split_sections(requires),
+                                     key=lambda x: x[0] or '')
+        for extra, reqs in parsed_requirements:
+            for key, value in generate_requirements({extra: reqs}):
+                if (key, value) not in pkg_info.items():
+                    pkg_info[key] = value
+
+    description = pkg_info['Description']
+    if description:
+        pkg_info.set_payload(dedent_description(pkg_info))
+        del pkg_info['Description']
+
+    return pkg_info
+
+
+def pkginfo_unicode(pkg_info, field):
+    """Hack to coax Unicode out of an email Message() - Python 3.3+"""
+    text = pkg_info[field]
+    field = field.lower()
+    if not isinstance(text, str):
+        if not hasattr(pkg_info, 'raw_items'):  # Python 3.2
+            return str(text)
+        for item in pkg_info.raw_items():
+            if item[0].lower() == field:
+                text = item[1].encode('ascii', 'surrogateescape') \
+                    .decode('utf-8')
+                break
+
+    return text
+
+
+def dedent_description(pkg_info):
+    """
+    Dedent and convert pkg_info['Description'] to Unicode.
+    """
+    description = pkg_info['Description']
+
+    # Python 3 Unicode handling, sorta.
+    surrogates = False
+    if not isinstance(description, str):
+        surrogates = True
+        description = pkginfo_unicode(pkg_info, 'Description')
+
+    description_lines = description.splitlines()
+    description_dedent = '\n'.join(
+        # if the first line of long_description is blank,
+        # the first line here will be indented.
+        (description_lines[0].lstrip(),
+         textwrap.dedent('\n'.join(description_lines[1:])),
+         '\n'))
+
+    if surrogates:
+        description_dedent = description_dedent \
+            .encode("utf8") \
+            .decode("ascii", "surrogateescape")
+
+    return description_dedent
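A short sketch of generate_requirements() turning a setup()-style extras_require mapping into Metadata 2.1 header tuples; the package names and the extra are made up, and pkg_resources must be importable:

```python
from wheel.metadata import generate_requirements

extras = {'': ['requests>=2.0'], 'test:python_version<"3"': ['mock']}
for key, value in generate_requirements(extras):
    print('{}: {}'.format(key, value))
# Requires-Dist: requests (>=2.0)
# Provides-Extra: test
# Requires-Dist: mock ; (python_version<"3") and extra == 'test'
```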
diff --git a/lib/python3.7/site-packages/wheel/pep425tags.py b/lib/python3.7/site-packages/wheel/pep425tags.py
new file mode 100644
index 0000000000000000000000000000000000000000..13b507310d0d16a21888b7491391ff3899513c10
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel/pep425tags.py
@@ -0,0 +1,185 @@
+"""Generate and work with PEP 425 Compatibility Tags."""
+
+import distutils.util
+import platform
+import sys
+import sysconfig
+import warnings
+
+try:
+    from importlib.machinery import get_all_suffixes
+except ImportError:
+    from imp import get_suffixes as get_all_suffixes
+
+
+def get_config_var(var):
+    try:
+        return sysconfig.get_config_var(var)
+    except IOError as e:  # pip Issue #1074
+        warnings.warn("{0}".format(e), RuntimeWarning)
+        return None
+
+
+def get_abbr_impl():
+    """Return abbreviated implementation name."""
+    impl = platform.python_implementation()
+    if impl == 'PyPy':
+        return 'pp'
+    elif impl == 'Jython':
+        return 'jy'
+    elif impl == 'IronPython':
+        return 'ip'
+    elif impl == 'CPython':
+        return 'cp'
+
+    raise LookupError('Unknown Python implementation: ' + impl)
+
+
+def get_impl_ver():
+    """Return implementation version."""
+    impl_ver = get_config_var("py_version_nodot")
+    if not impl_ver or get_abbr_impl() == 'pp':
+        impl_ver = ''.join(map(str, get_impl_version_info()))
+    return impl_ver
+
+
+def get_impl_version_info():
+    """Return sys.version_info-like tuple for use in decrementing the minor
+    version."""
+    if get_abbr_impl() == 'pp':
+        # as per https://github.com/pypa/pip/issues/2882
+        return (sys.version_info[0], sys.pypy_version_info.major,
+                sys.pypy_version_info.minor)
+    else:
+        return sys.version_info[0], sys.version_info[1]
+
+
+def get_flag(var, fallback, expected=True, warn=True):
+    """Use a fallback method for determining SOABI flags if the needed config
+    var is unset or unavailable."""
+    val = get_config_var(var)
+    if val is None:
+        if warn:
+            warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
+                          "be incorrect".format(var), RuntimeWarning, 2)
+        return fallback()
+    return val == expected
+
+
+def get_abi_tag():
+    """Return the ABI tag based on SOABI (if available) or emulate SOABI
+    (CPython 2, PyPy)."""
+    soabi = get_config_var('SOABI')
+    impl = get_abbr_impl()
+    if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
+        d = ''
+        m = ''
+        u = ''
+        if get_flag('Py_DEBUG',
+                    lambda: hasattr(sys, 'gettotalrefcount'),
+                    warn=(impl == 'cp')):
+            d = 'd'
+        if get_flag('WITH_PYMALLOC',
+                    lambda: impl == 'cp',
+                    warn=(impl == 'cp')):
+            m = 'm'
+        if get_flag('Py_UNICODE_SIZE',
+                    lambda: sys.maxunicode == 0x10ffff,
+                    expected=4,
+                    warn=(impl == 'cp' and
+                          sys.version_info < (3, 3))) \
+                and sys.version_info < (3, 3):
+            u = 'u'
+        abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u)
+    elif soabi and soabi.startswith('cpython-'):
+        abi = 'cp' + soabi.split('-')[1]
+    elif soabi:
+        abi = soabi.replace('.', '_').replace('-', '_')
+    else:
+        abi = None
+    return abi
+
+
+def get_platform():
+    """Return our platform name, e.g. 'win32' or 'linux_x86_64'"""
+    # XXX remove distutils dependency
+    result = distutils.util.get_platform().replace('.', '_').replace('-', '_')
+    if result == "linux_x86_64" and sys.maxsize == 2147483647:
+        # pip pull request #3497
+        result = "linux_i686"
+    return result
+
+
+def get_supported(versions=None, supplied_platform=None):
+    """Return a list of supported tags for each version specified in
+    `versions`.
+
+    :param versions: a list of string versions, of the form ["33", "32"],
+        or None. The first version will be assumed to support our ABI.
+    """
+    supported = []
+
+    # Versions must be listed in order of preference, most preferred first
+    if versions is None:
+        versions = []
+        version_info = get_impl_version_info()
+        major = version_info[:-1]
+        # Support all previous minor Python versions.
+        for minor in range(version_info[-1], -1, -1):
+            versions.append(''.join(map(str, major + (minor,))))
+
+    impl = get_abbr_impl()
+
+    abis = []
+
+    abi = get_abi_tag()
+    if abi:
+        abis[0:0] = [abi]
+
+    abi3s = set()
+    for suffix in get_all_suffixes():
+        if suffix[0].startswith('.abi'):
+            abi3s.add(suffix[0].split('.', 2)[1])
+
+    abis.extend(sorted(list(abi3s)))
+
+    abis.append('none')
+
+    platforms = []
+    if supplied_platform:
+        platforms.append(supplied_platform)
+    platforms.append(get_platform())
+
+    # Current version, current API (built specifically for our Python):
+    for abi in abis:
+        for arch in platforms:
+            supported.append(('%s%s' % (impl, versions[0]), abi, arch))
+
+    # abi3 modules compatible with older version of Python
+    for version in versions[1:]:
+        # abi3 was introduced in Python 3.2
+        if version in ('31', '30'):
+            break
+        for abi in abi3s:   # empty set if not Python 3
+            for arch in platforms:
+                supported.append(("%s%s" % (impl, version), abi, arch))
+
+    # No abi / arch, but requires our implementation:
+    for i, version in enumerate(versions):
+        supported.append(('%s%s' % (impl, version), 'none', 'any'))
+        if i == 0:
+            # Tagged specifically as being cross-version compatible
+            # (with just the major version specified)
+            supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
+
+    # Major Python version + platform; e.g. binaries not using the Python API
+    for arch in platforms:
+        supported.append(('py%s' % (versions[0][0]), 'none', arch))
+
+    # No abi / arch, generic Python
+    for i, version in enumerate(versions):
+        supported.append(('py%s' % (version,), 'none', 'any'))
+        if i == 0:
+            supported.append(('py%s' % (version[0]), 'none', 'any'))
+
+    return supported
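A sketch of get_supported() output; the exact list depends on the interpreter and platform, so the values below are only illustrative for CPython 3.7 on 64-bit Linux:

```python
from wheel.pep425tags import get_supported

# Most specific tags come first; the list then degrades toward py3-none-any.
for pyver, abi, plat in get_supported()[:3]:
    print(pyver, abi, plat)
# cp37 cp37m linux_x86_64
# cp37 abi3 linux_x86_64
# cp37 none linux_x86_64
```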
diff --git a/lib/python3.7/site-packages/wheel/pkginfo.py b/lib/python3.7/site-packages/wheel/pkginfo.py
new file mode 100644
index 0000000000000000000000000000000000000000..115be45bdfb5bf1a6c9dca87471c5ca0839f63b1
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel/pkginfo.py
@@ -0,0 +1,43 @@
+"""Tools for reading and writing PKG-INFO / METADATA without caring
+about the encoding."""
+
+from email.parser import Parser
+
+try:
+    unicode
+    _PY3 = False
+except NameError:
+    _PY3 = True
+
+if not _PY3:
+    from email.generator import Generator
+
+    def read_pkg_info_bytes(bytestr):
+        return Parser().parsestr(bytestr)
+
+    def read_pkg_info(path):
+        with open(path, "r") as headers:
+            message = Parser().parse(headers)
+        return message
+
+    def write_pkg_info(path, message):
+        with open(path, 'w') as metadata:
+            Generator(metadata, mangle_from_=False, maxheaderlen=0).flatten(message)
+else:
+    from email.generator import BytesGenerator
+
+    def read_pkg_info_bytes(bytestr):
+        headers = bytestr.decode(encoding="ascii", errors="surrogateescape")
+        message = Parser().parsestr(headers)
+        return message
+
+    def read_pkg_info(path):
+        with open(path, "r",
+                  encoding="ascii",
+                  errors="surrogateescape") as headers:
+            message = Parser().parse(headers)
+        return message
+
+    def write_pkg_info(path, message):
+        with open(path, "wb") as out:
+            BytesGenerator(out, mangle_from_=False, maxheaderlen=0).flatten(message)
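A minimal round-trip sketch: headers parse into an email.message.Message, with surrogateescape keeping non-ASCII bytes intact on Python 3 (the metadata is made up):

```python
from wheel.pkginfo import read_pkg_info_bytes

msg = read_pkg_info_bytes(b'Metadata-Version: 2.1\nName: demo\nVersion: 1.0\n\n')
print(msg['Name'], msg['Version'])  # demo 1.0
```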
diff --git a/lib/python3.7/site-packages/wheel/util.py b/lib/python3.7/site-packages/wheel/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..0afb54a4619b672d360b9afd022bbfaef0f1dd52
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel/util.py
@@ -0,0 +1,46 @@
+import base64
+import io
+import sys
+
+
+if sys.version_info[0] < 3:
+    text_type = unicode  # noqa: F821
+
+    StringIO = io.BytesIO
+
+    def native(s, encoding='utf-8'):
+        if isinstance(s, unicode):
+            return s.encode(encoding)
+        return s
+else:
+    text_type = str
+
+    StringIO = io.StringIO
+
+    def native(s, encoding='utf-8'):
+        if isinstance(s, bytes):
+            return s.decode(encoding)
+        return s
+
+
+def urlsafe_b64encode(data):
+    """urlsafe_b64encode without padding"""
+    return base64.urlsafe_b64encode(data).rstrip(b'=')
+
+
+def urlsafe_b64decode(data):
+    """urlsafe_b64decode without padding"""
+    pad = b'=' * (4 - (len(data) & 3))
+    return base64.urlsafe_b64decode(data + pad)
+
+
+def as_unicode(s):
+    if isinstance(s, bytes):
+        return s.decode('utf-8')
+    return s
+
+
+def as_bytes(s):
+    if isinstance(s, text_type):
+        return s.encode('utf-8')
+    return s
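These unpadded base64 helpers implement the RECORD hash encoding from PEP 427, which strips the trailing '=' padding; a quick self-contained check:

```python
from wheel.util import urlsafe_b64encode, urlsafe_b64decode

digest = b'\x00\xffexample digest bytes'
encoded = urlsafe_b64encode(digest)
assert b'=' not in encoded                    # padding stripped on encode
assert urlsafe_b64decode(encoded) == digest   # re-padded on decode
```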
diff --git a/lib/python3.7/site-packages/wheel/wheelfile.py b/lib/python3.7/site-packages/wheel/wheelfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a1c8d2a0d3a52de7ec42fd9d653ef38d8bd321c
--- /dev/null
+++ b/lib/python3.7/site-packages/wheel/wheelfile.py
@@ -0,0 +1,168 @@
+from __future__ import print_function
+
+import csv
+import hashlib
+import os.path
+import re
+import time
+from collections import OrderedDict
+from distutils import log as logger
+from zipfile import ZIP_DEFLATED, ZipInfo, ZipFile
+
+from wheel.cli import WheelError
+from wheel.util import urlsafe_b64decode, as_unicode, native, urlsafe_b64encode, as_bytes, StringIO
+
+# Non-greedy matching of an optional build number may be too clever (more
+# invalid wheel filenames will match). Separate regex for .dist-info?
+WHEEL_INFO_RE = re.compile(
+    r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.+?))(-(?P<build>\d[^-]*))?
+     -(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)\.whl$""",
+    re.VERBOSE)
+
+
+def get_zipinfo_datetime(timestamp=None):
+    # Some applications need reproducible .whl files, but they can't do this without forcing
+    # the timestamp of the individual ZipInfo objects. See issue #143.
+    timestamp = int(os.environ.get('SOURCE_DATE_EPOCH', timestamp or time.time()))
+    return time.gmtime(timestamp)[0:6]
+
+
+class WheelFile(ZipFile):
+    """A ZipFile derivative class that also reads SHA-256 hashes from
+    .dist-info/RECORD and checks any read files against those.
+    """
+
+    _default_algorithm = hashlib.sha256
+
+    def __init__(self, file, mode='r'):
+        basename = os.path.basename(file)
+        self.parsed_filename = WHEEL_INFO_RE.match(basename)
+        if not basename.endswith('.whl') or self.parsed_filename is None:
+            raise WheelError("Bad wheel filename {!r}".format(basename))
+
+        ZipFile.__init__(self, file, mode, compression=ZIP_DEFLATED, allowZip64=True)
+
+        self.dist_info_path = '{}.dist-info'.format(self.parsed_filename.group('namever'))
+        self.record_path = self.dist_info_path + '/RECORD'
+        self._file_hashes = OrderedDict()
+        self._file_sizes = {}
+        if mode == 'r':
+            # Ignore RECORD and any embedded wheel signatures
+            self._file_hashes[self.record_path] = None, None
+            self._file_hashes[self.record_path + '.jws'] = None, None
+            self._file_hashes[self.record_path + '.p7s'] = None, None
+
+            # Fill in the expected hashes by reading them from RECORD
+            try:
+                record = self.open(self.record_path)
+            except KeyError:
+                raise WheelError('Missing {} file'.format(self.record_path))
+
+            with record:
+                for line in record:
+                    line = line.decode('utf-8')
+                    path, hash_sum, size = line.rsplit(u',', 2)
+                    if hash_sum:
+                        algorithm, hash_sum = hash_sum.split(u'=')
+                        try:
+                            hashlib.new(algorithm)
+                        except ValueError:
+                            raise WheelError('Unsupported hash algorithm: {}'.format(algorithm))
+
+                        if algorithm.lower() in {'md5', 'sha1'}:
+                            raise WheelError(
+                                'Weak hash algorithm ({}) is not permitted by PEP 427'
+                                .format(algorithm))
+
+                        self._file_hashes[path] = (
+                            algorithm, urlsafe_b64decode(hash_sum.encode('ascii')))
+
+    def open(self, name_or_info, mode="r", pwd=None):
+        def _update_crc(newdata, eof=None):
+            if eof is None:
+                eof = ef._eof
+                update_crc_orig(newdata)
+            else:  # Python 2
+                update_crc_orig(newdata, eof)
+
+            running_hash.update(newdata)
+            if eof and running_hash.digest() != expected_hash:
+                raise WheelError("Hash mismatch for file '{}'".format(native(ef_name)))
+
+        ef = ZipFile.open(self, name_or_info, mode, pwd)
+        ef_name = as_unicode(name_or_info.filename if isinstance(name_or_info, ZipInfo)
+                             else name_or_info)
+        if mode == 'r' and not ef_name.endswith('/'):
+            if ef_name not in self._file_hashes:
+                raise WheelError("No hash found for file '{}'".format(native(ef_name)))
+
+            algorithm, expected_hash = self._file_hashes[ef_name]
+            if expected_hash is not None:
+                # Monkey patch the _update_crc method to also check for the hash from RECORD
+                running_hash = hashlib.new(algorithm)
+                update_crc_orig, ef._update_crc = ef._update_crc, _update_crc
+
+        return ef
+
+    def write_files(self, base_dir):
+        logger.info("creating '%s' and adding '%s' to it", self.filename, base_dir)
+        deferred = []
+        for root, dirnames, filenames in os.walk(base_dir):
+            # Sort the directory names so that `os.walk` will walk them in a
+            # defined order on the next iteration.
+            dirnames.sort()
+            for name in sorted(filenames):
+                path = os.path.normpath(os.path.join(root, name))
+                if os.path.isfile(path):
+                    arcname = os.path.relpath(path, base_dir)
+                    if arcname == self.record_path:
+                        pass
+                    elif root.endswith('.dist-info'):
+                        deferred.append((path, arcname))
+                    else:
+                        self.write(path, arcname)
+
+        deferred.sort()
+        for path, arcname in deferred:
+            self.write(path, arcname)
+
+    def write(self, filename, arcname=None, compress_type=None):
+        with open(filename, 'rb') as f:
+            st = os.fstat(f.fileno())
+            data = f.read()
+
+        zinfo = ZipInfo(arcname or filename, date_time=get_zipinfo_datetime(st.st_mtime))
+        zinfo.external_attr = st.st_mode << 16
+        zinfo.compress_type = ZIP_DEFLATED
+        self.writestr(zinfo, data, compress_type)
+
+    def writestr(self, zinfo_or_arcname, bytes, compress_type=None):
+        ZipFile.writestr(self, zinfo_or_arcname, bytes, compress_type)
+        fname = (zinfo_or_arcname.filename if isinstance(zinfo_or_arcname, ZipInfo)
+                 else zinfo_or_arcname)
+        logger.info("adding '%s'", fname)
+        if fname != self.record_path:
+            hash_ = self._default_algorithm(bytes)
+            self._file_hashes[fname] = hash_.name, native(urlsafe_b64encode(hash_.digest()))
+            self._file_sizes[fname] = len(bytes)
+
+    def close(self):
+        # Write RECORD
+        if self.fp is not None and self.mode == 'w' and self._file_hashes:
+            data = StringIO()
+            writer = csv.writer(data, delimiter=',', quotechar='"', lineterminator='\n')
+            writer.writerows((
+                (
+                    fname,
+                    algorithm + "=" + hash_,
+                    self._file_sizes[fname]
+                )
+                for fname, (algorithm, hash_) in self._file_hashes.items()
+            ))
+            writer.writerow((format(self.record_path), "", ""))
+            zinfo = ZipInfo(native(self.record_path), date_time=get_zipinfo_datetime())
+            zinfo.compress_type = ZIP_DEFLATED
+            zinfo.external_attr = 0o664 << 16
+            self.writestr(zinfo, as_bytes(data.getvalue()))
+
+        ZipFile.close(self)
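A hedged end-to-end sketch of WheelFile: writing records a sha256 for every member and emits RECORD on close, and reading re-verifies each member against RECORD (the names and contents here are hypothetical):

```python
import os
import tempfile

from wheel.wheelfile import WheelFile

tmp = tempfile.mkdtemp()
whl = os.path.join(tmp, 'demo-1.0-py3-none-any.whl')

with WheelFile(whl, 'w') as wf:          # close() appends demo-1.0.dist-info/RECORD
    wf.writestr('demo/__init__.py', b"print('hi')\n")

with WheelFile(whl) as wf:               # open() loads expected hashes from RECORD
    print(wf.read('demo/__init__.py'))   # raises WheelError on a hash mismatch
```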
diff --git a/lib/python3.7/site.py b/lib/python3.7/site.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc9887a91f0f71f331abc0b1285e4ccfa3ace3ab
--- /dev/null
+++ b/lib/python3.7/site.py
@@ -0,0 +1,833 @@
+"""Append module search paths for third-party packages to sys.path.
+
+****************************************************************
+* This module is automatically imported during initialization. *
+****************************************************************
+
+In earlier versions of Python (up to 1.5a3), scripts or modules that
+needed to use site-specific modules would place ``import site''
+somewhere near the top of their code.  Because of the automatic
+import, this is no longer necessary (but code that does it still
+works).
+
+This will append site-specific paths to the module search path.  On
+Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
+appends lib/python<version>/site-packages as well as lib/site-python.
+It also supports the Debian convention of
+lib/python<version>/dist-packages.  On other platforms (mainly Mac and
+Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
+but this is unlikely).  The resulting directories, if they exist, are
+appended to sys.path, and also inspected for path configuration files.
+
+FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
+Local addons go into /usr/local/lib/python<version>/site-packages
+(resp. /usr/local/lib/site-python), Debian addons install into
+/usr/{lib,share}/python<version>/dist-packages.
+
+A path configuration file is a file whose name has the form
+<package>.pth; its contents are additional directories (one per line)
+to be added to sys.path.  Non-existing directories (or
+non-directories) are never added to sys.path; no directory is added to
+sys.path more than once.  Blank lines and lines beginning with
+'#' are skipped. Lines starting with 'import' are executed.
+
+For example, suppose sys.prefix and sys.exec_prefix are set to
+/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
+with three subdirectories, foo, bar and spam, and two path
+configuration files, foo.pth and bar.pth.  Assume foo.pth contains the
+following:
+
+  # foo package configuration
+  foo
+  bar
+  bletch
+
+and bar.pth contains:
+
+  # bar package configuration
+  bar
+
+Then the following directories are added to sys.path, in this order:
+
+  /usr/local/lib/python2.X/site-packages/bar
+  /usr/local/lib/python2.X/site-packages/foo
+
+Note that bletch is omitted because it doesn't exist; bar precedes foo
+because bar.pth comes alphabetically before foo.pth; and spam is
+omitted because it is not mentioned in either path configuration file.
+
+After these path manipulations, an attempt is made to import a module
+named sitecustomize, which can perform arbitrary additional
+site-specific customizations.  If this import fails with an
+ImportError exception, it is silently ignored.
+
+"""
+
+import os
+import sys
+
+try:
+    import __builtin__ as builtins
+except ImportError:
+    import builtins
+try:
+    set
+except NameError:
+    from sets import Set as set
+
+# Prefixes for site-packages; add additional prefixes like /usr/local here
+PREFIXES = [sys.prefix, sys.exec_prefix]
+# Enable per user site-packages directory
+# set it to False to disable the feature or True to force the feature
+ENABLE_USER_SITE = None
+# for distutils.commands.install
+USER_SITE = None
+USER_BASE = None
+
+_is_64bit = (getattr(sys, "maxsize", None) or getattr(sys, "maxint")) > 2 ** 32
+_is_pypy = hasattr(sys, "pypy_version_info")
+_is_jython = sys.platform[:4] == "java"
+if _is_jython:
+    ModuleType = type(os)
+
+
+def makepath(*paths):
+    dir = os.path.join(*paths)
+    if _is_jython and (dir == "__classpath__" or dir.startswith("__pyclasspath__")):
+        return dir, dir
+    dir = os.path.abspath(dir)
+    return dir, os.path.normcase(dir)
+
+
+def abs__file__():
+    """Set all modules' __file__ attributes to absolute paths"""
+    for m in sys.modules.values():
+        if (_is_jython and not isinstance(m, ModuleType)) or hasattr(m, "__loader__"):
+            # only modules need the abspath in Jython; don't mess
+            # with a PEP 302-supplied __file__
+            continue
+        f = getattr(m, "__file__", None)
+        if f is None:
+            continue
+        m.__file__ = os.path.abspath(f)
+
+
+def removeduppaths():
+    """Remove duplicate entries from sys.path and make the remaining
+    entries absolute"""
+    # This ensures that the initial path provided by the interpreter contains
+    # only absolute pathnames, even if we're running from the build directory.
+    L = []
+    known_paths = set()
+    for dir in sys.path:
+        # Filter out duplicate paths (on case-insensitive file systems also
+        # if they only differ in case); turn relative paths into absolute
+        # paths.
+        dir, dircase = makepath(dir)
+        if dircase not in known_paths:
+            L.append(dir)
+            known_paths.add(dircase)
+    sys.path[:] = L
+    return known_paths
+
+
+# XXX This should not be part of site.py, since it is needed even when
+# using the -S option for Python.  See http://www.python.org/sf/586680
+def addbuilddir():
+    """Append ./build/lib.<platform> in case we're running in the build dir
+    (especially for Guido :-)"""
+    from distutils.util import get_platform
+
+    s = "build/lib.{}-{:.3}".format(get_platform(), sys.version)
+    if hasattr(sys, "gettotalrefcount"):
+        s += "-pydebug"
+    s = os.path.join(os.path.dirname(sys.path[-1]), s)
+    sys.path.append(s)
+
+
+def _init_pathinfo():
+    """Return a set containing all existing directory entries from sys.path"""
+    d = set()
+    for dir in sys.path:
+        try:
+            if os.path.isdir(dir):
+                dir, dircase = makepath(dir)
+                d.add(dircase)
+        except TypeError:
+            continue
+    return d
+
+
+def addpackage(sitedir, name, known_paths):
+    """Process the .pth file 'name' in 'sitedir': add each listed directory
+    to known_paths, or execute lines that start with 'import'"""
+    if known_paths is None:
+        _init_pathinfo()
+        reset = 1
+    else:
+        reset = 0
+    fullname = os.path.join(sitedir, name)
+    try:
+        f = open(fullname, "r")
+    except IOError:
+        return
+    try:
+        for line in f:
+            if line.startswith("#"):
+                continue
+            if line.startswith("import"):
+                exec(line)
+                continue
+            line = line.rstrip()
+            dir, dircase = makepath(sitedir, line)
+            if dircase not in known_paths and os.path.exists(dir):
+                sys.path.append(dir)
+                known_paths.add(dircase)
+    finally:
+        f.close()
+    if reset:
+        known_paths = None
+    return known_paths
+
+
+def addsitedir(sitedir, known_paths=None):
+    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
+    'sitedir'"""
+    if known_paths is None:
+        known_paths = _init_pathinfo()
+        reset = 1
+    else:
+        reset = 0
+    sitedir, sitedircase = makepath(sitedir)
+    if sitedircase not in known_paths:
+        sys.path.append(sitedir)  # Add path component
+    try:
+        names = os.listdir(sitedir)
+    except os.error:
+        return
+    names.sort()
+    for name in names:
+        if name.endswith(os.extsep + "pth"):
+            addpackage(sitedir, name, known_paths)
+    if reset:
+        known_paths = None
+    return known_paths
+
+
+def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
+    """Add site-packages (and possibly site-python) to sys.path"""
+    prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
+    if exec_prefix != sys_prefix:
+        prefixes.append(os.path.join(exec_prefix, "local"))
+
+    for prefix in prefixes:
+        if prefix:
+            if sys.platform in ("os2emx", "riscos") or _is_jython:
+                sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
+            elif _is_pypy:
+                sitedirs = [os.path.join(prefix, "site-packages")]
+            elif sys.platform == "darwin" and prefix == sys_prefix:
+
+                if prefix.startswith("/System/Library/Frameworks/"):  # Apple's Python
+
+                    sitedirs = [
+                        os.path.join("/Library/Python", sys.version[:3], "site-packages"),
+                        os.path.join(prefix, "Extras", "lib", "python"),
+                    ]
+
+                else:  # any other Python distros on OSX work this way
+                    sitedirs = [os.path.join(prefix, "lib", "python" + sys.version[:3], "site-packages")]
+
+            elif os.sep == "/":
+                sitedirs = [
+                    os.path.join(prefix, "lib", "python" + sys.version[:3], "site-packages"),
+                    os.path.join(prefix, "lib", "site-python"),
+                    os.path.join(prefix, "python" + sys.version[:3], "lib-dynload"),
+                ]
+                lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
+                if os.path.exists(lib64_dir) and os.path.realpath(lib64_dir) not in [
+                    os.path.realpath(p) for p in sitedirs
+                ]:
+                    if _is_64bit:
+                        sitedirs.insert(0, lib64_dir)
+                    else:
+                        sitedirs.append(lib64_dir)
+                try:
+                    # sys.getobjects only available in --with-pydebug build
+                    sys.getobjects
+                    sitedirs.insert(0, os.path.join(sitedirs[0], "debug"))
+                except AttributeError:
+                    pass
+                # Debian-specific dist-packages directories:
+                sitedirs.append(os.path.join(prefix, "local/lib", "python" + sys.version[:3], "dist-packages"))
+                if sys.version[0] == "2":
+                    sitedirs.append(os.path.join(prefix, "lib", "python" + sys.version[:3], "dist-packages"))
+                else:
+                    sitedirs.append(os.path.join(prefix, "lib", "python" + sys.version[0], "dist-packages"))
+                sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
+            else:
+                sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
+            if sys.platform == "darwin":
+                # for framework builds *only* we add the standard Apple
+                # locations. Currently only per-user, but /Library and
+                # /Network/Library could be added too
+                if "Python.framework" in prefix:
+                    home = os.environ.get("HOME")
+                    if home:
+                        sitedirs.append(os.path.join(home, "Library", "Python", sys.version[:3], "site-packages"))
+            for sitedir in sitedirs:
+                if os.path.isdir(sitedir):
+                    addsitedir(sitedir, known_paths)
+    return None
+
+
+def check_enableusersite():
+    """Check if user site directory is safe for inclusion
+
+    The function tests for the command line flag (including environment var),
+    process uid/gid equal to effective uid/gid.
+
+    None: Disabled for security reasons
+    False: Disabled by user (command line option)
+    True: Safe and enabled
+    """
+    if hasattr(sys, "flags") and getattr(sys.flags, "no_user_site", False):
+        return False
+
+    if hasattr(os, "getuid") and hasattr(os, "geteuid"):
+        # check process uid == effective uid
+        if os.geteuid() != os.getuid():
+            return None
+    if hasattr(os, "getgid") and hasattr(os, "getegid"):
+        # check process gid == effective gid
+        if os.getegid() != os.getgid():
+            return None
+
+    return True
+
+
+def addusersitepackages(known_paths):
+    """Add a per-user site-packages directory to sys.path
+
+    Each user has their own Python directory with site-packages in the
+    home directory.
+
+    USER_BASE is the root directory for all Python versions
+
+    USER_SITE is the user specific site-packages directory
+
+    USER_SITE/.. can be used for data.
+    """
+    global USER_BASE, USER_SITE, ENABLE_USER_SITE
+    env_base = os.environ.get("PYTHONUSERBASE", None)
+
+    def joinuser(*args):
+        return os.path.expanduser(os.path.join(*args))
+
+    # if sys.platform in ('os2emx', 'riscos'):
+    #    # Don't know what to put here
+    #    USER_BASE = ''
+    #    USER_SITE = ''
+    if os.name == "nt":
+        base = os.environ.get("APPDATA") or "~"
+        if env_base:
+            USER_BASE = env_base
+        else:
+            USER_BASE = joinuser(base, "Python")
+        USER_SITE = os.path.join(USER_BASE, "Python" + sys.version[0] + sys.version[2], "site-packages")
+    else:
+        if env_base:
+            USER_BASE = env_base
+        else:
+            USER_BASE = joinuser("~", ".local")
+        USER_SITE = os.path.join(USER_BASE, "lib", "python" + sys.version[:3], "site-packages")
+
+    if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
+        addsitedir(USER_SITE, known_paths)
+    if ENABLE_USER_SITE:
+        for dist_libdir in ("lib", "local/lib"):
+            user_site = os.path.join(USER_BASE, dist_libdir, "python" + sys.version[:3], "dist-packages")
+            if os.path.isdir(user_site):
+                addsitedir(user_site, known_paths)
+    return known_paths
+
+
+def setBEGINLIBPATH():
+    """The OS/2 EMX port has optional extension modules that do double duty
+    as DLLs (and must use the .DLL file extension) for other extensions.
+    The library search path needs to be amended so these will be found
+    during module import.  Use BEGINLIBPATH so that these are at the start
+    of the library search path.
+
+    """
+    dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
+    libpath = os.environ["BEGINLIBPATH"].split(";")
+    if libpath[-1]:
+        libpath.append(dllpath)
+    else:
+        libpath[-1] = dllpath
+    os.environ["BEGINLIBPATH"] = ";".join(libpath)
+
+
+def setquit():
+    """Define new built-ins 'quit' and 'exit'.
+    These are simply strings that display a hint on how to exit.
+
+    """
+    if os.sep == ":":
+        eof = "Cmd-Q"
+    elif os.sep == "\\":
+        eof = "Ctrl-Z plus Return"
+    else:
+        eof = "Ctrl-D (i.e. EOF)"
+
+    class Quitter(object):
+        def __init__(self, name):
+            self.name = name
+
+        def __repr__(self):
+            return "Use {}() or {} to exit".format(self.name, eof)
+
+        def __call__(self, code=None):
+            # Shells like IDLE catch the SystemExit, but listen when their
+            # stdin wrapper is closed.
+            try:
+                sys.stdin.close()
+            except:
+                pass
+            raise SystemExit(code)
+
+    builtins.quit = Quitter("quit")
+    builtins.exit = Quitter("exit")
+
+
+class _Printer(object):
+    """interactive prompt objects for printing the license text, a list of
+    contributors and the copyright notice."""
+
+    MAXLINES = 23
+
+    def __init__(self, name, data, files=(), dirs=()):
+        self.__name = name
+        self.__data = data
+        self.__files = files
+        self.__dirs = dirs
+        self.__lines = None
+
+    def __setup(self):
+        if self.__lines:
+            return
+        data = None
+        for dir in self.__dirs:
+            for filename in self.__files:
+                filename = os.path.join(dir, filename)
+                try:
+                    fp = open(filename, "r")
+                    data = fp.read()
+                    fp.close()
+                    break
+                except IOError:
+                    pass
+            if data:
+                break
+        if not data:
+            data = self.__data
+        self.__lines = data.split("\n")
+        self.__linecnt = len(self.__lines)
+
+    def __repr__(self):
+        self.__setup()
+        if len(self.__lines) <= self.MAXLINES:
+            return "\n".join(self.__lines)
+        else:
+            return "Type %s() to see the full %s text" % ((self.__name,) * 2)
+
+    def __call__(self):
+        self.__setup()
+        prompt = "Hit Return for more, or q (and Return) to quit: "
+        lineno = 0
+        while True:
+            try:
+                for i in range(lineno, lineno + self.MAXLINES):
+                    print(self.__lines[i])
+            except IndexError:
+                break
+            else:
+                lineno += self.MAXLINES
+                key = None
+                while key is None:
+                    try:
+                        key = raw_input(prompt)
+                    except NameError:
+                        key = input(prompt)
+                    if key not in ("", "q"):
+                        key = None
+                if key == "q":
+                    break
+
+
+def setcopyright():
+    """Set 'copyright' and 'credits' in __builtin__"""
+    builtins.copyright = _Printer("copyright", sys.copyright)
+    if _is_jython:
+        builtins.credits = _Printer("credits", "Jython is maintained by the Jython developers (www.jython.org).")
+    elif _is_pypy:
+        builtins.credits = _Printer("credits", "PyPy is maintained by the PyPy developers: http://pypy.org/")
+    else:
+        builtins.credits = _Printer(
+            "credits",
+            """\
+    Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
+    for supporting Python development.  See www.python.org for more information.""",
+        )
+    here = os.path.dirname(os.__file__)
+    builtins.license = _Printer(
+        "license",
+        "See http://www.python.org/%.3s/license.html" % sys.version,
+        ["LICENSE.txt", "LICENSE"],
+        [os.path.join(here, os.pardir), here, os.curdir],
+    )
+
+
+class _Helper(object):
+    """Define the built-in 'help'.
+    This is a wrapper around pydoc.help (with a twist).
+
+    """
+
+    def __repr__(self):
+        return "Type help() for interactive help, or help(object) for help about object."
+
+    def __call__(self, *args, **kwds):
+        import pydoc
+
+        return pydoc.help(*args, **kwds)
+
+
+def sethelper():
+    builtins.help = _Helper()
+
+
+def aliasmbcs():
+    """On Windows, some default encodings are not provided by Python,
+    while they are always available as "mbcs" in each locale. Make
+    them usable by aliasing to "mbcs" in such a case."""
+    if sys.platform == "win32":
+        import locale, codecs
+
+        enc = locale.getdefaultlocale()[1]
+        if enc.startswith("cp"):  # "cp***" ?
+            try:
+                codecs.lookup(enc)
+            except LookupError:
+                import encodings
+
+                encodings._cache[enc] = encodings._unknown
+                encodings.aliases.aliases[enc] = "mbcs"
+
+
+def setencoding():
+    """Set the string encoding used by the Unicode implementation.  The
+    default is 'ascii', but if you're willing to experiment, you can
+    change this."""
+    encoding = "ascii"  # Default value set by _PyUnicode_Init()
+    if 0:
+        # Enable to support locale aware default string encodings.
+        import locale
+
+        loc = locale.getdefaultlocale()
+        if loc[1]:
+            encoding = loc[1]
+    if 0:
+        # Enable to switch off string to Unicode coercion and implicit
+        # Unicode to string conversion.
+        encoding = "undefined"
+    if encoding != "ascii":
+        # On Non-Unicode builds this will raise an AttributeError...
+        sys.setdefaultencoding(encoding)  # Needs Python Unicode build !
+
+
+def execsitecustomize():
+    """Run custom site specific code, if available."""
+    try:
+        import sitecustomize
+    except ImportError:
+        pass
+
+
+def virtual_install_main_packages():
+    f = open(os.path.join(os.path.dirname(__file__), "orig-prefix.txt"))
+    sys.real_prefix = f.read().strip()
+    f.close()
+    pos = 2
+    hardcoded_relative_dirs = []
+    if sys.path[0] == "":
+        pos += 1
+    if _is_jython:
+        paths = [os.path.join(sys.real_prefix, "Lib")]
+    elif _is_pypy:
+        if sys.version_info > (3, 2):
+            cpyver = "%d" % sys.version_info[0]
+        elif sys.pypy_version_info >= (1, 5):
+            cpyver = "%d.%d" % sys.version_info[:2]
+        else:
+            cpyver = "%d.%d.%d" % sys.version_info[:3]
+        paths = [os.path.join(sys.real_prefix, "lib_pypy"), os.path.join(sys.real_prefix, "lib-python", cpyver)]
+        if sys.pypy_version_info < (1, 9):
+            paths.insert(1, os.path.join(sys.real_prefix, "lib-python", "modified-%s" % cpyver))
+        hardcoded_relative_dirs = paths[:]  # for the special 'darwin' case below
+        #
+        # This is hardcoded in the Python executable, but relative to sys.prefix:
+        for path in paths[:]:
+            plat_path = os.path.join(path, "plat-%s" % sys.platform)
+            if os.path.exists(plat_path):
+                paths.append(plat_path)
+    elif sys.platform == "win32":
+        paths = [os.path.join(sys.real_prefix, "Lib"), os.path.join(sys.real_prefix, "DLLs")]
+    else:
+        paths = [os.path.join(sys.real_prefix, "lib", "python" + sys.version[:3])]
+        hardcoded_relative_dirs = paths[:]  # for the special 'darwin' case below
+        lib64_path = os.path.join(sys.real_prefix, "lib64", "python" + sys.version[:3])
+        if os.path.exists(lib64_path):
+            if _is_64bit:
+                paths.insert(0, lib64_path)
+            else:
+                paths.append(lib64_path)
+        # This is hardcoded in the Python executable, but relative to
+        # sys.prefix.  Debian change: we need to add the multiarch triplet
+        # here, which is where the real stuff lives.  As per PEP 421, in
+        # Python 3.3+, this lives in sys.implementation, while in Python 2.7
+        # it lives in sys.
+        try:
+            arch = getattr(sys, "implementation", sys)._multiarch
+        except AttributeError:
+            # This is a non-multiarch aware Python.  Fallback to the old way.
+            arch = sys.platform
+        plat_path = os.path.join(sys.real_prefix, "lib", "python" + sys.version[:3], "plat-%s" % arch)
+        if os.path.exists(plat_path):
+            paths.append(plat_path)
+    # This is hardcoded in the Python executable, but
+    # relative to sys.prefix, so we have to fix up:
+    for path in list(paths):
+        tk_dir = os.path.join(path, "lib-tk")
+        if os.path.exists(tk_dir):
+            paths.append(tk_dir)
+
+    # These are hardcoded in Apple's Python executable,
+    # but relative to sys.prefix, so we have to fix them up:
+    if sys.platform == "darwin":
+        hardcoded_paths = [
+            os.path.join(relative_dir, module)
+            for relative_dir in hardcoded_relative_dirs
+            for module in ("plat-darwin", "plat-mac", "plat-mac/lib-scriptpackages")
+        ]
+
+        for path in hardcoded_paths:
+            if os.path.exists(path):
+                paths.append(path)
+
+    sys.path.extend(paths)
+
+
+def force_global_eggs_after_local_site_packages():
+    """
+    Force easy_installed eggs in the global environment to get placed
+    in sys.path after all packages inside the virtualenv.  This
+    maintains the "least surprise" result that packages in the
+    virtualenv always mask global packages, never the other way
+    around.
+
+    """
+    egginsert = getattr(sys, "__egginsert", 0)
+    for i, path in enumerate(sys.path):
+        if i > egginsert and path.startswith(sys.prefix):
+            egginsert = i
+    sys.__egginsert = egginsert + 1
+
+
+def virtual_addsitepackages(known_paths):
+    force_global_eggs_after_local_site_packages()
+    return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
+
+
+def fixclasspath():
+    """Adjust the special classpath sys.path entries for Jython. These
+    entries should follow the base virtualenv lib directories.
+    """
+    paths = []
+    classpaths = []
+    for path in sys.path:
+        if path == "__classpath__" or path.startswith("__pyclasspath__"):
+            classpaths.append(path)
+        else:
+            paths.append(path)
+    sys.path = paths
+    sys.path.extend(classpaths)
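+    # Illustrative effect (the paths here are made up for the sketch):
+    #   ['__classpath__', '__pyclasspath__/x', '/venv/Lib']
+    # becomes
+    #   ['/venv/Lib', '__classpath__', '__pyclasspath__/x']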
+
+
+def execusercustomize():
+    """Run custom user specific code, if available."""
+    try:
+        import usercustomize
+    except ImportError:
+        pass
+
+
+def enablerlcompleter():
+    """Enable default readline configuration on interactive prompts, by
+    registering a sys.__interactivehook__.
+    If the readline module can be imported, the hook will set the Tab key
+    as completion key and register ~/.python_history as history file.
+    This can be overridden in the sitecustomize or usercustomize module,
+    or in a PYTHONSTARTUP file.
+    """
+
+    def register_readline():
+        import atexit
+
+        try:
+            import readline
+            import rlcompleter
+        except ImportError:
+            return
+
+        # Reading the initialization (config) file may not be enough to set a
+        # completion key, so we set one first and then read the file.
+        readline_doc = getattr(readline, "__doc__", "")
+        if readline_doc is not None and "libedit" in readline_doc:
+            readline.parse_and_bind("bind ^I rl_complete")
+        else:
+            readline.parse_and_bind("tab: complete")
+
+        try:
+            readline.read_init_file()
+        except OSError:
+            # An OSError here could have many causes, but the most likely one
+            # is that there's no .inputrc file (or .editrc file in the case of
+            # Mac OS X + libedit) in the expected location.  In that case, we
+            # want to ignore the exception.
+            pass
+
+        if readline.get_current_history_length() == 0:
+            # If no history was loaded, default to .python_history.
+            # The guard is necessary to avoid doubling history size at
+            # each interpreter exit when readline was already configured
+            # through a PYTHONSTARTUP hook, see:
+            # http://bugs.python.org/issue5845#msg198636
+            history = os.path.join(os.path.expanduser("~"), ".python_history")
+            try:
+                readline.read_history_file(history)
+            except OSError:
+                pass
+
+            def write_history():
+                try:
+                    readline.write_history_file(history)
+                except (FileNotFoundError, PermissionError):
+                    # home directory does not exist or is not writable
+                    # https://bugs.python.org/issue19891
+                    pass
+
+            atexit.register(write_history)
+
+    sys.__interactivehook__ = register_readline
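+    # Example override (a sketch): per the docstring above, a PYTHONSTARTUP
+    # file can replace the hook before the first prompt, e.g.
+    #
+    #   import sys
+    #   sys.__interactivehook__ = lambda: None  # opt out of the default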
+
+
+def main():
+    global ENABLE_USER_SITE
+    virtual_install_main_packages()
+    abs__file__()
+    paths_in_sys = removeduppaths()
+    if os.name == "posix" and sys.path and os.path.basename(sys.path[-1]) == "Modules":
+        addbuilddir()
+    if _is_jython:
+        fixclasspath()
+    GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), "no-global-site-packages.txt"))
+    if not GLOBAL_SITE_PACKAGES:
+        ENABLE_USER_SITE = False
+    if ENABLE_USER_SITE is None:
+        ENABLE_USER_SITE = check_enableusersite()
+    paths_in_sys = addsitepackages(paths_in_sys)
+    paths_in_sys = addusersitepackages(paths_in_sys)
+    if GLOBAL_SITE_PACKAGES:
+        paths_in_sys = virtual_addsitepackages(paths_in_sys)
+    if sys.platform == "os2emx":
+        setBEGINLIBPATH()
+    setquit()
+    setcopyright()
+    sethelper()
+    if sys.version_info[0] == 3:
+        enablerlcompleter()
+    aliasmbcs()
+    setencoding()
+    execsitecustomize()
+    if ENABLE_USER_SITE:
+        execusercustomize()
+    # Remove sys.setdefaultencoding() so that users cannot change the
+    # encoding after initialization.  The test for presence is needed when
+    # this module is run as a script, because this code is executed twice.
+    if hasattr(sys, "setdefaultencoding"):
+        del sys.setdefaultencoding
+
+
+main()
+
+
+def _script():
+    help = """\
+    %s [--user-base] [--user-site]
+
+    Without arguments, print some useful information.
+    With arguments, print the value of USER_BASE and/or USER_SITE separated
+    by '%s'.
+
+    Exit codes with --user-base or --user-site:
+      0 - user site directory is enabled
+      1 - user site directory is disabled by user
+      2 - user site directory is disabled by super user
+          or for security reasons
+     >2 - unknown error
+    """
+    args = sys.argv[1:]
+    if not args:
+        print("sys.path = [")
+        for path in sys.path:
+            print("    {!r},".format(path))
+        print("]")
+
+        def exists(path):
+            if os.path.isdir(path):
+                return "exists"
+            else:
+                return "doesn't exist"
+
+        print("USER_BASE: {!r} ({})".format(USER_BASE, exists(USER_BASE)))
+        print("USER_SITE: {!r} ({})".format(USER_SITE, exists(USER_BASE)))
+        print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
+        sys.exit(0)
+
+    buffer = []
+    if "--user-base" in args:
+        buffer.append(USER_BASE)
+    if "--user-site" in args:
+        buffer.append(USER_SITE)
+
+    if buffer:
+        print(os.pathsep.join(buffer))
+        if ENABLE_USER_SITE:
+            sys.exit(0)
+        elif ENABLE_USER_SITE is False:
+            sys.exit(1)
+        elif ENABLE_USER_SITE is None:
+            sys.exit(2)
+        else:
+            sys.exit(3)
+    else:
+        import textwrap
+
+        print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
+        sys.exit(10)
+
+
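+# Example invocation (a sketch; the printed paths are illustrative, not
+# taken from this repository):
+#
+#   $ python site.py --user-base --user-site
+#   /home/user/.local:/home/user/.local/lib/python3.7/site-packages
+#   $ echo $?    # 0 if the user site directory is enabled (see help above)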
+if __name__ == "__main__":
+    _script()
diff --git a/lib/python3.7/sre_compile.py b/lib/python3.7/sre_compile.py
new file mode 120000
index 0000000000000000000000000000000000000000..8df75bd75a436383788628f3fc4dc5828d8677dd
--- /dev/null
+++ b/lib/python3.7/sre_compile.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/sre_compile.py
\ No newline at end of file
diff --git a/lib/python3.7/sre_constants.py b/lib/python3.7/sre_constants.py
new file mode 120000
index 0000000000000000000000000000000000000000..c08169c1a8818e0285c173428800a12737b5af25
--- /dev/null
+++ b/lib/python3.7/sre_constants.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/sre_constants.py
\ No newline at end of file
diff --git a/lib/python3.7/sre_parse.py b/lib/python3.7/sre_parse.py
new file mode 120000
index 0000000000000000000000000000000000000000..481f82d80b6f979c53a69713da53727589891570
--- /dev/null
+++ b/lib/python3.7/sre_parse.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/sre_parse.py
\ No newline at end of file
diff --git a/lib/python3.7/stat.py b/lib/python3.7/stat.py
new file mode 120000
index 0000000000000000000000000000000000000000..5d1b6269c1f09aeb6e78190c029d37c5e8022f3e
--- /dev/null
+++ b/lib/python3.7/stat.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/stat.py
\ No newline at end of file
diff --git a/lib/python3.7/struct.py b/lib/python3.7/struct.py
new file mode 120000
index 0000000000000000000000000000000000000000..af494950b5deb7f92665da1a88d9a91202055b51
--- /dev/null
+++ b/lib/python3.7/struct.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/struct.py
\ No newline at end of file
diff --git a/lib/python3.7/tarfile.py b/lib/python3.7/tarfile.py
new file mode 120000
index 0000000000000000000000000000000000000000..b7fa773be8d00feaff4385763e6f08acdc273581
--- /dev/null
+++ b/lib/python3.7/tarfile.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/tarfile.py
\ No newline at end of file
diff --git a/lib/python3.7/tempfile.py b/lib/python3.7/tempfile.py
new file mode 120000
index 0000000000000000000000000000000000000000..570b09456c4e408c37631d4b1f363ee48ea754f0
--- /dev/null
+++ b/lib/python3.7/tempfile.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/tempfile.py
\ No newline at end of file
diff --git a/lib/python3.7/token.py b/lib/python3.7/token.py
new file mode 120000
index 0000000000000000000000000000000000000000..d67b104f49f5a8269938136b9c70c900e583c5d5
--- /dev/null
+++ b/lib/python3.7/token.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/token.py
\ No newline at end of file
diff --git a/lib/python3.7/tokenize.py b/lib/python3.7/tokenize.py
new file mode 120000
index 0000000000000000000000000000000000000000..55e6050a65bb79c4a6ca790b318cb4389090ac1c
--- /dev/null
+++ b/lib/python3.7/tokenize.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/tokenize.py
\ No newline at end of file
diff --git a/lib/python3.7/types.py b/lib/python3.7/types.py
new file mode 120000
index 0000000000000000000000000000000000000000..aa3096f547a7a848b85dfd893e64943d1f27ef29
--- /dev/null
+++ b/lib/python3.7/types.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/types.py
\ No newline at end of file
diff --git a/lib/python3.7/warnings.py b/lib/python3.7/warnings.py
new file mode 120000
index 0000000000000000000000000000000000000000..8dd72e205a146658c000dc0ad9a7d2eaa49eb3f9
--- /dev/null
+++ b/lib/python3.7/warnings.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/warnings.py
\ No newline at end of file
diff --git a/lib/python3.7/weakref.py b/lib/python3.7/weakref.py
new file mode 120000
index 0000000000000000000000000000000000000000..c02784d3c6010884c750d68f193045e9de0e2647
--- /dev/null
+++ b/lib/python3.7/weakref.py
@@ -0,0 +1 @@
+/usr/lib/python3.7/weakref.py
\ No newline at end of file
diff --git a/model/__init__.py b/model/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/model/base.py b/model/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..714f343c4729ee918dc3e0fdb58fa75f8ab3a2a4
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,2 @@
+flask
+peewee
\ No newline at end of file