nnilayy committed · verified
Commit 02bd74c · 1 Parent(s): 02224ea

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +4 -0
  2. lib/python3.10/site-packages/babel/locale-data/to.dat +3 -0
  3. lib/python3.10/site-packages/babel/locale-data/uk.dat +3 -0
  4. lib/python3.10/site-packages/babel/locale-data/yo.dat +3 -0
  5. lib/python3.10/site-packages/babel/locale-data/yrl.dat +3 -0
  6. lib/python3.10/site-packages/binaryornot/__init__.py +3 -0
  7. lib/python3.10/site-packages/binaryornot/check.py +33 -0
  8. lib/python3.10/site-packages/binaryornot/helpers.py +132 -0
  9. lib/python3.10/site-packages/cryptography-44.0.2.dist-info/INSTALLER +1 -0
  10. lib/python3.10/site-packages/cryptography-44.0.2.dist-info/METADATA +140 -0
  11. lib/python3.10/site-packages/cryptography-44.0.2.dist-info/RECORD +113 -0
  12. lib/python3.10/site-packages/cryptography-44.0.2.dist-info/REQUESTED +0 -0
  13. lib/python3.10/site-packages/cryptography-44.0.2.dist-info/WHEEL +5 -0
  14. lib/python3.10/site-packages/cryptography-44.0.2.dist-info/licenses/LICENSE +3 -0
  15. lib/python3.10/site-packages/cryptography-44.0.2.dist-info/licenses/LICENSE.APACHE +202 -0
  16. lib/python3.10/site-packages/cryptography-44.0.2.dist-info/licenses/LICENSE.BSD +27 -0
  17. lib/python3.10/site-packages/csvw-3.5.1.dist-info/LICENSE +201 -0
  18. lib/python3.10/site-packages/csvw-3.5.1.dist-info/METADATA +301 -0
  19. lib/python3.10/site-packages/csvw-3.5.1.dist-info/top_level.txt +1 -0
  20. lib/python3.10/site-packages/fsspec/implementations/arrow.py +304 -0
  21. lib/python3.10/site-packages/fsspec/implementations/asyn_wrapper.py +103 -0
  22. lib/python3.10/site-packages/fsspec/implementations/cache_mapper.py +75 -0
  23. lib/python3.10/site-packages/fsspec/implementations/cache_metadata.py +232 -0
  24. lib/python3.10/site-packages/fsspec/implementations/dask.py +152 -0
  25. lib/python3.10/site-packages/fsspec/implementations/data.py +58 -0
  26. lib/python3.10/site-packages/fsspec/implementations/git.py +115 -0
  27. lib/python3.10/site-packages/fsspec/implementations/github.py +239 -0
  28. lib/python3.10/site-packages/fsspec/implementations/http_sync.py +932 -0
  29. lib/python3.10/site-packages/fsspec/implementations/jupyter.py +124 -0
  30. lib/python3.10/site-packages/fsspec/implementations/tar.py +124 -0
  31. lib/python3.10/site-packages/fsspec/implementations/webhdfs.py +485 -0
  32. lib/python3.10/site-packages/fsspec/implementations/zip.py +177 -0
  33. lib/python3.10/site-packages/fsspec/tests/abstract/__init__.py +289 -0
  34. lib/python3.10/site-packages/fsspec/tests/abstract/common.py +175 -0
  35. lib/python3.10/site-packages/fsspec/tests/abstract/copy.py +557 -0
  36. lib/python3.10/site-packages/fsspec/tests/abstract/get.py +587 -0
  37. lib/python3.10/site-packages/fsspec/tests/abstract/mv.py +57 -0
  38. lib/python3.10/site-packages/fsspec/tests/abstract/open.py +11 -0
  39. lib/python3.10/site-packages/fsspec/tests/abstract/pipe.py +11 -0
  40. lib/python3.10/site-packages/fsspec/tests/abstract/put.py +591 -0
  41. lib/python3.10/site-packages/iniconfig-2.1.0.dist-info/INSTALLER +1 -0
  42. lib/python3.10/site-packages/iniconfig-2.1.0.dist-info/METADATA +81 -0
  43. lib/python3.10/site-packages/iniconfig-2.1.0.dist-info/RECORD +11 -0
  44. lib/python3.10/site-packages/iniconfig-2.1.0.dist-info/REQUESTED +0 -0
  45. lib/python3.10/site-packages/iniconfig-2.1.0.dist-info/WHEEL +4 -0
  46. lib/python3.10/site-packages/iniconfig-2.1.0.dist-info/licenses/LICENSE +21 -0
  47. lib/python3.10/site-packages/multiprocess-0.70.12.2.dist-info/COPYING.txt +28 -0
  48. lib/python3.10/site-packages/multiprocess-0.70.12.2.dist-info/INSTALLER +1 -0
  49. lib/python3.10/site-packages/multiprocess-0.70.12.2.dist-info/METADATA +211 -0
  50. lib/python3.10/site-packages/multiprocess-0.70.12.2.dist-info/RECORD +40 -0
.gitattributes CHANGED
@@ -136,3 +136,7 @@ lib/python3.10/site-packages/babel/locale-data/yue.dat filter=lfs diff=lfs merge=lfs -text
  lib/python3.10/site-packages/babel/locale-data/hi.dat filter=lfs diff=lfs merge=lfs -text
  lib/python3.10/site-packages/babel/locale-data/lo.dat filter=lfs diff=lfs merge=lfs -text
  lib/python3.10/site-packages/babel/locale-data/ak.dat filter=lfs diff=lfs merge=lfs -text
+ lib/python3.10/site-packages/babel/locale-data/to.dat filter=lfs diff=lfs merge=lfs -text
+ lib/python3.10/site-packages/babel/locale-data/yrl.dat filter=lfs diff=lfs merge=lfs -text
+ lib/python3.10/site-packages/babel/locale-data/yo.dat filter=lfs diff=lfs merge=lfs -text
+ lib/python3.10/site-packages/babel/locale-data/uk.dat filter=lfs diff=lfs merge=lfs -text
lib/python3.10/site-packages/babel/locale-data/to.dat ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:081afa81182ed2aa3ffa42283a5b8fc2efd9747e3e48f3c91eda3f111703cc70
+ size 145255
lib/python3.10/site-packages/babel/locale-data/uk.dat ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fc88486f2dd431abbaed96aa71d5cf6fa7afa7de33463c64ae0563616409c50
+ size 339125
lib/python3.10/site-packages/babel/locale-data/yo.dat ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8962e0fb4f0593d4092d93eea2481b09e5e4c9fc9035aa2fe754d3de2d692a06
+ size 110427
lib/python3.10/site-packages/babel/locale-data/yrl.dat ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da7c03bb0e50fdd2f81f63b19790a5cd9536530d748fe9283f94607e727aef2d
+ size 186854
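
Each of the four .dat diffs above adds a Git LFS pointer rather than the locale data itself: a three-line text stub recording the pointer spec version, the SHA-256 object id, and the byte size of the real blob. A minimal sketch of reading such a pointer (the helper name is illustrative, not part of this commit):

    def parse_lfs_pointer(text):
        # Each pointer line is "key value"; split on the first space only.
        return dict(line.split(" ", 1) for line in text.strip().splitlines())

    pointer = (
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:081afa81182ed2aa3ffa42283a5b8fc2efd9747e3e48f3c91eda3f111703cc70\n"
        "size 145255\n"
    )
    info = parse_lfs_pointer(pointer)
    assert info["oid"].startswith("sha256:")
    assert int(info["size"]) == 145255  # matches the to.dat entry above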
lib/python3.10/site-packages/binaryornot/__init__.py ADDED
@@ -0,0 +1,3 @@
+ __author__ = 'Audrey Roy'
+ __email__ = '[email protected]'
+ __version__ = '0.4.4'
lib/python3.10/site-packages/binaryornot/check.py ADDED
@@ -0,0 +1,33 @@
+ # -*- coding: utf-8 -*-
+
+ """
+ binaryornot.check
+ -----------------
+
+ Main code for checking if a file is binary or text.
+ """
+
+ import logging
+
+ from .helpers import get_starting_chunk, is_binary_string
+
+
+ logger = logging.getLogger(__name__)
+
+
+ def is_binary(filename):
+     """
+     :param filename: File to check.
+     :returns: True if it's a binary file, otherwise False.
+     """
+     logger.debug('is_binary: %(filename)r', locals())
+
+     # Check if the file extension is in a list of known binary types
+     binary_extensions = ['.pyc', ]
+     for ext in binary_extensions:
+         if filename.endswith(ext):
+             return True
+
+     # Check if the starting chunk is a binary string
+     chunk = get_starting_chunk(filename)
+     return is_binary_string(chunk)
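
The vendored ``binaryornot.check`` module exposes a single entry point, ``is_binary``: names with a known binary extension short-circuit to True without any I/O, and everything else is classified from the first 1024 bytes via ``is_binary_string``. A hedged usage sketch (the file names are hypothetical):

    from binaryornot.check import is_binary

    # '.pyc' is in the known-binary extension list, so the file is never read.
    print(is_binary("module.pyc"))  # True
    # Other names are classified from their starting chunk of bytes.
    print(is_binary("notes.txt"))   # False for ordinary text content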
lib/python3.10/site-packages/binaryornot/helpers.py ADDED
@@ -0,0 +1,132 @@
+ # -*- coding: utf-8 -*-
+
+
+ """
+ binaryornot.helpers
+ -------------------
+
+ Helper utilities used by BinaryOrNot.
+ """
+
+ import chardet
+ import logging
+
+
+ logger = logging.getLogger(__name__)
+
+
+ def print_as_hex(s):
+     """
+     Print a string as hex bytes.
+     """
+     print(":".join("{0:x}".format(ord(c)) for c in s))
+
+
+ def get_starting_chunk(filename, length=1024):
+     """
+     :param filename: File to open and get the first little chunk of.
+     :param length: Number of bytes to read, default 1024.
+     :returns: Starting chunk of bytes.
+     """
+     # Ensure we open the file in binary mode
+     try:
+         with open(filename, 'rb') as f:
+             chunk = f.read(length)
+             return chunk
+     except IOError as e:
+         print(e)
+
+
+ _control_chars = b'\n\r\t\f\b'
+ if bytes is str:
+     # Python 2 means we need to invoke chr() explicitly
+     _printable_ascii = _control_chars + b''.join(map(chr, range(32, 127)))
+     _printable_high_ascii = b''.join(map(chr, range(127, 256)))
+ else:
+     # Python 3 means bytes accepts integer input directly
+     _printable_ascii = _control_chars + bytes(range(32, 127))
+     _printable_high_ascii = bytes(range(127, 256))
+
+
+ def is_binary_string(bytes_to_check):
+     """
+     Uses a simplified version of the Perl detection algorithm,
+     based roughly on Eli Bendersky's translation to Python:
+     http://eli.thegreenplace.net/2011/10/19/perls-guess-if-file-is-text-or-binary-implemented-in-python/
+
+     This is biased slightly more in favour of deeming files as text
+     files than the Perl algorithm, since all ASCII compatible character
+     sets are accepted as text, not just utf-8.
+
+     :param bytes_to_check: A chunk of bytes to check.
+     :returns: True if it appears to be binary, otherwise False.
+     """
+
+     # Empty files are considered text files
+     if not bytes_to_check:
+         return False
+
+     # Now check for a high percentage of ASCII control characters
+     # Binary if control chars are > 30% of the string
+     low_chars = bytes_to_check.translate(None, _printable_ascii)
+     nontext_ratio1 = float(len(low_chars)) / float(len(bytes_to_check))
+     logger.debug('nontext_ratio1: %(nontext_ratio1)r', locals())
+
+     # and check for a low percentage of high ASCII characters:
+     # Binary if high ASCII chars are < 5% of the string
+     # From: https://en.wikipedia.org/wiki/UTF-8
+     # If the bytes are random, the chances of a byte with the high bit set
+     # starting a valid UTF-8 character is only 6.64%. The chances of finding 7
+     # of these without finding an invalid sequence is actually lower than the
+     # chance of the first three bytes randomly being the UTF-8 BOM.
+
+     high_chars = bytes_to_check.translate(None, _printable_high_ascii)
+     nontext_ratio2 = float(len(high_chars)) / float(len(bytes_to_check))
+     logger.debug('nontext_ratio2: %(nontext_ratio2)r', locals())
+
+     is_likely_binary = (
+         (nontext_ratio1 > 0.3 and nontext_ratio2 < 0.05) or
+         (nontext_ratio1 > 0.8 and nontext_ratio2 > 0.8)
+     )
+     logger.debug('is_likely_binary: %(is_likely_binary)r', locals())
+
+     # then check for binary for possible encoding detection with chardet
+     detected_encoding = chardet.detect(bytes_to_check)
+     logger.debug('detected_encoding: %(detected_encoding)r', locals())
+
+     # finally use all the checks to decide binary or text
+     decodable_as_unicode = False
+     if (detected_encoding['confidence'] > 0.9 and
+             detected_encoding['encoding'] != 'ascii'):
+         try:
+             try:
+                 bytes_to_check.decode(encoding=detected_encoding['encoding'])
+             except TypeError:
+                 # happens only on Python 2.6
+                 unicode(bytes_to_check, encoding=detected_encoding['encoding'])  # noqa
+             decodable_as_unicode = True
+             logger.debug('success: decodable_as_unicode: '
+                          '%(decodable_as_unicode)r', locals())
+         except LookupError:
+             logger.debug('failure: could not look up encoding %(encoding)s',
+                          detected_encoding)
+         except UnicodeDecodeError:
+             logger.debug('failure: decodable_as_unicode: '
+                          '%(decodable_as_unicode)r', locals())
+
+     logger.debug('failure: decodable_as_unicode: '
+                  '%(decodable_as_unicode)r', locals())
+     if is_likely_binary:
+         if decodable_as_unicode:
+             return False
+         else:
+             return True
+     else:
+         if decodable_as_unicode:
+             return False
+         else:
+             if b'\x00' in bytes_to_check or b'\xff' in bytes_to_check:
+                 # Check for NULL bytes last
+                 logger.debug('has nulls:' + repr(b'\x00' in bytes_to_check))
+                 return True
+             return False
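
The core of ``is_binary_string`` is the two-ratio heuristic above: ``nontext_ratio1`` is the share of bytes that are not printable ASCII, and ``nontext_ratio2`` the share that are not high ASCII. A rough worked example (the chunk contents are invented for illustration):

    # Rebuild the module's Python 3 translate tables.
    _printable_ascii = b'\n\r\t\f\b' + bytes(range(32, 127))
    _printable_high_ascii = bytes(range(127, 256))

    chunk = b"PK\x03\x04" + bytes(16)  # zip-like magic bytes plus NUL padding
    ratio1 = len(chunk.translate(None, _printable_ascii)) / len(chunk)       # 0.9
    ratio2 = len(chunk.translate(None, _printable_high_ascii)) / len(chunk)  # 1.0
    # Both ratios exceed 0.8, so the chunk is flagged as likely binary;
    # unless chardet confidently detects a text encoding, is_binary_string
    # returns True for this input.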
lib/python3.10/site-packages/cryptography-44.0.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ uv
lib/python3.10/site-packages/cryptography-44.0.2.dist-info/METADATA ADDED
@@ -0,0 +1,140 @@
+ Metadata-Version: 2.3
+ Name: cryptography
+ Version: 44.0.2
+ Classifier: Development Status :: 5 - Production/Stable
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: License :: OSI Approved :: BSD License
+ Classifier: Natural Language :: English
+ Classifier: Operating System :: MacOS :: MacOS X
+ Classifier: Operating System :: POSIX
+ Classifier: Operating System :: POSIX :: BSD
+ Classifier: Operating System :: POSIX :: Linux
+ Classifier: Operating System :: Microsoft :: Windows
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Programming Language :: Python :: 3.7
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: Implementation :: CPython
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
+ Classifier: Topic :: Security :: Cryptography
+ Requires-Dist: cffi >=1.12 ; platform_python_implementation != 'PyPy'
+ Requires-Dist: bcrypt >=3.1.5 ; extra == 'ssh'
+ Requires-Dist: nox >=2024.4.15 ; extra == 'nox'
+ Requires-Dist: nox[uv] >=2024.3.2 ; python_version >= '3.8' and extra == 'nox'
+ Requires-Dist: cryptography-vectors ==44.0.2 ; extra == 'test'
+ Requires-Dist: pytest >=7.4.0 ; extra == 'test'
+ Requires-Dist: pytest-benchmark >=4.0 ; extra == 'test'
+ Requires-Dist: pytest-cov >=2.10.1 ; extra == 'test'
+ Requires-Dist: pytest-xdist >=3.5.0 ; extra == 'test'
+ Requires-Dist: pretend >=0.7 ; extra == 'test'
+ Requires-Dist: certifi >=2024 ; extra == 'test'
+ Requires-Dist: pytest-randomly ; extra == 'test-randomorder'
+ Requires-Dist: sphinx >=5.3.0 ; extra == 'docs'
+ Requires-Dist: sphinx-rtd-theme >=3.0.0 ; python_version >= '3.8' and extra == 'docs'
+ Requires-Dist: pyenchant >=3 ; extra == 'docstest'
+ Requires-Dist: readme-renderer >=30.0 ; extra == 'docstest'
+ Requires-Dist: sphinxcontrib-spelling >=7.3.1 ; extra == 'docstest'
+ Requires-Dist: build >=1.0.0 ; extra == 'sdist'
+ Requires-Dist: ruff >=0.3.6 ; extra == 'pep8test'
+ Requires-Dist: mypy >=1.4 ; extra == 'pep8test'
+ Requires-Dist: check-sdist ; python_version >= '3.8' and extra == 'pep8test'
+ Requires-Dist: click >=8.0.1 ; extra == 'pep8test'
+ Provides-Extra: ssh
+ Provides-Extra: nox
+ Provides-Extra: test
+ Provides-Extra: test-randomorder
+ Provides-Extra: docs
+ Provides-Extra: docstest
+ Provides-Extra: sdist
+ Provides-Extra: pep8test
+ License-File: LICENSE
+ License-File: LICENSE.APACHE
+ License-File: LICENSE.BSD
+ Summary: cryptography is a package which provides cryptographic recipes and primitives to Python developers.
+ Author: The cryptography developers <[email protected]>
+ Author-email: The Python Cryptographic Authority and individual contributors <[email protected]>
+ License: Apache-2.0 OR BSD-3-Clause
+ Requires-Python: >=3.7, !=3.9.0, !=3.9.1
+ Description-Content-Type: text/x-rst; charset=UTF-8
+ Project-URL: homepage, https://github.com/pyca/cryptography
+ Project-URL: documentation, https://cryptography.io/
+ Project-URL: source, https://github.com/pyca/cryptography/
+ Project-URL: issues, https://github.com/pyca/cryptography/issues
+ Project-URL: changelog, https://cryptography.io/en/latest/changelog/
+
+ pyca/cryptography
+ =================
+
+ .. image:: https://img.shields.io/pypi/v/cryptography.svg
+    :target: https://pypi.org/project/cryptography/
+    :alt: Latest Version
+
+ .. image:: https://readthedocs.org/projects/cryptography/badge/?version=latest
+    :target: https://cryptography.io
+    :alt: Latest Docs
+
+ .. image:: https://github.com/pyca/cryptography/workflows/CI/badge.svg?branch=main
+    :target: https://github.com/pyca/cryptography/actions?query=workflow%3ACI+branch%3Amain
+
+
+ ``cryptography`` is a package which provides cryptographic recipes and
+ primitives to Python developers. Our goal is for it to be your "cryptographic
+ standard library". It supports Python 3.7+ and PyPy3 7.3.11+.
+
+ ``cryptography`` includes both high level recipes and low level interfaces to
+ common cryptographic algorithms such as symmetric ciphers, message digests, and
+ key derivation functions. For example, to encrypt something with
+ ``cryptography``'s high level symmetric encryption recipe:
+
+ .. code-block:: pycon
+
+     >>> from cryptography.fernet import Fernet
+     >>> # Put this somewhere safe!
+     >>> key = Fernet.generate_key()
+     >>> f = Fernet(key)
+     >>> token = f.encrypt(b"A really secret message. Not for prying eyes.")
+     >>> token
+     b'...'
+     >>> f.decrypt(token)
+     b'A really secret message. Not for prying eyes.'
+
+ You can find more information in the `documentation`_.
+
+ You can install ``cryptography`` with:
+
+ .. code-block:: console
+
+     $ pip install cryptography
+
+ For full details see `the installation documentation`_.
+
+ Discussion
+ ~~~~~~~~~~
+
+ If you run into bugs, you can file them in our `issue tracker`_.
+
+ We maintain a `cryptography-dev`_ mailing list for development discussion.
+
+ You can also join ``#pyca`` on ``irc.libera.chat`` to ask questions or get
+ involved.
+
+ Security
+ ~~~~~~~~
+
+ Need to report a security issue? Please consult our `security reporting`_
+ documentation.
+
+
+ .. _`documentation`: https://cryptography.io/
+ .. _`the installation documentation`: https://cryptography.io/en/latest/installation/
+ .. _`issue tracker`: https://github.com/pyca/cryptography/issues
+ .. _`cryptography-dev`: https://mail.python.org/mailman/listinfo/cryptography-dev
+ .. _`security reporting`: https://cryptography.io/en/latest/security/
+
lib/python3.10/site-packages/cryptography-44.0.2.dist-info/RECORD ADDED
@@ -0,0 +1,113 @@
+ cryptography-44.0.2.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
+ cryptography-44.0.2.dist-info/METADATA,sha256=cgphHIHoULnRY3G2C7Eh6Wp2QMwMBmNMLXtz_1ld7ho,5724
+ cryptography-44.0.2.dist-info/RECORD,,
+ cryptography-44.0.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cryptography-44.0.2.dist-info/WHEEL,sha256=UpI5w5M2-qnzZdHNjisIw1dag11TStMro3kowqBDR6k,107
+ cryptography-44.0.2.dist-info/licenses/LICENSE,sha256=Pgx8CRqUi4JTO6mP18u0BDLW8amsv4X1ki0vmak65rs,197
+ cryptography-44.0.2.dist-info/licenses/LICENSE.APACHE,sha256=qsc7MUj20dcRHbyjIJn2jSbGRMaBOuHk8F9leaomY_4,11360
+ cryptography-44.0.2.dist-info/licenses/LICENSE.BSD,sha256=YCxMdILeZHndLpeTzaJ15eY9dz2s0eymiSMqtwCPtPs,1532
+ cryptography/__about__.py,sha256=LsHy-0b4kwxfAD0ryobJhitlFn7Tk8Sepunxo8YcUZs,445
+ cryptography/__init__.py,sha256=XsRL_PxbU6UgoyoglAgJQSrJCP97ovBA8YIEQ2-uI68,762
+ cryptography/exceptions.py,sha256=835EWILc2fwxw-gyFMriciC2SqhViETB10LBSytnDIc,1087
+ cryptography/fernet.py,sha256=aMU2HyDJ5oRGjg8AkFvHwE7BSmHY4fVUCaioxZcd8gA,6933
+ cryptography/hazmat/__init__.py,sha256=5IwrLWrVp0AjEr_4FdWG_V057NSJGY_W4egNNsuct0g,455
+ cryptography/hazmat/_oid.py,sha256=xcGtygUQX1p2ozVjhqKk016E5--BC7ituI1EGuoiWds,15294
+ cryptography/hazmat/backends/__init__.py,sha256=O5jvKFQdZnXhKeqJ-HtulaEL9Ni7mr1mDzZY5kHlYhI,361
+ cryptography/hazmat/backends/openssl/__init__.py,sha256=p3jmJfnCag9iE5sdMrN6VvVEu55u46xaS_IjoI0SrmA,305
+ cryptography/hazmat/backends/openssl/backend.py,sha256=Bk_inezh7fBN3jsxMu1YIkf10zryfup6opBDLVFiNms,9413
+ cryptography/hazmat/bindings/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180
+ cryptography/hazmat/bindings/_rust.abi3.so,sha256=-fS4tbc3TKtgMDEaLqdxNdIjuJjKA2IIIkbtufR1Gd8,11514880
+ cryptography/hazmat/bindings/_rust/__init__.pyi,sha256=s73-NWxZs-5r2vAzDT9Eqo9mRiWE__A4VJKyFBkjHdM,879
+ cryptography/hazmat/bindings/_rust/_openssl.pyi,sha256=mpNJLuYLbCVrd5i33FBTmWwL_55Dw7JPkSLlSX9Q7oI,230
+ cryptography/hazmat/bindings/_rust/asn1.pyi,sha256=BrGjC8J6nwuS-r3EVcdXJB8ndotfY9mbQYOfpbPG0HA,354
+ cryptography/hazmat/bindings/_rust/exceptions.pyi,sha256=exXr2xw_0pB1kk93cYbM3MohbzoUkjOms1ZMUi0uQZE,640
+ cryptography/hazmat/bindings/_rust/ocsp.pyi,sha256=mNrMO5sYEnftD_b2-NvvR6M8QdYGZ1jpTdazpgzXgl0,4004
+ cryptography/hazmat/bindings/_rust/openssl/__init__.pyi,sha256=FS2gi2eALVzqTTic8an8enD431pkwKbRxeAZaNMV4Ts,1410
+ cryptography/hazmat/bindings/_rust/openssl/aead.pyi,sha256=i0gA3jUQ4rkJXTGGZrq-AuY-VQLN31lyDeWuDZ0zJYw,2553
+ cryptography/hazmat/bindings/_rust/openssl/ciphers.pyi,sha256=iK0ZhQ-WyCQbjaraaFgK6q4PpD-7Rf5RDHkFD3YEW_g,1301
+ cryptography/hazmat/bindings/_rust/openssl/cmac.pyi,sha256=nPH0X57RYpsAkRowVpjQiHE566ThUTx7YXrsadmrmHk,564
+ cryptography/hazmat/bindings/_rust/openssl/dh.pyi,sha256=Z3TC-G04-THtSdAOPLM1h2G7ml5bda1ElZUcn5wpuhk,1564
+ cryptography/hazmat/bindings/_rust/openssl/dsa.pyi,sha256=qBtkgj2albt2qFcnZ9UDrhzoNhCVO7HTby5VSf1EXMI,1299
+ cryptography/hazmat/bindings/_rust/openssl/ec.pyi,sha256=zJy0pRa5n-_p2dm45PxECB_-B6SVZyNKfjxFDpPqT38,1691
+ cryptography/hazmat/bindings/_rust/openssl/ed25519.pyi,sha256=OJsrblS2nHptZctva-pAKFL5q8yPEAkhmjPZpJ6TA94,493
+ cryptography/hazmat/bindings/_rust/openssl/ed448.pyi,sha256=SkPHK2HdbYN02TVQEUOgW3iTdiEY7HBE4DijpdkAzmk,475
+ cryptography/hazmat/bindings/_rust/openssl/hashes.pyi,sha256=p8sdf41mPBlV_W9v_18JItuMoHE8UkBxj9Tuqi0WiTE,639
+ cryptography/hazmat/bindings/_rust/openssl/hmac.pyi,sha256=ZmLJ73pmxcZFC1XosWEiXMRYtvJJor3ZLdCQOJu85Cw,662
+ cryptography/hazmat/bindings/_rust/openssl/kdf.pyi,sha256=hvZSV2C3MQd9jC1Tuh5Lsb0iGBgcLVF2xFYdTo7USO4,1129
+ cryptography/hazmat/bindings/_rust/openssl/keys.pyi,sha256=JSrlGNaW49ZCZ1hcb-YJdS1EAbsMwRbVEcLL0P9OApA,872
+ cryptography/hazmat/bindings/_rust/openssl/poly1305.pyi,sha256=9iogF7Q4i81IkOS-IMXp6HvxFF_3cNy_ucrAjVQnn14,540
+ cryptography/hazmat/bindings/_rust/openssl/rsa.pyi,sha256=2OQCNSXkxgc-3uw1xiCCloIQTV6p9_kK79Yu0rhZgPc,1364
+ cryptography/hazmat/bindings/_rust/openssl/x25519.pyi,sha256=2BKdbrddM_9SMUpdvHKGhb9MNjURCarPxccbUDzHeoA,484
+ cryptography/hazmat/bindings/_rust/openssl/x448.pyi,sha256=AoRMWNvCJTiH5L-lkIkCdPlrPLUdJvvfXpIvf1GmxpM,466
+ cryptography/hazmat/bindings/_rust/pkcs12.pyi,sha256=afhB_6M8xI1MIE5vxkaDF1jSxA48ib1--NiOxtf6boM,1394
+ cryptography/hazmat/bindings/_rust/pkcs7.pyi,sha256=Ag9coB8kRwrUJEg1do6BJABs9DqxZiY8WJIFUVa7StE,1545
+ cryptography/hazmat/bindings/_rust/test_support.pyi,sha256=FXe7t_tqI3e9ULirYcr5Zlw5szGY7TiZyb7W83ak0Nk,718
+ cryptography/hazmat/bindings/_rust/x509.pyi,sha256=0p-Ak_zj-9WfyZKPo08YT6cOx1c-lhjeYd0jJ8c4oT0,8318
+ cryptography/hazmat/bindings/openssl/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180
+ cryptography/hazmat/bindings/openssl/_conditional.py,sha256=dkGKGU-22uR2ZKeOOwaSxEJCGaafgUjb2romWcu03QE,5163
+ cryptography/hazmat/bindings/openssl/binding.py,sha256=e1gnFAZBPrkJ3CsiZV-ug6kaPdNTAEROaUFiFrUh71M,4042
+ cryptography/hazmat/decrepit/__init__.py,sha256=wHCbWfaefa-fk6THSw9th9fJUsStJo7245wfFBqmduA,216
+ cryptography/hazmat/decrepit/ciphers/__init__.py,sha256=wHCbWfaefa-fk6THSw9th9fJUsStJo7245wfFBqmduA,216
+ cryptography/hazmat/decrepit/ciphers/algorithms.py,sha256=HWA4PKDS2w4D2dQoRerpLRU7Kntt5vJeJC7j--AlZVU,2520
+ cryptography/hazmat/primitives/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180
+ cryptography/hazmat/primitives/_asymmetric.py,sha256=RhgcouUB6HTiFDBrR1LxqkMjpUxIiNvQ1r_zJjRG6qQ,532
+ cryptography/hazmat/primitives/_cipheralgorithm.py,sha256=gKa0WrLz6K4fqhnGbfBYKDSxgLxsPU0uj_EK2UT47W4,1495
+ cryptography/hazmat/primitives/_serialization.py,sha256=qrozc8fw2WZSbjk3DAlSl3ResxpauwJ74ZgGoUL-mj0,5142
+ cryptography/hazmat/primitives/asymmetric/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180
+ cryptography/hazmat/primitives/asymmetric/dh.py,sha256=OOCjMClH1Bf14Sy7jAdwzEeCxFPb8XUe2qePbExvXwc,3420
+ cryptography/hazmat/primitives/asymmetric/dsa.py,sha256=xBwdf0pZOgvqjUKcO7Q0L3NxwalYj0SJDUqThemhSmI,3945
+ cryptography/hazmat/primitives/asymmetric/ec.py,sha256=lwZmtAwi3PM8lsY1MsNaby_bVi--49OCxwE_1yqKC-A,10428
+ cryptography/hazmat/primitives/asymmetric/ed25519.py,sha256=kl63fg7myuMjNTmMoVFeH6iVr0x5FkjNmggxIRTloJk,3423
+ cryptography/hazmat/primitives/asymmetric/ed448.py,sha256=2UzEDzzfkPn83UFVFlMZfIMbAixxY09WmQyrwinWTn8,3456
+ cryptography/hazmat/primitives/asymmetric/padding.py,sha256=eZcvUqVLbe3u48SunLdeniaPlV4-k6pwBl67OW4jSy8,2885
+ cryptography/hazmat/primitives/asymmetric/rsa.py,sha256=dvj4i2js78qpgotEKn3SU5Eh2unDSMiZpTVo2kx_NWU,7668
+ cryptography/hazmat/primitives/asymmetric/types.py,sha256=LnsOJym-wmPUJ7Knu_7bCNU3kIiELCd6krOaW_JU08I,2996
+ cryptography/hazmat/primitives/asymmetric/utils.py,sha256=DPTs6T4F-UhwzFQTh-1fSEpQzazH2jf2xpIro3ItF4o,790
+ cryptography/hazmat/primitives/asymmetric/x25519.py,sha256=VGYuRdIYuVBtizpFdNWd2bTrT10JRa1admQdBr08xz8,3341
+ cryptography/hazmat/primitives/asymmetric/x448.py,sha256=GKKJBqYLr03VewMF18bXIM941aaWcZIQ4rC02GLLEmw,3374
+ cryptography/hazmat/primitives/ciphers/__init__.py,sha256=eyEXmjk6_CZXaOPYDr7vAYGXr29QvzgWL2-4CSolLFs,680
+ cryptography/hazmat/primitives/ciphers/aead.py,sha256=Fzlyx7w8KYQakzDp1zWgJnIr62zgZrgVh1u2h4exB54,634
+ cryptography/hazmat/primitives/ciphers/algorithms.py,sha256=cPzrUizm_RfUi7DDqf3WNezkFy2IxfllsJv6s16bWS8,4493
+ cryptography/hazmat/primitives/ciphers/base.py,sha256=tg-XNaKUyETBi7ounGDEL1_ICn-s4FF9LR7moV58blI,4211
+ cryptography/hazmat/primitives/ciphers/modes.py,sha256=BFpxEGSaxoeZjrQ4sqpyPDvKClrqfDKIBv7kYtFURhE,8192
+ cryptography/hazmat/primitives/cmac.py,sha256=sz_s6H_cYnOvx-VNWdIKhRhe3Ymp8z8J0D3CBqOX3gg,338
+ cryptography/hazmat/primitives/constant_time.py,sha256=xdunWT0nf8OvKdcqUhhlFKayGp4_PgVJRU2W1wLSr_A,422
+ cryptography/hazmat/primitives/hashes.py,sha256=EvDIJBhj83Z7f-oHbsA0TzZLFSDV_Yv8hQRdM4o8FD0,5091
+ cryptography/hazmat/primitives/hmac.py,sha256=RpB3z9z5skirCQrm7zQbtnp9pLMnAjrlTUvKqF5aDDc,423
+ cryptography/hazmat/primitives/kdf/__init__.py,sha256=4XibZnrYq4hh5xBjWiIXzaYW6FKx8hPbVaa_cB9zS64,750
+ cryptography/hazmat/primitives/kdf/argon2.py,sha256=UFDNXG0v-rw3DqAQTB1UQAsQC2M5Ejg0k_6OCyhLKus,460
+ cryptography/hazmat/primitives/kdf/concatkdf.py,sha256=bcn4NGXse-EsFl7nlU83e5ilop7TSHcX-CJJS107W80,3686
+ cryptography/hazmat/primitives/kdf/hkdf.py,sha256=uhN5L87w4JvtAqQcPh_Ji2TPSc18IDThpaYJiHOWy3A,3015
+ cryptography/hazmat/primitives/kdf/kbkdf.py,sha256=eSuLK1sATkamgCAit794jLr7sDNlu5X0USdcWhwJdmk,9146
+ cryptography/hazmat/primitives/kdf/pbkdf2.py,sha256=Xj3YIeX30h2BUaoJAtOo1RMXV_em0-eCG0PU_0FHJzM,1950
+ cryptography/hazmat/primitives/kdf/scrypt.py,sha256=XyWUdUUmhuI9V6TqAPOvujCSMGv1XQdg0a21IWCmO-U,590
+ cryptography/hazmat/primitives/kdf/x963kdf.py,sha256=wCpWmwQjZ2vAu2rlk3R_PX0nINl8WGXYBmlyMOC5iPw,1992
+ cryptography/hazmat/primitives/keywrap.py,sha256=XV4Pj2fqSeD-RqZVvY2cA3j5_7RwJSFygYuLfk2ujCo,5650
+ cryptography/hazmat/primitives/padding.py,sha256=Qu1VVsCiqfQMPPqU0qU6ig9Y802jZlXVOUDLIxN5KeQ,4932
+ cryptography/hazmat/primitives/poly1305.py,sha256=P5EPQV-RB_FJPahpg01u0Ts4S_PnAmsroxIGXbGeRRo,355
+ cryptography/hazmat/primitives/serialization/__init__.py,sha256=jyNx_7NcOEbVRBY4nP9ks0IVXBafbcYnTK27vafPLW8,1653
+ cryptography/hazmat/primitives/serialization/base.py,sha256=ikq5MJIwp_oUnjiaBco_PmQwOTYuGi-XkYUYHKy8Vo0,615
+ cryptography/hazmat/primitives/serialization/pkcs12.py,sha256=7vVXbiP7qhhvKAHJT_M8-LBZdbpOwrpWRHWxNrNqzXE,4492
+ cryptography/hazmat/primitives/serialization/pkcs7.py,sha256=n25jEw__vkZWSlumwgYnqJ0lzyPh5xljMsJDyp2QomM,12346
+ cryptography/hazmat/primitives/serialization/ssh.py,sha256=VKscMrVdYK5B9PQISjjdRMglRvqa_L3sDNm5vdjVHJY,51915
+ cryptography/hazmat/primitives/twofactor/__init__.py,sha256=tmMZGB-g4IU1r7lIFqASU019zr0uPp_wEBYcwdDCKCA,258
+ cryptography/hazmat/primitives/twofactor/hotp.py,sha256=rv507uNznUs22XlaqGBbZKkkGjmiTUAcwghTYMem6uM,3219
+ cryptography/hazmat/primitives/twofactor/totp.py,sha256=BQ0oPTp2JW1SMZqdgv95NBG3u_ODiDtzVJENHWYhvXY,1613
+ cryptography/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cryptography/utils.py,sha256=Rp7ppg4XIBVVzNQ6XngGndwkICJoYp6FoFOOgTWLJ7g,3925
+ cryptography/x509/__init__.py,sha256=Q8P-MnUGrgFxRt5423bE-gzSvgZLAdddWuPheHnuA_c,8132
+ cryptography/x509/base.py,sha256=-F5KWjxbyjSqluUSr7LRC_sqN_s-qHP5K0rW-41PI4E,26909
+ cryptography/x509/certificate_transparency.py,sha256=JqoOIDhlwInrYMFW6IFn77WJ0viF-PB_rlZV3vs9MYc,797
+ cryptography/x509/extensions.py,sha256=iX-3WFm4yFjstFIs1F30f3tixIp6i0WgGdc6GwJ-QiQ,76158
+ cryptography/x509/general_name.py,sha256=sP_rV11Qlpsk4x3XXGJY_Mv0Q_s9dtjeLckHsjpLQoQ,7836
+ cryptography/x509/name.py,sha256=MYCxCSTQTpzhjxFPZaANqJ9fGrhESH73vPkoay8HSWM,14830
+ cryptography/x509/ocsp.py,sha256=vbrg3p1hBJQEEFIZ35GHcjbGwTrsxPhlot-OVpyP-C8,11390
+ cryptography/x509/oid.py,sha256=X8EbhkRTLrGuv9vHZSGqPd9zpvRVsonU_joWAL5LLY8,885
+ cryptography/x509/verification.py,sha256=alfx3VaTSb2bMz7_7s788oL90vzgHwBjVINssdz0Gv0,796
+ rust/Cargo.toml,sha256=gaBJTn9TwBCG7U3JgETYbTmK8DNUxl4gKKS65nDWuwM,1320
+ rust/cryptography-cffi/Cargo.toml,sha256=CjVBJTYW1TwzXgLgY8TZ92NP_9XSmHzSfRIzVaZh9Bk,386
+ rust/cryptography-keepalive/Cargo.toml,sha256=_ABt1o-uFnxDqhb7YzNynb6YEQ2eW2QpnPD1RXBUsrI,210
+ rust/cryptography-key-parsing/Cargo.toml,sha256=yLWh172kspq6BJVZA2PjFw17Rt0xTYKn_TTzp3IVhxg,455
+ rust/cryptography-openssl/Cargo.toml,sha256=mI0cIDv-kQTl24C-bLvDCqiWn6QobBdqCMYSi_UWPE0,545
+ rust/cryptography-x509-verification/Cargo.toml,sha256=vECbxPiNu-dQhW4baTuSPzgqaBnBgwZYnJCSaJQbIUA,426
+ rust/cryptography-x509/Cargo.toml,sha256=wAuwnc1eKnSUNFjf4GpQM__FTig-hqF2ZPXJPmqb6cA,248
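
Each RECORD line above has the form path,sha256=<digest>,<size>: the digest is an unpadded urlsafe-base64 encoding of the installed file's SHA-256, and the size is in bytes. A small sketch (assuming the standard wheel RECORD format) of recomputing the hash field, checked against the zero-byte REQUESTED entry:

    import base64
    import hashlib

    def record_hash(data: bytes) -> str:
        # RECORD stores SHA-256 digests urlsafe-base64 encoded, '=' padding stripped.
        digest = hashlib.sha256(data).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    # REQUESTED is an empty file, so its entry carries the empty-input digest.
    assert record_hash(b"") == "sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU"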
lib/python3.10/site-packages/cryptography-44.0.2.dist-info/REQUESTED ADDED
File without changes
lib/python3.10/site-packages/cryptography-44.0.2.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: maturin (1.7.5)
+ Root-Is-Purelib: false
+ Tag: cp39-abi3-manylinux_2_34_x86_64
+
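
The Tag line encodes where this wheel can be installed: CPython 3.9 and newer via the abi3 stable ABI, on x86-64 Linux with glibc 2.34 or later. A hedged sketch using the third-party ``packaging`` library (not part of this commit) to decompose the tag:

    from packaging.tags import parse_tag

    # parse_tag expands a compressed tag string into individual Tag objects.
    for tag in sorted(parse_tag("cp39-abi3-manylinux_2_34_x86_64"), key=str):
        print(tag.interpreter, tag.abi, tag.platform)
    # cp39 abi3 manylinux_2_34_x86_64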
lib/python3.10/site-packages/cryptography-44.0.2.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,3 @@
+ This software is made available under the terms of *either* of the licenses
+ found in LICENSE.APACHE or LICENSE.BSD. Contributions to cryptography are made
+ under the terms of *both* these licenses.
lib/python3.10/site-packages/cryptography-44.0.2.dist-info/licenses/LICENSE.APACHE ADDED
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
lib/python3.10/site-packages/cryptography-44.0.2.dist-info/licenses/LICENSE.BSD ADDED
@@ -0,0 +1,27 @@
+ Copyright (c) Individual contributors.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of PyCA Cryptography nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
lib/python3.10/site-packages/csvw-3.5.1.dist-info/LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
lib/python3.10/site-packages/csvw-3.5.1.dist-info/METADATA ADDED
@@ -0,0 +1,301 @@
1
+ Metadata-Version: 2.1
2
+ Name: csvw
3
+ Version: 3.5.1
4
+ Summary: Python library to work with CSVW described tabular data
5
+ Home-page: https://github.com/cldf/csvw
6
+ Author: Robert Forkel
7
+ Author-email: [email protected]
8
+ License: Apache 2.0
9
+ Project-URL: Bug Tracker, https://github.com/cldf/csvw/issues
10
+ Keywords: csv,w3c,tabular-data
11
+ Platform: any
12
+ Classifier: Development Status :: 5 - Production/Stable
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: Intended Audience :: Science/Research
15
+ Classifier: Natural Language :: English
16
+ Classifier: Operating System :: OS Independent
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.8
19
+ Classifier: Programming Language :: Python :: 3.9
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Classifier: Programming Language :: Python :: 3.12
23
+ Classifier: Programming Language :: Python :: 3.13
24
+ Classifier: Programming Language :: Python :: Implementation :: CPython
25
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
26
+ Classifier: License :: OSI Approved :: Apache Software License
27
+ Requires-Python: >=3.8
28
+ Description-Content-Type: text/markdown
29
+ License-File: LICENSE
30
+ Requires-Dist: attrs>=18.1
31
+ Requires-Dist: isodate
32
+ Requires-Dist: python-dateutil
33
+ Requires-Dist: rfc3986<2
34
+ Requires-Dist: uritemplate>=3.0.0
35
+ Requires-Dist: babel
36
+ Requires-Dist: requests
37
+ Requires-Dist: language-tags
38
+ Requires-Dist: rdflib
39
+ Requires-Dist: colorama
40
+ Requires-Dist: jsonschema
41
+ Provides-Extra: dev
42
+ Requires-Dist: flake8; extra == "dev"
43
+ Requires-Dist: wheel; extra == "dev"
44
+ Requires-Dist: twine; extra == "dev"
45
+ Requires-Dist: build; extra == "dev"
46
+ Provides-Extra: docs
47
+ Requires-Dist: sphinx<7; extra == "docs"
48
+ Requires-Dist: sphinx-autodoc-typehints; extra == "docs"
49
+ Requires-Dist: sphinx-rtd-theme; extra == "docs"
50
+ Provides-Extra: test
51
+ Requires-Dist: frictionless; extra == "test"
52
+ Requires-Dist: pytest>=5; extra == "test"
53
+ Requires-Dist: pytest-mock; extra == "test"
54
+ Requires-Dist: requests-mock; extra == "test"
55
+ Requires-Dist: pytest-cov; extra == "test"
56
+
57
+ # csvw
58
+
59
+ [![Build Status](https://github.com/cldf/csvw/workflows/tests/badge.svg)](https://github.com/cldf/csvw/actions?query=workflow%3Atests)
60
+ [![PyPI](https://img.shields.io/pypi/v/csvw.svg)](https://pypi.org/project/csvw)
61
+ [![Documentation Status](https://readthedocs.org/projects/csvw/badge/?version=latest)](https://csvw.readthedocs.io/en/latest/?badge=latest)
62
+
63
+
64
+ This package provides
65
+ - a Python API to read and write relational, tabular data according to the [CSV on the Web](https://csvw.org/) specification and
66
+ - commandline tools for reading and validating CSVW data.
67
+
68
+
69
+ ## Links
70
+
71
+ - GitHub: https://github.com/cldf/csvw
72
+ - PyPI: https://pypi.org/project/csvw
73
+ - Issue Tracker: https://github.com/cldf/csvw/issues
74
+
75
+
76
+ ## Installation
77
+
78
+ This package runs under Python >=3.8; use pip to install:
79
+
80
+ ```bash
81
+ $ pip install csvw
82
+ ```
83
+
84
+
85
+ ## CLI
86
+
87
+ ### `csvw2json`
88
+
89
+ Converting CSVW data [to JSON](https://www.w3.org/TR/csv2json/)
90
+
91
+ ```shell
92
+ $ csvw2json tests/fixtures/zipped-metadata.json
93
+ {
94
+ "tables": [
95
+ {
96
+ "url": "tests/fixtures/zipped.csv",
97
+ "row": [
98
+ {
99
+ "url": "tests/fixtures/zipped.csv#row=2",
100
+ "rownum": 1,
101
+ "describes": [
102
+ {
103
+ "ID": "abc",
104
+ "Value": "the value"
105
+ }
106
+ ]
107
+ },
108
+ {
109
+ "url": "tests/fixtures/zipped.csv#row=3",
110
+ "rownum": 2,
111
+ "describes": [
112
+ {
113
+ "ID": "cde",
114
+ "Value": "another one"
115
+ }
116
+ ]
117
+ }
118
+ ]
119
+ }
120
+ ]
121
+ }
122
+ ```
123
+
124
+ ### `csvwvalidate`
125
+
126
+ Validating CSVW data
127
+
128
+ ```shell
129
+ $ csvwvalidate tests/fixtures/zipped-metadata.json
130
+ OK
131
+ ```
132
+
133
+ ### `csvwdescribe`
134
+
135
+ Describing tabular-data files with CSVW metadata
136
+
137
+ ```shell
138
+ $ csvwdescribe --delimiter "|" tests/fixtures/frictionless-data.csv
139
+ {
140
+ "@context": "http://www.w3.org/ns/csvw",
141
+ "dc:conformsTo": "data-package",
142
+ "tables": [
143
+ {
144
+ "dialect": {
145
+ "delimiter": "|"
146
+ },
147
+ "tableSchema": {
148
+ "columns": [
149
+ {
150
+ "datatype": "string",
151
+ "name": "FK"
152
+ },
153
+ {
154
+ "datatype": "integer",
155
+ "name": "Year"
156
+ },
157
+ {
158
+ "datatype": "string",
159
+ "name": "Location name"
160
+ },
161
+ {
162
+ "datatype": "string",
163
+ "name": "Value"
164
+ },
165
+ {
166
+ "datatype": "string",
167
+ "name": "binary"
168
+ },
169
+ {
170
+ "datatype": "string",
171
+ "name": "anyURI"
172
+ },
173
+ {
174
+ "datatype": "string",
175
+ "name": "email"
176
+ },
177
+ {
178
+ "datatype": "string",
179
+ "name": "boolean"
180
+ },
181
+ {
182
+ "datatype": {
183
+ "dc:format": "application/json",
184
+ "base": "json"
185
+ },
186
+ "name": "array"
187
+ },
188
+ {
189
+ "datatype": {
190
+ "dc:format": "application/json",
191
+ "base": "json"
192
+ },
193
+ "name": "geojson"
194
+ }
195
+ ]
196
+ },
197
+ "url": "tests/fixtures/frictionless-data.csv"
198
+ }
199
+ ]
200
+ }
201
+ ```
202
+
203
+
204
+ ## Python API
205
+
206
+ Find the Python API documentation at [csvw.readthedocs.io](https://csvw.readthedocs.io/en/latest/).
207
+
208
+ A quick example for using `csvw` from Python code:
209
+
210
+ ```python
211
+ import json
212
+ from csvw import CSVW
213
+ data = CSVW('https://raw.githubusercontent.com/cldf/csvw/master/tests/fixtures/test.tsv')
214
+ print(json.dumps(data.to_json(minimal=True), indent=4))
215
+ [
216
+ {
217
+ "province": "Hello",
218
+ "territory": "world",
219
+ "precinct": "1"
220
+ }
221
+ ]
222
+ ```
223
+
224
+
225
+ ## Known limitations
226
+
227
+ - We read **all** data which is specified as UTF-8 encoded using the
228
+ [`utf-8-sig` codecs](https://docs.python.org/3/library/codecs.html#module-encodings.utf_8_sig).
229
+ Thus, if such data starts with `U+FEFF` this will be interpreted as [BOM](https://en.wikipedia.org/wiki/Byte_order_mark)
230
+ and skipped (see the snippet after this list).
231
+ - Low level CSV parsing is delegated to the `csv` module in Python's standard library. Thus, if a `commentPrefix`
232
+ is specified in a `Dialect` instance, this will lead to skipping rows where the first value starts
233
+ with `commentPrefix`, **even if the value was quoted**.
234
+ - Also, cell content containing `escapechar` may not be round-tripped as expected (when specifying
235
+ `escapechar` or a `csvw.Dialect` with `quoteChar` but `doubleQuote==False`),
236
+ when minimal quoting is specified. This is due to inconsistent `csv` behaviour
237
+ across Python versions (see https://bugs.python.org/issue44861).
238
+
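+ For instance, the first limitation above can be demonstrated with the standard library alone (a minimal sketch, no `csvw` required):
+ ```python
+ # Decoding UTF-8 data through the utf-8-sig codec silently drops a leading BOM:
+ print("\ufeffID,Value".encode("utf-8").decode("utf-8-sig"))  # -> 'ID,Value'
+ ```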
239
+
240
+ ## CSVW conformance
241
+
242
+ While we use the CSVW specification as a guideline, this package does not (and
243
+ probably never will) implement the full extent of this spec.
244
+
245
+ - When CSV files with a header are read, columns are not matched in order with
246
+ column descriptions in the `tableSchema`, but instead are matched based on the
247
+ CSV column header and the column descriptions' `name` and `titles` attributes.
248
+ This allows for more flexibility, because columns in the CSV file may be
249
+ re-ordered without invalidating the metadata. A stricter matching can be forced
250
+ by specifying `"header": false` and `"skipRows": 1` in the table's dialect
251
+ description, as sketched below.
252
+
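+ A minimal sketch of that stricter configuration applied to a metadata file (the file name is hypothetical):
+ ```python
+ import json
+
+ with open("csvw-metadata.json") as f:
+     metadata = json.load(f)
+ # Ignore the CSV header row and match columns strictly by position:
+ metadata["tables"][0]["dialect"] = {"header": False, "skipRows": 1}
+ ```
+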
253
+ However, `csvw.CSVW` works correctly for
254
+ - 269 out of 270 [JSON tests](https://w3c.github.io/csvw/tests/#manifest-json),
255
+ - 280 out of 282 [validation tests](https://w3c.github.io/csvw/tests/#manifest-validation),
256
+ - 10 out of 18 [non-normative tests](https://w3c.github.io/csvw/tests/#manifest-nonnorm)
257
+
258
+ from the [CSVW Test suites](https://w3c.github.io/csvw/tests/).
259
+
260
+
261
+ ## Compatibility with [Frictionless Data Specs](https://specs.frictionlessdata.io/)
262
+
263
+ A CSVW-described dataset is basically equivalent to a Frictionless DataPackage where all
264
+ [Data Resources](https://specs.frictionlessdata.io/data-resource/) are [Tabular Data](https://specs.frictionlessdata.io/tabular-data-resource/).
265
+ Thus, the `csvw` package provides some conversion functionality. To
266
+ "read CSVW data from a Data Package", there's the `csvw.TableGroup.from_frictionless_datapackage` method:
267
+ ```python
268
+ from csvw import TableGroup
269
+ tg = TableGroup.from_frictionless_datapackage('PATH/TO/datapackage.json')
270
+ ```
271
+ To convert the metadata, the `TableGroup` can then be serialized:
272
+ ```python
273
+ tg.to_file('csvw-metadata.json')
274
+ ```
275
+
276
+ Note that the CSVW metadata file must be written to the Data Package's directory
277
+ to make sure relative paths to data resources work.
278
+
279
+ This functionality - together with the schema inference capabilities
280
+ of [`frictionless describe`](https://framework.frictionlessdata.io/docs/guides/describing-data/) - provides
281
+ a convenient way to bootstrap CSVW metadata for a set of "raw" CSV
282
+ files, implemented in the [`csvwdescribe` command described above](#csvwdescribe).
283
+
284
+
285
+ ## See also
286
+
287
+ - https://www.w3.org/2013/csvw/wiki/Main_Page
288
+ - https://csvw.org
289
+ - https://github.com/CLARIAH/COW
290
+ - https://github.com/CLARIAH/ruminator
291
+ - https://github.com/bloomberg/pycsvw
292
+ - https://specs.frictionlessdata.io/table-schema/
293
+ - https://github.com/theodi/csvlint.rb
294
+ - https://github.com/ruby-rdf/rdf-tabular
295
+ - https://github.com/rdf-ext/rdf-parser-csvw
296
+ - https://github.com/Robsteranium/csvwr
297
+
298
+
299
+ ## License
300
+
301
+ This package is distributed under the [Apache 2.0 license](https://opensource.org/licenses/Apache-2.0).
lib/python3.10/site-packages/csvw-3.5.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ csvw
lib/python3.10/site-packages/fsspec/implementations/arrow.py ADDED
@@ -0,0 +1,304 @@
1
+ import errno
2
+ import io
3
+ import os
4
+ import secrets
5
+ import shutil
6
+ from contextlib import suppress
7
+ from functools import cached_property, wraps
8
+ from urllib.parse import parse_qs
9
+
10
+ from fsspec.spec import AbstractFileSystem
11
+ from fsspec.utils import (
12
+ get_package_version_without_import,
13
+ infer_storage_options,
14
+ mirror_from,
15
+ tokenize,
16
+ )
17
+
18
+
19
+ def wrap_exceptions(func):
20
+ @wraps(func)
21
+ def wrapper(*args, **kwargs):
22
+ try:
23
+ return func(*args, **kwargs)
24
+ except OSError as exception:
25
+ if not exception.args:
26
+ raise
27
+
28
+ message, *args = exception.args
29
+ if isinstance(message, str) and "does not exist" in message:
30
+ raise FileNotFoundError(errno.ENOENT, message) from exception
31
+ else:
32
+ raise
33
+
34
+ return wrapper
35
+
36
+
37
+ PYARROW_VERSION = None
38
+
39
+
40
+ class ArrowFSWrapper(AbstractFileSystem):
41
+ """FSSpec-compatible wrapper of pyarrow.fs.FileSystem.
42
+
43
+ Parameters
44
+ ----------
45
+ fs : pyarrow.fs.FileSystem
46
+
47
+ """
48
+
49
+ root_marker = "/"
50
+
51
+ def __init__(self, fs, **kwargs):
52
+ global PYARROW_VERSION
53
+ PYARROW_VERSION = get_package_version_without_import("pyarrow")
54
+ self.fs = fs
55
+ super().__init__(**kwargs)
56
+
57
+ @property
58
+ def protocol(self):
59
+ return self.fs.type_name
60
+
61
+ @cached_property
62
+ def fsid(self):
63
+ return "hdfs_" + tokenize(self.fs.host, self.fs.port)
64
+
65
+ @classmethod
66
+ def _strip_protocol(cls, path):
67
+ ops = infer_storage_options(path)
68
+ path = ops["path"]
69
+ if path.startswith("//"):
70
+ # special case for "hdfs://path" (without the triple slash)
71
+ path = path[1:]
72
+ return path
73
+
74
+ def ls(self, path, detail=False, **kwargs):
75
+ path = self._strip_protocol(path)
76
+ from pyarrow.fs import FileSelector
77
+
78
+ entries = [
79
+ self._make_entry(entry)
80
+ for entry in self.fs.get_file_info(FileSelector(path))
81
+ ]
82
+ if detail:
83
+ return entries
84
+ else:
85
+ return [entry["name"] for entry in entries]
86
+
87
+ def info(self, path, **kwargs):
88
+ path = self._strip_protocol(path)
89
+ [info] = self.fs.get_file_info([path])
90
+ return self._make_entry(info)
91
+
92
+ def exists(self, path):
93
+ path = self._strip_protocol(path)
94
+ try:
95
+ self.info(path)
96
+ except FileNotFoundError:
97
+ return False
98
+ else:
99
+ return True
100
+
101
+ def _make_entry(self, info):
102
+ from pyarrow.fs import FileType
103
+
104
+ if info.type is FileType.Directory:
105
+ kind = "directory"
106
+ elif info.type is FileType.File:
107
+ kind = "file"
108
+ elif info.type is FileType.NotFound:
109
+ raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), info.path)
110
+ else:
111
+ kind = "other"
112
+
113
+ return {
114
+ "name": info.path,
115
+ "size": info.size,
116
+ "type": kind,
117
+ "mtime": info.mtime,
118
+ }
119
+
120
+ @wrap_exceptions
121
+ def cp_file(self, path1, path2, **kwargs):
122
+ path1 = self._strip_protocol(path1).rstrip("/")
123
+ path2 = self._strip_protocol(path2).rstrip("/")
124
+
125
+ with self._open(path1, "rb") as lstream:
126
+ tmp_fname = f"{path2}.tmp.{secrets.token_hex(6)}"
127
+ try:
128
+ with self.open(tmp_fname, "wb") as rstream:
129
+ shutil.copyfileobj(lstream, rstream)
130
+ self.fs.move(tmp_fname, path2)
131
+ except BaseException:
132
+ with suppress(FileNotFoundError):
133
+ self.fs.delete_file(tmp_fname)
134
+ raise
135
+
136
+ @wrap_exceptions
137
+ def mv(self, path1, path2, **kwargs):
138
+ path1 = self._strip_protocol(path1).rstrip("/")
139
+ path2 = self._strip_protocol(path2).rstrip("/")
140
+ self.fs.move(path1, path2)
141
+
142
+ @wrap_exceptions
143
+ def rm_file(self, path):
144
+ path = self._strip_protocol(path)
145
+ self.fs.delete_file(path)
146
+
147
+ @wrap_exceptions
148
+ def rm(self, path, recursive=False, maxdepth=None):
149
+ path = self._strip_protocol(path).rstrip("/")
150
+ if self.isdir(path):
151
+ if recursive:
152
+ self.fs.delete_dir(path)
153
+ else:
154
+ raise ValueError("Can't delete directories without recursive=False")
155
+ else:
156
+ self.fs.delete_file(path)
157
+
158
+ @wrap_exceptions
159
+ def _open(self, path, mode="rb", block_size=None, seekable=True, **kwargs):
160
+ if mode == "rb":
161
+ if seekable:
162
+ method = self.fs.open_input_file
163
+ else:
164
+ method = self.fs.open_input_stream
165
+ elif mode == "wb":
166
+ method = self.fs.open_output_stream
167
+ elif mode == "ab":
168
+ method = self.fs.open_append_stream
169
+ else:
170
+ raise ValueError(f"unsupported mode for Arrow filesystem: {mode!r}")
171
+
172
+ _kwargs = {}
173
+ if mode != "rb" or not seekable:
174
+ if int(PYARROW_VERSION.split(".")[0]) >= 4:
175
+ # disable compression auto-detection
176
+ _kwargs["compression"] = None
177
+ stream = method(path, **_kwargs)
178
+
179
+ return ArrowFile(self, stream, path, mode, block_size, **kwargs)
180
+
181
+ @wrap_exceptions
182
+ def mkdir(self, path, create_parents=True, **kwargs):
183
+ path = self._strip_protocol(path)
184
+ if create_parents:
185
+ self.makedirs(path, exist_ok=True)
186
+ else:
187
+ self.fs.create_dir(path, recursive=False)
188
+
189
+ @wrap_exceptions
190
+ def makedirs(self, path, exist_ok=False):
191
+ path = self._strip_protocol(path)
192
+ self.fs.create_dir(path, recursive=True)
193
+
194
+ @wrap_exceptions
195
+ def rmdir(self, path):
196
+ path = self._strip_protocol(path)
197
+ self.fs.delete_dir(path)
198
+
199
+ @wrap_exceptions
200
+ def modified(self, path):
201
+ path = self._strip_protocol(path)
202
+ return self.fs.get_file_info(path).mtime
203
+
204
+ def cat_file(self, path, start=None, end=None, **kwargs):
205
+ kwargs["seekable"] = start not in [None, 0]
206
+ return super().cat_file(path, start=start, end=end, **kwargs)
207
+
208
+ def get_file(self, rpath, lpath, **kwargs):
209
+ kwargs["seekable"] = False
210
+ super().get_file(rpath, lpath, **kwargs)
211
+
212
+
213
+ @mirror_from(
214
+ "stream",
215
+ [
216
+ "read",
217
+ "seek",
218
+ "tell",
219
+ "write",
220
+ "readable",
221
+ "writable",
222
+ "close",
223
+ "size",
224
+ "seekable",
225
+ ],
226
+ )
227
+ class ArrowFile(io.IOBase):
228
+ def __init__(self, fs, stream, path, mode, block_size=None, **kwargs):
229
+ self.path = path
230
+ self.mode = mode
231
+
232
+ self.fs = fs
233
+ self.stream = stream
234
+
235
+ self.blocksize = self.block_size = block_size
236
+ self.kwargs = kwargs
237
+
238
+ def __enter__(self):
239
+ return self
240
+
241
+ def __exit__(self, *args):
242
+ return self.close()
243
+
244
+
245
+ class HadoopFileSystem(ArrowFSWrapper):
246
+ """A wrapper on top of the pyarrow.fs.HadoopFileSystem
247
+ to connect its interface with fsspec"""
248
+
249
+ protocol = "hdfs"
250
+
251
+ def __init__(
252
+ self,
253
+ host="default",
254
+ port=0,
255
+ user=None,
256
+ kerb_ticket=None,
257
+ replication=3,
258
+ extra_conf=None,
259
+ **kwargs,
260
+ ):
261
+ """
262
+
263
+ Parameters
264
+ ----------
265
+ host: str
266
+ Hostname, IP or "default" to try to read from Hadoop config
267
+ port: int
268
+ Port to connect on, or default from Hadoop config if 0
269
+ user: str or None
270
+ If given, connect as this username
271
+ kerb_ticket: str or None
272
+ If given, use this ticket for authentication
273
+ replication: int
274
+ set replication factor of file for write operations. default value is 3.
275
+ extra_conf: None or dict
276
+ Passed on to HadoopFileSystem
277
+ """
278
+ from pyarrow.fs import HadoopFileSystem
279
+
280
+ fs = HadoopFileSystem(
281
+ host=host,
282
+ port=port,
283
+ user=user,
284
+ kerb_ticket=kerb_ticket,
285
+ replication=replication,
286
+ extra_conf=extra_conf,
287
+ )
288
+ super().__init__(fs=fs, **kwargs)
289
+
290
+ @staticmethod
291
+ def _get_kwargs_from_urls(path):
292
+ ops = infer_storage_options(path)
293
+ out = {}
294
+ if ops.get("host", None):
295
+ out["host"] = ops["host"]
296
+ if ops.get("username", None):
297
+ out["user"] = ops["username"]
298
+ if ops.get("port", None):
299
+ out["port"] = ops["port"]
300
+ if ops.get("url_query", None):
301
+ queries = parse_qs(ops["url_query"])
302
+ if queries.get("replication", None):
303
+ out["replication"] = int(queries["replication"][0])
304
+ return out
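
A minimal usage sketch for `ArrowFSWrapper` (assuming `pyarrow` is installed; the paths are hypothetical):

```python
from pyarrow.fs import LocalFileSystem

from fsspec.implementations.arrow import ArrowFSWrapper

# Wrap a pyarrow filesystem so it can be driven through the fsspec API.
fs = ArrowFSWrapper(LocalFileSystem())
fs.makedirs("/tmp/arrow-demo", exist_ok=True)
with fs.open("/tmp/arrow-demo/hello.txt", "wb") as f:
    f.write(b"hello")
print(fs.ls("/tmp/arrow-demo"))  # ['/tmp/arrow-demo/hello.txt']
print(fs.cat_file("/tmp/arrow-demo/hello.txt"))  # b'hello'
```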
lib/python3.10/site-packages/fsspec/implementations/asyn_wrapper.py ADDED
@@ -0,0 +1,103 @@
1
+ import asyncio
2
+ import functools
3
+ import inspect
4
+
5
+ from fsspec.asyn import AsyncFileSystem, running_async
6
+
7
+
8
+ def async_wrapper(func, obj=None):
9
+ """
10
+ Wraps a synchronous function to make it awaitable.
11
+
12
+ Parameters
13
+ ----------
14
+ func : callable
15
+ The synchronous function to wrap.
16
+ obj : object, optional
17
+ The instance to bind the function to, if applicable.
18
+
19
+ Returns
20
+ -------
21
+ coroutine
22
+ An awaitable version of the function.
23
+ """
24
+
25
+ @functools.wraps(func)
26
+ async def wrapper(*args, **kwargs):
27
+ return await asyncio.to_thread(func, *args, **kwargs)
28
+
29
+ return wrapper
30
+
31
+
32
+ class AsyncFileSystemWrapper(AsyncFileSystem):
33
+ """
34
+ A wrapper class to convert a synchronous filesystem into an asynchronous one.
35
+
36
+ This class takes an existing synchronous filesystem implementation and wraps all
37
+ its methods to provide an asynchronous interface.
38
+
39
+ Parameters
40
+ ----------
41
+ sync_fs : AbstractFileSystem
42
+ The synchronous filesystem instance to wrap.
43
+ """
44
+
45
+ protocol = "async_wrapper"
46
+ cachable = False
47
+
48
+ def __init__(self, fs, *args, asynchronous=None, **kwargs):
49
+ if asynchronous is None:
50
+ asynchronous = running_async()
51
+ super().__init__(*args, asynchronous=asynchronous, **kwargs)
52
+ self.sync_fs = fs
53
+ self.protocol = self.sync_fs.protocol
54
+ self._wrap_all_sync_methods()
55
+
56
+ @property
57
+ def fsid(self):
58
+ return f"async_{self.sync_fs.fsid}"
59
+
60
+ def _wrap_all_sync_methods(self):
61
+ """
62
+ Wrap all synchronous methods of the underlying filesystem with asynchronous versions.
63
+ """
64
+ excluded_methods = {"open"}
65
+ for method_name in dir(self.sync_fs):
66
+ if method_name.startswith("_") or method_name in excluded_methods:
67
+ continue
68
+
69
+ attr = inspect.getattr_static(self.sync_fs, method_name)
70
+ if isinstance(attr, property):
71
+ continue
72
+
73
+ method = getattr(self.sync_fs, method_name)
74
+ if callable(method) and not asyncio.iscoroutinefunction(method):
75
+ async_method = async_wrapper(method, obj=self)
76
+ setattr(self, f"_{method_name}", async_method)
77
+
78
+ @classmethod
79
+ def wrap_class(cls, sync_fs_class):
80
+ """
81
+ Create a new class that can be used to instantiate an AsyncFileSystemWrapper
82
+ with lazy instantiation of the underlying synchronous filesystem.
83
+
84
+ Parameters
85
+ ----------
86
+ sync_fs_class : type
87
+ The class of the synchronous filesystem to wrap.
88
+
89
+ Returns
90
+ -------
91
+ type
92
+ A new class that wraps the provided synchronous filesystem class.
93
+ """
94
+
95
+ class GeneratedAsyncFileSystemWrapper(cls):
96
+ def __init__(self, *args, **kwargs):
97
+ sync_fs = sync_fs_class(*args, **kwargs)
98
+ super().__init__(sync_fs)
99
+
100
+ GeneratedAsyncFileSystemWrapper.__name__ = (
101
+ f"Async{sync_fs_class.__name__}Wrapper"
102
+ )
103
+ return GeneratedAsyncFileSystemWrapper
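
A minimal sketch of wrapping a synchronous filesystem (the file path is hypothetical and must exist before running):

```python
import asyncio

from fsspec.implementations.asyn_wrapper import AsyncFileSystemWrapper
from fsspec.implementations.local import LocalFileSystem


async def main():
    # Each sync method (cat_file, ls, ...) gains an awaitable _-prefixed twin.
    afs = AsyncFileSystemWrapper(LocalFileSystem(), asynchronous=True)
    return await afs._cat_file("/tmp/example.txt")


# asyncio.run(main())
```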
lib/python3.10/site-packages/fsspec/implementations/cache_mapper.py ADDED
@@ -0,0 +1,75 @@
1
+ from __future__ import annotations
2
+
3
+ import abc
4
+ import hashlib
5
+
6
+ from fsspec.implementations.local import make_path_posix
7
+
8
+
9
+ class AbstractCacheMapper(abc.ABC):
10
+ """Abstract super-class for mappers from remote URLs to local cached
11
+ basenames.
12
+ """
13
+
14
+ @abc.abstractmethod
15
+ def __call__(self, path: str) -> str: ...
16
+
17
+ def __eq__(self, other: object) -> bool:
18
+ # Identity only depends on class. When derived classes have attributes
19
+ # they will need to be included.
20
+ return isinstance(other, type(self))
21
+
22
+ def __hash__(self) -> int:
23
+ # Identity only depends on class. When derived classes have attributes
24
+ # they will need to be included.
25
+ return hash(type(self))
26
+
27
+
28
+ class BasenameCacheMapper(AbstractCacheMapper):
29
+ """Cache mapper that uses the basename of the remote URL and a fixed number
30
+ of directory levels above this.
31
+
32
+ The default is zero directory levels, meaning different paths with the same
33
+ basename will have the same cached basename.
34
+ """
35
+
36
+ def __init__(self, directory_levels: int = 0):
37
+ if directory_levels < 0:
38
+ raise ValueError(
39
+ "BasenameCacheMapper requires zero or positive directory_levels"
40
+ )
41
+ self.directory_levels = directory_levels
42
+
43
+ # Separator for directories when encoded as strings.
44
+ self._separator = "_@_"
45
+
46
+ def __call__(self, path: str) -> str:
47
+ path = make_path_posix(path)
48
+ prefix, *bits = path.rsplit("/", self.directory_levels + 1)
49
+ if bits:
50
+ return self._separator.join(bits)
51
+ else:
52
+ return prefix # No separator found, simple filename
53
+
54
+ def __eq__(self, other: object) -> bool:
55
+ return super().__eq__(other) and self.directory_levels == other.directory_levels
56
+
57
+ def __hash__(self) -> int:
58
+ return super().__hash__() ^ hash(self.directory_levels)
59
+
60
+
61
+ class HashCacheMapper(AbstractCacheMapper):
62
+ """Cache mapper that uses a hash of the remote URL."""
63
+
64
+ def __call__(self, path: str) -> str:
65
+ return hashlib.sha256(path.encode()).hexdigest()
66
+
67
+
68
+ def create_cache_mapper(same_names: bool) -> AbstractCacheMapper:
69
+ """Factory method to create cache mapper for backward compatibility with
70
+ ``CachingFileSystem`` constructor using ``same_names`` kwarg.
71
+ """
72
+ if same_names:
73
+ return BasenameCacheMapper()
74
+ else:
75
+ return HashCacheMapper()
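
A quick sketch of how the two mappers name cache entries (the path is hypothetical):

```python
from fsspec.implementations.cache_mapper import (
    BasenameCacheMapper,
    HashCacheMapper,
)

path = "/data/2024/file.csv"
print(BasenameCacheMapper()(path))                    # file.csv
print(BasenameCacheMapper(directory_levels=1)(path))  # 2024_@_file.csv
print(HashCacheMapper()(path))                        # 64-char sha256 hex digest
```

The basename mapper keeps cached filenames human-readable at the cost of possible collisions; the hash mapper guarantees distinct names for distinct URLs.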
lib/python3.10/site-packages/fsspec/implementations/cache_metadata.py ADDED
@@ -0,0 +1,232 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import pickle
5
+ import time
6
+ from typing import TYPE_CHECKING
7
+
8
+ from fsspec.utils import atomic_write
9
+
10
+ try:
11
+ import ujson as json
12
+ except ImportError:
13
+ if not TYPE_CHECKING:
14
+ import json
15
+
16
+ if TYPE_CHECKING:
17
+ from typing import Any, Dict, Iterator, Literal
18
+
19
+ from typing_extensions import TypeAlias
20
+
21
+ from .cached import CachingFileSystem
22
+
23
+ Detail: TypeAlias = Dict[str, Any]
24
+
25
+
26
+ class CacheMetadata:
27
+ """Cache metadata.
28
+
29
+ All reading and writing of cache metadata is performed by this class,
30
+ accessing the cached files and blocks is not.
31
+
32
+ Metadata is stored in a single file per storage directory in JSON format.
33
+ For backward compatibility, also reads metadata stored in pickle format
34
+ which is converted to JSON when next saved.
35
+ """
36
+
37
+ def __init__(self, storage: list[str]):
38
+ """
39
+
40
+ Parameters
41
+ ----------
42
+ storage: list[str]
43
+ Directories containing cached files, must be at least one. Metadata
44
+ is stored in the last of these directories by convention.
45
+ """
46
+ if not storage:
47
+ raise ValueError("CacheMetadata expects at least one storage location")
48
+
49
+ self._storage = storage
50
+ self.cached_files: list[Detail] = [{}]
51
+
52
+ # Private attribute to force saving of metadata in pickle format rather than
53
+ # JSON for use in tests to confirm can read both pickle and JSON formats.
54
+ self._force_save_pickle = False
55
+
56
+ def _load(self, fn: str) -> Detail:
57
+ """Low-level function to load metadata from specific file"""
58
+ try:
59
+ with open(fn, "r") as f:
60
+ loaded = json.load(f)
61
+ except ValueError:
62
+ with open(fn, "rb") as f:
63
+ loaded = pickle.load(f)
64
+ for c in loaded.values():
65
+ if isinstance(c.get("blocks"), list):
66
+ c["blocks"] = set(c["blocks"])
67
+ return loaded
68
+
69
+ def _save(self, metadata_to_save: Detail, fn: str) -> None:
70
+ """Low-level function to save metadata to specific file"""
71
+ if self._force_save_pickle:
72
+ with atomic_write(fn) as f:
73
+ pickle.dump(metadata_to_save, f)
74
+ else:
75
+ with atomic_write(fn, mode="w") as f:
76
+ json.dump(metadata_to_save, f)
77
+
78
+ def _scan_locations(
79
+ self, writable_only: bool = False
80
+ ) -> Iterator[tuple[str, str, bool]]:
81
+ """Yield locations (filenames) where metadata is stored, and whether
82
+ writable or not.
83
+
84
+ Parameters
85
+ ----------
86
+ writable_only: bool
87
+ Set to True to only yield writable locations.
88
+
89
+ Returns
90
+ -------
91
+ Yields (str, str, bool)
92
+ """
93
+ n = len(self._storage)
94
+ for i, storage in enumerate(self._storage):
95
+ writable = i == n - 1
96
+ if writable_only and not writable:
97
+ continue
98
+ yield os.path.join(storage, "cache"), storage, writable
99
+
100
+ def check_file(
101
+ self, path: str, cfs: CachingFileSystem | None
102
+ ) -> Literal[False] | tuple[Detail, str]:
103
+ """If path is in cache return its details, otherwise return ``False``.
104
+
105
+ If the optional CachingFileSystem is specified then it is used to
106
+ perform extra checks to reject possible matches, such as if they are
107
+ too old.
108
+ """
109
+ for (fn, base, _), cache in zip(self._scan_locations(), self.cached_files):
110
+ if path not in cache:
111
+ continue
112
+ detail = cache[path].copy()
113
+
114
+ if cfs is not None:
115
+ if cfs.check_files and detail["uid"] != cfs.fs.ukey(path):
116
+ # Wrong file as determined by hash of file properties
117
+ continue
118
+ if cfs.expiry and time.time() - detail["time"] > cfs.expiry:
119
+ # Cached file has expired
120
+ continue
121
+
122
+ fn = os.path.join(base, detail["fn"])
123
+ if os.path.exists(fn):
124
+ return detail, fn
125
+ return False
126
+
127
+ def clear_expired(self, expiry_time: int) -> tuple[list[str], bool]:
128
+ """Remove expired metadata from the cache.
129
+
130
+ Returns names of files corresponding to expired metadata and a boolean
131
+ flag indicating whether the writable cache is empty. Caller is
132
+ responsible for deleting the expired files.
133
+ """
134
+ expired_files = []
135
+ for path, detail in self.cached_files[-1].copy().items():
136
+ if time.time() - detail["time"] > expiry_time:
137
+ fn = detail.get("fn", "")
138
+ if not fn:
139
+ raise RuntimeError(
140
+ f"Cache metadata does not contain 'fn' for {path}"
141
+ )
142
+ fn = os.path.join(self._storage[-1], fn)
143
+ expired_files.append(fn)
144
+ self.cached_files[-1].pop(path)
145
+
146
+ if self.cached_files[-1]:
147
+ cache_path = os.path.join(self._storage[-1], "cache")
148
+ self._save(self.cached_files[-1], cache_path)
149
+
150
+ writable_cache_empty = not self.cached_files[-1]
151
+ return expired_files, writable_cache_empty
152
+
153
+ def load(self) -> None:
154
+ """Load all metadata from disk and store in ``self.cached_files``"""
155
+ cached_files = []
156
+ for fn, _, _ in self._scan_locations():
157
+ if os.path.exists(fn):
158
+ # TODO: consolidate blocks here
159
+ cached_files.append(self._load(fn))
160
+ else:
161
+ cached_files.append({})
162
+ self.cached_files = cached_files or [{}]
163
+
164
+ def on_close_cached_file(self, f: Any, path: str) -> None:
165
+ """Perform side-effect actions on closing a cached file.
166
+
167
+ The actual closing of the file is the responsibility of the caller.
168
+ """
169
+ # File must be writable, so in self.cached_files[-1]
170
+ c = self.cached_files[-1][path]
171
+ if c["blocks"] is not True and len(c["blocks"]) * f.blocksize >= f.size:
172
+ c["blocks"] = True
173
+
174
+ def pop_file(self, path: str) -> str | None:
175
+ """Remove metadata of cached file.
176
+
177
+ If path is in the cache, return the filename of the cached file,
178
+ otherwise return ``None``. Caller is responsible for deleting the
179
+ cached file.
180
+ """
181
+ details = self.check_file(path, None)
182
+ if not details:
183
+ return None
184
+ _, fn = details
185
+ if fn.startswith(self._storage[-1]):
186
+ self.cached_files[-1].pop(path)
187
+ self.save()
188
+ else:
189
+ raise PermissionError(
190
+ "Can only delete cached file in last, writable cache location"
191
+ )
192
+ return fn
193
+
194
+ def save(self) -> None:
195
+ """Save metadata to disk"""
196
+ for (fn, _, writable), cache in zip(self._scan_locations(), self.cached_files):
197
+ if not writable:
198
+ continue
199
+
200
+ if os.path.exists(fn):
201
+ cached_files = self._load(fn)
202
+ for k, c in cached_files.items():
203
+ if k in cache:
204
+ if c["blocks"] is True or cache[k]["blocks"] is True:
205
+ c["blocks"] = True
206
+ else:
207
+ # self.cached_files[*][*]["blocks"] must continue to
208
+ # point to the same set object so that updates
209
+ # performed by MMapCache are propagated back to
210
+ # self.cached_files.
211
+ blocks = cache[k]["blocks"]
212
+ blocks.update(c["blocks"])
213
+ c["blocks"] = blocks
214
+ c["time"] = max(c["time"], cache[k]["time"])
215
+ c["uid"] = cache[k]["uid"]
216
+
217
+ # Files can be added to cache after it was written once
218
+ for k, c in cache.items():
219
+ if k not in cached_files:
220
+ cached_files[k] = c
221
+ else:
222
+ cached_files = cache
223
+ cache = {k: v.copy() for k, v in cached_files.items()}
224
+ for c in cache.values():
225
+ if isinstance(c["blocks"], set):
226
+ c["blocks"] = list(c["blocks"])
227
+ self._save(cache, fn)
228
+ self.cached_files[-1] = cached_files
229
+
230
+ def update_file(self, path: str, detail: Detail) -> None:
231
+ """Update metadata for specific file in memory, do not save"""
232
+ self.cached_files[-1][path] = detail
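
A rough sketch of the save/load round-trip using a scratch directory (the detail field values are illustrative, following the structure used above):

```python
import os
import tempfile

from fsspec.implementations.cache_metadata import CacheMetadata

d = tempfile.mkdtemp()
meta = CacheMetadata([d])
meta.update_file("memory://a", {"fn": "abc", "blocks": True, "time": 0.0, "uid": "x"})
with open(os.path.join(d, "abc"), "wb") as f:
    f.write(b"cached bytes")  # the data file the metadata points at
meta.save()

meta2 = CacheMetadata([d])
meta2.load()
print(meta2.check_file("memory://a", None))  # (detail dict, '/.../abc')
```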
lib/python3.10/site-packages/fsspec/implementations/dask.py ADDED
@@ -0,0 +1,152 @@
1
+ import dask
2
+ from distributed.client import Client, _get_global_client
3
+ from distributed.worker import Worker
4
+
5
+ from fsspec import filesystem
6
+ from fsspec.spec import AbstractBufferedFile, AbstractFileSystem
7
+ from fsspec.utils import infer_storage_options
8
+
9
+
10
+ def _get_client(client):
11
+ if client is None:
12
+ return _get_global_client()
13
+ elif isinstance(client, Client):
14
+ return client
15
+ else:
16
+ # e.g., connection string
17
+ return Client(client)
18
+
19
+
20
+ def _in_worker():
21
+ return bool(Worker._instances)
22
+
23
+
24
+ class DaskWorkerFileSystem(AbstractFileSystem):
25
+ """View files accessible to a worker as any other remote file-system
26
+
27
+ When instances are run on the worker, uses the real filesystem. When
28
+ run on the client, they call the worker to provide information or data.
29
+
30
+ **Warning** this implementation is experimental, and read-only for now.
31
+ """
32
+
33
+ def __init__(
34
+ self, target_protocol=None, target_options=None, fs=None, client=None, **kwargs
35
+ ):
36
+ super().__init__(**kwargs)
37
+ if not (fs is None) ^ (target_protocol is None):
38
+ raise ValueError(
39
+ "Please provide one of filesystem instance (fs) or"
40
+ " target_protocol, not both"
41
+ )
42
+ self.target_protocol = target_protocol
43
+ self.target_options = target_options
44
+ self.worker = None
45
+ self.client = client
46
+ self.fs = fs
47
+ self._determine_worker()
48
+
49
+ @staticmethod
50
+ def _get_kwargs_from_urls(path):
51
+ so = infer_storage_options(path)
52
+ if "host" in so and "port" in so:
53
+ return {"client": f"{so['host']}:{so['port']}"}
54
+ else:
55
+ return {}
56
+
57
+ def _determine_worker(self):
58
+ if _in_worker():
59
+ self.worker = True
60
+ if self.fs is None:
61
+ self.fs = filesystem(
62
+ self.target_protocol, **(self.target_options or {})
63
+ )
64
+ else:
65
+ self.worker = False
66
+ self.client = _get_client(self.client)
67
+ self.rfs = dask.delayed(self)
68
+
69
+ def mkdir(self, *args, **kwargs):
70
+ if self.worker:
71
+ self.fs.mkdir(*args, **kwargs)
72
+ else:
73
+ self.rfs.mkdir(*args, **kwargs).compute()
74
+
75
+ def rm(self, *args, **kwargs):
76
+ if self.worker:
77
+ self.fs.rm(*args, **kwargs)
78
+ else:
79
+ self.rfs.rm(*args, **kwargs).compute()
80
+
81
+ def copy(self, *args, **kwargs):
82
+ if self.worker:
83
+ self.fs.copy(*args, **kwargs)
84
+ else:
85
+ self.rfs.copy(*args, **kwargs).compute()
86
+
87
+ def mv(self, *args, **kwargs):
88
+ if self.worker:
89
+ self.fs.mv(*args, **kwargs)
90
+ else:
91
+ self.rfs.mv(*args, **kwargs).compute()
92
+
93
+ def ls(self, *args, **kwargs):
94
+ if self.worker:
95
+ return self.fs.ls(*args, **kwargs)
96
+ else:
97
+ return self.rfs.ls(*args, **kwargs).compute()
98
+
99
+ def _open(
100
+ self,
101
+ path,
102
+ mode="rb",
103
+ block_size=None,
104
+ autocommit=True,
105
+ cache_options=None,
106
+ **kwargs,
107
+ ):
108
+ if self.worker:
109
+ return self.fs._open(
110
+ path,
111
+ mode=mode,
112
+ block_size=block_size,
113
+ autocommit=autocommit,
114
+ cache_options=cache_options,
115
+ **kwargs,
116
+ )
117
+ else:
118
+ return DaskFile(
119
+ fs=self,
120
+ path=path,
121
+ mode=mode,
122
+ block_size=block_size,
123
+ autocommit=autocommit,
124
+ cache_options=cache_options,
125
+ **kwargs,
126
+ )
127
+
128
+ def fetch_range(self, path, mode, start, end):
129
+ if self.worker:
130
+ with self._open(path, mode) as f:
131
+ f.seek(start)
132
+ return f.read(end - start)
133
+ else:
134
+ return self.rfs.fetch_range(path, mode, start, end).compute()
135
+
136
+
137
+ class DaskFile(AbstractBufferedFile):
138
+ def __init__(self, mode="rb", **kwargs):
139
+ if mode != "rb":
140
+ raise ValueError('Remote dask files can only be opened in "rb" mode')
141
+ super().__init__(**kwargs)
142
+
143
+ def _upload_chunk(self, final=False):
144
+ pass
145
+
146
+ def _initiate_upload(self):
147
+ """Create remote file/upload"""
148
+ pass
149
+
150
+ def _fetch_range(self, start, end):
151
+ """Get the specified set of bytes from remote"""
152
+ return self.fs.fetch_range(self.path, self.mode, start, end)
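
A hedged sketch of client-side use, assuming a running `distributed` cluster (the scheduler address and path are hypothetical, and the backend is experimental and read-only):

```python
import fsspec

# On the client, calls are forwarded to a worker via dask.delayed.
fs = fsspec.filesystem("dask", target_protocol="local", client="tcp://scheduler:8786")
print(fs.ls("/tmp"))
with fs.open("/tmp/example.txt", "rb") as f:
    print(f.read(16))
```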
lib/python3.10/site-packages/fsspec/implementations/data.py ADDED
@@ -0,0 +1,58 @@
1
+ import base64
2
+ import io
3
+ from typing import Optional
4
+ from urllib.parse import unquote
5
+
6
+ from fsspec import AbstractFileSystem
7
+
8
+
9
+ class DataFileSystem(AbstractFileSystem):
10
+ """A handy decoder for data-URLs
11
+
12
+ Example
13
+ -------
14
+ >>> with fsspec.open("data:,Hello%2C%20World%21") as f:
15
+ ... print(f.read())
16
+ b"Hello, World!"
17
+
18
+ See https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs
19
+ """
20
+
21
+ protocol = "data"
22
+
23
+ def __init__(self, **kwargs):
24
+ """No parameters for this filesystem"""
25
+ super().__init__(**kwargs)
26
+
27
+ def cat_file(self, path, start=None, end=None, **kwargs):
28
+ pref, data = path.split(",", 1)
29
+ if pref.endswith("base64"):
30
+ return base64.b64decode(data)[start:end]
31
+ return unquote(data).encode()[start:end]
32
+
33
+ def info(self, path, **kwargs):
34
+ pref, name = path.split(",", 1)
35
+ data = self.cat_file(path)
36
+ mime = pref.split(":", 1)[1].split(";", 1)[0]
37
+ return {"name": name, "size": len(data), "type": "file", "mimetype": mime}
38
+
39
+ def _open(
40
+ self,
41
+ path,
42
+ mode="rb",
43
+ block_size=None,
44
+ autocommit=True,
45
+ cache_options=None,
46
+ **kwargs,
47
+ ):
48
+ if "r" not in mode:
49
+ raise ValueError("Read only filesystem")
50
+ return io.BytesIO(self.cat_file(path))
51
+
52
+ @staticmethod
53
+ def encode(data: bytes, mime: Optional[str] = None):
54
+ """Format the given data into data-URL syntax
55
+
56
+ This version always base64 encodes, even when the data is ascii/url-safe.
57
+ """
58
+ return f"data:{mime or ''};base64,{base64.b64encode(data).decode()}"
lib/python3.10/site-packages/fsspec/implementations/git.py ADDED
@@ -0,0 +1,115 @@
1
+ import os
2
+
3
+ import pygit2
4
+
5
+ from fsspec.spec import AbstractFileSystem
6
+
7
+ from .memory import MemoryFile
8
+
9
+
10
+ class GitFileSystem(AbstractFileSystem):
11
+ """Browse the files of a local git repo at any hash/tag/branch
12
+
13
+ (experimental backend)
14
+ """
15
+
16
+ root_marker = ""
17
+ cachable = True
18
+
19
+ def __init__(self, path=None, fo=None, ref=None, **kwargs):
20
+ """
21
+
22
+ Parameters
23
+ ----------
24
+ path: str (optional)
25
+ Local location of the repo (uses current directory if not given).
26
+ May be deprecated in favour of ``fo``. When used with a higher
27
+ level function such as fsspec.open(), may be of the form
28
+ "git://[path-to-repo[:]][ref@]path/to/file" (but the actual
29
+ file path should not contain "@" or ":").
30
+ fo: str (optional)
31
+ Same as ``path``, but passed as part of a chained URL. This one
32
+ takes precedence if both are given.
33
+ ref: str (optional)
34
+ Reference to work with, could be a hash, tag or branch name. Defaults
35
+ to current working tree. Note that ``ls`` and ``open`` also take hash,
36
+ so this becomes the default for those operations
37
+ kwargs
38
+ """
39
+ super().__init__(**kwargs)
40
+ self.repo = pygit2.Repository(fo or path or os.getcwd())
41
+ self.ref = ref or "master"
42
+
43
+ @classmethod
44
+ def _strip_protocol(cls, path):
45
+ path = super()._strip_protocol(path).lstrip("/")
46
+ if ":" in path:
47
+ path = path.split(":", 1)[1]
48
+ if "@" in path:
49
+ path = path.split("@", 1)[1]
50
+ return path.lstrip("/")
51
+
52
+ def _path_to_object(self, path, ref):
53
+ comm, ref = self.repo.resolve_refish(ref or self.ref)
54
+ parts = path.split("/")
55
+ tree = comm.tree
56
+ for part in parts:
57
+ if part and isinstance(tree, pygit2.Tree):
58
+ if part not in tree:
59
+ raise FileNotFoundError(path)
60
+ tree = tree[part]
61
+ return tree
62
+
63
+ @staticmethod
64
+ def _get_kwargs_from_urls(path):
65
+ if path.startswith("git://"):
66
+ path = path[6:]
67
+ out = {}
68
+ if ":" in path:
69
+ out["path"], path = path.split(":", 1)
70
+ if "@" in path:
71
+ out["ref"], path = path.split("@", 1)
72
+ return out
73
+
74
+ @staticmethod
75
+ def _object_to_info(obj, path=None):
76
+ # obj.name and obj.filemode are None for the root tree!
77
+ is_dir = isinstance(obj, pygit2.Tree)
78
+ return {
79
+ "type": "directory" if is_dir else "file",
80
+ "name": (
81
+ "/".join([path, obj.name or ""]).lstrip("/") if path else obj.name
82
+ ),
83
+ "hex": str(obj.id),
84
+ "mode": "100644" if obj.filemode is None else f"{obj.filemode:o}",
85
+ "size": 0 if is_dir else obj.size,
86
+ }
87
+
88
+ def ls(self, path, detail=True, ref=None, **kwargs):
89
+ tree = self._path_to_object(self._strip_protocol(path), ref)
90
+ return [
91
+ GitFileSystem._object_to_info(obj, path)
92
+ if detail
93
+ else GitFileSystem._object_to_info(obj, path)["name"]
94
+ for obj in (tree if isinstance(tree, pygit2.Tree) else [tree])
95
+ ]
96
+
97
+ def info(self, path, ref=None, **kwargs):
98
+ tree = self._path_to_object(self._strip_protocol(path), ref)
99
+ return GitFileSystem._object_to_info(tree, path)
100
+
101
+ def ukey(self, path, ref=None):
102
+ return self.info(path, ref=ref)["hex"]
103
+
104
+ def _open(
105
+ self,
106
+ path,
107
+ mode="rb",
108
+ block_size=None,
109
+ autocommit=True,
110
+ cache_options=None,
111
+ ref=None,
112
+ **kwargs,
113
+ ):
114
+ obj = self._path_to_object(path, ref or self.ref)
115
+ return MemoryFile(data=obj.data)
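
A sketch of browsing a local clone, assuming `pygit2` is installed and that `/path/to/repo` is a repository with a `main` branch (both hypothetical):

```python
import fsspec

fs = fsspec.filesystem("git", path="/path/to/repo", ref="main")
print(fs.ls(""))  # entries of the tree at ref "main"
with fs.open("README.md") as f:
    print(f.read()[:80])
```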
lib/python3.10/site-packages/fsspec/implementations/github.py ADDED
@@ -0,0 +1,239 @@
1
+ import requests
2
+
3
+ import fsspec
4
+
5
+ from ..spec import AbstractFileSystem
6
+ from ..utils import infer_storage_options
7
+ from .memory import MemoryFile
8
+
9
+ # TODO: add GIST backend, would be very similar
10
+
11
+
12
+ class GithubFileSystem(AbstractFileSystem):
13
+ """Interface to files in github
14
+
15
+ An instance of this class provides the files residing within a remote github
16
+ repository. You may specify a point in the repo's history, by SHA, branch
17
+ or tag (default is current master).
18
+
19
+ Given that code files tend to be small, and that github does not support
20
+ retrieving partial content, we always fetch whole files.
21
+
22
+ When using fsspec.open, allows URIs of the form:
23
+
24
+ - "github://path/file", in which case you must specify org, repo and
25
+ may specify sha in the extra args
26
+ - 'github://org:repo@/precip/catalog.yml', where the org and repo are
27
+ part of the URI
28
+ - 'github://org:repo@sha/precip/catalog.yml', where the sha is also included
29
+
30
+ ``sha`` can be the full or abbreviated hex of the commit you want to fetch
31
+ from, or a branch or tag name (so long as it doesn't contain special characters
32
+ like "/", "?", which would have to be HTTP-encoded).
33
+
34
+ For authorised access, you must provide username and token, which can be made
35
+ at https://github.com/settings/tokens
36
+ """
37
+
38
+ url = "https://api.github.com/repos/{org}/{repo}/git/trees/{sha}"
39
+ rurl = "https://raw.githubusercontent.com/{org}/{repo}/{sha}/{path}"
40
+ protocol = "github"
41
+ timeout = (60, 60) # connect, read timeouts
42
+
43
+ def __init__(
44
+ self, org, repo, sha=None, username=None, token=None, timeout=None, **kwargs
45
+ ):
46
+ super().__init__(**kwargs)
47
+ self.org = org
48
+ self.repo = repo
49
+ if (username is None) ^ (token is None):
50
+ raise ValueError("Auth required both username and token")
51
+ self.username = username
52
+ self.token = token
53
+ if timeout is not None:
54
+ self.timeout = timeout
55
+ if sha is None:
56
+ # look up default branch (not necessarily "master")
57
+ u = "https://api.github.com/repos/{org}/{repo}"
58
+ r = requests.get(
59
+ u.format(org=org, repo=repo), timeout=self.timeout, **self.kw
60
+ )
61
+ r.raise_for_status()
62
+ sha = r.json()["default_branch"]
63
+
64
+ self.root = sha
65
+ self.ls("")
66
+
67
+ @property
68
+ def kw(self):
69
+ if self.username:
70
+ return {"auth": (self.username, self.token)}
71
+ return {}
72
+
73
+ @classmethod
74
+ def repos(cls, org_or_user, is_org=True):
75
+ """List repo names for given org or user
76
+
77
+ This may become the top level of the FS
78
+
79
+ Parameters
80
+ ----------
81
+ org_or_user: str
82
+ Name of the github org or user to query
83
+ is_org: bool (default True)
84
+ Whether the name is an organisation (True) or user (False)
85
+
86
+ Returns
87
+ -------
88
+ List of string
89
+ """
90
+ r = requests.get(
91
+ f"https://api.github.com/{['users', 'orgs'][is_org]}/{org_or_user}/repos",
92
+ timeout=cls.timeout,
93
+ )
94
+ r.raise_for_status()
95
+ return [repo["name"] for repo in r.json()]
96
+
97
+ @property
98
+ def tags(self):
99
+ """Names of tags in the repo"""
100
+ r = requests.get(
101
+ f"https://api.github.com/repos/{self.org}/{self.repo}/tags",
102
+ timeout=self.timeout,
103
+ **self.kw,
104
+ )
105
+ r.raise_for_status()
106
+ return [t["name"] for t in r.json()]
107
+
108
+ @property
109
+ def branches(self):
110
+ """Names of branches in the repo"""
111
+ r = requests.get(
112
+ f"https://api.github.com/repos/{self.org}/{self.repo}/branches",
113
+ timeout=self.timeout,
114
+ **self.kw,
115
+ )
116
+ r.raise_for_status()
117
+ return [t["name"] for t in r.json()]
118
+
119
+ @property
120
+ def refs(self):
121
+ """Named references, tags and branches"""
122
+ return {"tags": self.tags, "branches": self.branches}
123
+
124
+ def ls(self, path, detail=False, sha=None, _sha=None, **kwargs):
125
+ """List files at given path
126
+
127
+ Parameters
128
+ ----------
129
+ path: str
130
+ Location to list, relative to repo root
131
+ detail: bool
132
+ If True, returns list of dicts, one per file; if False, returns
133
+ list of full filenames only
134
+ sha: str (optional)
135
+ List at the given point in the repo history, branch or tag name or commit
136
+ SHA
137
+ _sha: str (optional)
138
+ List this specific tree object (used internally to descend into trees)
139
+ """
140
+ path = self._strip_protocol(path)
141
+ if path == "":
142
+ _sha = sha or self.root
143
+ if _sha is None:
144
+ parts = path.rstrip("/").split("/")
145
+ so_far = ""
146
+ _sha = sha or self.root
147
+ for part in parts:
148
+ out = self.ls(so_far, True, sha=sha, _sha=_sha)
149
+ so_far += "/" + part if so_far else part
150
+ out = [o for o in out if o["name"] == so_far]
151
+ if not out:
152
+ raise FileNotFoundError(path)
153
+ out = out[0]
154
+ if out["type"] == "file":
155
+ if detail:
156
+ return [out]
157
+ else:
158
+ return path
159
+ _sha = out["sha"]
160
+ if path not in self.dircache or sha not in [self.root, None]:
161
+ r = requests.get(
162
+ self.url.format(org=self.org, repo=self.repo, sha=_sha),
163
+ timeout=self.timeout,
164
+ **self.kw,
165
+ )
166
+ if r.status_code == 404:
167
+ raise FileNotFoundError(path)
168
+ r.raise_for_status()
169
+ types = {"blob": "file", "tree": "directory"}
170
+ out = [
171
+ {
172
+ "name": path + "/" + f["path"] if path else f["path"],
173
+ "mode": f["mode"],
174
+ "type": types[f["type"]],
175
+ "size": f.get("size", 0),
176
+ "sha": f["sha"],
177
+ }
178
+ for f in r.json()["tree"]
179
+ if f["type"] in types
180
+ ]
181
+ if sha in [self.root, None]:
182
+ self.dircache[path] = out
183
+ else:
184
+ out = self.dircache[path]
185
+ if detail:
186
+ return out
187
+ else:
188
+ return sorted([f["name"] for f in out])
189
+
190
+ def invalidate_cache(self, path=None):
191
+ self.dircache.clear()
192
+
193
+ @classmethod
194
+ def _strip_protocol(cls, path):
195
+ opts = infer_storage_options(path)
196
+ if "username" not in opts:
197
+ return super()._strip_protocol(path)
198
+ return opts["path"].lstrip("/")
199
+
200
+ @staticmethod
201
+ def _get_kwargs_from_urls(path):
202
+ opts = infer_storage_options(path)
203
+ if "username" not in opts:
204
+ return {}
205
+ out = {"org": opts["username"], "repo": opts["password"]}
206
+ if opts["host"]:
207
+ out["sha"] = opts["host"]
208
+ return out
209
+
210
+ def _open(
211
+ self,
212
+ path,
213
+ mode="rb",
214
+ block_size=None,
215
+ autocommit=True,
216
+ cache_options=None,
217
+ sha=None,
218
+ **kwargs,
219
+ ):
220
+ if mode != "rb":
221
+ raise NotImplementedError
222
+ url = self.rurl.format(
223
+ org=self.org, repo=self.repo, path=path, sha=sha or self.root
224
+ )
225
+ r = requests.get(url, timeout=self.timeout, **self.kw)
226
+ if r.status_code == 404:
227
+ raise FileNotFoundError(path)
228
+ r.raise_for_status()
229
+ return MemoryFile(None, None, r.content)
230
+
231
+ def cat(self, path, recursive=False, on_error="raise", **kwargs):
232
+ paths = self.expand_path(path, recursive=recursive)
233
+ urls = [
234
+ self.rurl.format(org=self.org, repo=self.repo, path=u, sha=self.root)
235
+ for u in paths
236
+ ]
237
+ fs = fsspec.filesystem("http")
238
+ data = fs.cat(urls, on_error="return")
239
+ return {u: v for ((k, v), u) in zip(data.items(), urls)}
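
A sketch of reading from a public repository (requires network access; the repo shown is fsspec's own):

```python
import fsspec

fs = fsspec.filesystem("github", org="fsspec", repo="filesystem_spec")
print(fs.ls("")[:5])  # top-level entries at the default branch
with fs.open("README.md") as f:
    print(f.readline())
```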
lib/python3.10/site-packages/fsspec/implementations/http_sync.py ADDED
@@ -0,0 +1,932 @@
1
+ """This file is largely copied from http.py"""
2
+
3
+ import io
4
+ import logging
5
+ import re
6
+ import urllib.error
7
+ import urllib.parse
8
+ from copy import copy
9
+ from json import dumps, loads
10
+ from urllib.parse import urlparse
11
+
12
+ try:
13
+ import yarl
14
+ except (ImportError, ModuleNotFoundError, OSError):
15
+ yarl = False
16
+
17
+ from fsspec.callbacks import _DEFAULT_CALLBACK
18
+ from fsspec.registry import register_implementation
19
+ from fsspec.spec import AbstractBufferedFile, AbstractFileSystem
20
+ from fsspec.utils import DEFAULT_BLOCK_SIZE, isfilelike, nullcontext, tokenize
21
+
22
+ from ..caching import AllBytes
23
+
24
+ # https://stackoverflow.com/a/15926317/3821154
25
+ ex = re.compile(r"""<(a|A)\s+(?:[^>]*?\s+)?(href|HREF)=["'](?P<url>[^"']+)""")
26
+ ex2 = re.compile(r"""(?P<url>http[s]?://[-a-zA-Z0-9@:%_+.~#?&/=]+)""")
27
+ logger = logging.getLogger("fsspec.http")
28
+
29
+
30
+ class JsHttpException(urllib.error.HTTPError): ...
31
+
32
+
33
+ class StreamIO(io.BytesIO):
34
+ # fake class, so you can set attributes on it
35
+ # will eventually actually stream
36
+ ...
37
+
38
+
39
+ class ResponseProxy:
40
+ """Looks like a requests response"""
41
+
42
+ def __init__(self, req, stream=False):
43
+ self.request = req
44
+ self.stream = stream
45
+ self._data = None
46
+ self._headers = None
47
+
48
+ @property
49
+ def raw(self):
50
+ if self._data is None:
51
+ b = self.request.response.to_bytes()
52
+ if self.stream:
53
+ self._data = StreamIO(b)
54
+ else:
55
+ self._data = b
56
+ return self._data
57
+
58
+ def close(self):
59
+ if hasattr(self, "_data"):
60
+ del self._data
61
+
62
+ @property
63
+ def headers(self):
64
+ if self._headers is None:
65
+ self._headers = dict(
66
+ [
67
+ _.split(": ")
68
+ for _ in self.request.getAllResponseHeaders().strip().split("\r\n")
69
+ ]
70
+ )
71
+ return self._headers
72
+
73
+ @property
74
+ def status_code(self):
75
+ return int(self.request.status)
76
+
77
+ def raise_for_status(self):
78
+ if not self.ok:
79
+ raise JsHttpException(
80
+ self.url, self.status_code, self.reason, self.headers, None
81
+ )
82
+
83
+ def iter_content(self, chunksize, *_, **__):
84
+ while True:
85
+ out = self.raw.read(chunksize)
86
+ if out:
87
+ yield out
88
+ else:
89
+ break
90
+
91
+ @property
92
+ def reason(self):
93
+ return self.request.statusText
94
+
95
+ @property
96
+ def ok(self):
97
+ return self.status_code < 400
98
+
99
+ @property
100
+ def url(self):
101
+ return self.request.response.responseURL
102
+
103
+ @property
104
+ def text(self):
105
+ # TODO: encoding from headers
106
+ return self.content.decode()
107
+
108
+ @property
109
+ def content(self):
110
+ self.stream = False
111
+ return self.raw
112
+
113
+ @property
114
+ def json(self):
115
+ return loads(self.text)
116
+
117
+
118
+ class RequestsSessionShim:
119
+ def __init__(self):
120
+ self.headers = {}
121
+
122
+ def request(
123
+ self,
124
+ method,
125
+ url,
126
+ params=None,
127
+ data=None,
128
+ headers=None,
129
+ cookies=None,
130
+ files=None,
131
+ auth=None,
132
+ timeout=None,
133
+ allow_redirects=None,
134
+ proxies=None,
135
+ hooks=None,
136
+ stream=None,
137
+ verify=None,
138
+ cert=None,
139
+ json=None,
140
+ ):
141
+ from js import Blob, XMLHttpRequest
142
+
143
+ logger.debug("JS request: %s %s", method, url)
144
+
145
+ if cert or verify or proxies or files or cookies or hooks:
146
+ raise NotImplementedError
147
+ if data and json:
148
+ raise ValueError("Use json= or data=, not both")
149
+ req = XMLHttpRequest.new()
150
+ extra = auth if auth else ()
151
+ if params:
152
+ url = f"{url}?{urllib.parse.urlencode(params)}"
153
+ req.open(method, url, False, *extra)
154
+ if timeout:
155
+ req.timeout = timeout
156
+ if headers:
157
+ for k, v in headers.items():
158
+ req.setRequestHeader(k, v)
159
+
160
+ req.setRequestHeader("Accept", "application/octet-stream")
161
+ req.responseType = "arraybuffer"
162
+ if json:
163
+ blob = Blob.new([dumps(json)], {type: "application/json"})  # send the json= payload (data is None in this branch)
164
+ req.send(blob)
165
+ elif data:
166
+ if isinstance(data, io.IOBase):
167
+ data = data.read()
168
+ blob = Blob.new([data], {type: "application/octet-stream"})
169
+ req.send(blob)
170
+ else:
171
+ req.send(None)
172
+ return ResponseProxy(req, stream=stream)
173
+
174
+ def get(self, url, **kwargs):
175
+ return self.request("GET", url, **kwargs)
176
+
177
+ def head(self, url, **kwargs):
178
+ return self.request("HEAD", url, **kwargs)
179
+
180
+ def post(self, url, **kwargs):
181
+ return self.request("POST}", url, **kwargs)
182
+
183
+ def put(self, url, **kwargs):
184
+ return self.request("PUT", url, **kwargs)
185
+
186
+ def patch(self, url, **kwargs):
187
+ return self.request("PATCH", url, **kwargs)
188
+
189
+ def delete(self, url, **kwargs):
190
+ return self.request("DELETE", url, **kwargs)
191
+
192
+
193
+ class HTTPFileSystem(AbstractFileSystem):
194
+ """
195
+ Simple File-System for fetching data via HTTP(S)
196
+
197
+ This is the BLOCKING version of the normal HTTPFileSystem. It uses
198
+ requests in normal python and the JS runtime in pyodide.
199
+
200
+ ***This implementation is extremely experimental, do not use unless
201
+ you are testing pyodide/pyscript integration***
202
+ """
203
+
204
+ protocol = ("http", "https", "sync_http", "sync_https")
205
+ sep = "/"
206
+
207
+ def __init__(
208
+ self,
209
+ simple_links=True,
210
+ block_size=None,
211
+ same_scheme=True,
212
+ cache_type="readahead",
213
+ cache_options=None,
214
+ client_kwargs=None,
215
+ encoded=False,
216
+ **storage_options,
217
+ ):
218
+ """
219
+
220
+ Parameters
221
+ ----------
222
+ block_size: int
223
+ Blocks to read bytes; if 0, will default to raw requests file-like
224
+ objects instead of HTTPFile instances
225
+ simple_links: bool
226
+ If True, will consider both HTML <a> tags and anything that looks
227
+ like a URL; if False, will consider only the former.
228
+ same_scheme: bool
229
+ When doing ls/glob, if this is True, only consider paths that have
230
+ http/https matching the input URLs.
231
+ size_policy: this argument is deprecated
232
+ client_kwargs: dict
233
+ Passed to aiohttp.ClientSession, see
234
+ https://docs.aiohttp.org/en/stable/client_reference.html
235
+ For example, ``{'auth': aiohttp.BasicAuth('user', 'pass')}``
236
+ storage_options: key-value
237
+ Any other parameters passed on to requests
238
+ cache_type, cache_options: defaults used in open
239
+ """
240
+ super().__init__(self, **storage_options)
241
+ self.block_size = block_size if block_size is not None else DEFAULT_BLOCK_SIZE
242
+ self.simple_links = simple_links
243
+ self.same_schema = same_scheme
244
+ self.cache_type = cache_type
245
+ self.cache_options = cache_options
246
+ self.client_kwargs = client_kwargs or {}
247
+ self.encoded = encoded
248
+ self.kwargs = storage_options
249
+
250
+ try:
251
+ import js # noqa: F401
252
+
253
+ logger.debug("Starting JS session")
254
+ self.session = RequestsSessionShim()
255
+ self.js = True
256
+ except Exception as e:
257
+ import requests
258
+
259
+ logger.debug("Starting cpython session because of: %s", e)
260
+ self.session = requests.Session(**(client_kwargs or {}))
261
+ self.js = False
262
+
263
+ request_options = copy(storage_options)
264
+ self.use_listings_cache = request_options.pop("use_listings_cache", False)
265
+ request_options.pop("listings_expiry_time", None)
266
+ request_options.pop("max_paths", None)
267
+ request_options.pop("skip_instance_cache", None)
268
+ self.kwargs = request_options
269
+
270
+ @property
271
+ def fsid(self):
272
+ return "http_sync"
273
+
274
+ def encode_url(self, url):
275
+ if yarl:
276
+ return yarl.URL(url, encoded=self.encoded)
277
+ return url
278
+
279
+ @classmethod
280
+ def _strip_protocol(cls, path: str) -> str:
281
+ """For HTTP, we always want to keep the full URL"""
282
+ path = path.replace("http_sync://", "http://").replace(
283
+ "https_sync://", "https://"
284
+ )
285
+ return path
286
+
287
+ @classmethod
288
+ def _parent(cls, path):
289
+ # override, since _strip_protocol is different for URLs
290
+ par = super()._parent(path)
291
+ if len(par) > 7: # "http://..."
292
+ return par
293
+ return ""
294
+
295
+ def _ls_real(self, url, detail=True, **kwargs):
296
+ # ignoring URL-encoded arguments
297
+ kw = self.kwargs.copy()
298
+ kw.update(kwargs)
299
+ logger.debug(url)
300
+ r = self.session.get(self.encode_url(url), **kw)  # use the merged kwargs built above
301
+ self._raise_not_found_for_status(r, url)
302
+ text = r.text
303
+ if self.simple_links:
304
+ links = ex2.findall(text) + [u[2] for u in ex.findall(text)]
305
+ else:
306
+ links = [u[2] for u in ex.findall(text)]
307
+ out = set()
308
+ parts = urlparse(url)
309
+ for l in links:
310
+ if isinstance(l, tuple):
311
+ l = l[1]
312
+ if l.startswith("/") and len(l) > 1:
313
+ # absolute URL on this server
314
+ l = parts.scheme + "://" + parts.netloc + l
315
+ if l.startswith("http"):
316
+ if self.same_schema and l.startswith(url.rstrip("/") + "/"):
317
+ out.add(l)
318
+ elif l.replace("https", "http").startswith(
319
+ url.replace("https", "http").rstrip("/") + "/"
320
+ ):
321
+ # allowed to cross http <-> https
322
+ out.add(l)
323
+ else:
324
+ if l not in ["..", "../"]:
325
+ # Ignore FTP-like "parent"
326
+ out.add("/".join([url.rstrip("/"), l.lstrip("/")]))
327
+ if not out and url.endswith("/"):
328
+ out = self._ls_real(url.rstrip("/"), detail=False)
329
+ if detail:
330
+ return [
331
+ {
332
+ "name": u,
333
+ "size": None,
334
+ "type": "directory" if u.endswith("/") else "file",
335
+ }
336
+ for u in out
337
+ ]
338
+ else:
339
+ return sorted(out)
340
+
341
+ def ls(self, url, detail=True, **kwargs):
342
+ if self.use_listings_cache and url in self.dircache:
343
+ out = self.dircache[url]
344
+ else:
345
+ out = self._ls_real(url, detail=detail, **kwargs)
346
+ self.dircache[url] = out
347
+ return out
348
+
349
+ def _raise_not_found_for_status(self, response, url):
350
+ """
351
+ Raises FileNotFoundError for 404s, otherwise uses raise_for_status.
352
+ """
353
+ if response.status_code == 404:
354
+ raise FileNotFoundError(url)
355
+ response.raise_for_status()
356
+
357
+ def cat_file(self, url, start=None, end=None, **kwargs):
358
+ kw = self.kwargs.copy()
359
+ kw.update(kwargs)
360
+ logger.debug(url)
361
+
362
+ if start is not None or end is not None:
363
+ if start == end:
364
+ return b""
365
+ headers = kw.pop("headers", {}).copy()
366
+
367
+ headers["Range"] = self._process_limits(url, start, end)
368
+ kw["headers"] = headers
369
+ r = self.session.get(self.encode_url(url), **kw)
370
+ self._raise_not_found_for_status(r, url)
371
+ return r.content
372
+
373
+ def get_file(
374
+ self, rpath, lpath, chunk_size=5 * 2**20, callback=_DEFAULT_CALLBACK, **kwargs
375
+ ):
376
+ kw = self.kwargs.copy()
377
+ kw.update(kwargs)
378
+ logger.debug(rpath)
379
+ r = self.session.get(self.encode_url(rpath), **kw)
380
+ try:
381
+ size = int(
382
+ r.headers.get("content-length", None)
383
+ or r.headers.get("Content-Length", None)
384
+ )
385
+ except (ValueError, KeyError, TypeError):
386
+ size = None
387
+
388
+ callback.set_size(size)
389
+ self._raise_not_found_for_status(r, rpath)
390
+ if not isfilelike(lpath):
391
+ lpath = open(lpath, "wb")
392
+ for chunk in r.iter_content(chunk_size, decode_unicode=False):
393
+ lpath.write(chunk)
394
+ callback.relative_update(len(chunk))
395
+
396
+ def put_file(
397
+ self,
398
+ lpath,
399
+ rpath,
400
+ chunk_size=5 * 2**20,
401
+ callback=_DEFAULT_CALLBACK,
402
+ method="post",
403
+ **kwargs,
404
+ ):
405
+ def gen_chunks():
406
+ # Support passing arbitrary file-like objects
407
+ # and use them instead of streams.
408
+ if isinstance(lpath, io.IOBase):
409
+ context = nullcontext(lpath)
410
+ use_seek = False # might not support seeking
411
+ else:
412
+ context = open(lpath, "rb")
413
+ use_seek = True
414
+
415
+ with context as f:
416
+ if use_seek:
417
+ callback.set_size(f.seek(0, 2))
418
+ f.seek(0)
419
+ else:
420
+ callback.set_size(getattr(f, "size", None))
421
+
422
+ chunk = f.read(chunk_size)
423
+ while chunk:
424
+ yield chunk
425
+ callback.relative_update(len(chunk))
426
+ chunk = f.read(chunk_size)
427
+
428
+ kw = self.kwargs.copy()
429
+ kw.update(kwargs)
430
+
431
+ method = method.lower()
432
+ if method not in ("post", "put"):
433
+ raise ValueError(
434
+ f"method has to be either 'post' or 'put', not: {method!r}"
435
+ )
436
+
437
+ meth = getattr(self.session, method)
438
+ resp = meth(rpath, data=gen_chunks(), **kw)
439
+ self._raise_not_found_for_status(resp, rpath)
440
+
441
+ def _process_limits(self, url, start, end):
442
+ """Helper for "Range"-based _cat_file"""
443
+ size = None
444
+ suff = False
445
+ if start is not None and start < 0:
446
+ # if start is negative and end None, end is the "suffix length"
447
+ if end is None:
448
+ end = -start
449
+ start = ""
450
+ suff = True
451
+ else:
452
+ size = size or self.info(url)["size"]
453
+ start = size + start
454
+ elif start is None:
455
+ start = 0
456
+ if not suff:
457
+ if end is not None and end < 0:
458
+ if start is not None:
459
+ size = size or self.info(url)["size"]
460
+ end = size + end
461
+ elif end is None:
462
+ end = ""
463
+ if isinstance(end, int):
464
+ end -= 1 # bytes range is inclusive
465
+ return f"bytes={start}-{end}"
466
+
467
+ def exists(self, path, **kwargs):
468
+ kw = self.kwargs.copy()
469
+ kw.update(kwargs)
470
+ try:
471
+ logger.debug(path)
472
+ r = self.session.get(self.encode_url(path), **kw)
473
+ return r.status_code < 400
474
+ except Exception:
475
+ return False
476
+
477
+ def isfile(self, path, **kwargs):
478
+ return self.exists(path, **kwargs)
479
+
480
+ def _open(
481
+ self,
482
+ path,
483
+ mode="rb",
484
+ block_size=None,
485
+ autocommit=None, # XXX: This differs from the base class.
486
+ cache_type=None,
487
+ cache_options=None,
488
+ size=None,
489
+ **kwargs,
490
+ ):
491
+ """Make a file-like object
492
+
493
+ Parameters
494
+ ----------
495
+ path: str
496
+ Full URL with protocol
497
+ mode: string
498
+ must be "rb"
499
+ block_size: int or None
500
+ Bytes to download in one request; use instance value if None. If
501
+ zero, will return a streaming Requests file-like instance.
502
+ kwargs: key-value
503
+ Any other parameters, passed to requests calls
504
+ """
505
+ if mode != "rb":
506
+ raise NotImplementedError
507
+ block_size = block_size if block_size is not None else self.block_size
508
+ kw = self.kwargs.copy()
509
+ kw.update(kwargs)
510
+ size = size or self.info(path, **kwargs)["size"]
511
+ if block_size and size:
512
+ return HTTPFile(
513
+ self,
514
+ path,
515
+ session=self.session,
516
+ block_size=block_size,
517
+ mode=mode,
518
+ size=size,
519
+ cache_type=cache_type or self.cache_type,
520
+ cache_options=cache_options or self.cache_options,
521
+ **kw,
522
+ )
523
+ else:
524
+ return HTTPStreamFile(
525
+ self,
526
+ path,
527
+ mode=mode,
528
+ session=self.session,
529
+ **kw,
530
+ )
531
+
532
+ def ukey(self, url):
533
+ """Unique identifier; assume HTTP files are static, unchanging"""
534
+ return tokenize(url, self.kwargs, self.protocol)
535
+
536
+ def info(self, url, **kwargs):
537
+ """Get info of URL
538
+
539
+ Tries to access location via HEAD, and then GET methods, but does
540
+ not fetch the data.
541
+
542
+ It is possible that the server does not supply any size information, in
543
+ which case size will be given as None (and certain operations on the
544
+ corresponding file will not work).
545
+ """
546
+ info = {}
547
+ for policy in ["head", "get"]:
548
+ try:
549
+ info.update(
550
+ _file_info(
551
+ self.encode_url(url),
552
+ size_policy=policy,
553
+ session=self.session,
554
+ **self.kwargs,
555
+ **kwargs,
556
+ )
557
+ )
558
+ if info.get("size") is not None:
559
+ break
560
+ except Exception as exc:
561
+ if policy == "get":
562
+ # If get failed, then raise a FileNotFoundError
563
+ raise FileNotFoundError(url) from exc
564
+ logger.debug(str(exc))
565
+
566
+ return {"name": url, "size": None, **info, "type": "file"}
567
+
568
+ def glob(self, path, maxdepth=None, **kwargs):
569
+ """
570
+ Find files by glob-matching.
571
+
572
+ This implementation is identical to the one in AbstractFileSystem,
573
+ but "?" is not considered as a character for globbing, because it is
574
+ so common in URLs, often identifying the "query" part.
575
+ """
576
+ import re
577
+
578
+ ends = path.endswith("/")
579
+ path = self._strip_protocol(path)
580
+ indstar = path.find("*") if path.find("*") >= 0 else len(path)
581
+ indbrace = path.find("[") if path.find("[") >= 0 else len(path)
582
+
583
+ ind = min(indstar, indbrace)
584
+
585
+ detail = kwargs.pop("detail", False)
586
+
587
+ if not has_magic(path):
588
+ root = path
589
+ depth = 1
590
+ if ends:
591
+ path += "/*"
592
+ elif self.exists(path):
593
+ if not detail:
594
+ return [path]
595
+ else:
596
+ return {path: self.info(path)}
597
+ else:
598
+ if not detail:
599
+ return [] # glob of non-existent returns empty
600
+ else:
601
+ return {}
602
+ elif "/" in path[:ind]:
603
+ ind2 = path[:ind].rindex("/")
604
+ root = path[: ind2 + 1]
605
+ depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1
606
+ else:
607
+ root = ""
608
+ depth = None if "**" in path else path[ind + 1 :].count("/") + 1
609
+
610
+ allpaths = self.find(
611
+ root, maxdepth=maxdepth or depth, withdirs=True, detail=True, **kwargs
612
+ )
613
+ # Escape characters special to python regex, leaving our supported
614
+ # special characters in place.
615
+ # See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html
616
+ # for shell globbing details.
617
+ pattern = (
618
+ "^"
619
+ + (
620
+ path.replace("\\", r"\\")
621
+ .replace(".", r"\.")
622
+ .replace("+", r"\+")
623
+ .replace("//", "/")
624
+ .replace("(", r"\(")
625
+ .replace(")", r"\)")
626
+ .replace("|", r"\|")
627
+ .replace("^", r"\^")
628
+ .replace("$", r"\$")
629
+ .replace("{", r"\{")
630
+ .replace("}", r"\}")
631
+ .rstrip("/")
632
+ )
633
+ + "$"
634
+ )
635
+ pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern)
636
+ pattern = re.sub("[*]", "[^/]*", pattern)
637
+ pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*"))
638
+ out = {
639
+ p: allpaths[p]
640
+ for p in sorted(allpaths)
641
+ if pattern.match(p.replace("//", "/").rstrip("/"))
642
+ }
643
+ if detail:
644
+ return out
645
+ else:
646
+ return list(out)
647
+
648
+ def isdir(self, path):
649
+ # override, since all URLs are (also) files
650
+ try:
651
+ return bool(self.ls(path))
652
+ except (FileNotFoundError, ValueError):
653
+ return False
654
+
655
+
656
+ class HTTPFile(AbstractBufferedFile):
657
+ """
658
+ A file-like object pointing to a remote HTTP(S) resource
659
+
660
+ Supports only reading, with read-ahead of a predetermined block-size.
661
+
662
+ In the case that the server does not supply the filesize, only reading of
663
+ the complete file in one go is supported.
664
+
665
+ Parameters
666
+ ----------
667
+ url: str
668
+ Full URL of the remote resource, including the protocol
669
+ session: requests.Session or None
670
+ All calls will be made within this session, to avoid restarting
671
+ connections where the server allows this
672
+ block_size: int or None
673
+ The amount of read-ahead to do, in bytes. Default is 5MB, or the value
674
+ configured for the FileSystem creating this file
675
+ size: None or int
676
+ If given, this is the size of the file in bytes, and we don't attempt
677
+ to call the server to find the value.
678
+ kwargs: all other key-values are passed to requests calls.
679
+ """
680
+
681
+ def __init__(
682
+ self,
683
+ fs,
684
+ url,
685
+ session=None,
686
+ block_size=None,
687
+ mode="rb",
688
+ cache_type="bytes",
689
+ cache_options=None,
690
+ size=None,
691
+ **kwargs,
692
+ ):
693
+ if mode != "rb":
694
+ raise NotImplementedError("File mode not supported")
695
+ self.url = url
696
+ self.session = session
697
+ self.details = {"name": url, "size": size, "type": "file"}
698
+ super().__init__(
699
+ fs=fs,
700
+ path=url,
701
+ mode=mode,
702
+ block_size=block_size,
703
+ cache_type=cache_type,
704
+ cache_options=cache_options,
705
+ **kwargs,
706
+ )
707
+
708
+ def read(self, length=-1):
709
+ """Read bytes from file
710
+
711
+ Parameters
712
+ ----------
713
+ length: int
714
+ Read up to this many bytes. If negative, read all content to end of
715
+ file. If the server has not supplied the filesize, attempting to
716
+ read only part of the data will raise a ValueError.
717
+ """
718
+ if (
719
+ (length < 0 and self.loc == 0) # explicit read all
720
+ # but not when the size is known and fits into a block anyways
721
+ and not (self.size is not None and self.size <= self.blocksize)
722
+ ):
723
+ self._fetch_all()
724
+ if self.size is None:
725
+ if length < 0:
726
+ self._fetch_all()
727
+ else:
728
+ length = min(self.size - self.loc, length)
729
+ return super().read(length)
730
+
731
+ def _fetch_all(self):
732
+ """Read whole file in one shot, without caching
733
+
734
+ This is only called when position is still at zero,
735
+ and read() is called without a byte-count.
736
+ """
737
+ logger.debug(f"Fetch all for {self}")
738
+ if not isinstance(self.cache, AllBytes):
739
+ r = self.session.get(self.fs.encode_url(self.url), **self.kwargs)
740
+ r.raise_for_status()
741
+ out = r.content
742
+ self.cache = AllBytes(size=len(out), fetcher=None, blocksize=None, data=out)
743
+ self.size = len(out)
744
+
745
+ def _parse_content_range(self, headers):
746
+ """Parse the Content-Range header"""
747
+ s = headers.get("Content-Range", "")
748
+ m = re.match(r"bytes (\d+-\d+|\*)/(\d+|\*)", s)
749
+ if not m:
750
+ return None, None, None
751
+
752
+ if m[1] == "*":
753
+ start = end = None
754
+ else:
755
+ start, end = [int(x) for x in m[1].split("-")]
756
+ total = None if m[2] == "*" else int(m[2])
757
+ return start, end, total
758
+
759
+ def _fetch_range(self, start, end):
760
+ """Download a block of data
761
+
762
+ The expectation is that the server returns only the requested bytes,
763
+ with HTTP code 206. If this is not the case, we first check the headers,
764
+ and then stream the output - if the data size is bigger than we
765
+ requested, an exception is raised.
766
+ """
767
+ logger.debug(f"Fetch range for {self}: {start}-{end}")
768
+ kwargs = self.kwargs.copy()
769
+ headers = kwargs.pop("headers", {}).copy()
770
+ headers["Range"] = f"bytes={start}-{end - 1}"
771
+ logger.debug("%s : %s", self.url, headers["Range"])
772
+ r = self.session.get(self.fs.encode_url(self.url), headers=headers, **kwargs)
773
+ if r.status_code == 416:
774
+ # range request outside file
775
+ return b""
776
+ r.raise_for_status()
777
+
778
+ # If the server has handled the range request, it should reply
779
+ # with status 206 (partial content). But we'll guess that a suitable
780
+ # Content-Range header or a Content-Length no more than the
781
+ # requested range also mean we have got the desired range.
782
+ cl = r.headers.get("Content-Length", r.headers.get("content-length", end + 1))
783
+ response_is_range = (
784
+ r.status_code == 206
785
+ or self._parse_content_range(r.headers)[0] == start
786
+ or int(cl) <= end - start
787
+ )
788
+
789
+ if response_is_range:
790
+ # partial content, as expected
791
+ out = r.content
792
+ elif start > 0:
793
+ raise ValueError(
794
+ "The HTTP server doesn't appear to support range requests. "
795
+ "Only reading this file from the beginning is supported. "
796
+ "Open with block_size=0 for a streaming file interface."
797
+ )
798
+ else:
799
+ # Response is not a range, but we want the start of the file,
800
+ # so we can read the required amount anyway.
801
+ cl = 0
802
+ out = []
803
+ for chunk in r.iter_content(2**20, False):
804
+ out.append(chunk)
805
+ cl += len(chunk)
806
+ out = b"".join(out)[: end - start]
807
+ return out
808
+
809
+
810
+ magic_check = re.compile("([*[])")
811
+
812
+
813
+ def has_magic(s):
814
+ match = magic_check.search(s)
815
+ return match is not None
816
+
817
+
818
+ class HTTPStreamFile(AbstractBufferedFile):
819
+ def __init__(self, fs, url, mode="rb", session=None, **kwargs):
820
+ self.url = url
821
+ self.session = session
822
+ if mode != "rb":
823
+ raise ValueError
824
+ self.details = {"name": url, "size": None}
825
+ super().__init__(fs=fs, path=url, mode=mode, cache_type="readahead", **kwargs)
826
+
827
+ r = self.session.get(self.fs.encode_url(url), stream=True, **kwargs)
828
+ self.fs._raise_not_found_for_status(r, url)
829
+ self.it = r.iter_content(1024, False)
830
+ self.leftover = b""
831
+
832
+ self.r = r
833
+
834
+ def seek(self, *args, **kwargs):
835
+ raise ValueError("Cannot seek streaming HTTP file")
836
+
837
+ def read(self, num=-1):
838
+ bufs = [self.leftover]
839
+ leng = len(self.leftover)
840
+ while leng < num or num < 0:
841
+ try:
842
+ out = self.it.__next__()
843
+ except StopIteration:
844
+ break
845
+ if out:
846
+ bufs.append(out)
847
+ else:
848
+ break
849
+ leng += len(out)
850
+ out = b"".join(bufs)
851
+ if num >= 0:
852
+ self.leftover = out[num:]
853
+ out = out[:num]
854
+ else:
855
+ self.leftover = b""
856
+ self.loc += len(out)
857
+ return out
858
+
859
+ def close(self):
860
+ self.r.close()
861
+ self.closed = True
862
+
863
+
864
+ def get_range(session, url, start, end, **kwargs):
865
+ # explicit get a range when we know it must be safe
866
+ kwargs = kwargs.copy()
867
+ headers = kwargs.pop("headers", {}).copy()
868
+ headers["Range"] = f"bytes={start}-{end - 1}"
869
+ r = session.get(url, headers=headers, **kwargs)
870
+ r.raise_for_status()
871
+ return r.content
872
+
873
+
874
+ def _file_info(url, session, size_policy="head", **kwargs):
875
+ """Call HEAD on the server to get details about the file (size/checksum etc.)
876
+
877
+ Default operation is to explicitly allow redirects and use encoding
878
+ 'identity' (no compression) to get the true size of the target.
879
+ """
880
+ logger.debug("Retrieve file size for %s", url)
881
+ kwargs = kwargs.copy()
882
+ ar = kwargs.pop("allow_redirects", True)
883
+ head = kwargs.get("headers", {}).copy()
884
+ # TODO: not allowed in JS
885
+ # head["Accept-Encoding"] = "identity"
886
+ kwargs["headers"] = head
887
+
888
+ info = {}
889
+ if size_policy == "head":
890
+ r = session.head(url, allow_redirects=ar, **kwargs)
891
+ elif size_policy == "get":
892
+ r = session.get(url, allow_redirects=ar, **kwargs)
893
+ else:
894
+ raise TypeError(f'size_policy must be "head" or "get", got {size_policy}')
895
+ r.raise_for_status()
896
+
897
+ # TODO:
898
+ # recognise lack of 'Accept-Ranges',
899
+ # or 'Accept-Ranges': 'none' (not 'bytes')
900
+ # to mean streaming only, no random access => return None
901
+ if "Content-Length" in r.headers:
902
+ info["size"] = int(r.headers["Content-Length"])
903
+ elif "Content-Range" in r.headers:
904
+ info["size"] = int(r.headers["Content-Range"].split("/")[1])
905
+ elif "content-length" in r.headers:
906
+ info["size"] = int(r.headers["content-length"])
907
+ elif "content-range" in r.headers:
908
+ info["size"] = int(r.headers["content-range"].split("/")[1])
909
+
910
+ for checksum_field in ["ETag", "Content-MD5", "Digest"]:
911
+ if r.headers.get(checksum_field):
912
+ info[checksum_field] = r.headers[checksum_field]
913
+
914
+ return info
915
+
916
+
917
+ # importing this is enough to register it
918
+ def register():
919
+ register_implementation("http", HTTPFileSystem, clobber=True)
920
+ register_implementation("https", HTTPFileSystem, clobber=True)
921
+ register_implementation("sync_http", HTTPFileSystem, clobber=True)
922
+ register_implementation("sync_https", HTTPFileSystem, clobber=True)
923
+
924
+
925
+ register()
926
+
927
+
928
+ def unregister():
929
+ from fsspec.implementations.http import HTTPFileSystem
930
+
931
+ register_implementation("http", HTTPFileSystem, clobber=True)
932
+ register_implementation("https", HTTPFileSystem, clobber=True)
lib/python3.10/site-packages/fsspec/implementations/jupyter.py ADDED
@@ -0,0 +1,124 @@
1
+ import base64
2
+ import io
3
+ import re
4
+
5
+ import requests
6
+
7
+ import fsspec
8
+
9
+
10
+ class JupyterFileSystem(fsspec.AbstractFileSystem):
11
+ """View of the files as seen by a Jupyter server (notebook or lab)"""
12
+
13
+ protocol = ("jupyter", "jlab")
14
+
15
+ def __init__(self, url, tok=None, **kwargs):
16
+ """
17
+
18
+ Parameters
19
+ ----------
20
+ url : str
21
+ Base URL of the server, like "http://127.0.0.1:8888". May include
22
+ token in the string, which is given by the process when starting up
23
+ tok : str
24
+ If the token is obtained separately, can be given here
25
+ kwargs
26
+ """
27
+ if "?" in url:
28
+ if tok is None:
29
+ try:
30
+ tok = re.findall("token=([a-z0-9]+)", url)[0]
31
+ except IndexError as e:
32
+ raise ValueError("Could not determine token") from e
33
+ url = url.split("?", 1)[0]
34
+ self.url = url.rstrip("/") + "/api/contents"
35
+ self.session = requests.Session()
36
+ if tok:
37
+ self.session.headers["Authorization"] = f"token {tok}"
38
+
39
+ super().__init__(**kwargs)
40
+
41
+ def ls(self, path, detail=True, **kwargs):
42
+ path = self._strip_protocol(path)
43
+ r = self.session.get(f"{self.url}/{path}")
44
+ if r.status_code == 404:
45
+ raise FileNotFoundError(path)
46
+ r.raise_for_status()
47
+ out = r.json()
48
+
49
+ if out["type"] == "directory":
50
+ out = out["content"]
51
+ else:
52
+ out = [out]
53
+ for o in out:
54
+ o["name"] = o.pop("path")
55
+ o.pop("content")
56
+ if o["type"] == "notebook":
57
+ o["type"] = "file"
58
+ if detail:
59
+ return out
60
+ return [o["name"] for o in out]
61
+
62
+ def cat_file(self, path, start=None, end=None, **kwargs):
63
+ path = self._strip_protocol(path)
64
+ r = self.session.get(f"{self.url}/{path}")
65
+ if r.status_code == 404:
66
+ raise FileNotFoundError(path)
67
+ r.raise_for_status()
68
+ out = r.json()
69
+ if out["format"] == "text":
70
+ # data should be binary
71
+ b = out["content"].encode()
72
+ else:
73
+ b = base64.b64decode(out["content"])
74
+ return b[start:end]
75
+
76
+ def pipe_file(self, path, value, **_):
77
+ path = self._strip_protocol(path)
78
+ json = {
79
+ "name": path.rsplit("/", 1)[-1],
80
+ "path": path,
81
+ "size": len(value),
82
+ "content": base64.b64encode(value).decode(),
83
+ "format": "base64",
84
+ "type": "file",
85
+ }
86
+ self.session.put(f"{self.url}/{path}", json=json)
87
+
88
+ def mkdir(self, path, create_parents=True, **kwargs):
89
+ path = self._strip_protocol(path)
90
+ if create_parents and "/" in path:
91
+ self.mkdir(path.rsplit("/", 1)[0], True)
92
+ json = {
93
+ "name": path.rsplit("/", 1)[-1],
94
+ "path": path,
95
+ "size": None,
96
+ "content": None,
97
+ "type": "directory",
98
+ }
99
+ self.session.put(f"{self.url}/{path}", json=json)
100
+
101
+ def _rm(self, path):
102
+ path = self._strip_protocol(path)
103
+ self.session.delete(f"{self.url}/{path}")
104
+
105
+ def _open(self, path, mode="rb", **kwargs):
106
+ path = self._strip_protocol(path)
107
+ if mode == "rb":
108
+ data = self.cat_file(path)
109
+ return io.BytesIO(data)
110
+ else:
111
+ return SimpleFileWriter(self, path, mode="wb")
112
+
113
+
114
+ class SimpleFileWriter(fsspec.spec.AbstractBufferedFile):
115
+ def _upload_chunk(self, final=False):
116
+ """Never uploads a chunk until file is done
117
+
118
+ Not suitable for large files
119
+ """
120
+ if final is False:
121
+ return False
122
+ self.buffer.seek(0)
123
+ data = self.buffer.read()
124
+ self.fs.pipe_file(self.path, data)
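
A hedged sketch of using the JupyterFileSystem above; the URL and token are placeholders for a running notebook/lab server:

    import fsspec

    fs = fsspec.filesystem("jupyter", url="http://127.0.0.1:8888", tok="<token>")
    print(fs.ls("", detail=False))      # names at the server's root
    fs.pipe_file("demo.txt", b"hello")  # upload via the contents API
    print(fs.cat_file("demo.txt"))      # b'hello'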
lib/python3.10/site-packages/fsspec/implementations/tar.py ADDED
@@ -0,0 +1,124 @@
1
+ import logging
2
+ import tarfile
3
+
4
+ import fsspec
5
+ from fsspec.archive import AbstractArchiveFileSystem
6
+ from fsspec.compression import compr
7
+ from fsspec.utils import infer_compression
8
+
9
+ typemap = {b"0": "file", b"5": "directory"}
10
+
11
+ logger = logging.getLogger("tar")
12
+
13
+
14
+ class TarFileSystem(AbstractArchiveFileSystem):
15
+ """Compressed Tar archives as a file-system (read-only)
16
+
17
+ Supports the following formats:
18
+ tar.gz, tar.bz2, tar.xz
19
+ """
20
+
21
+ root_marker = ""
22
+ protocol = "tar"
23
+ cachable = False
24
+
25
+ def __init__(
26
+ self,
27
+ fo="",
28
+ index_store=None,
29
+ target_options=None,
30
+ target_protocol=None,
31
+ compression=None,
32
+ **kwargs,
33
+ ):
34
+ super().__init__(**kwargs)
35
+ target_options = target_options or {}
36
+
37
+ if isinstance(fo, str):
38
+ self.of = fsspec.open(fo, protocol=target_protocol, **target_options)
39
+ fo = self.of.open() # keep the reference
40
+
41
+ # Try to infer compression.
42
+ if compression is None:
43
+ name = None
44
+
45
+ # Try different ways to get hold of the filename. `fo` might either
46
+ # be a `fsspec.LocalFileOpener`, an `io.BufferedReader` or an
47
+ # `fsspec.AbstractFileSystem` instance.
48
+ try:
49
+ # Amended io.BufferedReader or similar.
50
+ # This uses a "protocol extension" where original filenames are
51
+ # propagated to archive-like filesystems in order to let them
52
+ # infer the right compression appropriately.
53
+ if hasattr(fo, "original"):
54
+ name = fo.original
55
+
56
+ # fsspec.LocalFileOpener
57
+ elif hasattr(fo, "path"):
58
+ name = fo.path
59
+
60
+ # io.BufferedReader
61
+ elif hasattr(fo, "name"):
62
+ name = fo.name
63
+
64
+ # fsspec.AbstractFileSystem
65
+ elif hasattr(fo, "info"):
66
+ name = fo.info()["name"]
67
+
68
+ except Exception as ex:
69
+ logger.warning(
70
+ f"Unable to determine file name, not inferring compression: {ex}"
71
+ )
72
+
73
+ if name is not None:
74
+ compression = infer_compression(name)
75
+ logger.info(f"Inferred compression {compression} from file name {name}")
76
+
77
+ if compression is not None:
78
+ # TODO: tarfile already implements compression with modes like "'r:gz'",
79
+ # but then would seek to offset in the file work?
80
+ fo = compr[compression](fo)
81
+
82
+ self._fo_ref = fo
83
+ self.fo = fo # the whole instance is a context
84
+ self.tar = tarfile.TarFile(fileobj=self.fo)
85
+ self.dir_cache = None
86
+
87
+ self.index_store = index_store
88
+ self.index = None
89
+ self._index()
90
+
91
+ def _index(self):
92
+ # TODO: load and set saved index, if exists
93
+ out = {}
94
+ for ti in self.tar:
95
+ info = ti.get_info()
96
+ info["type"] = typemap.get(info["type"], "file")
97
+ name = ti.get_info()["name"].rstrip("/")
98
+ out[name] = (info, ti.offset_data)
99
+
100
+ self.index = out
101
+ # TODO: save index to self.index_store here, if set
102
+
103
+ def _get_dirs(self):
104
+ if self.dir_cache is not None:
105
+ return
106
+
107
+ # This enables ls to get directories as children as well as files
108
+ self.dir_cache = {
109
+ dirname: {"name": dirname, "size": 0, "type": "directory"}
110
+ for dirname in self._all_dirnames(self.tar.getnames())
111
+ }
112
+ for member in self.tar.getmembers():
113
+ info = member.get_info()
114
+ info["name"] = info["name"].rstrip("/")
115
+ info["type"] = typemap.get(info["type"], "file")
116
+ self.dir_cache[info["name"]] = info
117
+
118
+ def _open(self, path, mode="rb", **kwargs):
119
+ if mode != "rb":
120
+ raise ValueError("Read-only filesystem implementation")
121
+ details, offset = self.index[path]
122
+ if details["type"] != "file":
123
+ raise ValueError("Can only handle regular files")
124
+ return self.tar.extractfile(path)
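
A self-contained sketch of reading through the TarFileSystem above; the archive is created on the spot, so nothing external is assumed:

    import io
    import tarfile

    import fsspec

    with tarfile.open("example.tar", "w") as t:
        info = tarfile.TarInfo("hello.txt")
        payload = b"hello world"
        info.size = len(payload)
        t.addfile(info, io.BytesIO(payload))

    fs = fsspec.filesystem("tar", fo="example.tar")
    print(fs.ls("/", detail=False))  # ['hello.txt']
    with fs.open("hello.txt") as f:
        print(f.read())              # b'hello world'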
lib/python3.10/site-packages/fsspec/implementations/webhdfs.py ADDED
@@ -0,0 +1,485 @@
1
+ # https://hadoop.apache.org/docs/r1.0.4/webhdfs.html
2
+
3
+ import logging
4
+ import os
5
+ import secrets
6
+ import shutil
7
+ import tempfile
8
+ import uuid
9
+ from contextlib import suppress
10
+ from urllib.parse import quote
11
+
12
+ import requests
13
+
14
+ from ..spec import AbstractBufferedFile, AbstractFileSystem
15
+ from ..utils import infer_storage_options, tokenize
16
+
17
+ logger = logging.getLogger("webhdfs")
18
+
19
+
20
+ class WebHDFS(AbstractFileSystem):
21
+ """
22
+ Interface to HDFS over HTTP using the WebHDFS API. Supports also HttpFS gateways.
23
+
24
+ Four auth mechanisms are supported:
25
+
26
+ insecure: no auth is done, and the user is assumed to be whoever they
27
+ say they are (parameter ``user``), or a predefined value such as
28
+ "dr.who" if not given
29
+ spnego: when kerberos authentication is enabled, auth is negotiated by
30
+ requests_kerberos https://github.com/requests/requests-kerberos .
31
+ This establishes a session based on existing kinit login and/or
32
+ specified principal/password; parameters are passed with ``kerb_kwargs``
33
+ token: uses an existing Hadoop delegation token from another secured
34
+ service. Indeed, this client can also generate such tokens when
35
+ not insecure. Note that tokens expire, but can be renewed (by a
36
+ previously specified user) and may allow for proxying.
37
+ basic-auth: used when both parameter ``user`` and parameter ``password``
38
+ are provided.
39
+
40
+ """
41
+
42
+ tempdir = str(tempfile.gettempdir())
43
+ protocol = "webhdfs", "webHDFS"
44
+
45
+ def __init__(
46
+ self,
47
+ host,
48
+ port=50070,
49
+ kerberos=False,
50
+ token=None,
51
+ user=None,
52
+ password=None,
53
+ proxy_to=None,
54
+ kerb_kwargs=None,
55
+ data_proxy=None,
56
+ use_https=False,
57
+ session_cert=None,
58
+ session_verify=True,
59
+ **kwargs,
60
+ ):
61
+ """
62
+ Parameters
63
+ ----------
64
+ host: str
65
+ Name-node address
66
+ port: int
67
+ Port for webHDFS
68
+ kerberos: bool
69
+ Whether to authenticate with kerberos for this connection
70
+ token: str or None
71
+ If given, use this token on every call to authenticate. A user
72
+ and user-proxy may be encoded in the token and should not be also
73
+ given
74
+ user: str or None
75
+ If given, assert the user name to connect with
76
+ password: str or None
77
+ If given, assert the password to use for basic auth. If password
78
+ is provided, user must be provided also
79
+ proxy_to: str or None
80
+ If given, the user has the authority to proxy, and this value is
81
+ the user in whose name actions are taken
82
+ kerb_kwargs: dict
83
+ Any extra arguments for HTTPKerberosAuth, see
84
+ `<https://github.com/requests/requests-kerberos/blob/master/requests_kerberos/kerberos_.py>`_
85
+ data_proxy: dict, callable or None
86
+ If given, map data-node addresses. This can be necessary if the
87
+ HDFS cluster is behind a proxy, running on Docker or otherwise has
88
+ a mismatch between the host-names given by the name-node and the
89
+ address by which to refer to them from the client. If a dict,
90
+ maps host names ``host->data_proxy[host]``; if a callable, full
91
+ URLs are passed, and function must conform to
92
+ ``url->data_proxy(url)``.
93
+ use_https: bool
94
+ Whether to connect to the Name-node using HTTPS instead of HTTP
95
+ session_cert: str or Tuple[str, str] or None
96
+ Path to a certificate file, or tuple of (cert, key) files to use
97
+ for the requests.Session
98
+ session_verify: str, bool or None
99
+ Path to a certificate file to use for verifying the requests.Session.
100
+ kwargs
101
+ """
102
+ if self._cached:
103
+ return
104
+ super().__init__(**kwargs)
105
+ self.url = f"{'https' if use_https else 'http'}://{host}:{port}/webhdfs/v1"
106
+ self.kerb = kerberos
107
+ self.kerb_kwargs = kerb_kwargs or {}
108
+ self.pars = {}
109
+ self.proxy = data_proxy or {}
110
+ if token is not None:
111
+ if user is not None or proxy_to is not None:
112
+ raise ValueError(
113
+ "If passing a delegation token, must not set "
114
+ "user or proxy_to, as these are encoded in the"
115
+ " token"
116
+ )
117
+ self.pars["delegation"] = token
118
+ self.user = user
119
+ self.password = password
120
+
121
+ if password is not None:
122
+ if user is None:
123
+ raise ValueError(
124
+ "If passing a password, the user must also be"
125
+ "set in order to set up the basic-auth"
126
+ )
127
+ else:
128
+ if user is not None:
129
+ self.pars["user.name"] = user
130
+
131
+ if proxy_to is not None:
132
+ self.pars["doas"] = proxy_to
133
+ if kerberos and user is not None:
134
+ raise ValueError(
135
+ "If using Kerberos auth, do not specify the "
136
+ "user, this is handled by kinit."
137
+ )
138
+
139
+ self.session_cert = session_cert
140
+ self.session_verify = session_verify
141
+
142
+ self._connect()
143
+
144
+ self._fsid = f"webhdfs_{tokenize(host, port)}"
145
+
146
+ @property
147
+ def fsid(self):
148
+ return self._fsid
149
+
150
+ def _connect(self):
151
+ self.session = requests.Session()
152
+
153
+ if self.session_cert:
154
+ self.session.cert = self.session_cert
155
+
156
+ self.session.verify = self.session_verify
157
+
158
+ if self.kerb:
159
+ from requests_kerberos import HTTPKerberosAuth
160
+
161
+ self.session.auth = HTTPKerberosAuth(**self.kerb_kwargs)
162
+
163
+ if self.user is not None and self.password is not None:
164
+ from requests.auth import HTTPBasicAuth
165
+
166
+ self.session.auth = HTTPBasicAuth(self.user, self.password)
167
+
168
+ def _call(self, op, method="get", path=None, data=None, redirect=True, **kwargs):
169
+ path = self._strip_protocol(path) if path is not None else ""
170
+ url = self._apply_proxy(self.url + quote(path, safe="/="))
171
+ args = kwargs.copy()
172
+ args.update(self.pars)
173
+ args["op"] = op.upper()
174
+ logger.debug("sending %s with %s", url, method)
175
+ out = self.session.request(
176
+ method=method.upper(),
177
+ url=url,
178
+ params=args,
179
+ data=data,
180
+ allow_redirects=redirect,
181
+ )
182
+ if out.status_code in [400, 401, 403, 404, 500]:
183
+ try:
184
+ err = out.json()
185
+ msg = err["RemoteException"]["message"]
186
+ exp = err["RemoteException"]["exception"]
187
+ except (ValueError, KeyError):
188
+ pass
189
+ else:
190
+ if exp in ["IllegalArgumentException", "UnsupportedOperationException"]:
191
+ raise ValueError(msg)
192
+ elif exp in ["SecurityException", "AccessControlException"]:
193
+ raise PermissionError(msg)
194
+ elif exp in ["FileNotFoundException"]:
195
+ raise FileNotFoundError(msg)
196
+ else:
197
+ raise RuntimeError(msg)
198
+ out.raise_for_status()
199
+ return out
200
+
201
+ def _open(
202
+ self,
203
+ path,
204
+ mode="rb",
205
+ block_size=None,
206
+ autocommit=True,
207
+ replication=None,
208
+ permissions=None,
209
+ **kwargs,
210
+ ):
211
+ """
212
+
213
+ Parameters
214
+ ----------
215
+ path: str
216
+ File location
217
+ mode: str
218
+ 'rb', 'wb', etc.
219
+ block_size: int
220
+ Client buffer size for read-ahead or write buffer
221
+ autocommit: bool
222
+ If False, writes to temporary file that only gets put in final
223
+ location upon commit
224
+ replication: int
225
+ Number of copies of file on the cluster, write mode only
226
+ permissions: str or int
227
+ posix permissions, write mode only
228
+ kwargs
229
+
230
+ Returns
231
+ -------
232
+ WebHDFile instance
233
+ """
234
+ block_size = block_size or self.blocksize
235
+ return WebHDFile(
236
+ self,
237
+ path,
238
+ mode=mode,
239
+ block_size=block_size,
240
+ tempdir=self.tempdir,
241
+ autocommit=autocommit,
242
+ replication=replication,
243
+ permissions=permissions,
244
+ )
245
+
246
+ @staticmethod
247
+ def _process_info(info):
248
+ info["type"] = info["type"].lower()
249
+ info["size"] = info["length"]
250
+ return info
251
+
252
+ @classmethod
253
+ def _strip_protocol(cls, path):
254
+ return infer_storage_options(path)["path"]
255
+
256
+ @staticmethod
257
+ def _get_kwargs_from_urls(urlpath):
258
+ out = infer_storage_options(urlpath)
259
+ out.pop("path", None)
260
+ out.pop("protocol", None)
261
+ if "username" in out:
262
+ out["user"] = out.pop("username")
263
+ return out
264
+
265
+ def info(self, path):
266
+ out = self._call("GETFILESTATUS", path=path)
267
+ info = out.json()["FileStatus"]
268
+ info["name"] = path
269
+ return self._process_info(info)
270
+
271
+ def ls(self, path, detail=False):
272
+ out = self._call("LISTSTATUS", path=path)
273
+ infos = out.json()["FileStatuses"]["FileStatus"]
274
+ for info in infos:
275
+ self._process_info(info)
276
+ info["name"] = path.rstrip("/") + "/" + info["pathSuffix"]
277
+ if detail:
278
+ return sorted(infos, key=lambda i: i["name"])
279
+ else:
280
+ return sorted(info["name"] for info in infos)
281
+
282
+ def content_summary(self, path):
283
+ """Total numbers of files, directories and bytes under path"""
284
+ out = self._call("GETCONTENTSUMMARY", path=path)
285
+ return out.json()["ContentSummary"]
286
+
287
+ def ukey(self, path):
288
+ """Checksum info of file, giving method and result"""
289
+ out = self._call("GETFILECHECKSUM", path=path, redirect=False)
290
+ if "Location" in out.headers:
291
+ location = self._apply_proxy(out.headers["Location"])
292
+ out2 = self.session.get(location)
293
+ out2.raise_for_status()
294
+ return out2.json()["FileChecksum"]
295
+ else:
296
+ out.raise_for_status()
297
+ return out.json()["FileChecksum"]
298
+
299
+ def home_directory(self):
300
+ """Get user's home directory"""
301
+ out = self._call("GETHOMEDIRECTORY")
302
+ return out.json()["Path"]
303
+
304
+ def get_delegation_token(self, renewer=None):
305
+ """Retrieve token which can give the same authority to other uses
306
+
307
+ Parameters
308
+ ----------
309
+ renewer: str or None
310
+ User who may use this token; if None, will be current user
311
+ """
312
+ if renewer:
313
+ out = self._call("GETDELEGATIONTOKEN", renewer=renewer)
314
+ else:
315
+ out = self._call("GETDELEGATIONTOKEN")
316
+ t = out.json()["Token"]
317
+ if t is None:
318
+ raise ValueError("No token available for this user/security context")
319
+ return t["urlString"]
320
+
321
+ def renew_delegation_token(self, token):
322
+ """Make token live longer. Returns new expiry time"""
323
+ out = self._call("RENEWDELEGATIONTOKEN", method="put", token=token)
324
+ return out.json()["long"]
325
+
326
+ def cancel_delegation_token(self, token):
327
+ """Stop the token from being useful"""
328
+ self._call("CANCELDELEGATIONTOKEN", method="put", token=token)
329
+
330
+ def chmod(self, path, mod):
331
+ """Set the permission at path
332
+
333
+ Parameters
334
+ ----------
335
+ path: str
336
+ location to set (file or directory)
337
+ mod: str or int
338
+ posix representation of permission, given as an octal string, e.g. '777'
339
+ or 0o777
340
+ """
341
+ self._call("SETPERMISSION", method="put", path=path, permission=mod)
342
+
343
+ def chown(self, path, owner=None, group=None):
344
+ """Change owning user and/or group"""
345
+ kwargs = {}
346
+ if owner is not None:
347
+ kwargs["owner"] = owner
348
+ if group is not None:
349
+ kwargs["group"] = group
350
+ self._call("SETOWNER", method="put", path=path, **kwargs)
351
+
352
+ def set_replication(self, path, replication):
353
+ """
354
+ Set file replication factor
355
+
356
+ Parameters
357
+ ----------
358
+ path: str
359
+ File location (not for directories)
360
+ replication: int
361
+ Number of copies of file on the cluster. Should be smaller than
362
+ number of data nodes; normally 3 on most systems.
363
+ """
364
+ self._call("SETREPLICATION", path=path, method="put", replication=replication)
365
+
366
+ def mkdir(self, path, **kwargs):
367
+ self._call("MKDIRS", method="put", path=path)
368
+
369
+ def makedirs(self, path, exist_ok=False):
370
+ if exist_ok is False and self.exists(path):
371
+ raise FileExistsError(path)
372
+ self.mkdir(path)
373
+
374
+ def mv(self, path1, path2, **kwargs):
375
+ self._call("RENAME", method="put", path=path1, destination=path2)
376
+
377
+ def rm(self, path, recursive=False, **kwargs):
378
+ self._call(
379
+ "DELETE",
380
+ method="delete",
381
+ path=path,
382
+ recursive="true" if recursive else "false",
383
+ )
384
+
385
+ def rm_file(self, path, **kwargs):
386
+ self.rm(path)
387
+
388
+ def cp_file(self, lpath, rpath, **kwargs):
389
+ with self.open(lpath) as lstream:
390
+ tmp_fname = "/".join([self._parent(rpath), f".tmp.{secrets.token_hex(16)}"])
391
+ # Perform an atomic copy (stream to a temporary file and
392
+ # move it to the actual destination).
393
+ try:
394
+ with self.open(tmp_fname, "wb") as rstream:
395
+ shutil.copyfileobj(lstream, rstream)
396
+ self.mv(tmp_fname, rpath)
397
+ except BaseException:
398
+ with suppress(FileNotFoundError):
399
+ self.rm(tmp_fname)
400
+ raise
401
+
402
+ def _apply_proxy(self, location):
403
+ if self.proxy and callable(self.proxy):
404
+ location = self.proxy(location)
405
+ elif self.proxy:
406
+ # as a dict
407
+ for k, v in self.proxy.items():
408
+ location = location.replace(k, v, 1)
409
+ return location
410
+
411
+
412
+ class WebHDFile(AbstractBufferedFile):
413
+ """A file living in HDFS over webHDFS"""
414
+
415
+ def __init__(self, fs, path, **kwargs):
416
+ super().__init__(fs, path, **kwargs)
417
+ kwargs = kwargs.copy()
418
+ if kwargs.get("permissions", None) is None:
419
+ kwargs.pop("permissions", None)
420
+ if kwargs.get("replication", None) is None:
421
+ kwargs.pop("replication", None)
422
+ self.permissions = kwargs.pop("permissions", 511)
423
+ tempdir = kwargs.pop("tempdir")
424
+ if kwargs.pop("autocommit", False) is False:
425
+ self.target = self.path
426
+ self.path = os.path.join(tempdir, str(uuid.uuid4()))
427
+
428
+ def _upload_chunk(self, final=False):
429
+ """Write one part of a multi-block file upload
430
+
431
+ Parameters
432
+ ==========
433
+ final: bool
434
+ This is the last block, so should complete file, if
435
+ self.autocommit is True.
436
+ """
437
+ out = self.fs.session.post(
438
+ self.location,
439
+ data=self.buffer.getvalue(),
440
+ headers={"content-type": "application/octet-stream"},
441
+ )
442
+ out.raise_for_status()
443
+ return True
444
+
445
+ def _initiate_upload(self):
446
+ """Create remote file/upload"""
447
+ kwargs = self.kwargs.copy()
448
+ if "a" in self.mode:
449
+ op, method = "APPEND", "POST"
450
+ else:
451
+ op, method = "CREATE", "PUT"
452
+ kwargs["overwrite"] = "true"
453
+ out = self.fs._call(op, method, self.path, redirect=False, **kwargs)
454
+ location = self.fs._apply_proxy(out.headers["Location"])
455
+ if "w" in self.mode:
456
+ # create empty file to append to
457
+ out2 = self.fs.session.put(
458
+ location, headers={"content-type": "application/octet-stream"}
459
+ )
460
+ out2.raise_for_status()
461
+ # after creating empty file, change location to append to
462
+ out2 = self.fs._call("APPEND", "POST", self.path, redirect=False, **kwargs)
463
+ self.location = self.fs._apply_proxy(out2.headers["Location"])
464
+
465
+ def _fetch_range(self, start, end):
466
+ start = max(start, 0)
467
+ end = min(self.size, end)
468
+ if start >= end or start >= self.size:
469
+ return b""
470
+ out = self.fs._call(
471
+ "OPEN", path=self.path, offset=start, length=end - start, redirect=False
472
+ )
473
+ out.raise_for_status()
474
+ if "Location" in out.headers:
475
+ location = out.headers["Location"]
476
+ out2 = self.fs.session.get(self.fs._apply_proxy(location))
477
+ return out2.content
478
+ else:
479
+ return out.content
480
+
481
+ def commit(self):
482
+ self.fs.mv(self.path, self.target)
483
+
484
+ def discard(self):
485
+ self.fs.rm(self.path)
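
A hedged usage sketch of the WebHDFS class above; the host and user are placeholders for a real cluster (9870 is the usual HTTP port on Hadoop 3.x, 50070 on 2.x):

    import fsspec

    fs = fsspec.filesystem(
        "webhdfs", host="namenode.example.com", port=9870, user="hadoop"
    )
    fs.mkdir("/tmp/demo")
    with fs.open("/tmp/demo/blob.bin", "wb") as f:
        f.write(b"\x00" * 1024)
    print(fs.ls("/tmp/demo", detail=False))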
lib/python3.10/site-packages/fsspec/implementations/zip.py ADDED
@@ -0,0 +1,177 @@
1
+ import os
2
+ import zipfile
3
+
4
+ import fsspec
5
+ from fsspec.archive import AbstractArchiveFileSystem
6
+
7
+
8
+ class ZipFileSystem(AbstractArchiveFileSystem):
9
+ """Read/Write contents of ZIP archive as a file-system
10
+
11
+ Keeps file object open while instance lives.
12
+
13
+ This class is pickleable, but not necessarily thread-safe
14
+ """
15
+
16
+ root_marker = ""
17
+ protocol = "zip"
18
+ cachable = False
19
+
20
+ def __init__(
21
+ self,
22
+ fo="",
23
+ mode="r",
24
+ target_protocol=None,
25
+ target_options=None,
26
+ compression=zipfile.ZIP_STORED,
27
+ allowZip64=True,
28
+ compresslevel=None,
29
+ **kwargs,
30
+ ):
31
+ """
32
+ Parameters
33
+ ----------
34
+ fo: str or file-like
35
+ Contains ZIP, and must exist. If a str, will fetch file using
36
+ :meth:`~fsspec.open_files`, which must return one file exactly.
37
+ mode: str
38
+ Accept: "r", "w", "a"
39
+ target_protocol: str (optional)
40
+ If ``fo`` is a string, this value can be used to override the
41
+ FS protocol inferred from a URL
42
+ target_options: dict (optional)
43
+ Kwargs passed when instantiating the target FS, if ``fo`` is
44
+ a string.
45
+ compression, allowZip64, compresslevel: passed to ZipFile
46
+ Only relevant when creating a ZIP
47
+ """
48
+ super().__init__(self, **kwargs)
49
+ if mode not in set("rwa"):
50
+ raise ValueError(f"mode '{mode}' no understood")
51
+ self.mode = mode
52
+ if isinstance(fo, (str, os.PathLike)):
53
+ if mode == "a":
54
+ m = "r+b"
55
+ else:
56
+ m = mode + "b"
57
+ fo = fsspec.open(
58
+ fo, mode=m, protocol=target_protocol, **(target_options or {})
59
+ )
60
+ self.force_zip_64 = allowZip64
61
+ self.of = fo
62
+ self.fo = fo.__enter__() # the whole instance is a context
63
+ self.zip = zipfile.ZipFile(
64
+ self.fo,
65
+ mode=mode,
66
+ compression=compression,
67
+ allowZip64=allowZip64,
68
+ compresslevel=compresslevel,
69
+ )
70
+ self.dir_cache = None
71
+
72
+ @classmethod
73
+ def _strip_protocol(cls, path):
74
+ # zip file paths are always relative to the archive root
75
+ return super()._strip_protocol(path).lstrip("/")
76
+
77
+ def __del__(self):
78
+ if hasattr(self, "zip"):
79
+ self.close()
80
+ del self.zip
81
+
82
+ def close(self):
83
+ """Commits any write changes to the file. Done on ``del`` too."""
84
+ self.zip.close()
85
+
86
+ def _get_dirs(self):
87
+ if self.dir_cache is None or self.mode in set("wa"):
88
+ # when writing, dir_cache is always in the ZipFile's attributes,
89
+ # not read from the file.
90
+ files = self.zip.infolist()
91
+ self.dir_cache = {
92
+ dirname.rstrip("/"): {
93
+ "name": dirname.rstrip("/"),
94
+ "size": 0,
95
+ "type": "directory",
96
+ }
97
+ for dirname in self._all_dirnames(self.zip.namelist())
98
+ }
99
+ for z in files:
100
+ f = {s: getattr(z, s, None) for s in zipfile.ZipInfo.__slots__}
101
+ f.update(
102
+ {
103
+ "name": z.filename.rstrip("/"),
104
+ "size": z.file_size,
105
+ "type": ("directory" if z.is_dir() else "file"),
106
+ }
107
+ )
108
+ self.dir_cache[f["name"]] = f
109
+
110
+ def pipe_file(self, path, value, **kwargs):
111
+ # override upstream, because we know the exact file size in this case
112
+ self.zip.writestr(path, value, **kwargs)
113
+
114
+ def _open(
115
+ self,
116
+ path,
117
+ mode="rb",
118
+ block_size=None,
119
+ autocommit=True,
120
+ cache_options=None,
121
+ **kwargs,
122
+ ):
123
+ path = self._strip_protocol(path)
124
+ if "r" in mode and self.mode in set("wa"):
125
+ if self.exists(path):
126
+ raise OSError("ZipFS can only be open for reading or writing, not both")
127
+ raise FileNotFoundError(path)
128
+ if "r" in self.mode and "w" in mode:
129
+ raise OSError("ZipFS can only be open for reading or writing, not both")
130
+ out = self.zip.open(path, mode.strip("b"), force_zip64=self.force_zip_64)
131
+ if "r" in mode:
132
+ info = self.info(path)
133
+ out.size = info["size"]
134
+ out.name = info["name"]
135
+ return out
136
+
137
+ def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
138
+ if maxdepth is not None and maxdepth < 1:
139
+ raise ValueError("maxdepth must be at least 1")
140
+
141
+ # Remove the leading slash, as the zip file paths are always
142
+ # given without a leading slash
143
+ path = path.lstrip("/")
144
+ path_parts = list(filter(lambda s: bool(s), path.split("/")))
145
+
146
+ def _matching_starts(file_path):
147
+ file_parts = filter(lambda s: bool(s), file_path.split("/"))
148
+ return all(a == b for a, b in zip(path_parts, file_parts))
149
+
150
+ self._get_dirs()
151
+
152
+ result = {}
153
+ # To match posix find, if an exact file name is given, we should
154
+ # return only that file
155
+ if path in self.dir_cache and self.dir_cache[path]["type"] == "file":
156
+ result[path] = self.dir_cache[path]
157
+ return result if detail else [path]
158
+
159
+ for file_path, file_info in self.dir_cache.items():
160
+ if not (path == "" or _matching_starts(file_path)):
161
+ continue
162
+
163
+ if file_info["type"] == "directory":
164
+ if withdirs:
165
+ if file_path not in result:
166
+ result[file_path.strip("/")] = file_info
167
+ continue
168
+
169
+ if file_path not in result:
170
+ result[file_path] = file_info if detail else None
171
+
172
+ if maxdepth:
173
+ path_depth = path.count("/")
174
+ result = {
175
+ k: v for k, v in result.items() if k.count("/") - path_depth < maxdepth
176
+ }
177
+ return result if detail else sorted(result)
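
Finally, a sketch of round-tripping data through the ZipFileSystem above; "archive.zip" is a local placeholder path. The explicit close() in write mode is what commits the archive, per the close() docstring:

    import fsspec

    fs = fsspec.filesystem("zip", fo="archive.zip", mode="w")
    fs.pipe_file("a/b.txt", b"zip contents")  # writestr with known size
    fs.close()                                # finish and flush the archive

    fs = fsspec.filesystem("zip", fo="archive.zip")  # read mode by default
    print(fs.find("", detail=False))          # ['a/b.txt']
    print(fs.cat_file("a/b.txt"))             # b'zip contents'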
lib/python3.10/site-packages/fsspec/tests/abstract/__init__.py ADDED
@@ -0,0 +1,289 @@
+ import os
+ from hashlib import md5
+
+ import pytest
+
+ from fsspec.implementations.local import LocalFileSystem
+ from fsspec.tests.abstract.copy import AbstractCopyTests  # noqa: F401
+ from fsspec.tests.abstract.get import AbstractGetTests  # noqa: F401
+ from fsspec.tests.abstract.open import AbstractOpenTests  # noqa: F401
+ from fsspec.tests.abstract.pipe import AbstractPipeTests  # noqa: F401
+ from fsspec.tests.abstract.put import AbstractPutTests  # noqa: F401
+
+
+ class BaseAbstractFixtures:
+     """
+     Abstract base class containing fixtures that are used by but never need to
+     be overridden in derived filesystem-specific classes to run the abstract
+     tests on such filesystems.
+     """
+
+     @pytest.fixture
+     def fs_bulk_operations_scenario_0(self, fs, fs_join, fs_path):
+         """
+         Scenario on remote filesystem that is used for many cp/get/put tests.
+
+         Cleans up at the end of each test in which it is used.
+         """
+         source = self._bulk_operations_scenario_0(fs, fs_join, fs_path)
+         yield source
+         fs.rm(source, recursive=True)
+
+     @pytest.fixture
+     def fs_glob_edge_cases_files(self, fs, fs_join, fs_path):
+         """
+         Scenario on remote filesystem that is used for glob edge cases cp/get/put tests.
+
+         Cleans up at the end of each test in which it is used.
+         """
+         source = self._glob_edge_cases_files(fs, fs_join, fs_path)
+         yield source
+         fs.rm(source, recursive=True)
+
+     @pytest.fixture
+     def fs_dir_and_file_with_same_name_prefix(self, fs, fs_join, fs_path):
+         """
+         Scenario on remote filesystem that is used to check cp/get/put on directory
+         and file with the same name prefixes.
+
+         Cleans up at the end of each test in which it is used.
+         """
+         source = self._dir_and_file_with_same_name_prefix(fs, fs_join, fs_path)
+         yield source
+         fs.rm(source, recursive=True)
+
+     @pytest.fixture
+     def fs_10_files_with_hashed_names(self, fs, fs_join, fs_path):
+         """
+         Scenario on remote filesystem that is used to check cp/get/put files order
+         when source and destination are lists.
+
+         Cleans up at the end of each test in which it is used.
+         """
+         source = self._10_files_with_hashed_names(fs, fs_join, fs_path)
+         yield source
+         fs.rm(source, recursive=True)
+
+     @pytest.fixture
+     def fs_target(self, fs, fs_join, fs_path):
+         """
+         Return name of remote directory that does not yet exist to copy into.
+
+         Cleans up at the end of each test in which it is used.
+         """
+         target = fs_join(fs_path, "target")
+         yield target
+         if fs.exists(target):
+             fs.rm(target, recursive=True)
+
+     @pytest.fixture
+     def local_bulk_operations_scenario_0(self, local_fs, local_join, local_path):
+         """
+         Scenario on local filesystem that is used for many cp/get/put tests.
+
+         Cleans up at the end of each test in which it is used.
+         """
+         source = self._bulk_operations_scenario_0(local_fs, local_join, local_path)
+         yield source
+         local_fs.rm(source, recursive=True)
+
+     @pytest.fixture
+     def local_glob_edge_cases_files(self, local_fs, local_join, local_path):
+         """
+         Scenario on local filesystem that is used for glob edge cases cp/get/put tests.
+
+         Cleans up at the end of each test in which it is used.
+         """
+         source = self._glob_edge_cases_files(local_fs, local_join, local_path)
+         yield source
+         local_fs.rm(source, recursive=True)
+
+     @pytest.fixture
+     def local_dir_and_file_with_same_name_prefix(
+         self, local_fs, local_join, local_path
+     ):
+         """
+         Scenario on local filesystem that is used to check cp/get/put on directory
+         and file with the same name prefixes.
+
+         Cleans up at the end of each test in which it is used.
+         """
+         source = self._dir_and_file_with_same_name_prefix(
+             local_fs, local_join, local_path
+         )
+         yield source
+         local_fs.rm(source, recursive=True)
+
+     @pytest.fixture
+     def local_10_files_with_hashed_names(self, local_fs, local_join, local_path):
+         """
+         Scenario on local filesystem that is used to check cp/get/put files order
+         when source and destination are lists.
+
+         Cleans up at the end of each test in which it is used.
+         """
+         source = self._10_files_with_hashed_names(local_fs, local_join, local_path)
+         yield source
+         local_fs.rm(source, recursive=True)
+
+     @pytest.fixture
+     def local_target(self, local_fs, local_join, local_path):
+         """
+         Return name of local directory that does not yet exist to copy into.
+
+         Cleans up at the end of each test in which it is used.
+         """
+         target = local_join(local_path, "target")
+         yield target
+         if local_fs.exists(target):
+             local_fs.rm(target, recursive=True)
+
+     def _glob_edge_cases_files(self, some_fs, some_join, some_path):
+         """
+         Scenario that is used for glob edge cases cp/get/put tests.
+         Creates the following directory and file structure:
+
+         📁 source
+         ├── 📄 file1
+         ├── 📄 file2
+         ├── 📁 subdir0
+         │   ├── 📄 subfile1
+         │   ├── 📄 subfile2
+         │   └── 📁 nesteddir
+         │       └── 📄 nestedfile
+         └── 📁 subdir1
+             ├── 📄 subfile1
+             ├── 📄 subfile2
+             └── 📁 nesteddir
+                 └── 📄 nestedfile
+         """
+         source = some_join(some_path, "source")
+         some_fs.touch(some_join(source, "file1"))
+         some_fs.touch(some_join(source, "file2"))
+
+         for subdir_idx in range(2):
+             subdir = some_join(source, f"subdir{subdir_idx}")
+             nesteddir = some_join(subdir, "nesteddir")
+             some_fs.makedirs(nesteddir)
+             some_fs.touch(some_join(subdir, "subfile1"))
+             some_fs.touch(some_join(subdir, "subfile2"))
+             some_fs.touch(some_join(nesteddir, "nestedfile"))
+
+         return source
+
+     def _bulk_operations_scenario_0(self, some_fs, some_join, some_path):
+         """
+         Scenario that is used for many cp/get/put tests. Creates the following
+         directory and file structure:
+
+         📁 source
+         ├── 📄 file1
+         ├── 📄 file2
+         └── 📁 subdir
+             ├── 📄 subfile1
+             ├── 📄 subfile2
+             └── 📁 nesteddir
+                 └── 📄 nestedfile
+         """
+         source = some_join(some_path, "source")
+         subdir = some_join(source, "subdir")
+         nesteddir = some_join(subdir, "nesteddir")
+         some_fs.makedirs(nesteddir)
+         some_fs.touch(some_join(source, "file1"))
+         some_fs.touch(some_join(source, "file2"))
+         some_fs.touch(some_join(subdir, "subfile1"))
+         some_fs.touch(some_join(subdir, "subfile2"))
+         some_fs.touch(some_join(nesteddir, "nestedfile"))
+         return source
+
+     def _dir_and_file_with_same_name_prefix(self, some_fs, some_join, some_path):
+         """
+         Scenario that is used to check cp/get/put on directory and file with
+         the same name prefixes. Creates the following directory and file structure:
+
+         📁 source
+         ├── 📄 subdir.txt
+         └── 📁 subdir
+             └── 📄 subfile.txt
+         """
+         source = some_join(some_path, "source")
+         subdir = some_join(source, "subdir")
+         file = some_join(source, "subdir.txt")
+         subfile = some_join(subdir, "subfile.txt")
+         some_fs.makedirs(subdir)
+         some_fs.touch(file)
+         some_fs.touch(subfile)
+         return source
+
+     def _10_files_with_hashed_names(self, some_fs, some_join, some_path):
+         """
+         Scenario that is used to check cp/get/put files order when source and
+         destination are lists. Creates the following directory and file structure:
+
+         📁 source
+         └── 📄 {hashed([0-9])}.txt
+         """
+         source = some_join(some_path, "source")
+         for i in range(10):
+             hashed_i = md5(str(i).encode("utf-8")).hexdigest()
+             path = some_join(source, f"{hashed_i}.txt")
+             some_fs.pipe(path=path, value=f"{i}".encode())
+         return source
+
+
+ class AbstractFixtures(BaseAbstractFixtures):
+     """
+     Abstract base class containing fixtures that may be overridden in derived
+     filesystem-specific classes to run the abstract tests on such filesystems.
+
+     For any particular filesystem some of these fixtures must be overridden,
+     such as ``fs`` and ``fs_path``, and others may be overridden if the
+     default functions here are not appropriate, such as ``fs_join``.
+     """
+
+     @pytest.fixture
+     def fs(self):
+         raise NotImplementedError("This function must be overridden in derived classes")
+
+     @pytest.fixture
+     def fs_join(self):
+         """
+         Return a function that joins its arguments together into a path.
+
+         Most fsspec implementations join paths in a platform-dependent way,
+         but some will override this to always use a forward slash.
+         """
+         return os.path.join
+
+     @pytest.fixture
+     def fs_path(self):
+         raise NotImplementedError("This function must be overridden in derived classes")
+
+     @pytest.fixture(scope="class")
+     def local_fs(self):
+         # Maybe need an option for auto_mkdir=False? This is only relevant
+         # for certain implementations.
+         return LocalFileSystem(auto_mkdir=True)
+
+     @pytest.fixture
+     def local_join(self):
+         """
+         Return a function that joins its arguments together into a path, on
+         the local filesystem.
+         """
+         return os.path.join
+
+     @pytest.fixture
+     def local_path(self, tmpdir):
+         return tmpdir
+
+     @pytest.fixture
+     def supports_empty_directories(self):
+         """
+         Return whether this implementation supports empty directories.
+         """
+         return True
+
+     @pytest.fixture
+     def fs_sanitize_path(self):
+         return lambda x: x
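A minimal sketch (not part of the committed diff) of how this module is meant to be consumed: a filesystem-specific test module subclasses AbstractFixtures to supply the required ``fs`` and ``fs_path`` fixtures, then mixes in the abstract test classes to inherit the whole suite. The memory-filesystem details below (clearing ``store`` and ``pseudo_dirs``, forward-slash joins) are illustrative assumptions, not part of this commit.

import pytest

import fsspec
import fsspec.tests.abstract as abstract


class MemoryFixtures(abstract.AbstractFixtures):
    @pytest.fixture
    def fs(self):
        m = fsspec.filesystem("memory")
        m.store.clear()  # start each test from an empty filesystem
        m.pseudo_dirs.clear()
        m.pseudo_dirs.append("")
        return m

    @pytest.fixture
    def fs_join(self):
        # memory paths always use forward slashes, so override os.path.join
        return lambda *args: "/".join(args)

    @pytest.fixture
    def fs_path(self):
        return ""


# Mixing in one abstract test class pulls in all of its tests:
class TestMemoryCopy(MemoryFixtures, abstract.AbstractCopyTests):
    pass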
lib/python3.10/site-packages/fsspec/tests/abstract/common.py ADDED
@@ -0,0 +1,175 @@
+ GLOB_EDGE_CASES_TESTS = {
+     "argnames": ("path", "recursive", "maxdepth", "expected"),
+     "argvalues": [
+         ("fil?1", False, None, ["file1"]),
+         ("fil?1", True, None, ["file1"]),
+         ("file[1-2]", False, None, ["file1", "file2"]),
+         ("file[1-2]", True, None, ["file1", "file2"]),
+         ("*", False, None, ["file1", "file2"]),
+         (
+             "*",
+             True,
+             None,
+             [
+                 "file1",
+                 "file2",
+                 "subdir0/subfile1",
+                 "subdir0/subfile2",
+                 "subdir0/nesteddir/nestedfile",
+                 "subdir1/subfile1",
+                 "subdir1/subfile2",
+                 "subdir1/nesteddir/nestedfile",
+             ],
+         ),
+         ("*", True, 1, ["file1", "file2"]),
+         (
+             "*",
+             True,
+             2,
+             [
+                 "file1",
+                 "file2",
+                 "subdir0/subfile1",
+                 "subdir0/subfile2",
+                 "subdir1/subfile1",
+                 "subdir1/subfile2",
+             ],
+         ),
+         ("*1", False, None, ["file1"]),
+         (
+             "*1",
+             True,
+             None,
+             [
+                 "file1",
+                 "subdir1/subfile1",
+                 "subdir1/subfile2",
+                 "subdir1/nesteddir/nestedfile",
+             ],
+         ),
+         ("*1", True, 2, ["file1", "subdir1/subfile1", "subdir1/subfile2"]),
+         (
+             "**",
+             False,
+             None,
+             [
+                 "file1",
+                 "file2",
+                 "subdir0/subfile1",
+                 "subdir0/subfile2",
+                 "subdir0/nesteddir/nestedfile",
+                 "subdir1/subfile1",
+                 "subdir1/subfile2",
+                 "subdir1/nesteddir/nestedfile",
+             ],
+         ),
+         (
+             "**",
+             True,
+             None,
+             [
+                 "file1",
+                 "file2",
+                 "subdir0/subfile1",
+                 "subdir0/subfile2",
+                 "subdir0/nesteddir/nestedfile",
+                 "subdir1/subfile1",
+                 "subdir1/subfile2",
+                 "subdir1/nesteddir/nestedfile",
+             ],
+         ),
+         ("**", True, 1, ["file1", "file2"]),
+         (
+             "**",
+             True,
+             2,
+             [
+                 "file1",
+                 "file2",
+                 "subdir0/subfile1",
+                 "subdir0/subfile2",
+                 "subdir0/nesteddir/nestedfile",
+                 "subdir1/subfile1",
+                 "subdir1/subfile2",
+                 "subdir1/nesteddir/nestedfile",
+             ],
+         ),
+         (
+             "**",
+             False,
+             2,
+             [
+                 "file1",
+                 "file2",
+                 "subdir0/subfile1",
+                 "subdir0/subfile2",
+                 "subdir1/subfile1",
+                 "subdir1/subfile2",
+             ],
+         ),
+         ("**/*1", False, None, ["file1", "subdir0/subfile1", "subdir1/subfile1"]),
+         (
+             "**/*1",
+             True,
+             None,
+             [
+                 "file1",
+                 "subdir0/subfile1",
+                 "subdir1/subfile1",
+                 "subdir1/subfile2",
+                 "subdir1/nesteddir/nestedfile",
+             ],
+         ),
+         ("**/*1", True, 1, ["file1"]),
+         (
+             "**/*1",
+             True,
+             2,
+             ["file1", "subdir0/subfile1", "subdir1/subfile1", "subdir1/subfile2"],
+         ),
+         ("**/*1", False, 2, ["file1", "subdir0/subfile1", "subdir1/subfile1"]),
+         ("**/subdir0", False, None, []),
+         ("**/subdir0", True, None, ["subfile1", "subfile2", "nesteddir/nestedfile"]),
+         ("**/subdir0/nested*", False, 2, []),
+         ("**/subdir0/nested*", True, 2, ["nestedfile"]),
+         ("subdir[1-2]", False, None, []),
+         ("subdir[1-2]", True, None, ["subfile1", "subfile2", "nesteddir/nestedfile"]),
+         ("subdir[1-2]", True, 2, ["subfile1", "subfile2"]),
+         ("subdir[0-1]", False, None, []),
+         (
+             "subdir[0-1]",
+             True,
+             None,
+             [
+                 "subdir0/subfile1",
+                 "subdir0/subfile2",
+                 "subdir0/nesteddir/nestedfile",
+                 "subdir1/subfile1",
+                 "subdir1/subfile2",
+                 "subdir1/nesteddir/nestedfile",
+             ],
+         ),
+         (
+             "subdir[0-1]/*fil[e]*",
+             False,
+             None,
+             [
+                 "subdir0/subfile1",
+                 "subdir0/subfile2",
+                 "subdir1/subfile1",
+                 "subdir1/subfile2",
+             ],
+         ),
+         (
+             "subdir[0-1]/*fil[e]*",
+             True,
+             None,
+             [
+                 "subdir0/subfile1",
+                 "subdir0/subfile2",
+                 "subdir1/subfile1",
+                 "subdir1/subfile2",
+             ],
+         ),
+     ],
+ }
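GLOB_EDGE_CASES_TESTS is shaped so it can be splatted straight into pytest.mark.parametrize, which is exactly how the copy/get/put test classes below consume it; a minimal sketch (not part of the committed diff):

import pytest

from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS


@pytest.mark.parametrize(
    GLOB_EDGE_CASES_TESTS["argnames"],
    GLOB_EDGE_CASES_TESTS["argvalues"],
)
def test_glob_case(path, recursive, maxdepth, expected):
    # pytest generates one test per (path, recursive, maxdepth, expected) tuple
    ...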
lib/python3.10/site-packages/fsspec/tests/abstract/copy.py ADDED
@@ -0,0 +1,557 @@
+ from hashlib import md5
+ from itertools import product
+
+ import pytest
+
+ from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS
+
+
+ class AbstractCopyTests:
+     def test_copy_file_to_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         fs_target,
+         supports_empty_directories,
+     ):
+         # Copy scenario 1a
+         source = fs_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+         if not supports_empty_directories:
+             # Force target directory to exist by adding a dummy file
+             fs.touch(fs_join(target, "dummy"))
+         assert fs.isdir(target)
+
+         target_file2 = fs_join(target, "file2")
+         target_subfile1 = fs_join(target, "subfile1")
+
+         # Copy from source directory
+         fs.cp(fs_join(source, "file2"), target)
+         assert fs.isfile(target_file2)
+
+         # Copy from sub directory
+         fs.cp(fs_join(source, "subdir", "subfile1"), target)
+         assert fs.isfile(target_subfile1)
+
+         # Remove copied files
+         fs.rm([target_file2, target_subfile1])
+         assert not fs.exists(target_file2)
+         assert not fs.exists(target_subfile1)
+
+         # Repeat with trailing slash on target
+         fs.cp(fs_join(source, "file2"), target + "/")
+         assert fs.isdir(target)
+         assert fs.isfile(target_file2)
+
+         fs.cp(fs_join(source, "subdir", "subfile1"), target + "/")
+         assert fs.isfile(target_subfile1)
+
+     def test_copy_file_to_new_directory(
+         self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target
+     ):
+         # Copy scenario 1b
+         source = fs_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+
+         fs.cp(
+             fs_join(source, "subdir", "subfile1"), fs_join(target, "newdir/")
+         )  # Note trailing slash
+         assert fs.isdir(target)
+         assert fs.isdir(fs_join(target, "newdir"))
+         assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+
+     def test_copy_file_to_file_in_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         fs_target,
+         supports_empty_directories,
+     ):
+         # Copy scenario 1c
+         source = fs_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+         if not supports_empty_directories:
+             # Force target directory to exist by adding a dummy file
+             fs.touch(fs_join(target, "dummy"))
+         assert fs.isdir(target)
+
+         fs.cp(fs_join(source, "subdir", "subfile1"), fs_join(target, "newfile"))
+         assert fs.isfile(fs_join(target, "newfile"))
+
+     def test_copy_file_to_file_in_new_directory(
+         self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target
+     ):
+         # Copy scenario 1d
+         source = fs_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+
+         fs.cp(
+             fs_join(source, "subdir", "subfile1"), fs_join(target, "newdir", "newfile")
+         )
+         assert fs.isdir(fs_join(target, "newdir"))
+         assert fs.isfile(fs_join(target, "newdir", "newfile"))
+
+     def test_copy_directory_to_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         fs_target,
+         supports_empty_directories,
+     ):
+         # Copy scenario 1e
+         source = fs_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+         if not supports_empty_directories:
+             # Force target directory to exist by adding a dummy file
+             dummy = fs_join(target, "dummy")
+             fs.touch(dummy)
+         assert fs.isdir(target)
+
+         for source_slash, target_slash in zip([False, True], [False, True]):
+             s = fs_join(source, "subdir")
+             if source_slash:
+                 s += "/"
+             t = target + "/" if target_slash else target
+
+             # Without recursive does nothing
+             fs.cp(s, t)
+             assert fs.ls(target, detail=False) == (
+                 [] if supports_empty_directories else [dummy]
+             )
+
+             # With recursive
+             fs.cp(s, t, recursive=True)
+             if source_slash:
+                 assert fs.isfile(fs_join(target, "subfile1"))
+                 assert fs.isfile(fs_join(target, "subfile2"))
+                 assert fs.isdir(fs_join(target, "nesteddir"))
+                 assert fs.isfile(fs_join(target, "nesteddir", "nestedfile"))
+                 assert not fs.exists(fs_join(target, "subdir"))
+
+                 fs.rm(
+                     [
+                         fs_join(target, "subfile1"),
+                         fs_join(target, "subfile2"),
+                         fs_join(target, "nesteddir"),
+                     ],
+                     recursive=True,
+                 )
+             else:
+                 assert fs.isdir(fs_join(target, "subdir"))
+                 assert fs.isfile(fs_join(target, "subdir", "subfile1"))
+                 assert fs.isfile(fs_join(target, "subdir", "subfile2"))
+                 assert fs.isdir(fs_join(target, "subdir", "nesteddir"))
+                 assert fs.isfile(fs_join(target, "subdir", "nesteddir", "nestedfile"))
+
+                 fs.rm(fs_join(target, "subdir"), recursive=True)
+             assert fs.ls(target, detail=False) == (
+                 [] if supports_empty_directories else [dummy]
+             )
+
+             # Limit recursive by maxdepth
+             fs.cp(s, t, recursive=True, maxdepth=1)
+             if source_slash:
+                 assert fs.isfile(fs_join(target, "subfile1"))
+                 assert fs.isfile(fs_join(target, "subfile2"))
+                 assert not fs.exists(fs_join(target, "nesteddir"))
+                 assert not fs.exists(fs_join(target, "subdir"))
+
+                 fs.rm(
+                     [
+                         fs_join(target, "subfile1"),
+                         fs_join(target, "subfile2"),
+                     ],
+                     recursive=True,
+                 )
+             else:
+                 assert fs.isdir(fs_join(target, "subdir"))
+                 assert fs.isfile(fs_join(target, "subdir", "subfile1"))
+                 assert fs.isfile(fs_join(target, "subdir", "subfile2"))
+                 assert not fs.exists(fs_join(target, "subdir", "nesteddir"))
+
+                 fs.rm(fs_join(target, "subdir"), recursive=True)
+             assert fs.ls(target, detail=False) == (
+                 [] if supports_empty_directories else [dummy]
+             )
+
+     def test_copy_directory_to_new_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         fs_target,
+         supports_empty_directories,
+     ):
+         # Copy scenario 1f
+         source = fs_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+
+         for source_slash, target_slash in zip([False, True], [False, True]):
+             s = fs_join(source, "subdir")
+             if source_slash:
+                 s += "/"
+             t = fs_join(target, "newdir")
+             if target_slash:
+                 t += "/"
+
+             # Without recursive does nothing
+             fs.cp(s, t)
+             if supports_empty_directories:
+                 assert fs.ls(target) == []
+             else:
+                 with pytest.raises(FileNotFoundError):
+                     fs.ls(target)
+
+             # With recursive
+             fs.cp(s, t, recursive=True)
+             assert fs.isdir(fs_join(target, "newdir"))
+             assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+             assert fs.isfile(fs_join(target, "newdir", "subfile2"))
+             assert fs.isdir(fs_join(target, "newdir", "nesteddir"))
+             assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile"))
+             assert not fs.exists(fs_join(target, "subdir"))
+
+             fs.rm(fs_join(target, "newdir"), recursive=True)
+             assert not fs.exists(fs_join(target, "newdir"))
+
+             # Limit recursive by maxdepth
+             fs.cp(s, t, recursive=True, maxdepth=1)
+             assert fs.isdir(fs_join(target, "newdir"))
+             assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+             assert fs.isfile(fs_join(target, "newdir", "subfile2"))
+             assert not fs.exists(fs_join(target, "newdir", "nesteddir"))
+             assert not fs.exists(fs_join(target, "subdir"))
+
+             fs.rm(fs_join(target, "newdir"), recursive=True)
+             assert not fs.exists(fs_join(target, "newdir"))
+
+     def test_copy_glob_to_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         fs_target,
+         supports_empty_directories,
+     ):
+         # Copy scenario 1g
+         source = fs_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+         if not supports_empty_directories:
+             # Force target directory to exist by adding a dummy file
+             dummy = fs_join(target, "dummy")
+             fs.touch(dummy)
+         assert fs.isdir(target)
+
+         for target_slash in [False, True]:
+             t = target + "/" if target_slash else target
+
+             # Without recursive
+             fs.cp(fs_join(source, "subdir", "*"), t)
+             assert fs.isfile(fs_join(target, "subfile1"))
+             assert fs.isfile(fs_join(target, "subfile2"))
+             assert not fs.isdir(fs_join(target, "nesteddir"))
+             assert not fs.exists(fs_join(target, "nesteddir", "nestedfile"))
+             assert not fs.exists(fs_join(target, "subdir"))
+
+             fs.rm(
+                 [
+                     fs_join(target, "subfile1"),
+                     fs_join(target, "subfile2"),
+                 ],
+                 recursive=True,
+             )
+             assert fs.ls(target, detail=False) == (
+                 [] if supports_empty_directories else [dummy]
+             )
+
+             # With recursive
+             for glob, recursive in zip(["*", "**"], [True, False]):
+                 fs.cp(fs_join(source, "subdir", glob), t, recursive=recursive)
+                 assert fs.isfile(fs_join(target, "subfile1"))
+                 assert fs.isfile(fs_join(target, "subfile2"))
+                 assert fs.isdir(fs_join(target, "nesteddir"))
+                 assert fs.isfile(fs_join(target, "nesteddir", "nestedfile"))
+                 assert not fs.exists(fs_join(target, "subdir"))
+
+                 fs.rm(
+                     [
+                         fs_join(target, "subfile1"),
+                         fs_join(target, "subfile2"),
+                         fs_join(target, "nesteddir"),
+                     ],
+                     recursive=True,
+                 )
+                 assert fs.ls(target, detail=False) == (
+                     [] if supports_empty_directories else [dummy]
+                 )
+
+                 # Limit recursive by maxdepth
+                 fs.cp(
+                     fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1
+                 )
+                 assert fs.isfile(fs_join(target, "subfile1"))
+                 assert fs.isfile(fs_join(target, "subfile2"))
+                 assert not fs.exists(fs_join(target, "nesteddir"))
+                 assert not fs.exists(fs_join(target, "subdir"))
+
+                 fs.rm(
+                     [
+                         fs_join(target, "subfile1"),
+                         fs_join(target, "subfile2"),
+                     ],
+                     recursive=True,
+                 )
+                 assert fs.ls(target, detail=False) == (
+                     [] if supports_empty_directories else [dummy]
+                 )
+
+     def test_copy_glob_to_new_directory(
+         self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target
+     ):
+         # Copy scenario 1h
+         source = fs_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+
+         for target_slash in [False, True]:
+             t = fs_join(target, "newdir")
+             if target_slash:
+                 t += "/"
+
+             # Without recursive
+             fs.cp(fs_join(source, "subdir", "*"), t)
+             assert fs.isdir(fs_join(target, "newdir"))
+             assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+             assert fs.isfile(fs_join(target, "newdir", "subfile2"))
+             assert not fs.exists(fs_join(target, "newdir", "nesteddir"))
+             assert not fs.exists(fs_join(target, "newdir", "nesteddir", "nestedfile"))
+             assert not fs.exists(fs_join(target, "subdir"))
+             assert not fs.exists(fs_join(target, "newdir", "subdir"))
+
+             fs.rm(fs_join(target, "newdir"), recursive=True)
+             assert not fs.exists(fs_join(target, "newdir"))
+
+             # With recursive
+             for glob, recursive in zip(["*", "**"], [True, False]):
+                 fs.cp(fs_join(source, "subdir", glob), t, recursive=recursive)
+                 assert fs.isdir(fs_join(target, "newdir"))
+                 assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+                 assert fs.isfile(fs_join(target, "newdir", "subfile2"))
+                 assert fs.isdir(fs_join(target, "newdir", "nesteddir"))
+                 assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile"))
+                 assert not fs.exists(fs_join(target, "subdir"))
+                 assert not fs.exists(fs_join(target, "newdir", "subdir"))
+
+                 fs.rm(fs_join(target, "newdir"), recursive=True)
+                 assert not fs.exists(fs_join(target, "newdir"))
+
+                 # Limit recursive by maxdepth
+                 fs.cp(
+                     fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1
+                 )
+                 assert fs.isdir(fs_join(target, "newdir"))
+                 assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+                 assert fs.isfile(fs_join(target, "newdir", "subfile2"))
+                 assert not fs.exists(fs_join(target, "newdir", "nesteddir"))
+                 assert not fs.exists(fs_join(target, "subdir"))
+                 assert not fs.exists(fs_join(target, "newdir", "subdir"))
+
+                 fs.rm(fs_join(target, "newdir"), recursive=True)
+                 assert not fs.exists(fs_join(target, "newdir"))
+
+     @pytest.mark.parametrize(
+         GLOB_EDGE_CASES_TESTS["argnames"],
+         GLOB_EDGE_CASES_TESTS["argvalues"],
+     )
+     def test_copy_glob_edge_cases(
+         self,
+         path,
+         recursive,
+         maxdepth,
+         expected,
+         fs,
+         fs_join,
+         fs_glob_edge_cases_files,
+         fs_target,
+         fs_sanitize_path,
+     ):
+         # Copy scenario 1g
+         source = fs_glob_edge_cases_files
+
+         target = fs_target
+
+         for new_dir, target_slash in product([True, False], [True, False]):
+             fs.mkdir(target)
+
+             t = fs_join(target, "newdir") if new_dir else target
+             t = t + "/" if target_slash else t
+
+             fs.copy(fs_join(source, path), t, recursive=recursive, maxdepth=maxdepth)
+
+             output = fs.find(target)
+             if new_dir:
+                 prefixed_expected = [
+                     fs_sanitize_path(fs_join(target, "newdir", p)) for p in expected
+                 ]
+             else:
+                 prefixed_expected = [
+                     fs_sanitize_path(fs_join(target, p)) for p in expected
+                 ]
+             assert sorted(output) == sorted(prefixed_expected)
+
+             try:
+                 fs.rm(target, recursive=True)
+             except FileNotFoundError:
+                 pass
+
+     def test_copy_list_of_files_to_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         fs_target,
+         supports_empty_directories,
+     ):
+         # Copy scenario 2a
+         source = fs_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+         if not supports_empty_directories:
+             # Force target directory to exist by adding a dummy file
+             dummy = fs_join(target, "dummy")
+             fs.touch(dummy)
+         assert fs.isdir(target)
+
+         source_files = [
+             fs_join(source, "file1"),
+             fs_join(source, "file2"),
+             fs_join(source, "subdir", "subfile1"),
+         ]
+
+         for target_slash in [False, True]:
+             t = target + "/" if target_slash else target
+
+             fs.cp(source_files, t)
+             assert fs.isfile(fs_join(target, "file1"))
+             assert fs.isfile(fs_join(target, "file2"))
+             assert fs.isfile(fs_join(target, "subfile1"))
+
+             fs.rm(
+                 [
+                     fs_join(target, "file1"),
+                     fs_join(target, "file2"),
+                     fs_join(target, "subfile1"),
+                 ],
+                 recursive=True,
+             )
+             assert fs.ls(target, detail=False) == (
+                 [] if supports_empty_directories else [dummy]
+             )
+
+     def test_copy_list_of_files_to_new_directory(
+         self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target
+     ):
+         # Copy scenario 2b
+         source = fs_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+
+         source_files = [
+             fs_join(source, "file1"),
+             fs_join(source, "file2"),
+             fs_join(source, "subdir", "subfile1"),
+         ]
+
+         fs.cp(source_files, fs_join(target, "newdir") + "/")  # Note trailing slash
+         assert fs.isdir(fs_join(target, "newdir"))
+         assert fs.isfile(fs_join(target, "newdir", "file1"))
+         assert fs.isfile(fs_join(target, "newdir", "file2"))
+         assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+
+     def test_copy_two_files_new_directory(
+         self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target
+     ):
+         # This is a duplicate of test_copy_list_of_files_to_new_directory and
+         # can eventually be removed.
+         source = fs_bulk_operations_scenario_0
+
+         target = fs_target
+         assert not fs.exists(target)
+         fs.cp([fs_join(source, "file1"), fs_join(source, "file2")], target)
+
+         assert fs.isdir(target)
+         assert fs.isfile(fs_join(target, "file1"))
+         assert fs.isfile(fs_join(target, "file2"))
+
+     def test_copy_directory_without_files_with_same_name_prefix(
+         self,
+         fs,
+         fs_join,
+         fs_target,
+         fs_dir_and_file_with_same_name_prefix,
+         supports_empty_directories,
+     ):
+         # Create the test dirs
+         source = fs_dir_and_file_with_same_name_prefix
+         target = fs_target
+
+         # Test without glob
+         fs.cp(fs_join(source, "subdir"), target, recursive=True)
+
+         assert fs.isfile(fs_join(target, "subfile.txt"))
+         assert not fs.isfile(fs_join(target, "subdir.txt"))
+
+         fs.rm([fs_join(target, "subfile.txt")])
+         if supports_empty_directories:
+             assert fs.ls(target) == []
+         else:
+             assert not fs.exists(target)
+
+         # Test with glob
+         fs.cp(fs_join(source, "subdir*"), target, recursive=True)
+
+         assert fs.isdir(fs_join(target, "subdir"))
+         assert fs.isfile(fs_join(target, "subdir", "subfile.txt"))
+         assert fs.isfile(fs_join(target, "subdir.txt"))
+
+     def test_copy_with_source_and_destination_as_list(
+         self, fs, fs_target, fs_join, fs_10_files_with_hashed_names
+     ):
+         # Create the test dir
+         source = fs_10_files_with_hashed_names
+         target = fs_target
+
+         # Create list of files for source and destination
+         source_files = []
+         destination_files = []
+         for i in range(10):
+             hashed_i = md5(str(i).encode("utf-8")).hexdigest()
+             source_files.append(fs_join(source, f"{hashed_i}.txt"))
+             destination_files.append(fs_join(target, f"{hashed_i}.txt"))
+
+         # Copy and assert order was kept
+         fs.copy(path1=source_files, path2=destination_files)
+
+         for i in range(10):
+             file_content = fs.cat(destination_files[i]).decode("utf-8")
+             assert file_content == str(i)
lib/python3.10/site-packages/fsspec/tests/abstract/get.py ADDED
@@ -0,0 +1,587 @@
+ from hashlib import md5
+ from itertools import product
+
+ import pytest
+
+ from fsspec.implementations.local import make_path_posix
+ from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS
+
+
+ class AbstractGetTests:
+     def test_get_file_to_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         local_fs,
+         local_join,
+         local_target,
+     ):
+         # Copy scenario 1a
+         source = fs_bulk_operations_scenario_0
+
+         target = local_target
+         local_fs.mkdir(target)
+         assert local_fs.isdir(target)
+
+         target_file2 = local_join(target, "file2")
+         target_subfile1 = local_join(target, "subfile1")
+
+         # Copy from source directory
+         fs.get(fs_join(source, "file2"), target)
+         assert local_fs.isfile(target_file2)
+
+         # Copy from sub directory
+         fs.get(fs_join(source, "subdir", "subfile1"), target)
+         assert local_fs.isfile(target_subfile1)
+
+         # Remove copied files
+         local_fs.rm([target_file2, target_subfile1])
+         assert not local_fs.exists(target_file2)
+         assert not local_fs.exists(target_subfile1)
+
+         # Repeat with trailing slash on target
+         fs.get(fs_join(source, "file2"), target + "/")
+         assert local_fs.isdir(target)
+         assert local_fs.isfile(target_file2)
+
+         fs.get(fs_join(source, "subdir", "subfile1"), target + "/")
+         assert local_fs.isfile(target_subfile1)
+
+     def test_get_file_to_new_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         local_fs,
+         local_join,
+         local_target,
+     ):
+         # Copy scenario 1b
+         source = fs_bulk_operations_scenario_0
+
+         target = local_target
+         local_fs.mkdir(target)
+
+         fs.get(
+             fs_join(source, "subdir", "subfile1"), local_join(target, "newdir/")
+         )  # Note trailing slash
+
+         assert local_fs.isdir(target)
+         assert local_fs.isdir(local_join(target, "newdir"))
+         assert local_fs.isfile(local_join(target, "newdir", "subfile1"))
+
+     def test_get_file_to_file_in_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         local_fs,
+         local_join,
+         local_target,
+     ):
+         # Copy scenario 1c
+         source = fs_bulk_operations_scenario_0
+
+         target = local_target
+         local_fs.mkdir(target)
+
+         fs.get(fs_join(source, "subdir", "subfile1"), local_join(target, "newfile"))
+         assert local_fs.isfile(local_join(target, "newfile"))
+
+     def test_get_file_to_file_in_new_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         local_fs,
+         local_join,
+         local_target,
+     ):
+         # Copy scenario 1d
+         source = fs_bulk_operations_scenario_0
+
+         target = local_target
+         local_fs.mkdir(target)
+
+         fs.get(
+             fs_join(source, "subdir", "subfile1"),
+             local_join(target, "newdir", "newfile"),
+         )
+         assert local_fs.isdir(local_join(target, "newdir"))
+         assert local_fs.isfile(local_join(target, "newdir", "newfile"))
+
+     def test_get_directory_to_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         local_fs,
+         local_join,
+         local_target,
+     ):
+         # Copy scenario 1e
+         source = fs_bulk_operations_scenario_0
+
+         target = local_target
+         local_fs.mkdir(target)
+         assert local_fs.isdir(target)
+
+         for source_slash, target_slash in zip([False, True], [False, True]):
+             s = fs_join(source, "subdir")
+             if source_slash:
+                 s += "/"
+             t = target + "/" if target_slash else target
+
+             # Without recursive does nothing
+             fs.get(s, t)
+             assert local_fs.ls(target) == []
+
+             # With recursive
+             fs.get(s, t, recursive=True)
+             if source_slash:
+                 assert local_fs.isfile(local_join(target, "subfile1"))
+                 assert local_fs.isfile(local_join(target, "subfile2"))
+                 assert local_fs.isdir(local_join(target, "nesteddir"))
+                 assert local_fs.isfile(local_join(target, "nesteddir", "nestedfile"))
+                 assert not local_fs.exists(local_join(target, "subdir"))
+
+                 local_fs.rm(
+                     [
+                         local_join(target, "subfile1"),
+                         local_join(target, "subfile2"),
+                         local_join(target, "nesteddir"),
+                     ],
+                     recursive=True,
+                 )
+             else:
+                 assert local_fs.isdir(local_join(target, "subdir"))
+                 assert local_fs.isfile(local_join(target, "subdir", "subfile1"))
+                 assert local_fs.isfile(local_join(target, "subdir", "subfile2"))
+                 assert local_fs.isdir(local_join(target, "subdir", "nesteddir"))
+                 assert local_fs.isfile(
+                     local_join(target, "subdir", "nesteddir", "nestedfile")
+                 )
+
+                 local_fs.rm(local_join(target, "subdir"), recursive=True)
+             assert local_fs.ls(target) == []
+
+             # Limit recursive by maxdepth
+             fs.get(s, t, recursive=True, maxdepth=1)
+             if source_slash:
+                 assert local_fs.isfile(local_join(target, "subfile1"))
+                 assert local_fs.isfile(local_join(target, "subfile2"))
+                 assert not local_fs.exists(local_join(target, "nesteddir"))
+                 assert not local_fs.exists(local_join(target, "subdir"))
+
+                 local_fs.rm(
+                     [
+                         local_join(target, "subfile1"),
+                         local_join(target, "subfile2"),
+                     ],
+                     recursive=True,
+                 )
+             else:
+                 assert local_fs.isdir(local_join(target, "subdir"))
+                 assert local_fs.isfile(local_join(target, "subdir", "subfile1"))
+                 assert local_fs.isfile(local_join(target, "subdir", "subfile2"))
+                 assert not local_fs.exists(local_join(target, "subdir", "nesteddir"))
+
+                 local_fs.rm(local_join(target, "subdir"), recursive=True)
+             assert local_fs.ls(target) == []
+
+     def test_get_directory_to_new_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         local_fs,
+         local_join,
+         local_target,
+     ):
+         # Copy scenario 1f
+         source = fs_bulk_operations_scenario_0
+
+         target = local_target
+         local_fs.mkdir(target)
+
+         for source_slash, target_slash in zip([False, True], [False, True]):
+             s = fs_join(source, "subdir")
+             if source_slash:
+                 s += "/"
+             t = local_join(target, "newdir")
+             if target_slash:
+                 t += "/"
+
+             # Without recursive does nothing
+             fs.get(s, t)
+             assert local_fs.ls(target) == []
+
+             # With recursive
+             fs.get(s, t, recursive=True)
+             assert local_fs.isdir(local_join(target, "newdir"))
+             assert local_fs.isfile(local_join(target, "newdir", "subfile1"))
+             assert local_fs.isfile(local_join(target, "newdir", "subfile2"))
+             assert local_fs.isdir(local_join(target, "newdir", "nesteddir"))
+             assert local_fs.isfile(
+                 local_join(target, "newdir", "nesteddir", "nestedfile")
+             )
+             assert not local_fs.exists(local_join(target, "subdir"))
+
+             local_fs.rm(local_join(target, "newdir"), recursive=True)
+             assert local_fs.ls(target) == []
+
+             # Limit recursive by maxdepth
+             fs.get(s, t, recursive=True, maxdepth=1)
+             assert local_fs.isdir(local_join(target, "newdir"))
+             assert local_fs.isfile(local_join(target, "newdir", "subfile1"))
+             assert local_fs.isfile(local_join(target, "newdir", "subfile2"))
+             assert not local_fs.exists(local_join(target, "newdir", "nesteddir"))
+             assert not local_fs.exists(local_join(target, "subdir"))
+
+             local_fs.rm(local_join(target, "newdir"), recursive=True)
+             assert not local_fs.exists(local_join(target, "newdir"))
+
+     def test_get_glob_to_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         local_fs,
+         local_join,
+         local_target,
+     ):
+         # Copy scenario 1g
+         source = fs_bulk_operations_scenario_0
+
+         target = local_target
+         local_fs.mkdir(target)
+
+         for target_slash in [False, True]:
+             t = target + "/" if target_slash else target
+
+             # Without recursive
+             fs.get(fs_join(source, "subdir", "*"), t)
+             assert local_fs.isfile(local_join(target, "subfile1"))
+             assert local_fs.isfile(local_join(target, "subfile2"))
+             assert not local_fs.isdir(local_join(target, "nesteddir"))
+             assert not local_fs.exists(local_join(target, "nesteddir", "nestedfile"))
+             assert not local_fs.exists(local_join(target, "subdir"))
+
+             local_fs.rm(
+                 [
+                     local_join(target, "subfile1"),
+                     local_join(target, "subfile2"),
+                 ],
+                 recursive=True,
+             )
+             assert local_fs.ls(target) == []
+
+             # With recursive
+             for glob, recursive in zip(["*", "**"], [True, False]):
+                 fs.get(fs_join(source, "subdir", glob), t, recursive=recursive)
+                 assert local_fs.isfile(local_join(target, "subfile1"))
+                 assert local_fs.isfile(local_join(target, "subfile2"))
+                 assert local_fs.isdir(local_join(target, "nesteddir"))
+                 assert local_fs.isfile(local_join(target, "nesteddir", "nestedfile"))
+                 assert not local_fs.exists(local_join(target, "subdir"))
+
+                 local_fs.rm(
+                     [
+                         local_join(target, "subfile1"),
+                         local_join(target, "subfile2"),
+                         local_join(target, "nesteddir"),
+                     ],
+                     recursive=True,
+                 )
+                 assert local_fs.ls(target) == []
+
+                 # Limit recursive by maxdepth
+                 fs.get(
+                     fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1
+                 )
+                 assert local_fs.isfile(local_join(target, "subfile1"))
+                 assert local_fs.isfile(local_join(target, "subfile2"))
+                 assert not local_fs.exists(local_join(target, "nesteddir"))
+                 assert not local_fs.exists(local_join(target, "subdir"))
+
+                 local_fs.rm(
+                     [
+                         local_join(target, "subfile1"),
+                         local_join(target, "subfile2"),
+                     ],
+                     recursive=True,
+                 )
+                 assert local_fs.ls(target) == []
+
+     def test_get_glob_to_new_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         local_fs,
+         local_join,
+         local_target,
+     ):
+         # Copy scenario 1h
+         source = fs_bulk_operations_scenario_0
+
+         target = local_target
+         local_fs.mkdir(target)
+
+         for target_slash in [False, True]:
+             t = fs_join(target, "newdir")
+             if target_slash:
+                 t += "/"
+
+             # Without recursive
+             fs.get(fs_join(source, "subdir", "*"), t)
+             assert local_fs.isdir(local_join(target, "newdir"))
+             assert local_fs.isfile(local_join(target, "newdir", "subfile1"))
+             assert local_fs.isfile(local_join(target, "newdir", "subfile2"))
+             assert not local_fs.exists(local_join(target, "newdir", "nesteddir"))
+             assert not local_fs.exists(
+                 local_join(target, "newdir", "nesteddir", "nestedfile")
+             )
+             assert not local_fs.exists(local_join(target, "subdir"))
+             assert not local_fs.exists(local_join(target, "newdir", "subdir"))
+
+             local_fs.rm(local_join(target, "newdir"), recursive=True)
+             assert local_fs.ls(target) == []
+
+             # With recursive
+             for glob, recursive in zip(["*", "**"], [True, False]):
+                 fs.get(fs_join(source, "subdir", glob), t, recursive=recursive)
+                 assert local_fs.isdir(local_join(target, "newdir"))
+                 assert local_fs.isfile(local_join(target, "newdir", "subfile1"))
+                 assert local_fs.isfile(local_join(target, "newdir", "subfile2"))
+                 assert local_fs.isdir(local_join(target, "newdir", "nesteddir"))
+                 assert local_fs.isfile(
+                     local_join(target, "newdir", "nesteddir", "nestedfile")
+                 )
+                 assert not local_fs.exists(local_join(target, "subdir"))
+                 assert not local_fs.exists(local_join(target, "newdir", "subdir"))
+
+                 local_fs.rm(local_join(target, "newdir"), recursive=True)
+                 assert not local_fs.exists(local_join(target, "newdir"))
+
+                 # Limit recursive by maxdepth
+                 fs.get(
+                     fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1
+                 )
+                 assert local_fs.isdir(local_join(target, "newdir"))
+                 assert local_fs.isfile(local_join(target, "newdir", "subfile1"))
+                 assert local_fs.isfile(local_join(target, "newdir", "subfile2"))
+                 assert not local_fs.exists(local_join(target, "newdir", "nesteddir"))
+                 assert not local_fs.exists(local_join(target, "subdir"))
+                 assert not local_fs.exists(local_join(target, "newdir", "subdir"))
+
+                 local_fs.rm(local_fs.ls(target, detail=False), recursive=True)
+                 assert not local_fs.exists(local_join(target, "newdir"))
+
+     @pytest.mark.parametrize(
+         GLOB_EDGE_CASES_TESTS["argnames"],
+         GLOB_EDGE_CASES_TESTS["argvalues"],
+     )
+     def test_get_glob_edge_cases(
+         self,
+         path,
+         recursive,
+         maxdepth,
+         expected,
+         fs,
+         fs_join,
+         fs_glob_edge_cases_files,
+         local_fs,
+         local_join,
+         local_target,
+     ):
+         # Copy scenario 1g
+         source = fs_glob_edge_cases_files
+
+         target = local_target
+
+         for new_dir, target_slash in product([True, False], [True, False]):
+             local_fs.mkdir(target)
+
+             t = local_join(target, "newdir") if new_dir else target
+             t = t + "/" if target_slash else t
+
+             fs.get(fs_join(source, path), t, recursive=recursive, maxdepth=maxdepth)
+
+             output = local_fs.find(target)
+             if new_dir:
+                 prefixed_expected = [
+                     make_path_posix(local_join(target, "newdir", p)) for p in expected
+                 ]
+             else:
+                 prefixed_expected = [
+                     make_path_posix(local_join(target, p)) for p in expected
+                 ]
+             assert sorted(output) == sorted(prefixed_expected)
+
+             try:
+                 local_fs.rm(target, recursive=True)
+             except FileNotFoundError:
+                 pass
+
+     def test_get_list_of_files_to_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         local_fs,
+         local_join,
+         local_target,
+     ):
+         # Copy scenario 2a
+         source = fs_bulk_operations_scenario_0
+
+         target = local_target
+         local_fs.mkdir(target)
+
+         source_files = [
+             fs_join(source, "file1"),
+             fs_join(source, "file2"),
+             fs_join(source, "subdir", "subfile1"),
+         ]
+
+         for target_slash in [False, True]:
+             t = target + "/" if target_slash else target
+
+             fs.get(source_files, t)
+             assert local_fs.isfile(local_join(target, "file1"))
+             assert local_fs.isfile(local_join(target, "file2"))
+             assert local_fs.isfile(local_join(target, "subfile1"))
+
+             local_fs.rm(
+                 [
+                     local_join(target, "file1"),
+                     local_join(target, "file2"),
+                     local_join(target, "subfile1"),
+                 ],
+                 recursive=True,
+             )
+             assert local_fs.ls(target) == []
+
+     def test_get_list_of_files_to_new_directory(
+         self,
+         fs,
+         fs_join,
+         fs_bulk_operations_scenario_0,
+         local_fs,
+         local_join,
+         local_target,
+     ):
+         # Copy scenario 2b
+         source = fs_bulk_operations_scenario_0
+
+         target = local_target
+         local_fs.mkdir(target)
+
+         source_files = [
+             fs_join(source, "file1"),
+             fs_join(source, "file2"),
+             fs_join(source, "subdir", "subfile1"),
+         ]
+
+         fs.get(source_files, local_join(target, "newdir") + "/")  # Note trailing slash
+         assert local_fs.isdir(local_join(target, "newdir"))
+         assert local_fs.isfile(local_join(target, "newdir", "file1"))
+         assert local_fs.isfile(local_join(target, "newdir", "file2"))
+         assert local_fs.isfile(local_join(target, "newdir", "subfile1"))
+
+     def test_get_directory_recursive(
+         self, fs, fs_join, fs_path, local_fs, local_join, local_target
+     ):
+         # https://github.com/fsspec/filesystem_spec/issues/1062
+         # Recursive cp/get/put of source directory into non-existent target directory.
+         src = fs_join(fs_path, "src")
+         src_file = fs_join(src, "file")
+         fs.mkdir(src)
+         fs.touch(src_file)
+
+         target = local_target
+
+         # get without slash
+         assert not local_fs.exists(target)
+         for loop in range(2):
+             fs.get(src, target, recursive=True)
+             assert local_fs.isdir(target)
+
+             if loop == 0:
+                 assert local_fs.isfile(local_join(target, "file"))
+                 assert not local_fs.exists(local_join(target, "src"))
+             else:
+                 assert local_fs.isfile(local_join(target, "file"))
+                 assert local_fs.isdir(local_join(target, "src"))
+                 assert local_fs.isfile(local_join(target, "src", "file"))
+
+         local_fs.rm(target, recursive=True)
+
+         # get with slash
+         assert not local_fs.exists(target)
+         for loop in range(2):
+             fs.get(src + "/", target, recursive=True)
+             assert local_fs.isdir(target)
+             assert local_fs.isfile(local_join(target, "file"))
+             assert not local_fs.exists(local_join(target, "src"))
+
+     def test_get_directory_without_files_with_same_name_prefix(
+         self,
+         fs,
+         fs_join,
+         local_fs,
+         local_join,
+         local_target,
+         fs_dir_and_file_with_same_name_prefix,
+     ):
+         # Create the test dirs
+         source = fs_dir_and_file_with_same_name_prefix
+         target = local_target
+
+         # Test without glob
+         fs.get(fs_join(source, "subdir"), target, recursive=True)
+
+         assert local_fs.isfile(local_join(target, "subfile.txt"))
+         assert not local_fs.isfile(local_join(target, "subdir.txt"))
+
+         local_fs.rm([local_join(target, "subfile.txt")])
+         assert local_fs.ls(target) == []
+
+         # Test with glob
+         fs.get(fs_join(source, "subdir*"), target, recursive=True)
+
+         assert local_fs.isdir(local_join(target, "subdir"))
+         assert local_fs.isfile(local_join(target, "subdir", "subfile.txt"))
+         assert local_fs.isfile(local_join(target, "subdir.txt"))
+
+     def test_get_with_source_and_destination_as_list(
+         self,
+         fs,
+         fs_join,
+         local_fs,
+         local_join,
+         local_target,
+         fs_10_files_with_hashed_names,
+     ):
+         # Create the test dir
+         source = fs_10_files_with_hashed_names
+         target = local_target
+
+         # Create list of files for source and destination
+         source_files = []
+         destination_files = []
+         for i in range(10):
+             hashed_i = md5(str(i).encode("utf-8")).hexdigest()
+             source_files.append(fs_join(source, f"{hashed_i}.txt"))
+             destination_files.append(
+                 make_path_posix(local_join(target, f"{hashed_i}.txt"))
+             )
+
+         # Copy and assert order was kept
+         fs.get(rpath=source_files, lpath=destination_files)
+
+         for i in range(10):
+             file_content = local_fs.cat(destination_files[i]).decode("utf-8")
+             assert file_content == str(i)
lib/python3.10/site-packages/fsspec/tests/abstract/mv.py ADDED
@@ -0,0 +1,57 @@
+ import os
+
+ import pytest
+
+ import fsspec
+
+
+ def test_move_raises_error_with_tmpdir(tmpdir):
+     # Create a file in the temporary directory
+     source = tmpdir.join("source_file.txt")
+     source.write("content")
+
+     # Define a destination that simulates a protected or invalid path
+     destination = tmpdir.join("non_existent_directory/destination_file.txt")
+
+     # Instantiate the filesystem (assuming the local file system interface)
+     fs = fsspec.filesystem("file")
+
+     # Use the actual file paths as string
+     with pytest.raises(FileNotFoundError):
+         fs.mv(str(source), str(destination))
+
+
+ @pytest.mark.parametrize("recursive", (True, False))
+ def test_move_raises_error_with_tmpdir_permission(recursive, tmpdir):
+     # Create a file in the temporary directory
+     source = tmpdir.join("source_file.txt")
+     source.write("content")
+
+     # Create a protected directory (non-writable)
+     protected_dir = tmpdir.mkdir("protected_directory")
+     protected_path = str(protected_dir)
+
+     # Set the directory to read-only
+     if os.name == "nt":
+         os.system(f'icacls "{protected_path}" /deny Everyone:(W)')
+     else:
+         os.chmod(protected_path, 0o555)  # Sets the directory to read-only
+
+     # Define a destination inside the protected directory
+     destination = protected_dir.join("destination_file.txt")
+
+     # Instantiate the filesystem (assuming the local file system interface)
+     fs = fsspec.filesystem("file")
+
+     # Try to move the file to the read-only directory, expecting a permission error
+     with pytest.raises(PermissionError):
+         fs.mv(str(source), str(destination), recursive=recursive)
+
+     # Assert the file was not created in the destination
+     assert not os.path.exists(destination)
+
+     # Cleanup: Restore permissions so the directory can be cleaned up
+     if os.name == "nt":
+         os.system(f'icacls "{protected_path}" /remove:d Everyone')
+     else:
+         os.chmod(protected_path, 0o755)  # Restore write permission for cleanup
lib/python3.10/site-packages/fsspec/tests/abstract/open.py ADDED
@@ -0,0 +1,11 @@
+ import pytest
+
+
+ class AbstractOpenTests:
+     def test_open_exclusive(self, fs, fs_target):
+         with fs.open(fs_target, "wb") as f:
+             f.write(b"data")
+         with fs.open(fs_target, "rb") as f:
+             assert f.read() == b"data"
+         with pytest.raises(FileExistsError):
+             fs.open(fs_target, "xb")
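A minimal sketch (not part of the committed diff) of the exclusive-create behaviour this test pins down, run against the local filesystem; the path is illustrative:

import fsspec

fs = fsspec.filesystem("file")
with fs.open("/tmp/example-excl.bin", "wb") as f:  # plain write creates/truncates
    f.write(b"data")
try:
    fs.open("/tmp/example-excl.bin", "xb")  # "x" refuses to clobber an existing file
except FileExistsError:
    print("exclusive create refused, as expected")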
lib/python3.10/site-packages/fsspec/tests/abstract/pipe.py ADDED
@@ -0,0 +1,11 @@
+ import pytest
+
+
+ class AbstractPipeTests:
+     def test_pipe_exclusive(self, fs, fs_target):
+         fs.pipe_file(fs_target, b"data")
+         assert fs.cat_file(fs_target) == b"data"
+         with pytest.raises(FileExistsError):
+             fs.pipe_file(fs_target, b"data", mode="create")
+         fs.pipe_file(fs_target, b"new data", mode="overwrite")
+         assert fs.cat_file(fs_target) == b"new data"
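A minimal sketch (not part of the committed diff) of pipe_file's create/overwrite modes, shown here on the in-memory filesystem; whether a given implementation honours the ``mode`` keyword is exactly what the abstract test above verifies:

import fsspec

fs = fsspec.filesystem("memory")
fs.pipe_file("/x", b"data")
assert fs.cat_file("/x") == b"data"
try:
    fs.pipe_file("/x", b"data", mode="create")  # refuses to clobber an existing file
except FileExistsError:
    pass
fs.pipe_file("/x", b"new data", mode="overwrite")  # explicit overwrite succeeds
assert fs.cat_file("/x") == b"new data"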
lib/python3.10/site-packages/fsspec/tests/abstract/put.py ADDED
@@ -0,0 +1,591 @@
+ from hashlib import md5
+ from itertools import product
+
+ import pytest
+
+ from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS
+
+
+ class AbstractPutTests:
+     def test_put_file_to_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_target,
+         local_join,
+         local_bulk_operations_scenario_0,
+         supports_empty_directories,
+     ):
+         # Copy scenario 1a
+         source = local_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+         if not supports_empty_directories:
+             # Force target directory to exist by adding a dummy file
+             fs.touch(fs_join(target, "dummy"))
+         assert fs.isdir(target)
+
+         target_file2 = fs_join(target, "file2")
+         target_subfile1 = fs_join(target, "subfile1")
+
+         # Copy from source directory
+         fs.put(local_join(source, "file2"), target)
+         assert fs.isfile(target_file2)
+
+         # Copy from sub directory
+         fs.put(local_join(source, "subdir", "subfile1"), target)
+         assert fs.isfile(target_subfile1)
+
+         # Remove copied files
+         fs.rm([target_file2, target_subfile1])
+         assert not fs.exists(target_file2)
+         assert not fs.exists(target_subfile1)
+
+         # Repeat with trailing slash on target
+         fs.put(local_join(source, "file2"), target + "/")
+         assert fs.isdir(target)
+         assert fs.isfile(target_file2)
+
+         fs.put(local_join(source, "subdir", "subfile1"), target + "/")
+         assert fs.isfile(target_subfile1)
+
+     def test_put_file_to_new_directory(
+         self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0
+     ):
+         # Copy scenario 1b
+         source = local_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+
+         fs.put(
+             local_join(source, "subdir", "subfile1"), fs_join(target, "newdir/")
+         )  # Note trailing slash
+         assert fs.isdir(target)
+         assert fs.isdir(fs_join(target, "newdir"))
+         assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+
+     def test_put_file_to_file_in_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_target,
+         local_join,
+         supports_empty_directories,
+         local_bulk_operations_scenario_0,
+     ):
+         # Copy scenario 1c
+         source = local_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+         if not supports_empty_directories:
+             # Force target directory to exist by adding a dummy file
+             fs.touch(fs_join(target, "dummy"))
+         assert fs.isdir(target)
+
+         fs.put(local_join(source, "subdir", "subfile1"), fs_join(target, "newfile"))
+         assert fs.isfile(fs_join(target, "newfile"))
+
+     def test_put_file_to_file_in_new_directory(
+         self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0
+     ):
+         # Copy scenario 1d
+         source = local_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+
+         fs.put(
+             local_join(source, "subdir", "subfile1"),
+             fs_join(target, "newdir", "newfile"),
+         )
+         assert fs.isdir(fs_join(target, "newdir"))
+         assert fs.isfile(fs_join(target, "newdir", "newfile"))
+
+     def test_put_directory_to_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_target,
+         local_bulk_operations_scenario_0,
+         supports_empty_directories,
+     ):
+         # Copy scenario 1e
+         source = local_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+         if not supports_empty_directories:
+             # Force target directory to exist by adding a dummy file
+             dummy = fs_join(target, "dummy")
+             fs.touch(dummy)
+         assert fs.isdir(target)
+
+         for source_slash, target_slash in zip([False, True], [False, True]):
+             s = fs_join(source, "subdir")
+             if source_slash:
+                 s += "/"
+             t = target + "/" if target_slash else target
+
+             # Without recursive does nothing
+             fs.put(s, t)
+             assert fs.ls(target, detail=False) == (
+                 [] if supports_empty_directories else [dummy]
+             )
+
+             # With recursive
+             fs.put(s, t, recursive=True)
+             if source_slash:
+                 assert fs.isfile(fs_join(target, "subfile1"))
+                 assert fs.isfile(fs_join(target, "subfile2"))
+                 assert fs.isdir(fs_join(target, "nesteddir"))
+                 assert fs.isfile(fs_join(target, "nesteddir", "nestedfile"))
+                 assert not fs.exists(fs_join(target, "subdir"))
+
+                 fs.rm(
+                     [
+                         fs_join(target, "subfile1"),
+                         fs_join(target, "subfile2"),
+                         fs_join(target, "nesteddir"),
+                     ],
+                     recursive=True,
+                 )
+             else:
+                 assert fs.isdir(fs_join(target, "subdir"))
+                 assert fs.isfile(fs_join(target, "subdir", "subfile1"))
+                 assert fs.isfile(fs_join(target, "subdir", "subfile2"))
+                 assert fs.isdir(fs_join(target, "subdir", "nesteddir"))
+                 assert fs.isfile(fs_join(target, "subdir", "nesteddir", "nestedfile"))
+
+                 fs.rm(fs_join(target, "subdir"), recursive=True)
+             assert fs.ls(target, detail=False) == (
+                 [] if supports_empty_directories else [dummy]
+             )
+
+             # Limit recursive by maxdepth
+             fs.put(s, t, recursive=True, maxdepth=1)
+             if source_slash:
+                 assert fs.isfile(fs_join(target, "subfile1"))
+                 assert fs.isfile(fs_join(target, "subfile2"))
+                 assert not fs.exists(fs_join(target, "nesteddir"))
+                 assert not fs.exists(fs_join(target, "subdir"))
+
+                 fs.rm(
+                     [
+                         fs_join(target, "subfile1"),
+                         fs_join(target, "subfile2"),
+                     ],
+                     recursive=True,
+                 )
+             else:
+                 assert fs.isdir(fs_join(target, "subdir"))
+                 assert fs.isfile(fs_join(target, "subdir", "subfile1"))
+                 assert fs.isfile(fs_join(target, "subdir", "subfile2"))
+                 assert not fs.exists(fs_join(target, "subdir", "nesteddir"))
+
+                 fs.rm(fs_join(target, "subdir"), recursive=True)
+             assert fs.ls(target, detail=False) == (
+                 [] if supports_empty_directories else [dummy]
+             )
+
+     def test_put_directory_to_new_directory(
+         self,
+         fs,
+         fs_join,
+         fs_target,
+         local_bulk_operations_scenario_0,
+         supports_empty_directories,
+     ):
+         # Copy scenario 1f
+         source = local_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+
+         for source_slash, target_slash in zip([False, True], [False, True]):
+             s = fs_join(source, "subdir")
+             if source_slash:
+                 s += "/"
+             t = fs_join(target, "newdir")
+             if target_slash:
+                 t += "/"
+
+             # Without recursive does nothing
+             fs.put(s, t)
+             if supports_empty_directories:
+                 assert fs.ls(target) == []
+             else:
+                 with pytest.raises(FileNotFoundError):
+                     fs.ls(target)
+
+             # With recursive
+             fs.put(s, t, recursive=True)
+             assert fs.isdir(fs_join(target, "newdir"))
+             assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+             assert fs.isfile(fs_join(target, "newdir", "subfile2"))
+             assert fs.isdir(fs_join(target, "newdir", "nesteddir"))
+             assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile"))
+             assert not fs.exists(fs_join(target, "subdir"))
+
+             fs.rm(fs_join(target, "newdir"), recursive=True)
+             assert not fs.exists(fs_join(target, "newdir"))
+
+             # Limit recursive by maxdepth
+             fs.put(s, t, recursive=True, maxdepth=1)
+             assert fs.isdir(fs_join(target, "newdir"))
+             assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+             assert fs.isfile(fs_join(target, "newdir", "subfile2"))
+             assert not fs.exists(fs_join(target, "newdir", "nesteddir"))
+             assert not fs.exists(fs_join(target, "subdir"))
+
+             fs.rm(fs_join(target, "newdir"), recursive=True)
+             assert not fs.exists(fs_join(target, "newdir"))
+
+     def test_put_glob_to_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_target,
+         local_join,
+         supports_empty_directories,
+         local_bulk_operations_scenario_0,
+     ):
+         # Copy scenario 1g
+         source = local_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+         if not supports_empty_directories:
+             # Force target directory to exist by adding a dummy file
+             dummy = fs_join(target, "dummy")
+             fs.touch(dummy)
+         assert fs.isdir(target)
+
+         for target_slash in [False, True]:
+             t = target + "/" if target_slash else target
+
+             # Without recursive
+             fs.put(local_join(source, "subdir", "*"), t)
+             assert fs.isfile(fs_join(target, "subfile1"))
+             assert fs.isfile(fs_join(target, "subfile2"))
+             assert not fs.isdir(fs_join(target, "nesteddir"))
+             assert not fs.exists(fs_join(target, "nesteddir", "nestedfile"))
+             assert not fs.exists(fs_join(target, "subdir"))
+
+             fs.rm(
+                 [
+                     fs_join(target, "subfile1"),
+                     fs_join(target, "subfile2"),
+                 ],
+                 recursive=True,
+             )
+             assert fs.ls(target, detail=False) == (
+                 [] if supports_empty_directories else [dummy]
+             )
+
+             # With recursive
+             for glob, recursive in zip(["*", "**"], [True, False]):
+                 fs.put(local_join(source, "subdir", glob), t, recursive=recursive)
+                 assert fs.isfile(fs_join(target, "subfile1"))
+                 assert fs.isfile(fs_join(target, "subfile2"))
+                 assert fs.isdir(fs_join(target, "nesteddir"))
+                 assert fs.isfile(fs_join(target, "nesteddir", "nestedfile"))
+                 assert not fs.exists(fs_join(target, "subdir"))
+
+                 fs.rm(
+                     [
+                         fs_join(target, "subfile1"),
+                         fs_join(target, "subfile2"),
+                         fs_join(target, "nesteddir"),
+                     ],
+                     recursive=True,
+                 )
+                 assert fs.ls(target, detail=False) == (
+                     [] if supports_empty_directories else [dummy]
+                 )
+
+                 # Limit recursive by maxdepth
+                 fs.put(
+                     local_join(source, "subdir", glob),
+                     t,
+                     recursive=recursive,
+                     maxdepth=1,
+                 )
+                 assert fs.isfile(fs_join(target, "subfile1"))
+                 assert fs.isfile(fs_join(target, "subfile2"))
+                 assert not fs.exists(fs_join(target, "nesteddir"))
+                 assert not fs.exists(fs_join(target, "subdir"))
+
+                 fs.rm(
+                     [
+                         fs_join(target, "subfile1"),
+                         fs_join(target, "subfile2"),
+                     ],
+                     recursive=True,
+                 )
+                 assert fs.ls(target, detail=False) == (
+                     [] if supports_empty_directories else [dummy]
+                 )
+
+     def test_put_glob_to_new_directory(
+         self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0
+     ):
+         # Copy scenario 1h
+         source = local_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+
+         for target_slash in [False, True]:
+             t = fs_join(target, "newdir")
+             if target_slash:
+                 t += "/"
+
+             # Without recursive
+             fs.put(local_join(source, "subdir", "*"), t)
+             assert fs.isdir(fs_join(target, "newdir"))
+             assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+             assert fs.isfile(fs_join(target, "newdir", "subfile2"))
+             assert not fs.exists(fs_join(target, "newdir", "nesteddir"))
+             assert not fs.exists(fs_join(target, "newdir", "nesteddir", "nestedfile"))
+             assert not fs.exists(fs_join(target, "subdir"))
+             assert not fs.exists(fs_join(target, "newdir", "subdir"))
+
+             fs.rm(fs_join(target, "newdir"), recursive=True)
+             assert not fs.exists(fs_join(target, "newdir"))
+
+             # With recursive
+             for glob, recursive in zip(["*", "**"], [True, False]):
+                 fs.put(local_join(source, "subdir", glob), t, recursive=recursive)
+                 assert fs.isdir(fs_join(target, "newdir"))
+                 assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+                 assert fs.isfile(fs_join(target, "newdir", "subfile2"))
+                 assert fs.isdir(fs_join(target, "newdir", "nesteddir"))
+                 assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile"))
+                 assert not fs.exists(fs_join(target, "subdir"))
+                 assert not fs.exists(fs_join(target, "newdir", "subdir"))
+
+                 fs.rm(fs_join(target, "newdir"), recursive=True)
+                 assert not fs.exists(fs_join(target, "newdir"))
+
+                 # Limit recursive by maxdepth
+                 fs.put(
+                     local_join(source, "subdir", glob),
+                     t,
+                     recursive=recursive,
+                     maxdepth=1,
+                 )
+                 assert fs.isdir(fs_join(target, "newdir"))
+                 assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+                 assert fs.isfile(fs_join(target, "newdir", "subfile2"))
+                 assert not fs.exists(fs_join(target, "newdir", "nesteddir"))
+                 assert not fs.exists(fs_join(target, "subdir"))
+                 assert not fs.exists(fs_join(target, "newdir", "subdir"))
+
+                 fs.rm(fs_join(target, "newdir"), recursive=True)
+                 assert not fs.exists(fs_join(target, "newdir"))
+
+     @pytest.mark.parametrize(
+         GLOB_EDGE_CASES_TESTS["argnames"],
+         GLOB_EDGE_CASES_TESTS["argvalues"],
+     )
+     def test_put_glob_edge_cases(
+         self,
+         path,
+         recursive,
+         maxdepth,
+         expected,
+         fs,
+         fs_join,
+         fs_target,
+         local_glob_edge_cases_files,
+         local_join,
+         fs_sanitize_path,
+     ):
+         # Copy scenario 1g
+         source = local_glob_edge_cases_files
+
+         target = fs_target
+
+         for new_dir, target_slash in product([True, False], [True, False]):
+             fs.mkdir(target)
+
+             t = fs_join(target, "newdir") if new_dir else target
+             t = t + "/" if target_slash else t
+
+             fs.put(local_join(source, path), t, recursive=recursive, maxdepth=maxdepth)
+
+             output = fs.find(target)
+             if new_dir:
+                 prefixed_expected = [
+                     fs_sanitize_path(fs_join(target, "newdir", p)) for p in expected
+                 ]
+             else:
+                 prefixed_expected = [
+                     fs_sanitize_path(fs_join(target, p)) for p in expected
+                 ]
+             assert sorted(output) == sorted(prefixed_expected)
+
+             try:
+                 fs.rm(target, recursive=True)
+             except FileNotFoundError:
+                 pass
+
+     def test_put_list_of_files_to_existing_directory(
+         self,
+         fs,
+         fs_join,
+         fs_target,
+         local_join,
+         local_bulk_operations_scenario_0,
+         supports_empty_directories,
+     ):
+         # Copy scenario 2a
+         source = local_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+         if not supports_empty_directories:
+             # Force target directory to exist by adding a dummy file
+             dummy = fs_join(target, "dummy")
+             fs.touch(dummy)
+         assert fs.isdir(target)
+
+         source_files = [
+             local_join(source, "file1"),
+             local_join(source, "file2"),
+             local_join(source, "subdir", "subfile1"),
+         ]
+
+         for target_slash in [False, True]:
+             t = target + "/" if target_slash else target
+
+             fs.put(source_files, t)
+             assert fs.isfile(fs_join(target, "file1"))
+             assert fs.isfile(fs_join(target, "file2"))
+             assert fs.isfile(fs_join(target, "subfile1"))
+
+             fs.rm(
+                 [
+                     fs_join(target, "file1"),
+                     fs_join(target, "file2"),
+                     fs_join(target, "subfile1"),
+                 ],
+                 recursive=True,
+             )
+             assert fs.ls(target, detail=False) == (
+                 [] if supports_empty_directories else [dummy]
+             )
+
+     def test_put_list_of_files_to_new_directory(
+         self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0
+     ):
+         # Copy scenario 2b
+         source = local_bulk_operations_scenario_0
+
+         target = fs_target
+         fs.mkdir(target)
+
+         source_files = [
+             local_join(source, "file1"),
+             local_join(source, "file2"),
+             local_join(source, "subdir", "subfile1"),
+         ]
+
+         fs.put(source_files, fs_join(target, "newdir") + "/")  # Note trailing slash
+         assert fs.isdir(fs_join(target, "newdir"))
+         assert fs.isfile(fs_join(target, "newdir", "file1"))
+         assert fs.isfile(fs_join(target, "newdir", "file2"))
+         assert fs.isfile(fs_join(target, "newdir", "subfile1"))
+
+     def test_put_directory_recursive(
+         self, fs, fs_join, fs_target, local_fs, local_join, local_path
+     ):
+         # https://github.com/fsspec/filesystem_spec/issues/1062
+         # Recursive cp/get/put of source directory into non-existent target directory.
+         src = local_join(local_path, "src")
+         src_file = local_join(src, "file")
+         local_fs.mkdir(src)
+         local_fs.touch(src_file)
+
+         target = fs_target
+
+         # put without slash
+         assert not fs.exists(target)
+         for loop in range(2):
+             fs.put(src, target, recursive=True)
+             assert fs.isdir(target)
+
+             if loop == 0:
+                 assert fs.isfile(fs_join(target, "file"))
+                 assert not fs.exists(fs_join(target, "src"))
+             else:
+                 assert fs.isfile(fs_join(target, "file"))
+                 assert fs.isdir(fs_join(target, "src"))
+                 assert fs.isfile(fs_join(target, "src", "file"))
+
+         fs.rm(target, recursive=True)
+
+         # put with slash
+         assert not fs.exists(target)
+         for loop in range(2):
+             fs.put(src + "/", target, recursive=True)
+             assert fs.isdir(target)
+             assert fs.isfile(fs_join(target, "file"))
+             assert not fs.exists(fs_join(target, "src"))
+
+     def test_put_directory_without_files_with_same_name_prefix(
+         self,
+         fs,
+         fs_join,
+         fs_target,
+         local_join,
+         local_dir_and_file_with_same_name_prefix,
+         supports_empty_directories,
+     ):
+         # Create the test dirs
+         source = local_dir_and_file_with_same_name_prefix
+         target = fs_target
+
+         # Test without glob
+         fs.put(local_join(source, "subdir"), fs_target, recursive=True)
+
+         assert fs.isfile(fs_join(fs_target, "subfile.txt"))
+         assert not fs.isfile(fs_join(fs_target, "subdir.txt"))
+
+         fs.rm([fs_join(target, "subfile.txt")])
+         if supports_empty_directories:
+             assert fs.ls(target) == []
+         else:
+             assert not fs.exists(target)
+
+         # Test with glob
+         fs.put(local_join(source, "subdir*"), fs_target, recursive=True)
+
+         assert fs.isdir(fs_join(fs_target, "subdir"))
+         assert fs.isfile(fs_join(fs_target, "subdir", "subfile.txt"))
+         assert fs.isfile(fs_join(fs_target, "subdir.txt"))
+
+     def test_copy_with_source_and_destination_as_list(
+         self, fs, fs_target, fs_join, local_join, local_10_files_with_hashed_names
+     ):
+         # Create the test dir
+         source = local_10_files_with_hashed_names
+         target = fs_target
+
+         # Create list of files for source and destination
+         source_files = []
+         destination_files = []
+         for i in range(10):
+             hashed_i = md5(str(i).encode("utf-8")).hexdigest()
+             source_files.append(local_join(source, f"{hashed_i}.txt"))
+             destination_files.append(fs_join(target, f"{hashed_i}.txt"))
+
+         # Copy and assert order was kept
+         fs.put(lpath=source_files, rpath=destination_files)
+
+         for i in range(10):
+             file_content = fs.cat(destination_files[i]).decode("utf-8")
+             assert file_content == str(i)
lib/python3.10/site-packages/iniconfig-2.1.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ uv
lib/python3.10/site-packages/iniconfig-2.1.0.dist-info/METADATA ADDED
@@ -0,0 +1,81 @@
+ Metadata-Version: 2.4
+ Name: iniconfig
+ Version: 2.1.0
+ Summary: brain-dead simple config-ini parsing
+ Project-URL: Homepage, https://github.com/pytest-dev/iniconfig
+ Author-email: Ronny Pfannschmidt <[email protected]>, Holger Krekel <[email protected]>
+ License-Expression: MIT
+ License-File: LICENSE
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: MacOS :: MacOS X
+ Classifier: Operating System :: Microsoft :: Windows
+ Classifier: Operating System :: POSIX
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Topic :: Software Development :: Libraries
+ Classifier: Topic :: Utilities
+ Requires-Python: >=3.8
+ Description-Content-Type: text/x-rst
+
+ iniconfig: brain-dead simple parsing of ini files
+ =======================================================
+
+ iniconfig is a small and simple INI-file parser module
+ having a unique set of features:
+
+ * maintains order of sections and entries
+ * supports multi-line values with or without line-continuations
+ * supports "#" comments everywhere
+ * raises errors with proper line-numbers
+ * no bells and whistles like automatic substitutions
+ * iniconfig raises an Error if two sections have the same name.
+
+ If you encounter issues or have feature wishes, please report them to:
+
+ https://github.com/RonnyPfannschmidt/iniconfig/issues
+
+ Basic Example
+ ===================================
+
+ If you have an ini file like this:
+
+ .. code-block:: ini
+
+     # content of example.ini
+     [section1] # comment
+     name1=value1 # comment
+     name1b=value1,value2 # comment
+
+     [section2]
+     name2=
+         line1
+         line2
+
+ then you can do:
+
+ .. code-block:: pycon
+
+     >>> import iniconfig
+     >>> ini = iniconfig.IniConfig("example.ini")
+     >>> ini['section1']['name1'] # raises KeyError if not exists
+     'value1'
+     >>> ini.get('section1', 'name1b', [], lambda x: x.split(","))
+     ['value1', 'value2']
+     >>> ini.get('section1', 'notexist', [], lambda x: x.split(","))
+     []
+     >>> [x.name for x in list(ini)]
+     ['section1', 'section2']
+     >>> list(list(ini)[0].items())
+     [('name1', 'value1'), ('name1b', 'value1,value2')]
+     >>> 'section1' in ini
+     True
+     >>> 'inexistentsection' in ini
+     False
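+
+ A minimal error-handling sketch of the "errors with proper line-numbers" feature, under two
+ assumptions: that ``ParseError`` (defined in ``iniconfig/exceptions.py``) is importable from the
+ package root, and that ``dup.ini`` is a hypothetical file declaring ``[section1]`` twice:
+
+ .. code-block:: pycon
+
+     >>> import iniconfig
+     >>> try:
+     ...     iniconfig.IniConfig("dup.ini")  # hypothetical file with a duplicate [section1]
+     ... except iniconfig.ParseError as exc:
+     ...     print(exc)  # the message carries the file name and the offending line number
+     ...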
lib/python3.10/site-packages/iniconfig-2.1.0.dist-info/RECORD ADDED
@@ -0,0 +1,11 @@
+ iniconfig-2.1.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
+ iniconfig-2.1.0.dist-info/METADATA,sha256=uS-Ec4h2hMZZFTrbd_4EGKcxBQHnQ3CfwSYjzQPn5cs,2651
+ iniconfig-2.1.0.dist-info/RECORD,,
+ iniconfig-2.1.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ iniconfig-2.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ iniconfig-2.1.0.dist-info/licenses/LICENSE,sha256=NAn6kfes5VeJRjJnZlbjImT-XvdYFTVyXcmiN3RVG9Q,1098
+ iniconfig/__init__.py,sha256=H1UqjEmX-GytGCsqCafTLG-q1CPc_okvCKGairRFMq0,5462
+ iniconfig/_parse.py,sha256=OWGLbmE8GjxcoMWTvnGbck1RoNsTm5bt5ficIRZqWJ8,2436
+ iniconfig/_version.py,sha256=dseuoOPG9WZ1Ezr1SC3wS9_hczkX-b1NdE4TQPHFJso,511
+ iniconfig/exceptions.py,sha256=BJguifCkPayz-n0hI2D5ym1USoAWYNIdi05Jc4r2r4o,490
+ iniconfig/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
lib/python3.10/site-packages/iniconfig-2.1.0.dist-info/REQUESTED ADDED
File without changes
lib/python3.10/site-packages/iniconfig-2.1.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
lib/python3.10/site-packages/iniconfig-2.1.0.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2010 - 2023 Holger Krekel and others
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of
+ this software and associated documentation files (the "Software"), to deal in
+ the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ of the Software, and to permit persons to whom the Software is furnished to do
+ so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
lib/python3.10/site-packages/multiprocess-0.70.12.2.dist-info/COPYING.txt ADDED
@@ -0,0 +1,28 @@
+ Copyright (c) 2006-2008, R Oudkerk
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+ 3. Neither the name of author nor the names of any contributors may be
+    used to endorse or promote products derived from this software
+    without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGE.
lib/python3.10/site-packages/multiprocess-0.70.12.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ uv
lib/python3.10/site-packages/multiprocess-0.70.12.2.dist-info/METADATA ADDED
@@ -0,0 +1,211 @@
+ Metadata-Version: 2.1
+ Name: multiprocess
+ Version: 0.70.12.2
+ Summary: better multiprocessing and multithreading in python
+ Home-page: https://github.com/uqfoundation/multiprocess
+ Author: Mike McKerns
+ Maintainer: Mike McKerns
+ License: BSD
+ Download-URL: https://github.com/uqfoundation/multiprocess/releases/download/multiprocess-0.70.12.2/multiprocess-0.70.12.2.tar.gz
+ Platform: UNKNOWN
+ Classifier: Development Status :: 5 - Production/Stable
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: BSD License
+ Classifier: Programming Language :: Python :: 2
+ Classifier: Programming Language :: Python :: 2.7
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.6
+ Classifier: Programming Language :: Python :: 3.7
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Topic :: Scientific/Engineering
+ Classifier: Topic :: Software Development
+ Requires-Dist: dill (>=0.3.4)
+
+ -----------------------------------------------------------------
+ multiprocess: better multiprocessing and multithreading in python
+ -----------------------------------------------------------------
+
+ About Multiprocess
+ ====================
+
+ ``multiprocess`` is a fork of ``multiprocessing``, and is developed as part of ``pathos``:
+ https://github.com/uqfoundation/pathos
+
+ ``multiprocessing`` is a package for the Python language which supports the
+ spawning of processes using the API of the standard library's
+ ``threading`` module. ``multiprocessing`` has been distributed in the standard
+ library since python 2.6.
+
+ Features:
+
+ - Objects can be transferred between processes using pipes or multi-producer/multi-consumer queues.
+ - Objects can be shared between processes using a server process or (for simple data) shared memory.
+ - Equivalents of all the synchronization primitives in ``threading`` are available.
+ - A ``Pool`` class makes it easy to submit tasks to a pool of worker processes.
+
+
+ ``multiprocess`` is part of ``pathos``, a python framework for heterogeneous computing.
+ ``multiprocess`` is in active development, so any user feedback, bug reports, comments,
+ or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/multiprocess/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query.
+
+ NOTE: A C compiler is required to build the included extension module. For python 3.3 and above, a C compiler is suggested, but not required.
+
+
+ Major Changes
+ ==============
+
+ - enhanced serialization, using ``dill``
+
+
+ Current Release
+ ===============
+
+ This documentation is for version ``multiprocess-0.70.12.2`` (a fork of ``multiprocessing-0.70a1``).
+
+ The latest released version of ``multiprocess`` is available from::
+
+     https://pypi.org/project/multiprocess
+
+ ``multiprocess`` is distributed under a BSD license.
+
+
+ Development Version
+ ===================
+
+ You can get the latest development version with all the shiny new features at::
+
+     https://github.com/uqfoundation
+
+ If you have a new contribution, please submit a pull request.
+
+
+ Installation
+ ============
+
+ ``multiprocess`` is packaged to install from source, so you must
+ download the tarball, unzip, and run the installer::
+
+     [download]
+     $ tar -xvzf multiprocess-0.70.12.2.tgz
+     $ cd multiprocess-0.70.12.2
+     $ python setup.py build
+     $ python setup.py install
+
+ You will be warned of any missing dependencies and/or settings
+ after you run the "build" step above.
+
+ Alternately, ``multiprocess`` can be installed with ``pip`` or ``easy_install``::
+
+     $ pip install multiprocess
+
+ NOTE: A C compiler is required to build the included extension module from source. For python 3.3 and above, a C compiler is suggested, but not required. Binary installs do not require a C compiler.
+
+
+ Requirements
+ ============
+
+ ``multiprocess`` requires::
+
+     - ``python``, **version == 2.7** or **version >= 3.6**, or ``pypy``
+     - ``dill``, **version >= 0.3.4**
+
+ Optional requirements::
+
+     - ``setuptools``, **version >= 0.6**
+
+
+ Basic Usage
+ ===========
+
+ The ``multiprocess.Process`` class follows the API of ``threading.Thread``.
+ For example ::
+
+     from multiprocess import Process, Queue
+
+     def f(q):
+         q.put('hello world')
+
+     if __name__ == '__main__':
+         q = Queue()
+         p = Process(target=f, args=[q])
+         p.start()
+         print(q.get())
+         p.join()
+
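+ Objects can likewise be exchanged over a raw duplex pipe. The following is a minimal
+ illustrative sketch, assuming only the ``Pipe`` factory that ``multiprocess`` mirrors from
+ the standard ``multiprocessing`` API ::
+
+     from multiprocess import Pipe, Process
+
+     def g(conn):
+         conn.send('hello world')  # send through one end of the duplex pipe
+         conn.close()
+
+     if __name__ == '__main__':
+         parent_conn, child_conn = Pipe()
+         p = Process(target=g, args=[child_conn])
+         p.start()
+         print(parent_conn.recv())  # prints 'hello world'
+         p.join()
+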
+ Synchronization primitives like locks, semaphores and conditions are
+ available, for example ::
+
+     >>> from multiprocess import Condition
+     >>> c = Condition()
+     >>> print(c)
+     <Condition(<RLock(None, 0)>), 0>
+     >>> c.acquire()
+     True
+     >>> print(c)
+     <Condition(<RLock(MainProcess, 1)>), 0>
+
+ One can also use a manager to create shared objects either in shared
+ memory or in a server process, for example ::
+
+     >>> from multiprocess import Manager
+     >>> manager = Manager()
+     >>> l = manager.list(range(10))
+     >>> l.reverse()
+     >>> print(l)
+     [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
+     >>> print(repr(l))
+     <Proxy[list] object at 0x00E1B3B0>
+
+ Tasks can be offloaded to a pool of worker processes in various ways,
+ for example ::
+
+     >>> from multiprocess import Pool
+     >>> def f(x): return x*x
+     ...
+     >>> p = Pool(4)
+     >>> result = p.map_async(f, range(10))
+     >>> print(result.get(timeout=1))
+     [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
+
+ When ``dill`` is installed, serialization is extended to most objects,
+ for example ::
+
+     >>> from multiprocess import Pool
+     >>> p = Pool(4)
+     >>> print(p.map(lambda x: (lambda y: y**2)(x) + x, range(10)))
+     [0, 2, 6, 12, 20, 30, 42, 56, 72, 90]
+
+
+ More Information
+ ================
+
+ Probably the best way to get started is to look at the documentation at
+ http://multiprocess.rtfd.io. See ``multiprocess.examples`` for a set of example
+ scripts. You can also run the test suite with ``python -m multiprocess.tests``.
+ Please feel free to submit a ticket on github, or ask a question on
+ stackoverflow (**@Mike McKerns**). If you would like to share how you use
+ ``multiprocess`` in your work, please send an email
+ (to **mmckerns at uqfoundation dot org**).
+
+
+ Citation
+ ========
+
+ If you use ``multiprocess`` to do research that leads to publication, we ask that you
+ acknowledge use of ``multiprocess`` by citing the following in your publication::
+
+     M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis,
+     "Building a framework for predictive science", Proceedings of
+     the 10th Python in Science Conference, 2011;
+     http://arxiv.org/pdf/1202.1056
+
+     Michael McKerns and Michael Aivazis,
+     "pathos: a framework for heterogeneous computing", 2010- ;
+     https://uqfoundation.github.io/project/pathos
+
+ Please see https://uqfoundation.github.io/project/pathos or
+ http://arxiv.org/pdf/1202.1056 for further information.
+
+
lib/python3.10/site-packages/multiprocess-0.70.12.2.dist-info/RECORD ADDED
@@ -0,0 +1,40 @@
+ _multiprocess/__init__.py,sha256=zuJ1_0yr-gCp0oAe5-vLNCp1myHAXuKVc5MRLv9lzLA,31
+ multiprocess-0.70.12.2.dist-info/COPYING.txt,sha256=n3_yfLkw0sMgLuB-PS1hRvTeZ20GmjPaMWbJjNuoOpU,1493
+ multiprocess-0.70.12.2.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
+ multiprocess-0.70.12.2.dist-info/LICENSE,sha256=JxI4GBBqj5Kc1mivfwcAYeMERyc1g7s3lEuKRbitHrw,1934
+ multiprocess-0.70.12.2.dist-info/METADATA,sha256=_I_CULDdj7EcO-xCtYoXLMbhfAvST1Vtzw9iMJvkJTY,6919
+ multiprocess-0.70.12.2.dist-info/RECORD,,
+ multiprocess-0.70.12.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ multiprocess-0.70.12.2.dist-info/WHEEL,sha256=7QEGVCap_ZxScbUFuCWpZks0RoQUFFqL2hOoEAOaKLE,93
+ multiprocess-0.70.12.2.dist-info/top_level.txt,sha256=qtJc8GNdvi6suNpISX0Myln9AXJBYrNuas1MCqRPPqg,27
+ multiprocess/__init__.py,sha256=IPMNMbDOPugZSGkgZlVV50OvjG9B4VJDbTRKW_PLVrU,943
+ multiprocess/connection.py,sha256=GhvxnMNj7sk8Jb0S90gfTx8SHK9YKbquiNFmTZ6uBA4,32182
+ multiprocess/context.py,sha256=zpJw0Rb1QhPO_OUHW7PIoXhWt0yAC2g9P0htI1ExNzY,11270
+ multiprocess/dummy/__init__.py,sha256=kSekDqD_NCy0FDg7XnxZSgW-Ldg1_iRr07sNwDajKpA,3061
+ multiprocess/dummy/connection.py,sha256=1j3Rl5_enBM-_kMO6HDmum3kPAoFE4Zs485HV5H-V6s,1598
+ multiprocess/forkserver.py,sha256=hiltKfLImDYJyAcezNAgMDaQznB2LtYWgwre0QroLRg,12138
+ multiprocess/heap.py,sha256=9rt5u5m5rkhJNfDWiCLpYDoWIt0LbElmx52yMqk7phQ,11626
+ multiprocess/managers.py,sha256=Jy6sf_lW81InSpV-GrQf8_koAL3zWZX8TTtqG3i8sK8,47311
+ multiprocess/pool.py,sha256=s8-RW_sBWjZUonbgOyrWJDEr-yW-n7gPspJTWRceKbg,32555
+ multiprocess/popen_fork.py,sha256=Nvq5vVId24UfkOQxXhxZbcXuo8d6YMc409yRXAamTd0,2374
+ multiprocess/popen_forkserver.py,sha256=SrEbV8Wv0Uu_UegkaW-cayXRdjTGXr560Yyy90pj-yE,2227
+ multiprocess/popen_spawn_posix.py,sha256=l7XSWqR5UWiUSJh35qeSElLuNfUeEYwvH5HzKRnnyqg,2029
+ multiprocess/popen_spawn_win32.py,sha256=A9uvlPmhO8JBzNcEU_Gmix2Q_qYJW1NXZgXPwtN5Ao0,4011
+ multiprocess/process.py,sha256=3ODgVhevgOl2RXWMv3V_ESX5_CHJMOUkOQftt61-qrE,12000
+ multiprocess/queues.py,sha256=XRZqsorfG9TlM8B6tKqwkIb03dnyGQ2a7W7TZkBhLYM,12109
+ multiprocess/reduction.py,sha256=NQQ6KbDhmuAyaDeWaIarTZQokGPhcFda1poNnPm5uNc,9637
+ multiprocess/resource_sharer.py,sha256=nEApLhMQqd8KunfaNKl3n8vdeiCGPxKrSL1Ja0nNAEk,5132
+ multiprocess/resource_tracker.py,sha256=AUypNVano3I0_mEA1GXmdg0Vfy0bsKMUU8mxCwg6uCs,8696
+ multiprocess/shared_memory.py,sha256=3c-lnw0tGQaqWlsPGyfpkCHQh_KQvy1JX6WF1IRMzJ0,18521
+ multiprocess/sharedctypes.py,sha256=d-9SKRJHRlJJC331IxEoWOUXIeY9zxCbhWejXOmzGw0,6306
+ multiprocess/spawn.py,sha256=cgtV66HhV_yIVzvdblc8bVdSpem16Ks0BOFu_bV5PDQ,9293
+ multiprocess/synchronize.py,sha256=6q1ijwWyWLWLO8uUtaYT9MKepAYKfdzWPSEZGyJFP4s,11829
+ multiprocess/tests/__init__.py,sha256=qVV0YnbbgqccqONtDolmgSdXX3eM9EIjzYFudfBiITQ,190661
+ multiprocess/tests/__main__.py,sha256=3mqbVdRWqb9lCQ0Nndn1m2TIvqqk8P_QAGEXNxlcd3I,948
+ multiprocess/tests/mp_fork_bomb.py,sha256=6ADOEzh1aXHZ21aOGoBPhKcgB5sj15G9tQVgSc6GrlY,448
+ multiprocess/tests/mp_preload.py,sha256=cj2tUiPQQqGhPrXBO9LfaY8l0Dk29UdlHMJdG-7LTpQ,351
+ multiprocess/tests/test_multiprocessing_fork.py,sha256=BzF6mmub8lAnOGbJF888YrWjKdzcg5TP-v63pckKGqs,479
+ multiprocess/tests/test_multiprocessing_forkserver.py,sha256=aefqw98Z4nriFWxijdQqJ9x1iK3zN1RW51Dd5NO4XUU,394
+ multiprocess/tests/test_multiprocessing_main_handling.py,sha256=sdavO-pion69T5Cyc6Cl91hsPoc-V5JMrvFD3fhow6M,11812
+ multiprocess/tests/test_multiprocessing_spawn.py,sha256=jbm4_yI_Dxj3CAl83dwqbNBDwhPyKPtPW65p9KlSGWA,279
+ multiprocess/util.py,sha256=Et2Rtd_Hc7-wXgtLOqlgHCQrK4wGm3LkmfYWBXOLsxw,13993