Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .gitattributes +4 -0
- lib/python3.10/site-packages/attrs-25.3.0.dist-info/INSTALLER +1 -0
- lib/python3.10/site-packages/attrs-25.3.0.dist-info/METADATA +232 -0
- lib/python3.10/site-packages/attrs-25.3.0.dist-info/RECORD +37 -0
- lib/python3.10/site-packages/attrs-25.3.0.dist-info/REQUESTED +0 -0
- lib/python3.10/site-packages/attrs-25.3.0.dist-info/WHEEL +4 -0
- lib/python3.10/site-packages/attrs-25.3.0.dist-info/licenses/LICENSE +21 -0
- lib/python3.10/site-packages/babel/locale-data/ccp.dat +3 -0
- lib/python3.10/site-packages/babel/locale-data/mn.dat +3 -0
- lib/python3.10/site-packages/babel/locale-data/ro.dat +3 -0
- lib/python3.10/site-packages/babel/locale-data/tg.dat +3 -0
- lib/python3.10/site-packages/csvw-3.5.1.dist-info/INSTALLER +1 -0
- lib/python3.10/site-packages/csvw-3.5.1.dist-info/RECORD +24 -0
- lib/python3.10/site-packages/csvw-3.5.1.dist-info/REQUESTED +0 -0
- lib/python3.10/site-packages/csvw-3.5.1.dist-info/WHEEL +6 -0
- lib/python3.10/site-packages/csvw-3.5.1.dist-info/entry_points.txt +7 -0
- lib/python3.10/site-packages/onnx-1.17.0.dist-info/LICENSE +202 -0
- lib/python3.10/site-packages/onnx-1.17.0.dist-info/METADATA +350 -0
- lib/python3.10/site-packages/onnx-1.17.0.dist-info/RECORD +0 -0
- lib/python3.10/site-packages/onnx-1.17.0.dist-info/WHEEL +6 -0
- lib/python3.10/site-packages/onnx-1.17.0.dist-info/top_level.txt +1 -0
- lib/python3.10/site-packages/parameterized/__init__.py +3 -0
- lib/python3.10/site-packages/parameterized/parameterized.py +732 -0
- lib/python3.10/site-packages/parameterized/test.py +695 -0
- lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so +0 -0
- lib/python3.10/site-packages/torch/_VF.py +31 -0
- lib/python3.10/site-packages/torch/_VF.pyi +0 -0
- lib/python3.10/site-packages/torch/__config__.py +23 -0
- lib/python3.10/site-packages/torch/__future__.py +75 -0
- lib/python3.10/site-packages/torch/__init__.py +0 -0
- lib/python3.10/site-packages/torch/_appdirs.py +667 -0
- lib/python3.10/site-packages/torch/_classes.py +56 -0
- lib/python3.10/site-packages/torch/_compile.py +38 -0
- lib/python3.10/site-packages/torch/_custom_ops.py +324 -0
- lib/python3.10/site-packages/torch/_deploy.py +104 -0
- lib/python3.10/site-packages/torch/_environment.py +2 -0
- lib/python3.10/site-packages/torch/_linalg_utils.py +150 -0
- lib/python3.10/site-packages/torch/_namedtensor_internals.py +159 -0
- lib/python3.10/site-packages/torch/_ops.py +1362 -0
- lib/python3.10/site-packages/torch/_python_dispatcher.py +182 -0
- lib/python3.10/site-packages/torch/_size_docs.py +39 -0
- lib/python3.10/site-packages/torch/_streambase.py +20 -0
- lib/python3.10/site-packages/torch/_tensor.py +1775 -0
- lib/python3.10/site-packages/torch/_tensor_docs.py +0 -0
- lib/python3.10/site-packages/torch/_tensor_str.py +704 -0
- lib/python3.10/site-packages/torch/_torch_docs.py +0 -0
- lib/python3.10/site-packages/torch/_utils.py +1056 -0
- lib/python3.10/site-packages/torch/_utils_internal.py +274 -0
- lib/python3.10/site-packages/torch/_vmap_internals.py +245 -0
- lib/python3.10/site-packages/torch/_weights_only_unpickler.py +553 -0
.gitattributes
CHANGED
@@ -140,3 +140,7 @@ lib/python3.10/site-packages/babel/locale-data/to.dat filter=lfs diff=lfs merge=lfs -text
 lib/python3.10/site-packages/babel/locale-data/yrl.dat filter=lfs diff=lfs merge=lfs -text
 lib/python3.10/site-packages/babel/locale-data/yo.dat filter=lfs diff=lfs merge=lfs -text
 lib/python3.10/site-packages/babel/locale-data/uk.dat filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/babel/locale-data/ccp.dat filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/babel/locale-data/ro.dat filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/babel/locale-data/tg.dat filter=lfs diff=lfs merge=lfs -text
+lib/python3.10/site-packages/babel/locale-data/mn.dat filter=lfs diff=lfs merge=lfs -text
lib/python3.10/site-packages/attrs-25.3.0.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
uv
lib/python3.10/site-packages/attrs-25.3.0.dist-info/METADATA
ADDED
@@ -0,0 +1,232 @@
Metadata-Version: 2.4
Name: attrs
Version: 25.3.0
Summary: Classes Without Boilerplate
Project-URL: Documentation, https://www.attrs.org/
Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html
Project-URL: GitHub, https://github.com/python-attrs/attrs
Project-URL: Funding, https://github.com/sponsors/hynek
Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi
Author-email: Hynek Schlawack <[email protected]>
License-Expression: MIT
License-File: LICENSE
Keywords: attribute,boilerplate,class
Classifier: Development Status :: 5 - Production/Stable
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Typing :: Typed
Requires-Python: >=3.8
Provides-Extra: benchmark
Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'benchmark'
Requires-Dist: hypothesis; extra == 'benchmark'
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'benchmark'
Requires-Dist: pympler; extra == 'benchmark'
Requires-Dist: pytest-codspeed; extra == 'benchmark'
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'benchmark'
Requires-Dist: pytest-xdist[psutil]; extra == 'benchmark'
Requires-Dist: pytest>=4.3.0; extra == 'benchmark'
Provides-Extra: cov
Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'cov'
Requires-Dist: coverage[toml]>=5.3; extra == 'cov'
Requires-Dist: hypothesis; extra == 'cov'
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'cov'
Requires-Dist: pympler; extra == 'cov'
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'cov'
Requires-Dist: pytest-xdist[psutil]; extra == 'cov'
Requires-Dist: pytest>=4.3.0; extra == 'cov'
Provides-Extra: dev
Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'dev'
Requires-Dist: hypothesis; extra == 'dev'
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'dev'
Requires-Dist: pre-commit-uv; extra == 'dev'
Requires-Dist: pympler; extra == 'dev'
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'dev'
Requires-Dist: pytest-xdist[psutil]; extra == 'dev'
Requires-Dist: pytest>=4.3.0; extra == 'dev'
Provides-Extra: docs
Requires-Dist: cogapp; extra == 'docs'
Requires-Dist: furo; extra == 'docs'
Requires-Dist: myst-parser; extra == 'docs'
Requires-Dist: sphinx; extra == 'docs'
Requires-Dist: sphinx-notfound-page; extra == 'docs'
Requires-Dist: sphinxcontrib-towncrier; extra == 'docs'
Requires-Dist: towncrier; extra == 'docs'
Provides-Extra: tests
Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'tests'
Requires-Dist: hypothesis; extra == 'tests'
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests'
Requires-Dist: pympler; extra == 'tests'
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests'
Requires-Dist: pytest-xdist[psutil]; extra == 'tests'
Requires-Dist: pytest>=4.3.0; extra == 'tests'
Provides-Extra: tests-mypy
Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests-mypy'
Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.10') and extra == 'tests-mypy'
Description-Content-Type: text/markdown

<p align="center">
  <a href="https://www.attrs.org/">
    <img src="https://raw.githubusercontent.com/python-attrs/attrs/main/docs/_static/attrs_logo.svg" width="35%" alt="attrs" />
  </a>
</p>


*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)).
[Trusted by NASA](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-profile/customizing-your-profile/personalizing-your-profile#list-of-qualifying-repositories-for-mars-2020-helicopter-contributor-achievement) for Mars missions since 2020!

Its main goal is to help you to write **concise** and **correct** software without slowing down your code.


## Sponsors

*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek).
Especially those generously supporting us at the *The Organization* tier and higher:

<!-- sponsor-break-begin -->

<p align="center">

<!-- [[[cog
import pathlib, tomllib

for sponsor in tomllib.loads(pathlib.Path("pyproject.toml").read_text())["tool"]["sponcon"]["sponsors"]:
      print(f'<a href="{sponsor["url"]}"><img title="{sponsor["title"]}" src="https://www.attrs.org/en/25.3.0/_static/sponsors/{sponsor["img"]}" width="190" /></a>')
]]] -->
<a href="https://www.variomedia.de/"><img title="Variomedia AG" src="https://www.attrs.org/en/25.3.0/_static/sponsors/Variomedia.svg" width="190" /></a>
<a href="https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek"><img title="Tidelift" src="https://www.attrs.org/en/25.3.0/_static/sponsors/Tidelift.svg" width="190" /></a>
<a href="https://klaviyo.com/"><img title="Klaviyo" src="https://www.attrs.org/en/25.3.0/_static/sponsors/Klaviyo.svg" width="190" /></a>
<a href="https://privacy-solutions.org/"><img title="Privacy Solutions" src="https://www.attrs.org/en/25.3.0/_static/sponsors/Privacy-Solutions.svg" width="190" /></a>
<a href="https://www.emsys-renewables.com/"><img title="emsys renewables" src="https://www.attrs.org/en/25.3.0/_static/sponsors/emsys-renewables.svg" width="190" /></a>
<a href="https://filepreviews.io/"><img title="FilePreviews" src="https://www.attrs.org/en/25.3.0/_static/sponsors/FilePreviews.svg" width="190" /></a>
<a href="https://polar.sh/"><img title="Polar" src="https://www.attrs.org/en/25.3.0/_static/sponsors/Polar.svg" width="190" /></a>
<!-- [[[end]]] -->

</p>

<!-- sponsor-break-end -->

<p align="center">
  <strong>Please consider <a href="https://github.com/sponsors/hynek">joining them</a> to help make <em>attrs</em>’s maintenance more sustainable!</strong>
</p>

<!-- teaser-end -->

## Example

*attrs* gives you a class decorator and a way to declaratively define the attributes on that class:

<!-- code-begin -->

```pycon
>>> from attrs import asdict, define, make_class, Factory

>>> @define
... class SomeClass:
...     a_number: int = 42
...     list_of_numbers: list[int] = Factory(list)
...
...     def hard_math(self, another_number):
...         return self.a_number + sum(self.list_of_numbers) * another_number


>>> sc = SomeClass(1, [1, 2, 3])
>>> sc
SomeClass(a_number=1, list_of_numbers=[1, 2, 3])

>>> sc.hard_math(3)
19
>>> sc == SomeClass(1, [1, 2, 3])
True
>>> sc != SomeClass(2, [3, 2, 1])
True

>>> asdict(sc)
{'a_number': 1, 'list_of_numbers': [1, 2, 3]}

>>> SomeClass()
SomeClass(a_number=42, list_of_numbers=[])

>>> C = make_class("C", ["a", "b"])
>>> C("foo", "bar")
C(a='foo', b='bar')
```

After *declaring* your attributes, *attrs* gives you:

- a concise and explicit overview of the class's attributes,
- a nice human-readable `__repr__`,
- equality-checking methods,
- an initializer,
- and much more,

*without* writing dull boilerplate code again and again and *without* runtime performance penalties.

---

This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0.
The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**.

Check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for an in-depth explanation!
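
For a quick side-by-side, here is a minimal sketch (not from the upstream README; it assumes both the classic `attr` and modern `attrs` namespaces are importable, as they are in any attrs >= 21.3.0 install) of the same class under both API generations:

```python
import attr   # classic namespace
import attrs  # modern namespace

# Modern API: type annotations declare the fields.
@attrs.define
class ModernPoint:
    x: int = 0
    y: int = 0

# Classic API: attr.ib() assignments declare the fields.
@attr.s
class ClassicPoint:
    x = attr.ib(default=0)
    y = attr.ib(default=0)

print(ModernPoint(1, 2))   # ModernPoint(x=1, y=2)
print(ClassicPoint(1, 2))  # ClassicPoint(x=1, y=2)
```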


### Hate Type Annotations!?

No problem!
Types are entirely **optional** with *attrs*.
Simply assign `attrs.field()` to the attributes instead of annotating them with types:

```python
from attrs import define, field

@define
class SomeClass:
    a_number = field(default=42)
    list_of_numbers = field(factory=list)
```


## Data Classes

On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*).
In practice it does a lot more and is more flexible.
For instance, it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization), has a replacement for `__init_subclass__`, and allows for stepping through the generated methods using a debugger.

For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes), but generally speaking, we are more likely to commit crimes against nature to make things work that one would expect to work, but that are quite complicated in practice.
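
As a concrete sketch of the NumPy case (a minimal example, assuming NumPy is installed; `attrs.cmp_using` is the documented hook), the generated `__eq__` can be swapped for an array-aware comparison on a per-field basis:

```python
import numpy as np
from attrs import cmp_using, define, field

@define
class Measurement:
    # Plain `==` on NumPy arrays is element-wise, which breaks a naive
    # generated __eq__; cmp_using() substitutes np.array_equal instead.
    samples: np.ndarray = field(eq=cmp_using(eq=np.array_equal))

assert Measurement(np.array([1, 2])) == Measurement(np.array([1, 2]))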


## Project Information

- [**Changelog**](https://www.attrs.org/en/stable/changelog.html)
- [**Documentation**](https://www.attrs.org/)
- [**PyPI**](https://pypi.org/project/attrs/)
- [**Source Code**](https://github.com/python-attrs/attrs)
- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md)
- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs)
- **Get Help**: use the `python-attrs` tag on [Stack Overflow](https://stackoverflow.com/questions/tagged/python-attrs)


### *attrs* for Enterprise

Available as part of the [Tidelift Subscription](https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek).

The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications.
Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use.

## Release Information

### Changes

- Restore support for generator-based `field_transformer`s.
  [#1417](https://github.com/python-attrs/attrs/issues/1417)
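
A rough sketch of what a generator-based `field_transformer` looks like (illustrative only; it assumes the documented hook signature — a callable receiving the class and its fields — and `Attribute.evolve()`):

```python
from attrs import define

def auto_convert(cls, fields):
    # Written as a generator: yield each field instead of building a list.
    # Adds a converter derived from the field's type annotation, if any.
    for f in fields:
        yield f.evolve(converter=f.type) if f.type is not None else f

@define(field_transformer=auto_convert)
class Point:
    x: int
    y: int

print(Point("1", "2"))  # Point(x=1, y=2) -- strings converted via int()
```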


---

[Full changelog →](https://www.attrs.org/en/stable/changelog.html)
lib/python3.10/site-packages/attrs-25.3.0.dist-info/RECORD
ADDED
@@ -0,0 +1,37 @@
attr/__init__.py,sha256=fOYIvt1eGSqQre4uCS3sJWKZ0mwAuC8UD6qba5OS9_U,2057
attr/__init__.pyi,sha256=QIXnnHPoucmDWkbpNsWTP-cgJ1bn8le7DjyRa_wYdew,11281
attr/_cmp.py,sha256=3Nn1TjxllUYiX_nJoVnEkXoDk0hM1DYKj5DE7GZe4i0,4117
attr/_cmp.pyi,sha256=U-_RU_UZOyPUEQzXE6RMYQQcjkZRY25wTH99sN0s7MM,368
attr/_compat.py,sha256=4hlXbWhdDjQCDK6FKF1EgnZ3POiHgtpp54qE0nxaGHg,2704
attr/_config.py,sha256=dGq3xR6fgZEF6UBt_L0T-eUHIB4i43kRmH0P28sJVw8,843
attr/_funcs.py,sha256=5-tUKJtp3h5El55EcDl6GWXFp68fT8D8U7uCRN6497I,15854
attr/_make.py,sha256=lBUPPmxiA1BeHzB6OlHoCEh--tVvM1ozXO8eXOa6g4c,96664
attr/_next_gen.py,sha256=7FRkbtl_N017SuBhf_Vw3mw2c2pGZhtCGOzadgz7tp4,24395
attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469
attr/_version_info.py,sha256=exSqb3b5E-fMSsgZAlEw9XcLpEgobPORCZpcaEglAM4,2121
attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209
attr/converters.py,sha256=GlDeOzPeTFgeBBLbj9G57Ez5lAk68uhSALRYJ_exe84,3861
attr/converters.pyi,sha256=orU2bff-VjQa2kMDyvnMQV73oJT2WRyQuw4ZR1ym1bE,643
attr/exceptions.py,sha256=HRFq4iybmv7-DcZwyjl6M1euM2YeJVK_hFxuaBGAngI,1977
attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539
attr/filters.py,sha256=ZBiKWLp3R0LfCZsq7X11pn9WX8NslS2wXM4jsnLOGc8,1795
attr/filters.pyi,sha256=3J5BG-dTxltBk1_-RuNRUHrv2qu1v8v4aDNAQ7_mifA,208
attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
attr/setters.py,sha256=5-dcT63GQK35ONEzSgfXCkbB7pPkaR-qv15mm4PVSzQ,1617
attr/setters.pyi,sha256=NnVkaFU1BB4JB8E4JuXyrzTUgvtMpj8p3wBdJY7uix4,584
attr/validators.py,sha256=WaB1HLAHHqRHWsrv_K9H-sJ7ESil3H3Cmv2d8TtVZx4,20046
attr/validators.pyi,sha256=s2WhKPqskxbsckJfKk8zOuuB088GfgpyxcCYSNFLqNU,2603
attrs-25.3.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
attrs-25.3.0.dist-info/METADATA,sha256=W38cREj7s1wqNf1fg4hVwZmL1xh0AdSp4IhtTMROinw,10993
attrs-25.3.0.dist-info/RECORD,,
attrs-25.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
attrs-25.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
attrs-25.3.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109
attrs/__init__.py,sha256=qeQJZ4O08yczSn840v9bYOaZyRE81WsVi-QCrY3krCU,1107
attrs/__init__.pyi,sha256=nZmInocjM7tHV4AQw0vxO_fo6oJjL_PonlV9zKKW8DY,7931
attrs/converters.py,sha256=8kQljrVwfSTRu8INwEk8SI0eGrzmWftsT7rM0EqyohM,76
attrs/exceptions.py,sha256=ACCCmg19-vDFaDPY9vFl199SPXCQMN_bENs4DALjzms,76
attrs/filters.py,sha256=VOUMZug9uEU6dUuA0dF1jInUK0PL3fLgP0VBS5d-CDE,73
attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
attrs/setters.py,sha256=eL1YidYQV3T2h9_SYIZSZR1FAcHGb1TuCTy0E0Lv2SU,73
attrs/validators.py,sha256=xcy6wD5TtTkdCG1f4XWbocPSO0faBjk5IfVJfP6SUj0,76
lib/python3.10/site-packages/attrs-25.3.0.dist-info/REQUESTED
ADDED
File without changes
lib/python3.10/site-packages/attrs-25.3.0.dist-info/WHEEL
ADDED
@@ -0,0 +1,4 @@
Wheel-Version: 1.0
Generator: hatchling 1.27.0
Root-Is-Purelib: true
Tag: py3-none-any
lib/python3.10/site-packages/attrs-25.3.0.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2015 Hynek Schlawack and the attrs contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
lib/python3.10/site-packages/babel/locale-data/ccp.dat
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:267fcee4cc0de83306c4dad752976b853ff3e59398877f929b9c8651c57c6c82
size 207292
lib/python3.10/site-packages/babel/locale-data/mn.dat
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3b7a12a670dddcfb7e751af0760d6fdb8e52e88ca6dd2067659de0a8e8505b90
size 183647
lib/python3.10/site-packages/babel/locale-data/ro.dat
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d7752947aea3fa7420e246e0187e236baca00ead64dc2084af290459e786f326
size 200714
lib/python3.10/site-packages/babel/locale-data/tg.dat
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:12b32fd18e121a7a3d8134896e13152995e249efca3525056a805e20368ff2d9
size 117163
lib/python3.10/site-packages/csvw-3.5.1.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
uv
lib/python3.10/site-packages/csvw-3.5.1.dist-info/RECORD
ADDED
@@ -0,0 +1,24 @@
../../../bin/csvw2datasette,sha256=idn-zDFxQgtqizjVsFqzZN1EMbYENQKi12Xw0QMn-XA,344
../../../bin/csvw2json,sha256=AqA3j0EMLtP0vSpSZXb1skG2wrim_ZEY3co30lraHdg,334
../../../bin/csvw2markdown,sha256=jsHR8SPiB14K6ukm0x_Lnl3Cj1pM4E5ab6DIsjXFctg,342
../../../bin/csvw2sqlite,sha256=WP2TwzIfvvN-KiLHVqbvmKKDzg7s_2jk7v7pqGn3cJE,338
../../../bin/csvwdescribe,sha256=kosAokPfV39E5w_C3e0fdiHus6FAv5Aoigj1siG2NJA,340
../../../bin/csvwvalidate,sha256=MtOnfL_1X_t88S9OnRmyqGH6EUszXjQC-_bAQeSHUN0,340
csvw-3.5.1.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
csvw-3.5.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
csvw-3.5.1.dist-info/METADATA,sha256=khGOskjHlAs3P3SD-ZhmcQnULLZqO2UnFvRF23akSvs,10158
csvw-3.5.1.dist-info/RECORD,,
csvw-3.5.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
csvw-3.5.1.dist-info/WHEEL,sha256=TJ49d73sNs10F0aze1W_bTW2P_X7-F4YXOlBqoqA-jY,109
csvw-3.5.1.dist-info/entry_points.txt,sha256=fk9IQtAGIpAeWAXe3I3xGNozh4ww_7b80c_vD2GFuEI,268
csvw-3.5.1.dist-info/top_level.txt,sha256=mkl3l5vQaigdnccNcB3IT5J_Puu2Rp5PVYHqoUOh0Vo,5
csvw/__init__.py,sha256=rvhNNJEynZvUYykkF_Z7-zBS8xeg_3IO1ioj77XRDqA,781
csvw/__main__.py,sha256=N1Ff8Buz8_xp1Fbw_nNHEcmq_WW2k8SKiLeguQ5_F5Q,5622
csvw/datatypes.py,sha256=gKWd-WmoDgyoprmUioU3W7nsMu4N52cP1o45HyeJ3QQ,38609
csvw/db.py,sha256=2iyFhMmioPJwww447wN1S6o1TcpW0Vz5cnR4zAfxwSU,25011
csvw/dsv.py,sha256=Fy05xY-ffwG41kJHeaaJ-bicoDprKaYH_98ED1YjVFY,15897
csvw/dsv_dialects.py,sha256=TUVi-gtYXC-OhHQbZZykpvnOfHgghsFKk0wE7jokAaQ,5177
csvw/frictionless.py,sha256=YuC36bL7Bs3elZ-MIpLBpcNIkKCt49jA0-VqGyNW4-0,8284
csvw/jsonld.py,sha256=Nw-Vl8HG3gKCrAW5WdDs3wctr18Fc_R_rTSxqGyZqE8,6535
csvw/metadata.py,sha256=lVE1vNpXRcIxV8e4rcZp5t2ShWMmH6GbcHJrPP6ufEc,71853
csvw/utils.py,sha256=46wWXpjmpd7esW96MeCPydqCMQoXYxmblalwKmbnzrc,8280
lib/python3.10/site-packages/csvw-3.5.1.dist-info/REQUESTED
ADDED
File without changes
lib/python3.10/site-packages/csvw-3.5.1.dist-info/WHEEL
ADDED
@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: setuptools (75.2.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
lib/python3.10/site-packages/csvw-3.5.1.dist-info/entry_points.txt
ADDED
@@ -0,0 +1,7 @@
[console_scripts]
csvw2datasette = csvw.__main__:csvw2datasette
csvw2json = csvw.__main__:csvw2json
csvw2markdown = csvw.__main__:csvw2markdown
csvw2sqlite = csvw.__main__:csvw2sqlite
csvwdescribe = csvw.__main__:csvwdescribe
csvwvalidate = csvw.__main__:csvwvalidate
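
These `[console_scripts]` declarations are what produced the `bin/` wrappers listed in the csvw RECORD above. As a hedged illustration (standard-library behavior, not csvw-specific), the same mappings can be inspected at runtime with `importlib.metadata`:

```python
from importlib.metadata import entry_points

# List the console scripts declared by csvw's entry_points.txt
# (assumes csvw is installed in the current environment).
for ep in entry_points(group="console_scripts"):
    if ep.module.startswith("csvw"):
        print(ep.name, "->", ep.value)
        main = ep.load()  # the callable a bin/ wrapper would invoke
```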
lib/python3.10/site-packages/onnx-1.17.0.dist-info/LICENSE
ADDED
@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
lib/python3.10/site-packages/onnx-1.17.0.dist-info/METADATA
ADDED
@@ -0,0 +1,350 @@
Metadata-Version: 2.1
Name: onnx
Version: 1.17.0
Summary: Open Neural Network Exchange
Author-email: ONNX Contributors <[email protected]>
License: Apache License v2.0
Project-URL: Homepage, https://onnx.ai/
Project-URL: Repository, https://github.com/onnx/onnx
Classifier: Programming Language :: Python :: 3
Requires-Python: >=3.8
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: numpy >=1.20
Requires-Dist: protobuf >=3.20.2
Provides-Extra: reference
Requires-Dist: google-re2 ; extra == 'reference'
Requires-Dist: Pillow ; extra == 'reference'

<!--
Copyright (c) ONNX Project Contributors

SPDX-License-Identifier: Apache-2.0
-->

<p align="center"><img width="40%" src="https://github.com/onnx/onnx/raw/main/docs/onnx-horizontal-color.png" /></p>

[PyPI](https://pypi.org/project/onnx)
[CI](https://github.com/onnx/onnx/actions/workflows/main.yml)
[CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3313)
[OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/onnx/onnx)
[REUSE](https://api.reuse.software/info/github.com/onnx/onnx)
[Ruff](https://github.com/astral-sh/ruff)
[Black](https://github.com/psf/black)

[Open Neural Network Exchange (ONNX)](https://onnx.ai) is an open ecosystem that empowers AI developers
to choose the right tools as their project evolves. ONNX provides an open source format for AI models, both deep learning and traditional ML. It defines an extensible computation graph model, as well as definitions of built-in operators and standard
data types. Currently we focus on the capabilities needed for inferencing (scoring).

ONNX is [widely supported](http://onnx.ai/supported-tools) and can be found in many frameworks, tools, and hardware. Enabling interoperability between different frameworks and streamlining the path from research to production helps increase the speed of innovation in the AI community. We invite the community to join us and further evolve ONNX.
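
To make the graph model concrete, here is a minimal sketch (using the public `onnx.helper` API; the model name and file path are illustrative) that builds and validates a one-node model:

```python
import onnx
from onnx import TensorProto, helper

# A graph computing Y = Relu(X), where X and Y are float32 tensors of shape [1, 4].
node = helper.make_node("Relu", inputs=["X"], outputs=["Y"])
graph = helper.make_graph(
    [node],
    "tiny-relu",
    inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 4])],
    outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 4])],
)
model = helper.make_model(graph)

onnx.checker.check_model(model)   # validate against the ONNX spec
onnx.save(model, "tiny_relu.onnx")
```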
40 |
+
|
41 |
+
# Use ONNX
|
42 |
+
|
43 |
+
* [Documentation of ONNX Python Package](https://onnx.ai/onnx/)
|
44 |
+
* [Tutorials for creating ONNX models](https://github.com/onnx/tutorials)
|
45 |
+
* [Pre-trained ONNX models](https://github.com/onnx/models)
|
46 |
+
|
47 |
+
# Learn about the ONNX spec
|
48 |
+
|
49 |
+
* [Overview](https://github.com/onnx/onnx/blob/main/docs/Overview.md)
|
50 |
+
* [ONNX intermediate representation spec](https://github.com/onnx/onnx/blob/main/docs/IR.md)
|
51 |
+
* [Versioning principles of the spec](https://github.com/onnx/onnx/blob/main/docs/Versioning.md)
|
52 |
+
* [Operators documentation](https://github.com/onnx/onnx/blob/main/docs/Operators.md)
|
53 |
+
* [Operators documentation](https://onnx.ai/onnx/operators/index.html) (latest release)
|
54 |
+
* [Python API Overview](https://github.com/onnx/onnx/blob/main/docs/PythonAPIOverview.md)
|
55 |
+
|
56 |
+
# Programming utilities for working with ONNX Graphs
|
57 |
+
|
58 |
+
* [Shape and Type Inference](https://github.com/onnx/onnx/blob/main/docs/ShapeInference.md)
|
59 |
+
* [Graph Optimization](https://github.com/onnx/optimizer)
|
60 |
+
* [Opset Version Conversion](https://github.com/onnx/onnx/blob/main/docs/docsgen/source/api/version_converter.md)
|
61 |
+
|
62 |
+
# Contribute
|
63 |
+
|
64 |
+
ONNX is a community project and the open governance model is described [here](https://github.com/onnx/onnx/blob/main/community/readme.md). We encourage you to join the effort and contribute feedback, ideas, and code. You can participate in the [Special Interest Groups](https://github.com/onnx/onnx/blob/main/community/sigs.md) and [Working Groups](https://github.com/onnx/onnx/blob/main/community/working-groups.md) to shape the future of ONNX.
|
65 |
+
|
66 |
+
Check out our [contribution guide](https://github.com/onnx/onnx/blob/main/CONTRIBUTING.md) to get started.
|
67 |
+
|
68 |
+
If you think some operator should be added to ONNX specification, please read
|
69 |
+
[this document](https://github.com/onnx/onnx/blob/main/docs/AddNewOp.md).
|
70 |
+
|
71 |
+
# Community meetings
|
72 |
+
|
73 |
+
The schedules of the regular meetings of the Steering Committee, the working groups and the SIGs can be found [here](https://onnx.ai/calendar)
|
74 |
+
|
75 |
+
Community Meetups are held at least once a year. Content from previous community meetups are at:
|
76 |
+
|
77 |
+
* 2020.04.09 <https://wiki.lfaidata.foundation/display/DL/LF+AI+Day+-ONNX+Community+Virtual+Meetup+-+Silicon+Valley+-+April+9>
|
78 |
+
* 2020.10.14 <https://wiki.lfaidata.foundation/display/DL/LF+AI+Day+-+ONNX+Community+Workshop+-+October+14>
|
79 |
+
* 2021.03.24 <https://wiki.lfaidata.foundation/pages/viewpage.action?pageId=35160391>
|
80 |
+
* 2021.10.21 <https://wiki.lfaidata.foundation/pages/viewpage.action?pageId=46989689>
|
81 |
+
* 2022.06.24 <https://wiki.lfaidata.foundation/display/DL/ONNX+Community+Day+-+June+24>
|
82 |
+
* 2023.06.28 <https://wiki.lfaidata.foundation/display/DL/ONNX+Community+Day+2023+-+June+28>
|
83 |
+
|
84 |
+
# Discuss
|
85 |
+
|
86 |
+
We encourage you to open [Issues](https://github.com/onnx/onnx/issues), or use [Slack](https://lfaifoundation.slack.com/) (If you have not joined yet, please use this [link](https://join.slack.com/t/lfaifoundation/shared_invite/zt-o65errpw-gMTbwNr7FnNbVXNVFkmyNA) to join the group) for more real-time discussion.
|
87 |
+
|
88 |
+
# Follow Us
|
89 |
+
|
90 |
+
Stay up to date with the latest ONNX news. [[Facebook](https://www.facebook.com/onnxai/)] [[Twitter](https://twitter.com/onnxai)]
|
91 |
+
|
92 |
+
# Roadmap
|
93 |
+
|
94 |
+
A roadmap process takes place every year. More details can be found [here](https://github.com/onnx/steering-committee/tree/main/roadmap)
|
95 |
+
|
96 |
+
# Installation
|
97 |
+
|
98 |
+
## Official Python packages
|
99 |
+
|
100 |
+
ONNX released packages are published in PyPi.
|
101 |
+
|
102 |
+
```sh
|
103 |
+
pip install onnx # or pip install onnx[reference] for optional reference implementation dependencies
|
104 |
+
```
|
105 |
+
|
106 |
+
[ONNX weekly packages](https://pypi.org/project/onnx-weekly/) are published in PyPI to enable experimentation and early testing.
|
107 |
+
|
108 |
+
## vcpkg packages
|
109 |
+
|
110 |
+
onnx is in the maintenance list of [vcpkg](https://github.com/microsoft/vcpkg), you can easily use vcpkg to build and install it.
|
111 |
+
|
112 |
+
```sh
|
113 |
+
git clone https://github.com/microsoft/vcpkg.git
|
114 |
+
cd vcpkg
|
115 |
+
./bootstrap-vcpkg.bat # For powershell
|
116 |
+
./bootstrap-vcpkg.sh # For bash
|
117 |
+
./vcpkg install onnx
|
118 |
+
```
|
119 |
+
|
120 |
+
## Conda packages
|
121 |
+
|
122 |
+
A binary build of ONNX is available from [Conda](https://conda.io), in [conda-forge](https://conda-forge.org/):
|
123 |
+
|
124 |
+
```sh
|
125 |
+
conda install -c conda-forge onnx
|
126 |
+
```
|
127 |
+
|
128 |
+
## Build ONNX from Source
|
129 |
+
|
130 |
+
Before building from source uninstall any existing versions of onnx `pip uninstall onnx`.
|
131 |
+
|
132 |
+
c++17 or higher C++ compiler version is required to build ONNX from source. Still, users can specify their own `CMAKE_CXX_STANDARD` version for building ONNX.
|
133 |
+
|
134 |
+
If you don't have protobuf installed, ONNX will internally download and build protobuf for ONNX build.
|
135 |
+
|
136 |
+
Or, you can manually install [protobuf C/C++ libraries and tools](https://github.com/protocolbuffers/protobuf) with specified version before proceeding forward. Then depending on how you installed protobuf, you need to set environment variable CMAKE_ARGS to "-DONNX_USE_PROTOBUF_SHARED_LIBS=ON" or "-DONNX_USE_PROTOBUF_SHARED_LIBS=OFF". For example, you may need to run the following command:
|
137 |
+
|
138 |
+
Linux:
|
139 |
+
|
140 |
+
```sh
|
141 |
+
export CMAKE_ARGS="-DONNX_USE_PROTOBUF_SHARED_LIBS=ON"
|
142 |
+
```
|
143 |
+
|
144 |
+
Windows:
|
145 |
+
|
146 |
+
```bat
|
147 |
+
set CMAKE_ARGS="-DONNX_USE_PROTOBUF_SHARED_LIBS=ON"
|
148 |
+
```
|
149 |
+
|
150 |
+
The ON/OFF depends on what kind of protobuf library you have. Shared libraries are files ending with \*.dll/\*.so/\*.dylib. Static libraries are files ending with \*.a/\*.lib. This option depends on how you get your protobuf library and how it was built. And it is default OFF. You don't need to run the commands above if you'd prefer to use a static protobuf library.
|
151 |
+
|
152 |
+
### Windows
|
153 |
+
|
154 |
+
If you are building ONNX from source, it is recommended that you also build Protobuf locally as a static library. The version distributed with conda-forge is a DLL, but ONNX expects it to be a static library. Building protobuf locally also lets you control the version of protobuf. The tested and recommended version is 3.21.12.
|
155 |
+
|
156 |
+
The instructions in this README assume you are using Visual Studio. It is recommended that you run all the commands from a shell started from "x64 Native Tools Command Prompt for VS 2019" and keep the build system generator for cmake (e.g., cmake -G "Visual Studio 16 2019") consistent while building protobuf as well as ONNX.
|
157 |
+
|
158 |
+
You can get protobuf by running the following commands:
|
159 |
+
|
160 |
+
```bat
|
161 |
+
git clone https://github.com/protocolbuffers/protobuf.git
|
162 |
+
cd protobuf
|
163 |
+
git checkout v21.12
|
164 |
+
cd cmake
|
165 |
+
cmake -G "Visual Studio 16 2019" -A x64 -DCMAKE_INSTALL_PREFIX=<protobuf_install_dir> -Dprotobuf_MSVC_STATIC_RUNTIME=OFF -Dprotobuf_BUILD_SHARED_LIBS=OFF -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_BUILD_EXAMPLES=OFF .
|
166 |
+
msbuild protobuf.sln /m /p:Configuration=Release
|
167 |
+
msbuild INSTALL.vcxproj /p:Configuration=Release
|
168 |
+
```
|
169 |
+
|
170 |
+
Then it will be built as a static library and installed to <protobuf_install_dir>. Please add the bin directory(which contains protoc.exe) to your PATH.
|
171 |
+
|
172 |
+
```bat
|
173 |
+
set CMAKE_PREFIX_PATH=<protobuf_install_dir>;%CMAKE_PREFIX_PATH%
|
174 |
+
```
|
175 |
+
|
176 |
+
Please note: if your protobuf_install_dir contains spaces, **do not** add quotation marks around it.
|
177 |
+
|
178 |
+
Alternative: if you don't want to change your PATH, you can set ONNX_PROTOC_EXECUTABLE instead.
|
179 |
+
|
180 |
+
```bat
|
181 |
+
set CMAKE_ARGS=-DONNX_PROTOC_EXECUTABLE=<full_path_to_protoc.exe>
|
182 |
+
```
|
183 |
+
|
184 |
+
Then you can build ONNX as:
|
185 |
+
|
186 |
+
```
|
187 |
+
git clone https://github.com/onnx/onnx.git
|
188 |
+
cd onnx
|
189 |
+
git submodule update --init --recursive
|
190 |
+
# prefer lite proto
|
191 |
+
set CMAKE_ARGS=-DONNX_USE_LITE_PROTO=ON
|
192 |
+
pip install -e . -v
|
193 |
+
```
|
194 |
+
|
195 |
+
### Linux
|
196 |
+
|
197 |
+
First, you need to install protobuf. The minimum Protobuf compiler (protoc) version required by ONNX is 3.6.1. Please note that old protoc versions might not work with `CMAKE_ARGS=-DONNX_USE_LITE_PROTO=ON`.
|
198 |
+
|
199 |
+
Ubuntu 20.04 (and newer) users may choose to install protobuf via
|
200 |
+
|
201 |
+
```sh
|
202 |
+
apt-get install python3-pip python3-dev libprotobuf-dev protobuf-compiler
|
203 |
+
```
|
204 |
+
|
205 |
+
In this case, it is required to add `-DONNX_USE_PROTOBUF_SHARED_LIBS=ON` to CMAKE_ARGS in the ONNX build step.
|
206 |
+
|
207 |
+
A more general way is to build and install it from source. See the instructions below for more details.
|
208 |
+
|
209 |
+
<details>
|
210 |
+
<summary> Installing Protobuf from source </summary>
|
211 |
+
|
212 |
+
Debian/Ubuntu:
|
213 |
+
|
214 |
+
```sh
|
215 |
+
git clone https://github.com/protocolbuffers/protobuf.git
|
216 |
+
cd protobuf
|
217 |
+
git checkout v21.12
|
218 |
+
git submodule update --init --recursive
|
219 |
+
mkdir build_source && cd build_source
|
220 |
+
cmake ../cmake -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_POSITION_INDEPENDENT_CODE=ON -Dprotobuf_BUILD_TESTS=OFF -DCMAKE_BUILD_TYPE=Release
|
221 |
+
make -j$(nproc)
|
222 |
+
make install
|
223 |
+
```
|
224 |
+
|
225 |
+
CentOS/RHEL/Fedora:
|
226 |
+
|
227 |
+
```sh
|
228 |
+
git clone https://github.com/protocolbuffers/protobuf.git
|
229 |
+
cd protobuf
|
230 |
+
git checkout v21.12
|
231 |
+
git submodule update --init --recursive
|
232 |
+
mkdir build_source && cd build_source
|
233 |
+
cmake ../cmake -DCMAKE_INSTALL_LIBDIR=lib64 -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_POSITION_INDEPENDENT_CODE=ON -Dprotobuf_BUILD_TESTS=OFF -DCMAKE_BUILD_TYPE=Release
|
234 |
+
make -j$(nproc)
|
235 |
+
make install
|
236 |
+
```
|
237 |
+
|
238 |
+
Here "-DCMAKE_POSITION_INDEPENDENT_CODE=ON" is crucial. By default static libraries are built without "-fPIC" flag, they are not position independent code. But shared libraries must be position independent code. Python C/C++ extensions(like ONNX) are shared libraries. So if a static library was not built with "-fPIC", it can't be linked to such a shared library.
|
239 |
+
|
240 |
+
Once build is successful, update PATH to include protobuf paths.
|
241 |
+
|
242 |
+
</details>
|
243 |
+
|
244 |
+
Then you can build ONNX as:
|
245 |
+
|
246 |
+
```sh
|
247 |
+
git clone https://github.com/onnx/onnx.git
|
248 |
+
cd onnx
|
249 |
+
git submodule update --init --recursive
|
250 |
+
# Optional: prefer lite proto
|
251 |
+
export CMAKE_ARGS=-DONNX_USE_LITE_PROTO=ON
|
252 |
+
pip install -e . -v
|
253 |
+
```
|
254 |
+
|
255 |
+
### Mac

```sh
export NUM_CORES=`sysctl -n hw.ncpu`
brew update
brew install autoconf && brew install automake
wget https://github.com/protocolbuffers/protobuf/releases/download/v21.12/protobuf-cpp-3.21.12.tar.gz
tar -xvf protobuf-cpp-3.21.12.tar.gz
cd protobuf-3.21.12
mkdir build_source && cd build_source
cmake ../cmake -Dprotobuf_BUILD_SHARED_LIBS=OFF -DCMAKE_POSITION_INDEPENDENT_CODE=ON -Dprotobuf_BUILD_TESTS=OFF -DCMAKE_BUILD_TYPE=Release
make -j${NUM_CORES}
make install
```

Once the build is successful, update `PATH` to include the protobuf paths.

Then you can build ONNX as:

```sh
git clone --recursive https://github.com/onnx/onnx.git
cd onnx
# Optional: prefer lite proto
export CMAKE_ARGS=-DONNX_USE_LITE_PROTO=ON
pip install -e . -v
```

## Verify Installation

After installation, run

```sh
python -c "import onnx"
```

to verify it works.
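
A slightly fuller smoke test (a sketch; it only uses modules that ship with onnx) also prints the version and loads the checker, which is backed by the compiled extension:

```sh
python -c "import onnx; print(onnx.__version__)"
python -c "import onnx.checker"  # loads the compiled C++ extension behind the checker
```
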
## Common Build Options

For the full list, refer to [CMakeLists.txt](https://github.com/onnx/onnx/blob/main/CMakeLists.txt).

### Environment variables

* `USE_MSVC_STATIC_RUNTIME` should be 1 or 0 (not `ON` or `OFF`). When set to 1, onnx links statically to the MSVC runtime library.
  **Default**: `USE_MSVC_STATIC_RUNTIME=0`

* `DEBUG` should be 0 or 1. When set to 1, onnx is built in debug mode. For debug versions of the dependencies, you need to open the [CMakeLists file](https://github.com/onnx/onnx/blob/main/CMakeLists.txt) and append a letter `d` at the end of the package name lines. For example, `NAMES protobuf-lite` would become `NAMES protobuf-lited`.
  **Default**: `DEBUG=0`
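
These are ordinary environment variables read at build time; as a sketch (assuming a POSIX shell and the editable install used above), a debug build can be requested with:

```sh
DEBUG=1 pip install -e . -v
```
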
### CMake variables

* `ONNX_USE_PROTOBUF_SHARED_LIBS` should be `ON` or `OFF`.
  **Default**: `ONNX_USE_PROTOBUF_SHARED_LIBS=OFF USE_MSVC_STATIC_RUNTIME=0`
  `ONNX_USE_PROTOBUF_SHARED_LIBS` determines how onnx links to the protobuf libraries.
  * When set to `ON`, onnx will dynamically link to the protobuf shared libs, `PROTOBUF_USE_DLLS` will be defined as described [here](https://github.com/protocolbuffers/protobuf/blob/main/cmake/README.md#dlls-vs-static-linking), `Protobuf_USE_STATIC_LIBS` will be set to `OFF`, and `USE_MSVC_STATIC_RUNTIME` must be 0.
  * When set to `OFF`, onnx will link statically to protobuf, `Protobuf_USE_STATIC_LIBS` will be set to `ON` (to force the use of the static libraries), and `USE_MSVC_STATIC_RUNTIME` can be `0` or `1`.

* `ONNX_USE_LITE_PROTO` should be `ON` or `OFF`. When set to `ON`, onnx uses lite protobuf instead of full protobuf.
  **Default**: `ONNX_USE_LITE_PROTO=OFF`

* `ONNX_WERROR` should be `ON` or `OFF`. When set to `ON`, warnings are treated as errors.
  **Default**: `ONNX_WERROR=OFF` in local builds, `ON` in CI and release pipelines.
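
Several of these can be combined in a single `CMAKE_ARGS` string for the pip-based build, for example:

```sh
export CMAKE_ARGS="-DONNX_USE_PROTOBUF_SHARED_LIBS=ON -DONNX_USE_LITE_PROTO=OFF -DONNX_WERROR=OFF"
pip install -e . -v
```
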
## Common Errors

* Note: the `import onnx` command does not work from the source checkout directory; in this case you'll see `ModuleNotFoundError: No module named 'onnx.onnx_cpp2py_export'`. Change into another directory to fix this error (illustrated below).

* If you run into any issues while building Protobuf as a static library, please ensure that shared Protobuf libraries, like libprotobuf, are not installed on your device or in the conda environment. If these shared libraries exist, either remove them to build Protobuf from source as a static library, or skip the Protobuf build from source to use the shared version directly.

* If you run into any issues while building ONNX from source, and your error message reads, `Could not find pythonXX.lib`, ensure that you have consistent Python versions for common commands, such as `python` and `pip`. Clean all existing build files and rebuild ONNX again.
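
A minimal illustration of the first pitfall, assuming the source checkout lives in a directory named `onnx/`:

```sh
cd onnx
python -c "import onnx"   # ModuleNotFoundError: No module named 'onnx.onnx_cpp2py_export'
cd ..
python -c "import onnx"   # works once you leave the checkout directory
```
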
# Testing

ONNX uses [pytest](https://docs.pytest.org) as its test driver. In order to run tests, you will first need to install `pytest`:

```sh
pip install pytest nbval
```

After installing pytest, use the following command to run tests:

```sh
pytest
```
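
Standard pytest selection flags work as usual; as a sketch (the `onnx/test` path and the keyword below are assumptions about your checkout — adjust them to the subset you care about):

```sh
pytest onnx/test -k "shape_inference" -q
```
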
# Development

Check out the [contributor guide](https://github.com/onnx/onnx/blob/main/CONTRIBUTING.md) for instructions.

# License

[Apache License v2.0](LICENSE)

# Code of Conduct

[ONNX Open Source Code of Conduct](https://onnx.ai/codeofconduct.html)
lib/python3.10/site-packages/onnx-1.17.0.dist-info/RECORD
ADDED
The diff for this file is too large to render.
See raw diff
lib/python3.10/site-packages/onnx-1.17.0.dist-info/WHEEL
ADDED
@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: setuptools (75.1.0)
Root-Is-Purelib: false
Tag: cp310-cp310-manylinux_2_17_x86_64
Tag: cp310-cp310-manylinux2014_x86_64
lib/python3.10/site-packages/onnx-1.17.0.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
onnx
lib/python3.10/site-packages/parameterized/__init__.py
ADDED
@@ -0,0 +1,3 @@
from .parameterized import parameterized, param, parameterized_class

__version__ = "0.9.0"
lib/python3.10/site-packages/parameterized/parameterized.py
ADDED
@@ -0,0 +1,732 @@
import re
import sys
import inspect
import warnings
from typing import Iterable
from functools import wraps
from types import MethodType as MethodType
from collections import namedtuple

try:
    from unittest import mock
except ImportError:
    try:
        import mock
    except ImportError:
        mock = None

try:
    from collections import OrderedDict as MaybeOrderedDict
except ImportError:
    MaybeOrderedDict = dict

from unittest import TestCase

try:
    from unittest import SkipTest
except ImportError:
    class SkipTest(Exception):
        pass

# NOTE: even though Python 2 support has been dropped, these checks have been
# left in place to avoid merge conflicts. They can be removed in the future, and
# future code can be written to assume Python 3.
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2


if PY3:
    # Python 3 doesn't have an InstanceType, so just use a dummy type.
    class InstanceType():
        pass
    lzip = lambda *a: list(zip(*a))
    text_type = str
    string_types = str,
    bytes_type = bytes
    def make_method(func, instance, type):
        if instance is None:
            return func
        return MethodType(func, instance)
else:
    from types import InstanceType
    lzip = zip
    text_type = unicode
    bytes_type = str
    string_types = basestring,
    def make_method(func, instance, type):
        return MethodType(func, instance, type)

def to_text(x):
    if isinstance(x, text_type):
        return x
    try:
        return text_type(x, "utf-8")
    except UnicodeDecodeError:
        return text_type(x, "latin1")

CompatArgSpec = namedtuple("CompatArgSpec", "args varargs keywords defaults")


def getargspec(func):
    if PY2:
        return CompatArgSpec(*inspect.getargspec(func))
    args = inspect.getfullargspec(func)
    if args.kwonlyargs:
        raise TypeError((
            "parameterized does not (yet) support functions with keyword "
            "only arguments, but %r has keyword only arguments. "
            "Please open an issue with your usecase if this affects you: "
            "https://github.com/wolever/parameterized/issues/new"
        ) %(func, ))
    return CompatArgSpec(*args[:4])


def skip_on_empty_helper(*a, **kw):
    raise SkipTest("parameterized input is empty")


def reapply_patches_if_need(func):

    def dummy_wrapper(orgfunc):
        @wraps(orgfunc)
        def dummy_func(*args, **kwargs):
            return orgfunc(*args, **kwargs)
        return dummy_func

    if hasattr(func, 'patchings'):
        is_original_async = inspect.iscoroutinefunction(func)
        func = dummy_wrapper(func)
        tmp_patchings = func.patchings
        delattr(func, 'patchings')
        for patch_obj in tmp_patchings:
            if is_original_async:
                func = patch_obj.decorate_async_callable(func)
            else:
                func = patch_obj.decorate_callable(func)
    return func


# `parameterized.expand` strips out `mock` patches from the source method in favor of re-applying them over the
# generated methods instead. Sadly, this can cause problems with old versions of the `mock` package, as shown in
# https://bugs.python.org/issue40126 (bpo-40126).
#
# Long story short, bpo-40126 arises whenever the `patchings` list of a `mock`-decorated method is left fully empty.
#
# The bug has been fixed in the `mock` code itself since:
# - Python 3.7.8-rc1, 3.8.3-rc1 and later (for the `unittest.mock` package) [0][1].
# - Version 4 of the `mock` backport package (https://pypi.org/project/mock/) [2].
#
# To work around the problem when running old `mock` versions, we avoid fully stripping out patches from the source
# method in favor of replacing them with a "dummy" no-op patch instead.
#
# [0] https://docs.python.org/release/3.7.10/whatsnew/changelog.html#python-3-7-8-release-candidate-1
# [1] https://docs.python.org/release/3.8.10/whatsnew/changelog.html#python-3-8-3-release-candidate-1
# [2] https://mock.readthedocs.io/en/stable/changelog.html#b1

PYTHON_DOESNT_HAVE_FIX_FOR_BPO_40126 = (
    sys.version_info[:3] < (3, 7, 8) or (sys.version_info[:2] >= (3, 8) and sys.version_info[:3] < (3, 8, 3))
)

try:
    import mock as _mock_backport
except ImportError:
    _mock_backport = None

MOCK_BACKPORT_DOESNT_HAVE_FIX_FOR_BPO_40126 = _mock_backport is not None and _mock_backport.version_info[0] < 4

AVOID_CLEARING_MOCK_PATCHES = PYTHON_DOESNT_HAVE_FIX_FOR_BPO_40126 or MOCK_BACKPORT_DOESNT_HAVE_FIX_FOR_BPO_40126


class DummyPatchTarget(object):
    dummy_attribute = None

    @staticmethod
    def create_dummy_patch():
        if mock is not None:
            return mock.patch.object(DummyPatchTarget(), "dummy_attribute", new=None)
        else:
            raise ImportError("Missing mock package")


def delete_patches_if_need(func):
    if hasattr(func, 'patchings'):
        if AVOID_CLEARING_MOCK_PATCHES:
            func.patchings[:] = [DummyPatchTarget.create_dummy_patch()]
        else:
            func.patchings[:] = []


_param = namedtuple("param", "args kwargs")

class param(_param):
    """ Represents a single parameter to a test case.

        For example::

            >>> p = param("foo", bar=16)
            >>> p
            param("foo", bar=16)
            >>> p.args
            ('foo', )
            >>> p.kwargs
            {'bar': 16}

        Intended to be used as an argument to ``@parameterized``::

            @parameterized([
                param("foo", bar=16),
            ])
            def test_stuff(foo, bar=16):
                pass
        """

    def __new__(cls, *args, **kwargs):
        return _param.__new__(cls, args, kwargs)

    @classmethod
    def explicit(cls, args=None, kwargs=None):
        """ Creates a ``param`` by explicitly specifying ``args`` and
            ``kwargs``::

                >>> param.explicit([1,2,3])
                param(*(1, 2, 3))
                >>> param.explicit(kwargs={"foo": 42})
                param(*(), **{"foo": "42"})
            """
        args = args or ()
        kwargs = kwargs or {}
        return cls(*args, **kwargs)

    @classmethod
    def from_decorator(cls, args):
        """ Returns an instance of ``param()`` for ``@parameterized`` argument
            ``args``::

                >>> param.from_decorator((42, ))
                param(args=(42, ), kwargs={})
                >>> param.from_decorator("foo")
                param(args=("foo", ), kwargs={})
            """
        if isinstance(args, param):
            return args
        elif isinstance(args, (str, bytes)) or not isinstance(args, Iterable):
            args = (args, )
        try:
            return cls(*args)
        except TypeError as e:
            if "after * must be" not in str(e):
                raise
            raise TypeError(
                "Parameters must be tuples, but %r is not (hint: use '(%r, )')"
                %(args, args),
            )

    def __repr__(self):
        return "param(*%r, **%r)" %self


class QuietOrderedDict(MaybeOrderedDict):
    """ When OrderedDict is available, use it to make sure that the kwargs in
        doc strings are consistently ordered. """
    __str__ = dict.__str__
    __repr__ = dict.__repr__


def parameterized_argument_value_pairs(func, p):
    """Return tuples of parameterized arguments and their values.

        This is useful if you are writing your own doc_func
        function and need to know the values for each parameter name::

            >>> def func(a, foo=None, bar=42, **kwargs): pass
            >>> p = param(1, foo=7, extra=99)
            >>> parameterized_argument_value_pairs(func, p)
            [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]

        If the function's first argument is named ``self`` then it will be
        ignored::

            >>> def func(self, a): pass
            >>> p = param(1)
            >>> parameterized_argument_value_pairs(func, p)
            [("a", 1)]

        Additionally, empty ``*args`` or ``**kwargs`` will be ignored::

            >>> def func(foo, *args): pass
            >>> p = param(1)
            >>> parameterized_argument_value_pairs(func, p)
            [("foo", 1)]
            >>> p = param(1, 16)
            >>> parameterized_argument_value_pairs(func, p)
            [("foo", 1), ("*args", (16, ))]
        """
    argspec = getargspec(func)
    arg_offset = 1 if argspec.args[:1] == ["self"] else 0

    named_args = argspec.args[arg_offset:]

    result = lzip(named_args, p.args)
    named_args = argspec.args[len(result) + arg_offset:]
    varargs = p.args[len(result):]

    result.extend([
        (name, p.kwargs.get(name, default))
        for (name, default)
        in zip(named_args, argspec.defaults or [])
    ])

    seen_arg_names = set([ n for (n, _) in result ])
    keywords = QuietOrderedDict(sorted([
        (name, p.kwargs[name])
        for name in p.kwargs
        if name not in seen_arg_names
    ]))

    if varargs:
        result.append(("*%s" %(argspec.varargs, ), tuple(varargs)))

    if keywords:
        result.append(("**%s" %(argspec.keywords, ), keywords))

    return result


def short_repr(x, n=64):
    """ A shortened repr of ``x`` which is guaranteed to be ``unicode``::

        >>> short_repr("foo")
        u"foo"
        >>> short_repr("123456789", n=4)
        u"12...89"
        """

    x_repr = to_text(repr(x))
    if len(x_repr) > n:
        x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:]
    return x_repr


def default_doc_func(func, num, p):
    if func.__doc__ is None:
        return None

    all_args_with_values = parameterized_argument_value_pairs(func, p)

    # Assumes that the function passed is a bound method.
    descs = ["%s=%s" %(n, short_repr(v)) for n, v in all_args_with_values]

    # The documentation might be a multiline string, so split it
    # and just work with the first string, ignoring the period
    # at the end if there is one.
    first, nl, rest = func.__doc__.lstrip().partition("\n")
    suffix = ""
    if first.endswith("."):
        suffix = "."
        first = first[:-1]
    args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs))
    return "".join(
        to_text(x)
        for x in [first.rstrip(), args, suffix, nl, rest]
    )


def default_name_func(func, num, p):
    base_name = func.__name__
    name_suffix = "_%s" %(num, )

    if len(p.args) > 0 and isinstance(p.args[0], string_types):
        name_suffix += "_" + parameterized.to_safe_name(p.args[0])
    return base_name + name_suffix


_test_runner_override = None
_test_runner_guess = False
_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"])
_test_runner_aliases = {
    "_pytest": "pytest",
}


def set_test_runner(name):
    global _test_runner_override
    if name not in _test_runners:
        raise TypeError(
            "Invalid test runner: %r (must be one of: %s)"
            %(name, ", ".join(_test_runners)),
        )
    _test_runner_override = name


def detect_runner():
    """ Guess which test runner we're using by traversing the stack and looking
        for the first matching module. This *should* be reasonably safe, as
        it's done during test discovery where the test runner should be the
        stack frame immediately outside. """
    if _test_runner_override is not None:
        return _test_runner_override
    global _test_runner_guess
    if _test_runner_guess is False:
        stack = inspect.stack()
        for record in reversed(stack):
            frame = record[0]
            module = frame.f_globals.get("__name__").partition(".")[0]
            if module in _test_runner_aliases:
                module = _test_runner_aliases[module]
            if module in _test_runners:
                _test_runner_guess = module
                break
            if record[1].endswith("python2.6/unittest.py"):
                _test_runner_guess = "unittest"
                break
        else:
            _test_runner_guess = None
    return _test_runner_guess



class parameterized(object):
    """ Parameterize a test case::

        class TestInt(object):
            @parameterized([
                ("A", 10),
                ("F", 15),
                param("10", 42, base=42)
            ])
            def test_int(self, input, expected, base=16):
                actual = int(input, base=base)
                assert_equal(actual, expected)

        @parameterized([
            (2, 3, 5),
            (3, 5, 8),
        ])
        def test_add(a, b, expected):
            assert_equal(a + b, expected)
        """

    def __init__(self, input, doc_func=None, skip_on_empty=False):
        self.get_input = self.input_as_callable(input)
        self.doc_func = doc_func or default_doc_func
        self.skip_on_empty = skip_on_empty

    def __call__(self, test_func):
        self.assert_not_in_testcase_subclass()

        @wraps(test_func)
        def wrapper(test_self=None):
            test_cls = test_self and type(test_self)
            if test_self is not None:
                if issubclass(test_cls, InstanceType):
                    raise TypeError((
                        "@parameterized can't be used with old-style classes, but "
                        "%r has an old-style class. Consider using a new-style "
                        "class, or '@parameterized.expand' "
                        "(see http://stackoverflow.com/q/54867/71522 for more "
                        "information on old-style classes)."
                    ) %(test_self, ))

            original_doc = wrapper.__doc__
            for num, args in enumerate(wrapper.parameterized_input):
                p = param.from_decorator(args)
                unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
                try:
                    wrapper.__doc__ = nose_tuple[0].__doc__
                    # Nose uses `getattr(instance, test_func.__name__)` to get
                    # a method bound to the test instance (as opposed to a
                    # method bound to the instance of the class created when
                    # tests were being enumerated). Set a value here to make
                    # sure nose can get the correct test method.
                    if test_self is not None:
                        setattr(test_cls, test_func.__name__, unbound_func)
                    yield nose_tuple
                finally:
                    if test_self is not None:
                        delattr(test_cls, test_func.__name__)
                    wrapper.__doc__ = original_doc

        input = self.get_input()
        if not input:
            if not self.skip_on_empty:
                raise ValueError(
                    "Parameters iterable is empty (hint: use "
                    "`parameterized([], skip_on_empty=True)` to skip "
                    "this test when the input is empty)"
                )
            wrapper = wraps(test_func)(skip_on_empty_helper)

        wrapper.parameterized_input = input
        wrapper.parameterized_func = test_func
        test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, )

        return wrapper

    def param_as_nose_tuple(self, test_self, func, num, p):
        nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
        nose_func.__doc__ = self.doc_func(func, num, p)
        # Track the unbound function because we need to setattr the unbound
        # function onto the class for nose to work (see comments above), and
        # Python 3 doesn't let us pull the function out of a bound method.
        unbound_func = nose_func
        if test_self is not None:
            # Under nose on Py2 we need to return an unbound method to make
            # sure that the `self` in the method is properly shared with the
            # `self` used in `setUp` and `tearDown`. But only there. Everyone
            # else needs a bound method.
            func_self = (
                None if PY2 and detect_runner() == "nose" else
                test_self
            )
            nose_func = make_method(nose_func, func_self, type(test_self))
        return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, )

    def assert_not_in_testcase_subclass(self):
        parent_classes = self._terrible_magic_get_defining_classes()
        if any(issubclass(cls, TestCase) for cls in parent_classes):
            raise Exception("Warning: '@parameterized' tests won't work "
                            "inside subclasses of 'TestCase' - use "
                            "'@parameterized.expand' instead.")

    def _terrible_magic_get_defining_classes(self):
        """ Returns the set of parent classes of the class currently being defined.
            Will likely only work if called from the ``parameterized`` decorator.
            This function is entirely @brandon_rhodes's fault, as he suggested
            the implementation: http://stackoverflow.com/a/8793684/71522
            """
        stack = inspect.stack()
        if len(stack) <= 4:
            return []
        frame = stack[4]
        code_context = frame[4] and frame[4][0].strip()
        if not (code_context and code_context.startswith("class ")):
            return []
        _, _, parents = code_context.partition("(")
        parents, _, _ = parents.partition(")")
        return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)

    @classmethod
    def input_as_callable(cls, input):
        if callable(input):
            return lambda: cls.check_input_values(input())
        input_values = cls.check_input_values(input)
        return lambda: input_values

    @classmethod
    def check_input_values(cls, input_values):
        # Explicitly convert non-list inputs to a list so that:
        # 1. A helpful exception will be raised if they aren't iterable, and
        # 2. Generators are unwrapped exactly once (otherwise `nosetests
        #    --processes=n` has issues; see:
        #    https://github.com/wolever/nose-parameterized/pull/31)
        if not isinstance(input_values, list):
            input_values = list(input_values)
        return [ param.from_decorator(p) for p in input_values ]

    @classmethod
    def expand(cls, input, name_func=None, doc_func=None, skip_on_empty=False,
               namespace=None, **legacy):
        """ A "brute force" method of parameterizing test cases. Creates new
            test cases and injects them into the namespace that the wrapped
            function is being defined in. Useful for parameterizing tests in
            subclasses of 'UnitTest', where Nose test generators don't work.

            :param input: An iterable of values to pass to the test function.
            :param name_func: A function that takes a single argument (the
                value from the input iterable) and returns a string to use as
                the name of the test case. If not provided, the name of the
                test case will be the name of the test function with the
                parameter value appended.
            :param doc_func: A function that takes a single argument (the
                value from the input iterable) and returns a string to use as
                the docstring of the test case. If not provided, the docstring
                of the test case will be the docstring of the test function.
            :param skip_on_empty: If True, the test will be skipped if the
                input iterable is empty. If False, a ValueError will be raised
                if the input iterable is empty.
            :param namespace: The namespace (dict-like) to inject the test cases
                into. If not provided, the namespace of the test function will
                be used.

            >>> @parameterized.expand([("foo", 1, 2)])
            ... def test_add1(name, input, expected):
            ...     actual = add1(input)
            ...     assert_equal(actual, expected)
            ...
            >>> locals()
            ... 'test_add1_foo_0': <function ...> ...
            >>>
            """

        if "testcase_func_name" in legacy:
            warnings.warn("testcase_func_name= is deprecated; use name_func=",
                          DeprecationWarning, stacklevel=2)
            if not name_func:
                name_func = legacy["testcase_func_name"]

        if "testcase_func_doc" in legacy:
            warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
                          DeprecationWarning, stacklevel=2)
            if not doc_func:
                doc_func = legacy["testcase_func_doc"]

        doc_func = doc_func or default_doc_func
        name_func = name_func or default_name_func

        def parameterized_expand_wrapper(f, instance=None):
            frame_locals = namespace
            if frame_locals is None:
                frame_locals = inspect.currentframe().f_back.f_locals

            parameters = cls.input_as_callable(input)()

            if not parameters:
                if not skip_on_empty:
                    raise ValueError(
                        "Parameters iterable is empty (hint: use "
                        "`parameterized.expand([], skip_on_empty=True)` to skip "
                        "this test when the input is empty)"
                    )
                return wraps(f)(skip_on_empty_helper)

            digits = len(str(len(parameters) - 1))
            for num, p in enumerate(parameters):
                name = name_func(f, "{num:0>{digits}}".format(digits=digits, num=num), p)
                # If the original function has patches applied by 'mock.patch',
                # re-construct all patches on the just former decoration layer
                # of param_as_standalone_func so as not to share
                # patch objects between new functions
                nf = reapply_patches_if_need(f)
                frame_locals[name] = cls.param_as_standalone_func(p, nf, name)
                frame_locals[name].__doc__ = doc_func(f, num, p)

            # Delete original patches to prevent new function from evaluating
            # original patching object as well as re-constructed patches.
            delete_patches_if_need(f)

            f.__test__ = False

        return parameterized_expand_wrapper

    @classmethod
    def param_as_standalone_func(cls, p, func, name):
        if inspect.iscoroutinefunction(func):
            @wraps(func)
            async def standalone_func(*a, **kw):
                return await func(*(a + p.args), **p.kwargs, **kw)
        else:
            @wraps(func)
            def standalone_func(*a, **kw):
                return func(*(a + p.args), **p.kwargs, **kw)

        standalone_func.__name__ = name

        # place_as is used by py.test to determine what source file should be
        # used for this test.
        standalone_func.place_as = func

        # Remove __wrapped__ because py.test will try to look at __wrapped__
        # to determine which parameters should be used with this test case,
        # and obviously we don't need it to do any parameterization.
        try:
            del standalone_func.__wrapped__
        except AttributeError:
            pass
        return standalone_func

    @classmethod
    def to_safe_name(cls, s):
        if not isinstance(s, str):
            s = str(s)
        return str(re.sub("[^a-zA-Z0-9_]+", "_", s))


def parameterized_class(attrs, input_values=None, class_name_func=None, classname_func=None):
    """ Parameterizes a test class by setting attributes on the class.

        Can be used in two ways:

        1) With a list of dictionaries containing attributes to override::

            @parameterized_class([
                { "username": "foo" },
                { "username": "bar", "access_level": 2 },
            ])
            class TestUserAccessLevel(TestCase):
                ...

        2) With a tuple of attributes, then a list of tuples of values:

            @parameterized_class(("username", "access_level"), [
                ("foo", 1),
                ("bar", 2)
            ])
            class TestUserAccessLevel(TestCase):
                ...

        """

    if isinstance(attrs, string_types):
        attrs = [attrs]

    input_dicts = (
        attrs if input_values is None else
        [dict(zip(attrs, vals)) for vals in input_values]
    )

    class_name_func = class_name_func or default_class_name_func

    if classname_func:
        warnings.warn(
            "classname_func= is deprecated; use class_name_func= instead. "
            "See: https://github.com/wolever/parameterized/pull/74#issuecomment-613577057",
            DeprecationWarning,
            stacklevel=2,
        )
        class_name_func = lambda cls, idx, input: classname_func(cls, idx, input_dicts)

    def decorator(base_class):
        test_class_module = sys.modules[base_class.__module__].__dict__
        for idx, input_dict in enumerate(input_dicts):
            test_class_dict = dict(base_class.__dict__)
            test_class_dict.update(input_dict)

            name = class_name_func(base_class, idx, input_dict)

            test_class_module[name] = type(name, (base_class, ), test_class_dict)

        # We need to leave the base class in place (see issue #73), but if we
        # leave the test_ methods in place, the test runner will try to pick
        # them up and run them... which doesn't make sense, since no parameters
        # will have been applied.
        # Address this by iterating over the base class and remove all test
        # methods.
        for method_name in list(base_class.__dict__):
            if method_name.startswith("test"):
                delattr(base_class, method_name)
        return base_class

    return decorator


def get_class_name_suffix(params_dict):
    if "name" in params_dict:
        return parameterized.to_safe_name(params_dict["name"])

    params_vals = (
        params_dict.values() if PY3 else
        (v for (_, v) in sorted(params_dict.items()))
    )
    return parameterized.to_safe_name(next((
        v for v in params_vals
        if isinstance(v, string_types)
    ), ""))


def default_class_name_func(cls, num, params_dict):
    suffix = get_class_name_suffix(params_dict)
    return "%s_%s%s" %(
        cls.__name__,
        num,
        suffix and "_" + suffix,
    )
lib/python3.10/site-packages/parameterized/test.py
ADDED
@@ -0,0 +1,695 @@
1 |
+
# coding=utf-8
|
2 |
+
|
3 |
+
import inspect
|
4 |
+
import sys
|
5 |
+
import mock
|
6 |
+
from functools import wraps
|
7 |
+
from unittest import TestCase
|
8 |
+
try:
|
9 |
+
from nose.tools import assert_equal, assert_raises
|
10 |
+
except ImportError:
|
11 |
+
def assert_equal(*args, **kwds):
|
12 |
+
return TestCase().assertEqual(*args, **kwds)
|
13 |
+
def assert_raises(*args, **kwds):
|
14 |
+
return TestCase().assertRaises(*args, **kwds)
|
15 |
+
|
16 |
+
from .parameterized import (
|
17 |
+
PY3, PY2, parameterized, param, parameterized_argument_value_pairs,
|
18 |
+
short_repr, detect_runner, parameterized_class, SkipTest,
|
19 |
+
)
|
20 |
+
|
21 |
+
|
22 |
+
def assert_contains(haystack, needle):
|
23 |
+
if needle not in haystack:
|
24 |
+
raise AssertionError("%r not in %r" %(needle, haystack))
|
25 |
+
|
26 |
+
|
27 |
+
def assert_raises_regexp_decorator(expected_exception, expected_regexp):
|
28 |
+
"""
|
29 |
+
Assert that a wrapped `unittest.TestCase` method raises an error matching the given type and message regex.
|
30 |
+
|
31 |
+
:param expected_exception: Exception class expected to be raised.
|
32 |
+
:param expected_regexp: Regexp (re pattern object or string) expected to be found in error message.
|
33 |
+
"""
|
34 |
+
|
35 |
+
def func_decorator(func):
|
36 |
+
@wraps(func)
|
37 |
+
def wrapper(self, *args, **kwargs):
|
38 |
+
with self.assertRaisesRegexp(expected_exception, expected_regexp):
|
39 |
+
func(self, *args, **kwargs)
|
40 |
+
|
41 |
+
return wrapper
|
42 |
+
|
43 |
+
return func_decorator
|
44 |
+
|
45 |
+
|
46 |
+
runner = detect_runner()
|
47 |
+
UNITTEST = runner.startswith("unittest")
|
48 |
+
NOSE2 = (runner == "nose2")
|
49 |
+
PYTEST = (runner == "pytest")
|
50 |
+
|
51 |
+
SKIP_FLAGS = {
|
52 |
+
"generator": UNITTEST,
|
53 |
+
"standalone": UNITTEST,
|
54 |
+
# nose2 doesn't run tests on old-style classes under Py2, so don't expect
|
55 |
+
# these tests to run under nose2.
|
56 |
+
"py2nose2": (PY2 and NOSE2),
|
57 |
+
"pytest": PYTEST,
|
58 |
+
}
|
59 |
+
|
60 |
+
missing_tests = set()
|
61 |
+
|
62 |
+
|
63 |
+
def expect(skip, tests=None):
|
64 |
+
if tests is None:
|
65 |
+
tests = skip
|
66 |
+
skip = None
|
67 |
+
if any(SKIP_FLAGS.get(f) for f in (skip or "").split()):
|
68 |
+
return
|
69 |
+
missing_tests.update(tests)
|
70 |
+
|
71 |
+
|
72 |
+
def expect_exception_matching_regex(tests, expected_exception, expected_regexp):
|
73 |
+
"""
|
74 |
+
Assert that the given `unittest.TestCase` tests raise an error matching the given type and message regex.
|
75 |
+
|
76 |
+
:param tests: A single test name or list of test names.
|
77 |
+
:param expected_exception: Exception class expected to be raised.
|
78 |
+
:param expected_regexp: Regexp (re pattern object or string) expected to be found in error message.
|
79 |
+
"""
|
80 |
+
if not isinstance(tests, list):
|
81 |
+
tests = [tests]
|
82 |
+
|
83 |
+
decorator = assert_raises_regexp_decorator(expected_exception, expected_regexp)
|
84 |
+
frame_locals = inspect.currentframe().f_back.f_locals
|
85 |
+
|
86 |
+
for test in tests:
|
87 |
+
if test in frame_locals:
|
88 |
+
test_method = frame_locals[test]
|
89 |
+
decorated_test_method = decorator(test_method)
|
90 |
+
frame_locals[test] = decorated_test_method
|
91 |
+
|
92 |
+
|
93 |
+
test_params = [
|
94 |
+
(42, ),
|
95 |
+
"foo0",
|
96 |
+
b"bar",
|
97 |
+
123,
|
98 |
+
param("foo1"),
|
99 |
+
param("foo2", bar=42),
|
100 |
+
]
|
101 |
+
|
102 |
+
expect("standalone", [
|
103 |
+
"test_naked_function(42, bar=None)",
|
104 |
+
"test_naked_function('foo0', bar=None)",
|
105 |
+
"test_naked_function(b'bar', bar=None)",
|
106 |
+
"test_naked_function(123, bar=None)",
|
107 |
+
"test_naked_function('foo1', bar=None)",
|
108 |
+
"test_naked_function('foo2', bar=42)",
|
109 |
+
])
|
110 |
+
|
111 |
+
@parameterized(test_params)
|
112 |
+
def test_naked_function(foo, bar=None):
|
113 |
+
missing_tests.remove("test_naked_function(%r, bar=%r)" %(foo, bar))
|
114 |
+
|
115 |
+
|
116 |
+
class TestParameterized(object):
|
117 |
+
expect("generator", [
|
118 |
+
"test_instance_method(42, bar=None)",
|
119 |
+
"test_instance_method(b'bar', bar=None)",
|
120 |
+
"test_instance_method(123, bar=None)",
|
121 |
+
"test_instance_method('foo0', bar=None)",
|
122 |
+
"test_instance_method('foo1', bar=None)",
|
123 |
+
"test_instance_method('foo2', bar=42)",
|
124 |
+
])
|
125 |
+
|
126 |
+
@parameterized(test_params)
|
127 |
+
def test_instance_method(self, foo, bar=None):
|
128 |
+
missing_tests.remove("test_instance_method(%r, bar=%r)" %(foo, bar))
|
129 |
+
|
130 |
+
|
131 |
+
if not PYTEST:
|
132 |
+
# py.test doesn't use xunit-style setup/teardown, so these tests don't apply
|
133 |
+
class TestSetupTeardown(object):
|
134 |
+
expect("generator", [
|
135 |
+
"test_setup(setup 1)",
|
136 |
+
"teardown_called(teardown 1)",
|
137 |
+
"test_setup(setup 2)",
|
138 |
+
"teardown_called(teardown 2)",
|
139 |
+
])
|
140 |
+
|
141 |
+
stack = ["setup 1", "teardown 1", "setup 2", "teardown 2"]
|
142 |
+
actual_order = "error: setup not called"
|
143 |
+
|
144 |
+
def setUp(self):
|
145 |
+
self.actual_order = self.stack.pop(0)
|
146 |
+
|
147 |
+
def tearDown(self):
|
148 |
+
missing_tests.remove("teardown_called(%s)" %(self.stack.pop(0), ))
|
149 |
+
|
150 |
+
@parameterized([(1, ), (2, )])
|
151 |
+
def test_setup(self, count, *a):
|
152 |
+
assert_equal(self.actual_order, "setup %s" %(count, ))
|
153 |
+
missing_tests.remove("test_setup(%s)" %(self.actual_order, ))
|
154 |
+
|
155 |
+
|
156 |
+
def custom_naming_func(custom_tag):
|
157 |
+
def custom_naming_func(testcase_func, param_num, param):
|
158 |
+
arg = param.args[0]
|
159 |
+
return testcase_func.__name__ + ('_%s_name_' % custom_tag) + parameterized.to_safe_name(arg)
|
160 |
+
|
161 |
+
return custom_naming_func
|
162 |
+
|
163 |
+
|
164 |
+
@mock.patch("os.getpid")
|
165 |
+
class TestParameterizedExpandWithMockPatchForClass(TestCase):
|
166 |
+
expect([
|
167 |
+
"test_one_function_patch_decorator('foo1', 'umask', 'getpid')",
|
168 |
+
"test_one_function_patch_decorator('foo0', 'umask', 'getpid')",
|
169 |
+
"test_one_function_patch_decorator(42, 'umask', 'getpid')",
|
170 |
+
])
|
171 |
+
|
172 |
+
@parameterized.expand([(42, ), "foo0", param("foo1")])
|
173 |
+
@mock.patch("os.umask")
|
174 |
+
def test_one_function_patch_decorator(self, foo, mock_umask, mock_getpid):
|
175 |
+
missing_tests.remove("test_one_function_patch_decorator(%r, %r, %r)" %
|
176 |
+
(foo, mock_umask._mock_name,
|
177 |
+
mock_getpid._mock_name))
|
178 |
+
|
179 |
+
expect([
|
180 |
+
"test_multiple_function_patch_decorator"
|
181 |
+
"(42, 51, 'umask', 'fdopen', 'getpid')",
|
182 |
+
"test_multiple_function_patch_decorator"
|
183 |
+
"('foo0', 'bar0', 'umask', 'fdopen', 'getpid')",
|
184 |
+
"test_multiple_function_patch_decorator"
|
185 |
+
"('foo1', 'bar1', 'umask', 'fdopen', 'getpid')",
|
186 |
+
])
|
187 |
+
|
188 |
+
@parameterized.expand([(42, 51), ("foo0", "bar0"), param("foo1", "bar1")])
|
189 |
+
@mock.patch("os.fdopen")
|
190 |
+
@mock.patch("os.umask")
|
191 |
+
def test_multiple_function_patch_decorator(self, foo, bar, mock_umask,
|
192 |
+
mock_fdopen, mock_getpid):
|
193 |
+
missing_tests.remove("test_multiple_function_patch_decorator"
|
194 |
+
"(%r, %r, %r, %r, %r)" %
|
195 |
+
(foo, bar, mock_umask._mock_name,
|
196 |
+
mock_fdopen._mock_name, mock_getpid._mock_name))
|
197 |
+
|
198 |
+
|
199 |
+
@mock.patch("os.getpid")
|
200 |
+
class TestParameterizedExpandWithNoExpand(object):
|
201 |
+
expect("generator", [
|
202 |
+
"test_patch_class_no_expand(42, 51, 'umask', 'getpid')",
|
203 |
+
])
|
204 |
+
|
205 |
+
@parameterized([(42, 51)])
|
206 |
+
@mock.patch("os.umask")
|
207 |
+
def test_patch_class_no_expand(self, foo, bar, mock_umask, mock_getpid):
|
208 |
+
missing_tests.remove("test_patch_class_no_expand"
|
209 |
+
"(%r, %r, %r, %r)" %
|
210 |
+
(foo, bar, mock_umask._mock_name,
|
211 |
+
mock_getpid._mock_name))
|
212 |
+
|
213 |
+
|
214 |
+
class TestParameterizedExpandWithNoMockPatchForClass(TestCase):
|
215 |
+
expect([
|
216 |
+
"test_one_function_patch_decorator('foo1', 'umask')",
|
217 |
+
"test_one_function_patch_decorator('foo0', 'umask')",
|
218 |
+
"test_one_function_patch_decorator(42, 'umask')",
|
219 |
+
])
|
220 |
+
|
221 |
+
@parameterized.expand([(42, ), "foo0", param("foo1")])
|
222 |
+
@mock.patch("os.umask")
|
223 |
+
def test_one_function_patch_decorator(self, foo, mock_umask):
|
224 |
+
missing_tests.remove("test_one_function_patch_decorator(%r, %r)" %
|
225 |
+
(foo, mock_umask._mock_name))
|
226 |
+
|
227 |
+
expect([
|
228 |
+
"test_multiple_function_patch_decorator(42, 51, 'umask', 'fdopen')",
|
229 |
+
"test_multiple_function_patch_decorator('foo0', 'bar0', 'umask', 'fdopen')",
|
230 |
+
"test_multiple_function_patch_decorator('foo1', 'bar1', 'umask', 'fdopen')",
|
231 |
+
])
|
232 |
+
|
233 |
+
@parameterized.expand([(42, 51), ("foo0", "bar0"), param("foo1", "bar1")])
|
234 |
+
@mock.patch("os.fdopen")
|
235 |
+
@mock.patch("os.umask")
|
236 |
+
def test_multiple_function_patch_decorator(self, foo, bar, mock_umask,
|
237 |
+
mock_fdopen):
|
238 |
+
missing_tests.remove("test_multiple_function_patch_decorator"
|
239 |
+
"(%r, %r, %r, %r)" %
|
240 |
+
(foo, bar, mock_umask._mock_name,
|
241 |
+
mock_fdopen._mock_name))
|
242 |
+
|
243 |
+
expect([
|
244 |
+
"test_patch_decorator_over_test_with_error('foo_this', 'umask')",
|
245 |
+
"test_patch_decorator_over_test_with_error('foo_that', 'umask')",
|
246 |
+
])
|
247 |
+
|
248 |
+
@parameterized.expand([
|
249 |
+
("foo_this",),
|
250 |
+
("foo_that",),
|
251 |
+
])
|
252 |
+
@mock.patch("os.umask")
|
253 |
+
def test_patch_decorator_over_test_with_error(self, foo, mock_umask):
|
254 |
+
missing_tests.remove(
|
255 |
+
"test_patch_decorator_over_test_with_error({!r}, {!r})".format(foo, mock_umask._mock_name)
|
256 |
+
)
|
257 |
+
raise ValueError("This error should have been caught")
|
258 |
+
|
259 |
+
expect_exception_matching_regex(
|
260 |
+
tests=[
|
261 |
+
"test_patch_decorator_over_test_with_error_0_foo_this",
|
262 |
+
"test_patch_decorator_over_test_with_error_1_foo_that",
|
263 |
+
],
|
264 |
+
expected_exception=ValueError,
|
265 |
+
expected_regexp="^This error should have been caught$",
|
266 |
+
)
|
267 |
+
|
268 |
+
|
269 |
+
class TestParameterizedExpandWithNoMockPatchForClassNoExpand(object):
|
270 |
+
expect("generator", [
|
271 |
+
"test_patch_no_expand(42, 51, 'umask')",
|
272 |
+
])
|
273 |
+
|
274 |
+
@parameterized([(42, 51)])
|
275 |
+
@mock.patch("os.umask")
|
276 |
+
def test_patch_no_expand(self, foo, bar, mock_umask):
|
277 |
+
missing_tests.remove("test_patch_no_expand(%r, %r, %r)" %
|
278 |
+
(foo, bar, mock_umask._mock_name))
|
279 |
+
|
280 |
+
|
281 |
+
expect("standalone", [
|
282 |
+
"test_mock_patch_standalone_function(42, 'umask')",
|
283 |
+
])
|
284 |
+
|
285 |
+
@parameterized([(42, )])
|
286 |
+
@mock.patch("os.umask")
|
287 |
+
def test_mock_patch_standalone_function(foo, mock_umask):
|
288 |
+
missing_tests.remove(
|
289 |
+
"test_mock_patch_standalone_function(%r, %r)" %(
|
290 |
+
foo, mock_umask._mock_name
|
291 |
+
)
|
292 |
+
)
|
293 |
+
|
294 |
+
@mock.patch.multiple("os", umask=mock.DEFAULT)
|
295 |
+
class TestParameterizedExpandWithMockPatchMultiple(TestCase):
|
296 |
+
expect([
|
297 |
+
"test_mock_patch_multiple_expand_on_method(42, 'umask', 'getpid')",
|
298 |
+
"test_mock_patch_multiple_expand_on_class(16, 'umask')",
|
299 |
+
])
|
300 |
+
|
301 |
+
@parameterized.expand([(42, )])
|
302 |
+
@mock.patch.multiple("os", getpid=mock.DEFAULT)
|
303 |
+
def test_mock_patch_multiple_expand_on_method(self, param, umask, getpid):
|
304 |
+
missing_tests.remove(
|
305 |
+
"test_mock_patch_multiple_expand_on_method(%r, %r, %r)" %(
|
306 |
+
param, umask._mock_name, getpid._mock_name
|
307 |
+
)
|
308 |
+
)
|
309 |
+
|
310 |
+
@parameterized.expand([(16, )])
|
311 |
+
def test_mock_patch_multiple_expand_on_class(self, param, umask):
|
312 |
+
missing_tests.remove(
|
313 |
+
"test_mock_patch_multiple_expand_on_class(%r, %r)" %(
|
314 |
+
param, umask._mock_name,
|
315 |
+
)
|
316 |
+
)
|
317 |
+
|
318 |
+
expect("standalone", [
|
319 |
+
"test_mock_patch_multiple_standalone(42, 'umask', 'getpid')",
|
320 |
+
])
|
321 |
+
|
322 |
+
@parameterized([(42, )])
|
323 |
+
@mock.patch.multiple("os", umask=mock.DEFAULT, getpid=mock.DEFAULT)
|
324 |
+
def test_mock_patch_multiple_standalone(param, umask, getpid):
|
325 |
+
missing_tests.remove(
|
326 |
+
"test_mock_patch_multiple_standalone(%r, %r, %r)" %(
|
327 |
+
param, umask._mock_name, getpid._mock_name
|
328 |
+
)
|
329 |
+
)
|
330 |
+
|
331 |
+
|
332 |
+
|
333 |
+
class TestParamerizedOnTestCase(TestCase):
|
334 |
+
expect([
|
335 |
+
"test_on_TestCase(42, bar=None)",
|
336 |
+
"test_on_TestCase(b'bar', bar=None)",
|
337 |
+
"test_on_TestCase(123, bar=None)",
|
338 |
+
"test_on_TestCase('foo0', bar=None)",
|
339 |
+
"test_on_TestCase('foo1', bar=None)",
|
340 |
+
"test_on_TestCase('foo2', bar=42)",
|
341 |
+
])
|
342 |
+
|
343 |
+
@parameterized.expand(test_params)
|
344 |
+
def test_on_TestCase(self, foo, bar=None):
|
345 |
+
missing_tests.remove("test_on_TestCase(%r, bar=%r)" %(foo, bar))
|
346 |
+
|
347 |
+
expect([
|
348 |
+
"test_on_TestCase2_custom_name_42(42, bar=None)",
|
349 |
+
"test_on_TestCase2_custom_name_b_bar_(b'bar', bar=None)",
|
350 |
+
"test_on_TestCase2_custom_name_123(123, bar=None)",
|
351 |
+
"test_on_TestCase2_custom_name_foo0('foo0', bar=None)",
|
352 |
+
"test_on_TestCase2_custom_name_foo1('foo1', bar=None)",
|
353 |
+
"test_on_TestCase2_custom_name_foo2('foo2', bar=42)",
|
354 |
+
])
|
355 |
+
|
356 |
+
@parameterized.expand(test_params,
|
357 |
+
name_func=custom_naming_func("custom"))
|
358 |
+
def test_on_TestCase2(self, foo, bar=None):
|
359 |
+
stack = inspect.stack()
|
360 |
+
frame = stack[1]
|
361 |
+
frame_locals = frame[0].f_locals
|
362 |
+
nose_test_method_name = frame_locals['a'][0]._testMethodName
|
363 |
+
expected_name = "test_on_TestCase2_custom_name_" + parameterized.to_safe_name(foo)
|
364 |
+
assert_equal(nose_test_method_name, expected_name,
|
365 |
+
"Test Method name '%s' did not get customized to expected: '%s'" %
|
366 |
+
(nose_test_method_name, expected_name))
|
367 |
+
missing_tests.remove("%s(%r, bar=%r)" %(expected_name, foo, bar))
|
368 |
+
|
369 |
+
|
370 |
+
class TestParameterizedExpandDocstring(TestCase):
    def _assert_docstring(self, expected_docstring, rstrip=False):
        """ Checks the current test method's docstring. Must be called directly
            from the test method. """
        stack = inspect.stack()
        f_locals = stack[3][0].f_locals
        test_method = (
            f_locals.get("testMethod") or  # Py27
            f_locals.get("function") or  # Py33
            f_locals.get("method") or  # Py38
            f_locals.get("testfunction") or  # Py382
            None
        )
        if test_method is None:
            raise AssertionError("uh oh, unittest changed a local variable name")
        actual_docstring = test_method.__doc__
        if rstrip:
            actual_docstring = actual_docstring.rstrip()
        assert_equal(actual_docstring, expected_docstring)

    @parameterized.expand([param("foo")],
                          doc_func=lambda f, n, p: "stuff")
    def test_custom_doc_func(self, foo, bar=None):
        """Documentation"""
        self._assert_docstring("stuff")

    @parameterized.expand([param("foo")])
    def test_single_line_docstring(self, foo):
        """Documentation."""
        self._assert_docstring("Documentation [with foo=%r]." %(foo, ))

    @parameterized.expand([param("foo")])
    def test_empty_docstring(self, foo):
        ""
        self._assert_docstring("[with foo=%r]" %(foo, ))

    @parameterized.expand([param("foo")])
    def test_multiline_documentation(self, foo):
        """Documentation.

        More"""
        self._assert_docstring(
            "Documentation [with foo=%r].\n\n"
            "        More" %(foo, )
        )

    @parameterized.expand([param("foo")])
    def test_unicode_docstring(self, foo):
        u"""Döcumentation."""
        self._assert_docstring(u"Döcumentation [with foo=%r]." %(foo, ))

    @parameterized.expand([param("foo", )])
    def test_default_values_get_correct_value(self, foo, bar=12):
        """Documentation"""
        self._assert_docstring("Documentation [with foo=%r, bar=%r]" %(foo, bar))

    @parameterized.expand([param("foo", )])
    def test_with_leading_newline(self, foo, bar=12):
        """
        Documentation
        """
        self._assert_docstring("Documentation [with foo=%r, bar=%r]" %(foo, bar), rstrip=True)


def test_warns_when_using_parameterized_with_TestCase():
    try:
        class TestTestCaseWarnsOnBadUseOfParameterized(TestCase):
            @parameterized([(42, )])
            def test_in_subclass_of_TestCase(self, foo):
                pass
    except Exception as e:
        assert_contains(str(e), "parameterized.expand")
    else:
        raise AssertionError("Expected exception not raised")


def test_helpful_error_on_empty_iterable_input():
    try:
        parameterized([])(lambda: None)
    except ValueError as e:
        assert_contains(str(e), "iterable is empty")
    else:
        raise AssertionError("Expected exception not raised")

def test_skip_test_on_empty_iterable():
    func = parameterized([], skip_on_empty=True)(lambda: None)
    assert_raises(SkipTest, func)


def test_helpful_error_on_empty_iterable_input_expand():
    try:
        class ExpectErrorOnEmptyInput(TestCase):
            @parameterized.expand([])
            def test_expect_error(self):
                pass
    except ValueError as e:
        assert_contains(str(e), "iterable is empty")
    else:
        raise AssertionError("Expected exception not raised")


expect("standalone generator", [
    "test_wrapped_iterable_input('foo')",
])
@parameterized(lambda: iter(["foo"]))
def test_wrapped_iterable_input(foo):
    missing_tests.remove("test_wrapped_iterable_input(%r)" %(foo, ))

def test_helpful_error_on_non_iterable_input():
    try:
        parameterized(lambda: 42)(lambda: None)
    except Exception as e:
        assert_contains(str(e), "is not iterable")
    else:
        raise AssertionError("Expected exception not raised")


def tearDownModule():
    missing = sorted(list(missing_tests))
    assert_equal(missing, [])

def test_old_style_classes():
    if PY3:
        raise SkipTest("Py3 doesn't have old-style classes")
    class OldStyleClass:
        @parameterized(["foo"])
        def parameterized_method(self, param):
            pass
    try:
        list(OldStyleClass().parameterized_method())
    except TypeError as e:
        assert_contains(str(e), "new-style")
        assert_contains(str(e), "parameterized.expand")
        assert_contains(str(e), "OldStyleClass")
    else:
        raise AssertionError("expected TypeError not raised by old-style class")


class TestOldStyleClass:
    expect("py2nose2 generator", [
        "test_on_old_style_class('foo')",
        "test_on_old_style_class('bar')",
    ])

    @parameterized.expand(["foo", "bar"])
    def test_old_style_classes(self, param):
        missing_tests.remove("test_on_old_style_class(%r)" %(param, ))


@parameterized([
    ("", param(), []),
    ("*a, **kw", param(), []),
    ("*a, **kw", param(1, foo=42), [("*a", (1, )), ("**kw", {"foo": 42})]),
    ("foo", param(1), [("foo", 1)]),
    ("foo, *a", param(1), [("foo", 1)]),
    ("foo, *a", param(1, 9), [("foo", 1), ("*a", (9, ))]),
    ("foo, *a, **kw", param(1, bar=9), [("foo", 1), ("**kw", {"bar": 9})]),
    ("x=9", param(), [("x", 9)]),
    ("x=9", param(1), [("x", 1)]),
    ("x, y=9, *a, **kw", param(1), [("x", 1), ("y", 9)]),
    ("x, y=9, *a, **kw", param(1, 2), [("x", 1), ("y", 2)]),
    ("x, y=9, *a, **kw", param(1, 2, 3), [("x", 1), ("y", 2), ("*a", (3, ))]),
    ("x, y=9, *a, **kw", param(1, y=2), [("x", 1), ("y", 2)]),
    ("x, y=9, *a, **kw", param(1, z=2), [("x", 1), ("y", 9), ("**kw", {"z": 2})]),
    ("x, y=9, *a, **kw", param(1, 2, 3, z=3), [("x", 1), ("y", 2), ("*a", (3, )), ("**kw", {"z": 3})]),
])
def test_parameterized_argument_value_pairs(func_params, p, expected):
    helper = eval("lambda %s: None" %(func_params, ))
    actual = parameterized_argument_value_pairs(helper, p)
    assert_equal(actual, expected)


@parameterized([
    ("abcd", "'abcd'"),
    ("123456789", "'12...89'"),
    (123456789, "123...789"),
    (123456789, "12...89", 4),
])
def test_short_repr(input, expected, n=6):
    assert_equal(short_repr(input, n=n), expected)

@parameterized([
    ("foo", ),
])
def test_with_docstring(input):
    """ Docstring! """
    pass


cases_over_10 = [(i, i+1) for i in range(11)]

@parameterized(cases_over_10)
def test_cases_over_10(input, expected):
    assert_equal(input, expected-1)


@parameterized_class(("a", "b", "c"), [
    ("foo", 1, 2),
    (0, 1, 2),
])
class TestParameterizedClass(TestCase):
    expect([
        "TestParameterizedClass_0_foo:test_method_a('foo', 1, 2)",
        "TestParameterizedClass_0_foo:test_method_b('foo', 1, 2)",
        "TestParameterizedClass_0_foo:testCamelCaseMethodC('foo', 1, 2)",
        "TestParameterizedClass_1:test_method_a(0, 1, 2)",
        "TestParameterizedClass_1:test_method_b(0, 1, 2)",
        "TestParameterizedClass_1:testCamelCaseMethodC(0, 1, 2)",
    ])

    def _assertions(self, test_name):
        assert hasattr(self, "a")
        assert_equal(self.b + self.c, 3)
        missing_tests.remove("%s:%s(%r, %r, %r)" %(
            self.__class__.__name__,
            test_name,
            self.a,
            self.b,
            self.c,
        ))

    def test_method_a(self):
        self._assertions("test_method_a")

    def test_method_b(self):
        self._assertions("test_method_b")

    def testCamelCaseMethodC(self):
        self._assertions("testCamelCaseMethodC")


@parameterized_class(("a", ), [
    (1, ),
    (2, ),
], class_name_func=lambda cls, idx, attrs: "%s_custom_func_%s" %(cls.__name__, attrs["a"]))
class TestNamedParameterizedClass(TestCase):
    expect([
        "TestNamedParameterizedClass_custom_func_1:test_method(1)",
        "TestNamedParameterizedClass_custom_func_2:test_method(2)",
    ])

    def test_method(self):
        missing_tests.remove("%s:test_method(%r)" %(
            self.__class__.__name__,
            self.a,
        ))


@parameterized_class([
    {"foo": 42},
    {"bar": "some stuff"},
    {"bar": "other stuff", "name": "some name", "foo": 12},
])
class TestParameterizedClassDict(TestCase):
    expect([
        "TestParameterizedClassDict_0:setUp(42, 'empty')",
        "TestParameterizedClassDict_0:test_method(42, 'empty')",
        "TestParameterizedClassDict_0:tearDown(42, 'empty')",
        "TestParameterizedClassDict_1_some_stuff:setUp(0, 'some stuff')",
        "TestParameterizedClassDict_1_some_stuff:test_method(0, 'some stuff')",
        "TestParameterizedClassDict_1_some_stuff:tearDown(0, 'some stuff')",
        "TestParameterizedClassDict_2_some_name:setUp(12, 'other stuff')",
        "TestParameterizedClassDict_2_some_name:test_method(12, 'other stuff')",
        "TestParameterizedClassDict_2_some_name:tearDown(12, 'other stuff')",
    ])

    foo = 0
    bar = 'empty'

    def setUp(self):
        # Ensure that super() works (issue #73)
        super(TestParameterizedClassDict, self).setUp()
        missing_tests.remove("%s:setUp(%r, %r)" %(
            self.__class__.__name__,
            self.foo,
            self.bar,
        ))

    def tearDown(self):
        # Ensure that super() works (issue #73)
        super(TestParameterizedClassDict, self).tearDown()
        missing_tests.remove("%s:tearDown(%r, %r)" %(
            self.__class__.__name__,
            self.foo,
            self.bar,
        ))

    def test_method(self):
        missing_tests.remove("%s:test_method(%r, %r)" %(
            self.__class__.__name__,
            self.foo,
            self.bar,
        ))


class TestUnicodeDocstring(object):
    @parameterized.expand([
        'value1',
        'vålüé¡'
    ])
    def test_with_docstring(self, param):
        """ Это док-стринг, содержащий не-ascii символы """
        pass

if sys.version_info.major == 3 and sys.version_info.minor >= 8:
    from unittest import IsolatedAsyncioTestCase

    class TestAsyncParameterizedExpandWithNoMockPatchForClass(IsolatedAsyncioTestCase):
        expect([
            "test_one_async_function('foo1')",
            "test_one_async_function('foo0')",
            "test_one_async_function(42)",
            "test_one_async_function_patch_decorator('foo1', 'umask')",
            "test_one_async_function_patch_decorator('foo0', 'umask')",
            "test_one_async_function_patch_decorator(42, 'umask')",
        ])

        @parameterized.expand([(42,), "foo0", param("foo1")])
        async def test_one_async_function(self, foo):
            missing_tests.remove("test_one_async_function(%r)" % (foo, ))

        @parameterized.expand([(42,), "foo0", param("foo1")])
        @mock.patch("os.umask")
        async def test_one_async_function_patch_decorator(self, foo, mock_umask):
            missing_tests.remove("test_one_async_function_patch_decorator(%r, %r)" %
                                 (foo, mock_umask._mock_name))
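The tests above exercise parameterized's docstring rewriting, class-level parameterization, and the missing_tests bookkeeping harness. As a minimal usage sketch of the decorators they cover (assuming `pip install parameterized`; the test names and values here are illustrative, not part of the suite above):

# Minimal sketch of the parameterized APIs exercised by the test file above.
import unittest
from parameterized import parameterized, parameterized_class


class TestMath(unittest.TestCase):
    @parameterized.expand([(1, 2, 3), (4, 5, 9)])
    def test_add(self, a, b, expected):
        """Adds two numbers."""  # rewritten to "Adds two numbers [with a=..., b=..., expected=...]."
        self.assertEqual(a + b, expected)


@parameterized_class(("base",), [(10,), (20,)])
class TestWithClassParams(unittest.TestCase):
    # Two classes are generated, one per parameter tuple, with `base` set as a class attribute.
    def test_base_is_set(self):
        self.assertIn(self.base, (10, 20))


if __name__ == "__main__":
    unittest.main()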
lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (37.9 kB)
lib/python3.10/site-packages/torch/_VF.py
ADDED
@@ -0,0 +1,31 @@
"""
This makes the functions in torch._C._VariableFunctions available as
    torch._VF.<funcname>
without mypy being able to find them.

A subset of those functions are mapped to ATen functions in
torch/jit/_builtins.py

See https://github.com/pytorch/pytorch/issues/21478 for the reason for
introducing torch._VF

"""

import sys
import types

import torch


class VFModule(types.ModuleType):
    vf: types.ModuleType

    def __init__(self, name: str):
        super().__init__(name)
        self.vf = torch._C._VariableFunctions

    def __getattr__(self, name: str) -> object:
        return getattr(self.vf, name)


sys.modules[__name__] = VFModule(__name__)
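The module swaps a proxy into sys.modules so that every attribute lookup on torch._VF falls through to torch._C._VariableFunctions at runtime. A short sketch of what that enables, assuming `norm` is among the _VariableFunctions entries (it is in recent PyTorch releases):

# Sketch: attribute access on torch._VF is forwarded at runtime.
import torch

t = torch.randn(4)
# Equivalent to torch.norm(t, 2); mypy cannot resolve this name statically,
# which is exactly why the VFModule proxy exists.
print(torch._VF.norm(t, 2))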
lib/python3.10/site-packages/torch/_VF.pyi
ADDED
The diff for this file is too large to render.
See raw diff
lib/python3.10/site-packages/torch/__config__.py
ADDED
@@ -0,0 +1,23 @@
# mypy: allow-untyped-defs
import torch


def show():
    """
    Return a human-readable string with descriptions of the
    configuration of PyTorch.
    """
    return torch._C._show_config()


# TODO: In principle, we could provide more structured version/config
# information here. For now only CXX_FLAGS is exposed, as Timer
# uses them.
def _cxx_flags():
    """Returns the CXX_FLAGS used when building PyTorch."""
    return torch._C._cxx_flags()


def parallel_info():
    r"""Returns detailed string with parallelization settings"""
    return torch._C._parallel_info()
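These are thin wrappers over torch._C; a quick check of what the two public ones print:

# Quick check of the wrappers above (torch must be installed).
import torch

print(torch.__config__.show())           # compiler, CUDA/cuDNN, BLAS, build flags
print(torch.__config__.parallel_info())  # ATen/OpenMP/MKL thread settings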
lib/python3.10/site-packages/torch/__future__.py
ADDED
@@ -0,0 +1,75 @@
_overwrite_module_params_on_conversion: bool = False
_swap_module_params_on_conversion: bool = False


def set_overwrite_module_params_on_conversion(value: bool) -> None:
    """
    Sets whether to assign new tensors to the parameters instead of changing the
    existing parameters in-place when converting an ``nn.Module``.

    When enabled, the following methods will assign new parameters to the module:

    #. ``module.{device}()`` (e.g. :meth:`nn.Module.cuda()`) for moving a module between devices
    #. ``module.{dtype}()`` (e.g. :meth:`nn.Module.float()`) for converting a module to a different dtype
    #. :meth:`nn.Module.to`
    #. :meth:`nn.Module.to_empty`

    Args:
        value (bool): Whether to assign new tensors or not.

    """
    global _overwrite_module_params_on_conversion
    _overwrite_module_params_on_conversion = value


def get_overwrite_module_params_on_conversion() -> bool:
    """
    Returns whether to assign new tensors to the parameters instead of changing the
    existing parameters in-place when converting an :class:`torch.nn.Module`. Defaults to ``False``.

    See :func:`~torch.__future__.set_overwrite_module_params_on_conversion` for more information.
    """
    return _overwrite_module_params_on_conversion


def set_swap_module_params_on_conversion(value: bool) -> None:
    """
    Sets whether to use :func:`~torch.utils.swap_tensors` instead of setting ``.data`` to
    change the existing parameters in-place when converting an ``nn.Module`` and instead
    of ``param.copy_(state_dict[key])`` when loading a state dict into an ``nn.Module``.

    .. note::
        This function takes precedence over :func:`~torch.__future__.get_overwrite_module_params_on_conversion`

    When enabled, the following methods will swap the existing parameters in-place:

    #. ``module.{device}()`` (e.g. :meth:`nn.Module.cuda()`) for moving a module between devices
    #. ``module.{dtype}()`` (e.g. :meth:`nn.Module.float()`) for converting a module to a different dtype
    #. :meth:`nn.Module.to`
    #. :meth:`nn.Module.to_empty`
    #. :meth:`nn.Module.load_state_dict`

    The semantics for :meth:`~nn.Module.load_state_dict` when this is set are as follows:

    #. For each parameter/buffer, its corresponding ``state_dict['key']`` is transformed via
       :meth:`~torch.Tensor.module_load` (i.e. ``res = param.module_load(state_dict['key'])``)
    #. If necessary, ``res`` will be wrapped in an :class:`~nn.Parameter`
    #. The parameter/buffer in the module will be swapped via :func:`~torch.utils.swap_tensors`
       with ``res``

    Args:
        value (bool): Whether to use :func:`~torch.utils.swap_tensors` or not.

    """
    global _swap_module_params_on_conversion
    _swap_module_params_on_conversion = value


def get_swap_module_params_on_conversion() -> bool:
    """
    Returns whether to use :func:`~torch.utils.swap_tensors` instead of setting .data to
    change the existing parameters in-place when converting an ``nn.Module``. Defaults to ``False``.

    See :func:`~torch.__future__.set_swap_module_params_on_conversion` for more information.
    """
    return _swap_module_params_on_conversion
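A minimal sketch of toggling the swap-based conversion path via the getter/setter pair defined above; the effect on ``.to()`` is the one the docstring describes:

# Minimal sketch: flip the swap flag around a dtype conversion, then restore it.
import torch
from torch.__future__ import (
    get_swap_module_params_on_conversion,
    set_swap_module_params_on_conversion,
)

prev = get_swap_module_params_on_conversion()
set_swap_module_params_on_conversion(True)
try:
    m = torch.nn.Linear(2, 2)
    m.to(torch.float64)    # params converted via swap_tensors, not .data assignment
    print(m.weight.dtype)  # torch.float64
finally:
    set_swap_module_params_on_conversion(prev)  # restore the global flag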
lib/python3.10/site-packages/torch/__init__.py
ADDED
The diff for this file is too large to render.
See raw diff
lib/python3.10/site-packages/torch/_appdirs.py
ADDED
@@ -0,0 +1,667 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor

# flake8: noqa

"""
This file is directly from
https://github.com/ActiveState/appdirs/blob/3fe6a83776843a46f20c2e5587afcffe05e03b39/appdirs.py

The license of https://github.com/ActiveState/appdirs copied below:


# This is the MIT license

Copyright (c) 2010 ActiveState Software Inc.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""

"""Utilities for determining application-specific dirs.

See <https://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
#   http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html

__version__ = "1.4.4"
__version_info__ = tuple(int(segment) for segment in __version__.split("."))


import os
import sys


unicode = str

if sys.platform.startswith("java"):
    import platform

    os_name = platform.java_ver()[3][0]
    if os_name.startswith("Windows"):  # "Windows XP", "Windows 7", etc.
        system = "win32"
    elif os_name.startswith("Mac"):  # "Mac OS X", etc.
        system = "darwin"
    else:  # "Linux", "SunOS", "FreeBSD", etc.
        # Setting this to "linux2" is not ideal, but only Windows or Mac
        # are actually checked for and the rest of the module expects
        # *sys.platform* style strings.
        system = "linux2"
else:
    system = sys.platform


def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific data dir for this application.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the name of the
        appauthor or distributing body for this application. Typically
        it is the owning company name. This falls back to appname. You may
        pass False to disable it.
    "version" is an optional version path element to append to the
        path. You might want to use this if you want multiple versions
        of your app to be able to run independently. If used, this
        would typically be "<major>.<minor>".
        Only applied when appname is present.
    "roaming" (boolean, default False) can be set True to use the Windows
        roaming appdata directory. That means that for users on a Windows
        network setup for roaming profiles, this user data will be
        sync'd on login. See
        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
        for a discussion of issues.

    Typical user data directories are:
        Mac OS X:               ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>

    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
    That means, by default "~/.local/share/<AppName>".
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(_get_win_folder(const))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
    elif system == "darwin":
        path = os.path.expanduser("~/Library/Application Support/")
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/share"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path


def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return full path to the user-shared data dir for this application.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the name of the
        appauthor or distributing body for this application. Typically
        it is the owning company name. This falls back to appname. You may
        pass False to disable it.
    "version" is an optional version path element to append to the
        path. You might want to use this if you want multiple versions
        of your app to be able to run independently. If used, this
        would typically be "<major>.<minor>".
        Only applied when appname is present.
    "multipath" is an optional parameter only applicable to *nix
        which indicates that the entire list of data dirs should be
        returned. By default, the first item from XDG_DATA_DIRS is
        returned, or '/usr/local/share/<AppName>',
        if XDG_DATA_DIRS is not set

    Typical site data directories are:
        Mac OS X:   /Library/Application Support/<AppName>
        Unix:       /usr/local/share/<AppName> or /usr/share/<AppName>
        Win XP:     C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
        Win 7:      C:\ProgramData\<AppAuthor>\<AppName>   # Hidden, but writeable on Win 7.

    For Unix, this is using the $XDG_DATA_DIRS[0] default.

    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
    elif system == "darwin":
        path = os.path.expanduser("/Library/Application Support")
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG default for $XDG_DATA_DIRS
        # only first, if multipath is False
        path = os.getenv(
            "XDG_DATA_DIRS", os.pathsep.join(["/usr/local/share", "/usr/share"])
        )
        pathlist = [
            os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)
        ]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            pathlist = [os.sep.join([x, appname]) for x in pathlist]

        if multipath:
            path = os.pathsep.join(pathlist)
        else:
            path = pathlist[0]
        return path

    if appname and version:
        path = os.path.join(path, version)
    return path


def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific config dir for this application.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the name of the
        appauthor or distributing body for this application. Typically
        it is the owning company name. This falls back to appname. You may
        pass False to disable it.
    "version" is an optional version path element to append to the
        path. You might want to use this if you want multiple versions
        of your app to be able to run independently. If used, this
        would typically be "<major>.<minor>".
        Only applied when appname is present.
    "roaming" (boolean, default False) can be set True to use the Windows
        roaming appdata directory. That means that for users on a Windows
        network setup for roaming profiles, this user data will be
        sync'd on login. See
        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
        for a discussion of issues.

    Typical user config directories are:
        Mac OS X:   ~/Library/Preferences/<AppName>
        Unix:       ~/.config/<AppName>     # or in $XDG_CONFIG_HOME, if defined
        Win *:      same as user_data_dir

    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
    """
    if system == "win32":
        path = user_data_dir(appname, appauthor, None, roaming)
    elif system == "darwin":
        path = os.path.expanduser("~/Library/Preferences/")
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path


def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return full path to the user-shared data dir for this application.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the name of the
        appauthor or distributing body for this application. Typically
        it is the owning company name. This falls back to appname. You may
        pass False to disable it.
    "version" is an optional version path element to append to the
        path. You might want to use this if you want multiple versions
        of your app to be able to run independently. If used, this
        would typically be "<major>.<minor>".
        Only applied when appname is present.
    "multipath" is an optional parameter only applicable to *nix
        which indicates that the entire list of config dirs should be
        returned. By default, the first item from XDG_CONFIG_DIRS is
        returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set

    Typical site config directories are:
        Mac OS X:   same as site_data_dir
        Unix:       /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
                    $XDG_CONFIG_DIRS
        Win *:      same as site_data_dir
        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)

    For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False

    WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
    """
    if system == "win32":
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
    elif system == "darwin":
        path = os.path.expanduser("/Library/Preferences")
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG default for $XDG_CONFIG_DIRS
        # only first, if multipath is False
        path = os.getenv("XDG_CONFIG_DIRS", "/etc/xdg")
        pathlist = [
            os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)
        ]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            pathlist = [os.sep.join([x, appname]) for x in pathlist]

        if multipath:
            path = os.pathsep.join(pathlist)
        else:
            path = pathlist[0]
    return path


def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific cache dir for this application.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the name of the
        appauthor or distributing body for this application. Typically
        it is the owning company name. This falls back to appname. You may
        pass False to disable it.
    "version" is an optional version path element to append to the
        path. You might want to use this if you want multiple versions
        of your app to be able to run independently. If used, this
        would typically be "<major>.<minor>".
        Only applied when appname is present.
    "opinion" (boolean) can be False to disable the appending of
        "Cache" to the base app data dir for Windows. See
        discussion below.

    Typical user cache directories are:
        Mac OS X:   ~/Library/Caches/<AppName>
        Unix:       ~/.cache/<AppName> (XDG default)
        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

    On Windows the only suggestion in the MSDN docs is that local settings go in
    the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
    app data dir (the default returned by `user_data_dir` above). Apps typically
    put cache data somewhere *under* the given dir here. Some examples:
        ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
        ...\Acme\SuperApp\Cache\1.0
    OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
    This can be disabled with the `opinion=False` option.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
            if opinion:
                path = os.path.join(path, "Cache")
    elif system == "darwin":
        path = os.path.expanduser("~/Library/Caches")
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path


def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific state dir for this application.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the name of the
        appauthor or distributing body for this application. Typically
        it is the owning company name. This falls back to appname. You may
        pass False to disable it.
    "version" is an optional version path element to append to the
        path. You might want to use this if you want multiple versions
        of your app to be able to run independently. If used, this
        would typically be "<major>.<minor>".
        Only applied when appname is present.
    "roaming" (boolean, default False) can be set True to use the Windows
        roaming appdata directory. That means that for users on a Windows
        network setup for roaming profiles, this user data will be
        sync'd on login. See
        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
        for a discussion of issues.

    Typical user state directories are:
        Mac OS X:   same as user_data_dir
        Unix:       ~/.local/state/<AppName>   # or in $XDG_STATE_HOME, if defined
        Win *:      same as user_data_dir

    For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
    to extend the XDG spec and support $XDG_STATE_HOME.

    That means, by default "~/.local/state/<AppName>".
    """
    if system in ["win32", "darwin"]:
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        path = os.getenv("XDG_STATE_HOME", os.path.expanduser("~/.local/state"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path


def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific log dir for this application.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the name of the
        appauthor or distributing body for this application. Typically
        it is the owning company name. This falls back to appname. You may
        pass False to disable it.
    "version" is an optional version path element to append to the
        path. You might want to use this if you want multiple versions
        of your app to be able to run independently. If used, this
        would typically be "<major>.<minor>".
        Only applied when appname is present.
    "opinion" (boolean) can be False to disable the appending of
        "Logs" to the base app data dir for Windows, and "log" to the
        base cache dir for Unix. See discussion below.

    Typical user log directories are:
        Mac OS X:   ~/Library/Logs/<AppName>
        Unix:       ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
        Win XP:     C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
        Vista:      C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs

    On Windows the only suggestion in the MSDN docs is that local settings
    go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
    examples of what some windows apps use for a logs dir.)

    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
    value for Windows and appends "log" to the user cache dir for Unix.
    This can be disabled with the `opinion=False` option.
    """
    if system == "darwin":
        path = os.path.join(os.path.expanduser("~/Library/Logs"), appname)
    elif system == "win32":
        path = user_data_dir(appname, appauthor, version)
        version = False
        if opinion:
            path = os.path.join(path, "Logs")
    else:
        path = user_cache_dir(appname, appauthor, version)
        version = False
        if opinion:
            path = os.path.join(path, "log")
    if appname and version:
        path = os.path.join(path, version)
    return path


class AppDirs(object):
    """Convenience wrapper for getting application dirs."""

    def __init__(
        self, appname=None, appauthor=None, version=None, roaming=False, multipath=False
    ):
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming
        self.multipath = multipath

    @property
    def user_data_dir(self):
        return user_data_dir(
            self.appname, self.appauthor, version=self.version, roaming=self.roaming
        )

    @property
    def site_data_dir(self):
        return site_data_dir(
            self.appname, self.appauthor, version=self.version, multipath=self.multipath
        )

    @property
    def user_config_dir(self):
        return user_config_dir(
            self.appname, self.appauthor, version=self.version, roaming=self.roaming
        )

    @property
    def site_config_dir(self):
        return site_config_dir(
            self.appname, self.appauthor, version=self.version, multipath=self.multipath
        )

    @property
    def user_cache_dir(self):
        return user_cache_dir(self.appname, self.appauthor, version=self.version)

    @property
    def user_state_dir(self):
        return user_state_dir(self.appname, self.appauthor, version=self.version)

    @property
    def user_log_dir(self):
        return user_log_dir(self.appname, self.appauthor, version=self.version)


# ---- internal support stuff


def _get_win_folder_from_registry(csidl_name):
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    import winreg as _winreg

    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
    )
    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
    return dir


def _get_win_folder_with_pywin32(csidl_name):
    from win32com.shell import shell, shellcon

    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        dir = unicode(dir)

        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in dir:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api

                dir = win32api.GetShortPathName(dir)
            except ImportError:
                pass
    except UnicodeError:
        pass
    return dir


def _get_win_folder_with_ctypes(csidl_name):
    import ctypes

    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in buf:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value


def _get_win_folder_with_jna(csidl_name):
    import array

    from com.sun import jna
    from com.sun.jna.platform import win32

    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros("c", buf_size)
    shell = win32.Shell32.INSTANCE
    shell.SHGetFolderPath(
        None,
        getattr(win32.ShlObj, csidl_name),
        None,
        win32.ShlObj.SHGFP_TYPE_CURRENT,
        buf,
    )
    dir = jna.Native.toString(buf.tostring()).rstrip("\0")

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in dir:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf = array.zeros("c", buf_size)
        kernel = win32.Kernel32.INSTANCE
        if kernel.GetShortPathName(dir, buf, buf_size):
            dir = jna.Native.toString(buf.tostring()).rstrip("\0")

    return dir


if system == "win32":
    try:
        import win32com.shell

        _get_win_folder = _get_win_folder_with_pywin32
    except ImportError:
        try:
            from ctypes import windll

            _get_win_folder = _get_win_folder_with_ctypes
        except ImportError:
            try:
                import com.sun.jna

                _get_win_folder = _get_win_folder_with_jna
            except ImportError:
                _get_win_folder = _get_win_folder_from_registry


# ---- self test code

if __name__ == "__main__":
    appname = "MyApp"
    appauthor = "MyCompany"

    props = (
        "user_data_dir",
        "user_config_dir",
        "user_cache_dir",
        "user_state_dir",
        "user_log_dir",
        "site_data_dir",
        "site_config_dir",
    )

    print(f"-- app dirs {__version__} --")

    print("-- app dirs (with optional 'version')")
    dirs = AppDirs(appname, appauthor, version="1.0")
    for prop in props:
        print(f"{prop}: {getattr(dirs, prop)}")

    print("\n-- app dirs (without optional 'version')")
    dirs = AppDirs(appname, appauthor)
    for prop in props:
        print(f"{prop}: {getattr(dirs, prop)}")

    print("\n-- app dirs (without optional 'appauthor')")
    dirs = AppDirs(appname)
    for prop in props:
        print(f"{prop}: {getattr(dirs, prop)}")

    print("\n-- app dirs (with disabled 'appauthor')")
    dirs = AppDirs(appname, appauthor=False)
    for prop in props:
        print(f"{prop}: {getattr(dirs, prop)}")
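The module mirrors upstream appdirs 1.4.4 as vendored into torch; a short usage sketch of the AppDirs wrapper defined above (paths shown are typical Linux/XDG results; Windows and macOS differ as the docstrings describe):

# Usage sketch for the vendored module above.
from torch._appdirs import AppDirs

dirs = AppDirs("MyApp", "MyCompany", version="1.0")
print(dirs.user_data_dir)    # e.g. ~/.local/share/MyApp/1.0
print(dirs.user_cache_dir)   # e.g. ~/.cache/MyApp/1.0
print(dirs.user_log_dir)     # e.g. ~/.cache/MyApp/1.0/log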
lib/python3.10/site-packages/torch/_classes.py
ADDED
@@ -0,0 +1,56 @@
# mypy: allow-untyped-defs
import types

import torch._C


class _ClassNamespace(types.ModuleType):
    def __init__(self, name):
        super().__init__("torch.classes" + name)
        self.name = name

    def __getattr__(self, attr):
        proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
        if proxy is None:
            raise RuntimeError(f"Class {self.name}.{attr} not registered!")
        return proxy


class _Classes(types.ModuleType):
    __file__ = "_classes.py"

    def __init__(self) -> None:
        super().__init__("torch.classes")

    def __getattr__(self, name):
        namespace = _ClassNamespace(name)
        setattr(self, name, namespace)
        return namespace

    @property
    def loaded_libraries(self):
        return torch.ops.loaded_libraries

    def load_library(self, path):
        """
        Loads a shared library from the given path into the current process.

        The library being loaded may run global initialization code to register
        custom classes with the PyTorch JIT runtime. This allows dynamically
        loading custom classes. For this, you should compile your class
        and the static registration code into a shared library object, and then
        call ``torch.classes.load_library('path/to/libcustom.so')`` to load the
        shared object.

        After the library is loaded, it is added to the
        ``torch.classes.loaded_libraries`` attribute, a set that may be inspected
        for the paths of all libraries loaded using this function.

        Args:
            path (str): A path to a shared library to load.
        """
        torch.ops.load_library(path)


# The classes "namespace"
classes = _Classes()
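torch.classes is populated lazily: the first attribute access creates a _ClassNamespace, and lookups inside it resolve TorchBind classes registered by loaded libraries. A hedged sketch of the flow (the .so path and "my_namespace::MyStack" class below are hypothetical placeholders, not real artifacts):

# Hedged sketch of the lazy torch.classes namespace above.
import torch

torch.classes.load_library("build/libcustom_class.so")  # hypothetical path
# Resolves via _ClassNamespace.__getattr__ -> _get_custom_class_python_wrapper;
# raises RuntimeError("Class ... not registered!") if the name is unknown.
Stack = torch.classes.my_namespace.MyStack              # hypothetical class
print(torch.classes.loaded_libraries)                   # set of paths loaded so far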
lib/python3.10/site-packages/torch/_compile.py
ADDED
@@ -0,0 +1,38 @@
# mypy: allow-untyped-defs
"""
APIs related to torch.compile which lazily import torch._dynamo to avoid
circular dependencies.
"""

import functools


def _disable_dynamo(fn=None, recursive=True):
    """
    This API should only be used inside torch; external users should still use
    torch._dynamo.disable. The main goal of this API is to avoid the circular
    import issues that are common when using _dynamo.disable inside torch
    itself.

    This API avoids them by deferring the import of torch._dynamo from import
    time to the invocation of the decorated function.
    """
    if fn is not None:

        @functools.wraps(fn)
        def inner(*args, **kwargs):
            # cache this on the first invocation to avoid adding too much overhead.
            disable_fn = getattr(fn, "__dynamo_disable", None)
            if disable_fn is None:
                import torch._dynamo

                disable_fn = torch._dynamo.disable(fn, recursive)
                fn.__dynamo_disable = disable_fn

            return disable_fn(*args, **kwargs)

        return inner
    else:
        # decorator usage like @_disable_dynamo(recursive=False). The resulting
        # object expects the original decorated function as the arg.
        return functools.partial(_disable_dynamo, recursive=recursive)
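The decorator supports both bare and parameterized forms; a small sketch of the two branches (this is an internal torch helper, so external code should prefer torch._dynamo.disable):

# Sketch of both decorator forms handled by _disable_dynamo above.
from torch._compile import _disable_dynamo

@_disable_dynamo                    # bare form: fn is the decorated function
def helper(x):
    return x + 1

@_disable_dynamo(recursive=False)   # parameterized form: returns a functools.partial
def shallow_helper(x):
    return x * 2

print(helper(1), shallow_helper(2))  # torch._dynamo is imported lazily on first call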
lib/python3.10/site-packages/torch/_custom_ops.py
ADDED
@@ -0,0 +1,324 @@
# mypy: allow-untyped-defs
import inspect

from torch._custom_op.impl import (
    _custom_op_with_schema,
    _find_custom_op,
    infer_schema,
    parse_qualname,
    validate_namespace,
)
from torch.library import get_ctx


__all__ = [
    "custom_op",
    "impl",
    "impl_abstract",
    "get_ctx",
    "impl_save_for_backward",
    "impl_backward",
]


def custom_op(qualname, func_or_schema=None):
    r"""Register a new custom operator

    In PyTorch, defining an op (short for "operator") is a two-step process:
    - we need to define the op (by providing an operator name and schema)
    - we need to implement behavior for how the operator interacts with
      various PyTorch subsystems, like CPU/CUDA Tensors, Autograd, etc.

    This entrypoint defines the custom operator (the first step);
    you must then perform the second step by calling various
    ``impl_*`` APIs.

    This API may be used as a decorator (see examples).

    For a detailed guide on custom ops, please see
    https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk

    Arguments:
        qualname (str): Should be a string that looks like
            "namespace::operator_name". Operators in PyTorch need a namespace to
            avoid name collisions; a given operator may only be created once.
            If you are writing a Python library, we recommend the namespace to
            be the name of your top-level module.
        func_or_schema (Union[Callable, str]): Each PyTorch operator needs a
            schema that tells PyTorch the types of the inputs/outputs.
            If this is a Callable, we will automatically infer the schema from
            the type annotations on the function (see examples). Otherwise,
            if you don't want to use type annotations, you may provide us the
            schema string.

    Example::
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> import torch
        >>> import numpy as np
        >>> from torch import Tensor
        >>>
        >>> # Step 1: define the custom op.
        >>> # We need to provide the API a "prototype function"
        >>> # (a function that raises NotImplementedError), from which
        >>> # we will infer the types of the inputs and outputs.
        >>> @torch._custom_ops.custom_op("mylibrary::numpy_sin")
        >>> def numpy_sin(x: Tensor) -> Tensor:
        >>>     raise NotImplementedError
        >>>
        >>> # The custom op is now accessible via the torch.ops module:
        >>> torch.ops.mylibrary.numpy_sin
        >>>
        >>> # Step 2: Register an implementation for various PyTorch subsystems
        >>>
        >>> # Register an implementation for CPU tensors
        >>> @torch._custom_ops.impl("mylibrary::numpy_sin", device_types="cpu")
        >>> def numpy_sin_impl_cpu(x):
        >>>     return torch.from_numpy(np.sin(x.numpy()))
        >>>
        >>> # Register an implementation for CUDA tensors
        >>> @torch._custom_ops.impl("mylibrary::numpy_sin", device_types="cuda")
        >>> def numpy_sin_impl_cuda(x):
        >>>     return torch.from_numpy(np.sin(x.cpu().numpy())).to(x.device)
        >>>
        >>> x = torch.randn(3)
        >>> torch.ops.mylibrary.numpy_sin(x)  # calls numpy_sin_impl_cpu
        >>>
        >>> x_cuda = x.cuda()
        >>> torch.ops.mylibrary.numpy_sin(x_cuda)  # calls numpy_sin_impl_cuda

    """
    ns, name = parse_qualname(qualname)
    validate_namespace(ns)

    def inner(func):
        if not inspect.isfunction(func):
            raise ValueError(
                f"custom_op(...)(func): Expected `func` to be a Python "
                f"function, got: {type(func)}"
            )

        if func.__name__ != name:
            raise ValueError(
                f"custom_op(qualname='{qualname}', ...)(func): expected `func` "
                f"to have name '{name}' but got '{func.__name__}'. "
                f"Please either change the name of `func` or the qualname that "
                f"is passed to `custom_op`"
            )

        schema = infer_schema(func, mutates_args=())
        _custom_op_with_schema(qualname, schema)
        return func

    if func_or_schema is None:
        return inner
    if isinstance(func_or_schema, str):
        _custom_op_with_schema(qualname, func_or_schema)
    else:
        return inner(func_or_schema)


def impl(qualname, *, device_types=("cpu", "cuda"), func=None):
    r"""Register an implementation for a device type for this custom op.

    If the op is passed multiple Tensor inputs with different device
    types, it will dispatch to the registered implementation for the highest
    priority device type among those present.
    The supported device types, in order of priority, are {'cuda', 'cpu'}.

    This API may be used as a decorator (see examples).

    For a detailed guide on custom ops, please see
    https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk

    Arguments:
        device_types (str or Iterable[str]): the device type(s) to register the function for.

    Example::
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> import torch
        >>> import numpy as np
        >>> from torch import Tensor
        >>>
        >>> # Step 1: define the custom op.
        >>> # We need to provide the API a "prototype function"
        >>> # (a function that raises NotImplementedError), from which
        >>> # we will infer the types of the inputs and outputs.
        >>> @torch._custom_ops.custom_op("mylibrary::numpy_cos")
        >>> def numpy_cos(x: Tensor) -> Tensor:
        >>>     raise NotImplementedError
        >>>
        >>> # The custom op is now accessible via the torch.ops module:
        >>> torch.ops.mylibrary.numpy_cos
        >>>
        >>> # Step 2: Register an implementation for various PyTorch subsystems
        >>>
        >>> # Register an implementation for CPU tensors
        >>> @torch._custom_ops.impl("mylibrary::numpy_cos", device_types="cpu")
        >>> def numpy_cos_impl_cpu(x):
        >>>     return torch.from_numpy(np.cos(x.numpy()))
        >>>
        >>> # Register an implementation for CUDA tensors
        >>> @torch._custom_ops.impl("mylibrary::numpy_cos", device_types="cuda")
        >>> def numpy_cos_impl_cuda(x):
        >>>     return torch.from_numpy(np.cos(x.cpu().numpy())).to(x.device)
        >>>
        >>> x = torch.randn(3)
        >>> torch.ops.mylibrary.numpy_cos(x)  # calls numpy_cos_impl_cpu
        >>>
        >>> x_cuda = x.cuda()
        >>> torch.ops.mylibrary.numpy_cos(x_cuda)  # calls numpy_cos_impl_cuda

    """

    def inner(func):
        custom_op = _find_custom_op(qualname, also_check_torch_library=True)
        custom_op.impl(device_types, _stacklevel=3)(func)
        return func

    if func is None:
        return inner
    return inner(func)


def impl_abstract(qualname, *, func=None):
    r"""Register an abstract implementation for this operator.

    An "abstract implementation" specifies the behavior of this operator on
    Tensors that carry no data. Given some input Tensors with certain properties
    (sizes/strides/storage_offset/device), it specifies what the properties of
    the output Tensors are.

    The abstract implementation has the same signature as the operator.
    It is run for both FakeTensors and meta tensors. To write an abstract
    implementation, assume that all Tensor inputs to the operator are
    regular CPU/CUDA/Meta tensors, but they do not have storage, and
    you are trying to return regular CPU/CUDA/Meta tensor(s) as output.
    The abstract implementation must consist of only PyTorch operations
    (and may not directly access the storage or data of any input or
    intermediate Tensors).

    This API may be used as a decorator (see examples).

    For a detailed guide on custom ops, please see
    https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk

    Examples::
        >>> import numpy as np
        >>> from torch import Tensor
|
208 |
+
>>>
|
209 |
+
>>> # Example 1: an operator without data-dependent output shape
|
210 |
+
>>> @torch._custom_ops.custom_op("mylibrary::custom_linear")
|
211 |
+
>>> def custom_linear(x: Tensor, weight: Tensor, bias: Tensor) -> Tensor:
|
212 |
+
>>> raise NotImplementedError
|
213 |
+
>>>
|
214 |
+
>>> @torch._custom_ops.impl_abstract("mylibrary::custom_linear")
|
215 |
+
>>> def custom_linear_abstract(x, weight):
|
216 |
+
>>> assert x.dim() == 2
|
217 |
+
>>> assert weight.dim() == 2
|
218 |
+
>>> assert bias.dim() == 1
|
219 |
+
>>> assert x.shape[1] == weight.shape[1]
|
220 |
+
>>> assert weight.shape[0] == bias.shape[0]
|
221 |
+
>>> assert x.device == weight.device
|
222 |
+
>>>
|
223 |
+
>>> return (x @ weight.t()) + bias
|
224 |
+
>>>
|
225 |
+
>>> # Example 2: an operator with data-dependent output shape
|
226 |
+
>>> @torch._custom_ops.custom_op('mylibrary::custom_nonzero')
|
227 |
+
>>> def custom_nonzero(x: Tensor) -> Tensor:
|
228 |
+
>>> ...
|
229 |
+
>>>
|
230 |
+
>>> @torch._custom_ops.impl_abstract("mylibrary::custom_nonzero")
|
231 |
+
>>> def custom_nonzero_abstract(x):
|
232 |
+
>>> # Number of nonzero-elements is data-dependent.
|
233 |
+
>>> # Since we cannot peek at the data in an abstract impl,
|
234 |
+
>>> # we use the ctx object to construct a new symint that
|
235 |
+
>>> # represents the data-dependent size.
|
236 |
+
>>> ctx = torch._custom_ops.get_ctx()
|
237 |
+
>>> nnz = ctx.create_unbacked_symint()
|
238 |
+
>>> shape = [x.dim(), nnz]
|
239 |
+
>>> result = x.new_empty(shape, dtype=torch.long)
|
240 |
+
>>> return result
|
241 |
+
>>>
|
242 |
+
>>> @torch._custom_ops.impl("mylibrary::custom_nonzero")
|
243 |
+
>>> def custom_nonzero_impl(x):
|
244 |
+
>>> x_np = to_numpy(x)
|
245 |
+
>>> res = np.stack(np.nonzero(x_np), axis=1)
|
246 |
+
>>> # unbacked symbolic ints in PyTorch must be >= 2, so we
|
247 |
+
>>> # constrain the range to at least 2
|
248 |
+
>>> if res.shape[0] <= 1:
|
249 |
+
>>> raise RuntimeError("not supported")
|
250 |
+
>>> return torch.tensor(res, device=x.device)
|
251 |
+
|
252 |
+
"""
|
253 |
+
import torch.library
|
254 |
+
|
255 |
+
return torch.library.register_fake(qualname, func, _stacklevel=2)
|
256 |
+
|
257 |
+
|
258 |
+
def impl_save_for_backward(qualname, *, func=None):
|
259 |
+
r"""Register a function that tells us what to save for backward.
|
260 |
+
|
261 |
+
Please see :func:`impl_backward` for more details.
|
262 |
+
"""
|
263 |
+
|
264 |
+
def inner(func):
|
265 |
+
custom_op = _find_custom_op(qualname, also_check_torch_library=True)
|
266 |
+
custom_op.impl_save_for_backward(_stacklevel=3)(func)
|
267 |
+
return func
|
268 |
+
|
269 |
+
if func is None:
|
270 |
+
return inner
|
271 |
+
return inner(func)
|
272 |
+
|
273 |
+
|
274 |
+
def impl_backward(qualname, output_differentiability=None, *, func=None):
|
275 |
+
r"""Registers a backward formula for an operator.
|
276 |
+
|
277 |
+
In order for an operator to work with autograd, you need to register
|
278 |
+
a backward formula. There are two pieces to this:
|
279 |
+
1. You must give us a function to specify what to save for backward.
|
280 |
+
Call this the "save for backward" function.
|
281 |
+
2. You must give us a function that computes gradients. Call this the
|
282 |
+
"backward" function.
|
283 |
+
|
284 |
+
Use `impl_save_for_backward` to define a "save for backward" function
|
285 |
+
that specifies what gets saved for backward. The function should accept
|
286 |
+
two arguments ``(inputs, output)`` and return the quantities to be saved
|
287 |
+
for backward.
|
288 |
+
|
289 |
+
During runtime, when you call the operator in a forwards pass, PyTorch
|
290 |
+
will invoke the "save for backward" function with the inputs and output
|
291 |
+
of the operator.
|
292 |
+
|
293 |
+
Use `impl_backward` to define the "backward" function. The backward
|
294 |
+
function must accept ``(ctx, saved, *grads)``:
|
295 |
+
- ``ctx`` is a context object where we may provide information
|
296 |
+
- ``saved`` is exactly what gets returned from the "save for backward"
|
297 |
+
function
|
298 |
+
- ``grads`` is one or more gradients. The number of gradients matches
|
299 |
+
the number of outputs of the operator.
|
300 |
+
|
301 |
+
The backward function must return a dict that maps the name of
|
302 |
+
an input to the operator to its corresponding gradient. All inputs that
|
303 |
+
were declared to be Tensors in the operator definition must be accounted
|
304 |
+
for in the dict. The gradient may be a Tensor or None.
|
305 |
+
|
306 |
+
For a detailed guide on custom ops, please see
|
307 |
+
https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
|
308 |
+
|
309 |
+
"""
|
310 |
+
|
311 |
+
def inner(func):
|
312 |
+
custom_op = _find_custom_op(qualname, also_check_torch_library=True)
|
313 |
+
custom_op.impl_backward(output_differentiability, _stacklevel=3)(func)
|
314 |
+
return func
|
315 |
+
|
316 |
+
if func is None:
|
317 |
+
return inner
|
318 |
+
return inner(func)
|
319 |
+
|
320 |
+
|
321 |
+
def _destroy(qualname):
|
322 |
+
"""De-registers a custom op. For testing purposes only"""
|
323 |
+
custom_op = _find_custom_op(qualname)
|
324 |
+
custom_op._destroy()
|
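The `impl_backward` docstring above spells out the contract but, unlike its siblings, ships no example. As a rough sketch of how the two registrations fit together under that documented contract — the op name `mylibrary::numpy_mul`, its CPU-only implementation, and the gradient formula below are illustrative assumptions, not part of this diff:

```python
import numpy as np
import torch
from torch import Tensor

@torch._custom_ops.custom_op("mylibrary::numpy_mul")
def numpy_mul(x: Tensor, y: Tensor) -> Tensor:
    raise NotImplementedError

# CPU-only illustrative implementation (hypothetical op, not from the diff).
@torch._custom_ops.impl("mylibrary::numpy_mul", device_types="cpu")
def numpy_mul_impl(x, y):
    return torch.from_numpy(np.multiply(x.numpy(), y.numpy()))

# "save for backward" function: accepts (inputs, output) and returns
# exactly the quantities the backward function will need.
@torch._custom_ops.impl_save_for_backward("mylibrary::numpy_mul")
def numpy_mul_save_for_backward(inputs, output):
    return (inputs.x, inputs.y)

# "backward" function: accepts (ctx, saved, *grads) and returns a dict
# mapping each Tensor input name to its gradient (Tensor or None).
@torch._custom_ops.impl_backward("mylibrary::numpy_mul")
def numpy_mul_backward(ctx, saved, grad):
    x, y = saved
    return {"x": grad * y, "y": grad * x}
```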
lib/python3.10/site-packages/torch/_deploy.py
ADDED
@@ -0,0 +1,104 @@
# mypy: allow-untyped-defs
import io

import torch
from torch.package import Importer, OrderedImporter, PackageImporter, sys_importer
from torch.package._package_pickler import create_pickler
from torch.package._package_unpickler import PackageUnpickler
from torch.serialization import _maybe_decode_ascii


def _save_storages(importer, obj):
    serialized_storages = []
    serialized_dtypes = []

    importer = importer if isinstance(importer, torch.package.PackageImporter) else None
    importers: Importer
    if importer is not None:
        importers = OrderedImporter(importer, sys_importer)
    else:
        importers = sys_importer

    def persistent_id(obj):
        if torch.is_storage(obj) or isinstance(obj, torch.storage.TypedStorage):
            if isinstance(obj, torch.storage.TypedStorage):
                # TODO: Once we decide to break serialization FC, we can
                # remove this case
                dtype = obj.dtype
            else:
                dtype = torch.uint8

            serialized_storages.append(obj)
            serialized_dtypes.append(dtype)
            return ("storage", len(serialized_storages) - 1)

        if hasattr(obj, "__reduce_deploy__"):
            if _serialized_reduces.get(id(obj)) is None:
                _serialized_reduces[id(obj)] = (
                    "reduce_deploy",
                    id(obj),
                    *obj.__reduce_deploy__(importers),
                )
            return _serialized_reduces[id(obj)]

        return None

    # Write the pickle data for `obj`
    data_buf = io.BytesIO()
    pickler = create_pickler(data_buf, importers)
    pickler.persistent_id = persistent_id
    pickler.dump(obj)
    data_value = data_buf.getvalue()
    return (
        data_value,
        serialized_storages,
        serialized_dtypes,
        importer.zip_reader if importer else None,
    )


def _load_storages(id, zip_reader, obj_bytes, serialized_storages, serialized_dtypes):
    def persistent_load(saved_id):
        assert isinstance(saved_id, tuple)
        typename = _maybe_decode_ascii(saved_id[0])
        data = saved_id[1:]

        if typename == "storage":
            # TODO: Once we decide to break serialization FC, we can
            # stop wrapping with TypedStorage
            storage = serialized_storages[data[0]]
            dtype = serialized_dtypes[data[0]]
            return torch.storage.TypedStorage(
                wrap_storage=storage.untyped(), dtype=dtype
            )

        if typename == "reduce_deploy":
            reduce_id, func, args = data
            if reduce_id not in _loaded_reduces:
                _loaded_reduces[reduce_id] = func(_raw_packages[zip_reader], *args)
            return _loaded_reduces[reduce_id]

        return None

    importer: Importer
    if zip_reader is not None:
        importer = OrderedImporter(_get_package(zip_reader), sys_importer)
    else:
        importer = sys_importer

    unpickler = PackageUnpickler(importer, io.BytesIO(obj_bytes))
    unpickler.persistent_load = persistent_load  # type: ignore[method-assign]
    result = _deploy_objects[id] = unpickler.load()
    return result


def _get_package(zip_reader):
    if zip_reader not in _raw_packages:
        _raw_packages[zip_reader] = PackageImporter(zip_reader)
    return _raw_packages[zip_reader]


_raw_packages: dict = {}
_deploy_objects: dict = {}
_serialized_reduces: dict = {}
_loaded_reduces: dict = {}
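`_save_storages`/`_load_storages` above hinge on pickle's persistent-ID hooks: large storages are diverted into a side list and only a small tag enters the pickle stream. Here is a minimal, torch-free sketch of that same technique using only stdlib `pickle`; the `Blob` type is our own stand-in for storages, not anything from the file above:

```python
import io
import pickle

external = []  # side-channel, playing the role of serialized_storages

class Blob:
    def __init__(self, payload):
        self.payload = payload

def persistent_id(obj):
    # Divert Blob payloads out-of-band; everything else pickles inline.
    if isinstance(obj, Blob):
        external.append(obj.payload)
        return ("blob", len(external) - 1)  # small tag instead of the bytes
    return None

buf = io.BytesIO()
pickler = pickle.Pickler(buf)
pickler.persistent_id = persistent_id
pickler.dump({"weights": Blob(b"\x00" * 8), "step": 3})

def persistent_load(saved_id):
    typename, idx = saved_id
    assert typename == "blob"
    return Blob(external[idx])

unpickler = pickle.Unpickler(io.BytesIO(buf.getvalue()))
unpickler.persistent_load = persistent_load
restored = unpickler.load()
assert restored["weights"].payload == b"\x00" * 8
```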
lib/python3.10/site-packages/torch/_environment.py
ADDED
@@ -0,0 +1,2 @@
def is_fbcode() -> bool:
    return False
lib/python3.10/site-packages/torch/_linalg_utils.py
ADDED
@@ -0,0 +1,150 @@
# mypy: allow-untyped-defs
"""Various linear algebra utility methods for internal use."""

from typing import Optional, Tuple

import torch
from torch import Tensor


def is_sparse(A):
    """Check if tensor A is a sparse tensor"""
    if isinstance(A, torch.Tensor):
        return A.layout == torch.sparse_coo

    error_str = "expected Tensor"
    if not torch.jit.is_scripting():
        error_str += f" but got {type(A)}"
    raise TypeError(error_str)


def get_floating_dtype(A):
    """Return the floating point dtype of tensor A.

    Integer types map to float32.
    """
    dtype = A.dtype
    if dtype in (torch.float16, torch.float32, torch.float64):
        return dtype
    return torch.float32


def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
    """Multiply two matrices.

    If A is None, return B. A can be sparse or dense. B is always
    dense.
    """
    if A is None:
        return B
    if is_sparse(A):
        return torch.sparse.mm(A, B)
    return torch.matmul(A, B)


def bform(X: Tensor, A: Optional[Tensor], Y: Tensor) -> Tensor:
    """Return bilinear form of matrices: :math:`X^T A Y`."""
    return matmul(X.mT, matmul(A, Y))


def qform(A: Optional[Tensor], S: Tensor):
    """Return quadratic form :math:`S^T A S`."""
    return bform(S, A, S)


def basis(A):
    """Return orthogonal basis of A columns."""
    return torch.linalg.qr(A).Q


def symeig(A: Tensor, largest: Optional[bool] = False) -> Tuple[Tensor, Tensor]:
    """Return eigenpairs of A with specified ordering."""
    if largest is None:
        largest = False
    E, Z = torch.linalg.eigh(A, UPLO="U")
    # assuming that E is ordered
    if largest:
        E = torch.flip(E, dims=(-1,))
        Z = torch.flip(Z, dims=(-1,))
    return E, Z


# These functions were deprecated and removed
# This nice error message can be removed in version 1.13+
def matrix_rank(input, tol=None, symmetric=False, *, out=None) -> Tensor:
    raise RuntimeError(
        "This function was deprecated since version 1.9 and is now removed.\n"
        "Please use the `torch.linalg.matrix_rank` function instead. "
        "The parameter 'symmetric' was renamed in `torch.linalg.matrix_rank()` to 'hermitian'."
    )


def solve(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]:
    raise RuntimeError(
        "This function was deprecated since version 1.9 and is now removed. "
        "`torch.solve` is deprecated in favor of `torch.linalg.solve`. "
        "`torch.linalg.solve` has its arguments reversed and does not return the LU factorization.\n\n"
        "To get the LU factorization see `torch.lu`, which can be used with `torch.lu_solve` or `torch.lu_unpack`.\n"
        "X = torch.solve(B, A).solution "
        "should be replaced with:\n"
        "X = torch.linalg.solve(A, B)"
    )


def lstsq(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]:
    raise RuntimeError(
        "This function was deprecated since version 1.9 and is now removed. "
        "`torch.lstsq` is deprecated in favor of `torch.linalg.lstsq`.\n"
        "`torch.linalg.lstsq` has reversed arguments and does not return the QR decomposition in "
        "the returned tuple (although it returns other information about the problem).\n\n"
        "To get the QR decomposition consider using `torch.linalg.qr`.\n\n"
        "The returned solution in `torch.lstsq` stored the residuals of the solution in the "
        "last m - n columns of the returned value whenever m > n. In torch.linalg.lstsq, "
        "the residuals are in the field 'residuals' of the returned named tuple.\n\n"
        "The unpacking of the solution, as in\n"
        "X, _ = torch.lstsq(B, A).solution[:A.size(1)]\n"
        "should be replaced with:\n"
        "X = torch.linalg.lstsq(A, B).solution"
    )


def _symeig(
    input,
    eigenvectors=False,
    upper=True,
    *,
    out=None,
) -> Tuple[Tensor, Tensor]:
    raise RuntimeError(
        "This function was deprecated since version 1.9 and is now removed. "
        "The default behavior has changed from using the upper triangular portion of the matrix by default "
        "to using the lower triangular portion.\n\n"
        "L, _ = torch.symeig(A, upper=upper) "
        "should be replaced with:\n"
        "L = torch.linalg.eigvalsh(A, UPLO='U' if upper else 'L')\n\n"
        "and\n\n"
        "L, V = torch.symeig(A, eigenvectors=True) "
        "should be replaced with:\n"
        "L, V = torch.linalg.eigh(A, UPLO='U' if upper else 'L')"
    )


def eig(
    self: Tensor,
    eigenvectors: bool = False,
    *,
    e=None,
    v=None,
) -> Tuple[Tensor, Tensor]:
    raise RuntimeError(
        "This function was deprecated since version 1.9 and is now removed. "
        "`torch.linalg.eig` returns complex tensors of dtype `cfloat` or `cdouble` rather than real tensors "
        "mimicking complex tensors.\n\n"
        "L, _ = torch.eig(A) "
        "should be replaced with:\n"
        "L_complex = torch.linalg.eigvals(A)\n\n"
        "and\n\n"
        "L, V = torch.eig(A, eigenvectors=True) "
        "should be replaced with:\n"
        "L_complex, V_complex = torch.linalg.eig(A)"
    )
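Since these helpers live in a private module (`torch._linalg_utils`), the following is only an illustrative sketch of how they compose; the shapes are arbitrary choices:

```python
import torch
from torch._linalg_utils import basis, bform, matmul, qform, symeig

A = torch.randn(5, 5)
A = A + A.mT                     # symmetrize so eigh applies
S = torch.randn(5, 3)

Q = basis(S)                     # orthonormal basis for the range of S
B = bform(Q, A, Q)               # Q^T A Q, the 3x3 projected matrix
assert torch.allclose(B, qform(A, Q))

E, Z = symeig(B, largest=True)   # eigenpairs, largest eigenvalue first

assert matmul(None, S) is S      # A=None acts as the identity operator
```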
lib/python3.10/site-packages/torch/_namedtensor_internals.py
ADDED
@@ -0,0 +1,159 @@
# mypy: allow-untyped-defs
from collections import OrderedDict


"""
This file contains helper functions that implement experimental functionality
for named tensors in python. All of these are experimental, unstable, and
subject to change or deletion.
"""


def check_serializing_named_tensor(tensor):
    if tensor.has_names():
        raise RuntimeError(
            "NYI: Named tensors don't support serialization. Please drop "
            "names via `tensor = tensor.rename(None)` before serialization."
        )


def build_dim_map(tensor):
    """Returns a map of { dim: dim_name } where dim is a name if the dim is named
    and the dim index otherwise."""
    return OrderedDict(
        [(idx if name is None else name, name) for idx, name in enumerate(tensor.names)]
    )


def unzip_namedshape(namedshape):
    if isinstance(namedshape, OrderedDict):
        namedshape = namedshape.items()
    if not hasattr(namedshape, "__iter__") and not isinstance(namedshape, tuple):
        raise RuntimeError(
            f"Expected namedshape to be OrderedDict or iterable of tuples, got: {type(namedshape)}"
        )
    if len(namedshape) == 0:
        raise RuntimeError("Expected namedshape to be non-empty.")
    return zip(*namedshape)


def namer_api_name(inplace):
    if inplace:
        return "rename_"
    else:
        return "rename"


def is_ellipsis(item):
    return item == Ellipsis or item == "..."


def single_ellipsis_index(names, fn_name):
    ellipsis_indices = [i for i, name in enumerate(names) if is_ellipsis(name)]
    if len(ellipsis_indices) >= 2:
        raise RuntimeError(
            f"{fn_name}: More than one Ellipsis ('...') found in names ("
            f"{names}). This function supports up to one Ellipsis."
        )
    if len(ellipsis_indices) == 1:
        return ellipsis_indices[0]
    return None


def expand_single_ellipsis(numel_pre_glob, numel_post_glob, names):
    return names[numel_pre_glob : len(names) - numel_post_glob]


def replace_ellipsis_by_position(ellipsis_idx, names, tensor_names):
    globbed_names = expand_single_ellipsis(
        ellipsis_idx, len(names) - ellipsis_idx - 1, tensor_names
    )
    return names[:ellipsis_idx] + globbed_names + names[ellipsis_idx + 1 :]


def resolve_ellipsis(names, tensor_names, fn_name):
    """
    Expands ... inside `names` to be equal to a list of names from `tensor_names`.
    """
    ellipsis_idx = single_ellipsis_index(names, fn_name)
    if ellipsis_idx is None:
        return names
    return replace_ellipsis_by_position(ellipsis_idx, names, tensor_names)


def update_names_with_list(tensor, names, inplace):
    # Special case for tensor.rename(None)
    if len(names) == 1 and names[0] is None:
        return tensor._update_names(None, inplace)

    return tensor._update_names(
        resolve_ellipsis(names, tensor.names, namer_api_name(inplace)), inplace
    )


def update_names_with_mapping(tensor, rename_map, inplace):
    dim_map = build_dim_map(tensor)
    for old_dim in rename_map.keys():
        new_dim = rename_map[old_dim]
        if old_dim in dim_map.keys():
            dim_map[old_dim] = new_dim
        else:
            raise RuntimeError(
                f"{namer_api_name(inplace)}: Tried to rename dim '{old_dim}' to dim "
                f"{new_dim} in Tensor[{tensor.names}] but dim '{old_dim}' does not exist"
            )
    return tensor._update_names(tuple(dim_map.values()), inplace)


def update_names(tensor, names, rename_map, inplace):
    """There are two usages:

    tensor.rename(*names) returns a view on tensor with named dims `names`.
    `names` must be of length `tensor.dim()`; otherwise, if '...' is in `names`,
    then it is expanded greedily to be equal to the corresponding names from
    `tensor.names`.

    For example,
    ```
    >>> # xdoctest: +SKIP
    >>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
    >>> x.rename('...', 'height', 'width').names
    ('N', 'C', 'height', 'width')

    >>> # xdoctest: +SKIP
    >>> x.rename('batch', '...', 'width').names
    ('batch', 'C', 'H', 'width')

    ```

    tensor.rename(**rename_map) returns a view on tensor that has renamed dims
    as specified in the mapping `rename_map`.

    For example,
    ```
    >>> # xdoctest: +SKIP
    >>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
    >>> x.rename(W='width', H='height').names
    ('N', 'C', 'height', 'width')

    ```

    Finally, tensor.rename has an in-place version called tensor.rename_.
    """
    has_names = len(names) > 0
    has_rename_pairs = bool(rename_map)
    if has_names and has_rename_pairs:
        raise RuntimeError(
            f"{namer_api_name(inplace)}: This function takes either positional "
            f"args or keyword args, but not both. Use tensor.{namer_api_name(inplace)}(*names) "
            f"to name dims and tensor.{namer_api_name(inplace)}(**rename_map) to rename "
            "dims."
        )

    # Special case for tensor.rename(*[]), which is valid for a 0 dim tensor.
    if not has_names and not has_rename_pairs:
        return update_names_with_list(tensor, names, inplace)

    if has_names:
        return update_names_with_list(tensor, names, inplace)
    return update_names_with_mapping(tensor, rename_map, inplace)
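For a concrete sense of how `resolve_ellipsis` expands `'...'` against a tensor's existing names, here is a small illustrative run; the module is private, so treat the import as an assumption about this particular build:

```python
import torch
from torch._namedtensor_internals import resolve_ellipsis

x = torch.empty(2, 3, 5, 7, names=("N", "C", "H", "W"))

# '...' greedily absorbs the tensor names not covered on either side.
expanded = resolve_ellipsis(("batch", "...", "width"), x.names, "rename")
print(expanded)  # ('batch', 'C', 'H', 'width')

# The same expansion is what the public rename API performs:
print(x.rename("batch", "...", "width").names)
```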
lib/python3.10/site-packages/torch/_ops.py
ADDED
@@ -0,0 +1,1362 @@
1 |
+
# mypy: allow-untyped-defs
|
2 |
+
import abc
|
3 |
+
import contextlib
|
4 |
+
import ctypes
|
5 |
+
import importlib
|
6 |
+
import inspect
|
7 |
+
import sys
|
8 |
+
import types
|
9 |
+
from typing import Any, Callable, Dict, List, Set, Type, TypeVar, Union
|
10 |
+
|
11 |
+
import torch
|
12 |
+
import torch.utils._pytree as pytree
|
13 |
+
from torch import _utils_internal
|
14 |
+
from torch._C import _dispatch_is_included_in_alias as is_included_in_alias, DispatchKey
|
15 |
+
from torch._functorch.pyfunctorch import dispatch_functorch
|
16 |
+
from torch.utils._python_dispatch import TorchDispatchMode
|
17 |
+
|
18 |
+
|
19 |
+
_F = TypeVar("_F", bound=Callable[..., Any])
|
20 |
+
|
21 |
+
|
22 |
+
# Query `hasattr` only once.
|
23 |
+
_SET_GLOBAL_FLAGS = hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags")
|
24 |
+
|
25 |
+
|
26 |
+
@contextlib.contextmanager
|
27 |
+
def dl_open_guard():
|
28 |
+
"""
|
29 |
+
Context manager to set the RTLD_GLOBAL dynamic linker flag while we open a
|
30 |
+
shared library to load custom operators.
|
31 |
+
"""
|
32 |
+
if not _SET_GLOBAL_FLAGS:
|
33 |
+
yield
|
34 |
+
return
|
35 |
+
old_flags = sys.getdlopenflags()
|
36 |
+
sys.setdlopenflags(old_flags | ctypes.RTLD_GLOBAL)
|
37 |
+
try:
|
38 |
+
yield
|
39 |
+
finally:
|
40 |
+
sys.setdlopenflags(old_flags)
|
41 |
+
|
42 |
+
|
43 |
+
class OperatorBase:
|
44 |
+
"""
|
45 |
+
Base class for OpOverload (which represents C++ ATen operators) and HigherOrderOperator
|
46 |
+
(which represents Python-only operators that are unrepresentable in TorchScript).
|
47 |
+
"""
|
48 |
+
|
49 |
+
def __init__(self):
|
50 |
+
# The dispatch cache precomputes a mapping of dispatch key that the
|
51 |
+
# dispatcher wants to dispatch to, to an actual implementation of the
|
52 |
+
# dispatch key. Confusingly, the actual implementation could *also* be a
|
53 |
+
# dispatch key, but in this case, this refers to the C++ kernel that
|
54 |
+
# was registered to some dispatch key. Aliases are permitted in the
|
55 |
+
# latter but not the former; for example, you might lookup the
|
56 |
+
# entry for AutogradCPU, and this maps you to the Autograd key for
|
57 |
+
# the generic autograd kernel that works for all devices. Since this
|
58 |
+
# is the Python dispatcher, you can also put an arbitrary Python
|
59 |
+
# callable to call instead. This handler gets precisely the
|
60 |
+
# args/kwargs that the operator was __call__'ed with.
|
61 |
+
# NB: This name is hard-coded in torch/csrc/autograd/python_variable.cpp
|
62 |
+
# for use with OpOverload; cache lookup is done entirely from C++
|
63 |
+
# for speed.
|
64 |
+
# TODO: The cache is NOT currently used by HigherOrderOperator, but it should!
|
65 |
+
self._dispatch_cache: Dict[
|
66 |
+
DispatchKey, Union[DispatchKey, Callable[..., Any]]
|
67 |
+
] = {}
|
68 |
+
|
69 |
+
# This table allows you to override the behavior of a particular
|
70 |
+
# dispatch key to call a custom Python function, rather than the
|
71 |
+
# ordinary C++ configured behavior. This is the raison d'etre of
|
72 |
+
# Python dispatcher: to let you program the dispatcher from Python
|
73 |
+
# in case you need something unusual, and don't want to clobber
|
74 |
+
# the existing registrations using the Python operator registration
|
75 |
+
# API.
|
76 |
+
self.py_kernels: Dict[DispatchKey, Callable[..., Any]] = {}
|
77 |
+
|
78 |
+
# This table allows you to override the behavior of a particular
|
79 |
+
# operator for a particular TorchDispatchMode. In practice,
|
80 |
+
# we are using this mostly for ProxyTensorMode. Modes can be
|
81 |
+
# thought of as an open world extension of dispatch keys, so it
|
82 |
+
# makes sense that you should be able to register them, the same
|
83 |
+
# way you can register dispatch keys.
|
84 |
+
self.python_key_table: Dict[
|
85 |
+
Union[Type[TorchDispatchMode], Type[torch.Tensor]], Callable[..., Any]
|
86 |
+
] = {}
|
87 |
+
|
88 |
+
# This table allows you to override the behavior of functorch
|
89 |
+
# transformations. NB: this currently only does something for
|
90 |
+
# HigherOrderOperator
|
91 |
+
self.functorch_table = {}
|
92 |
+
|
93 |
+
def __call__(self, *args, **kwargs):
|
94 |
+
raise NotImplementedError
|
95 |
+
|
96 |
+
def has_kernel_for_dispatch_key(self, k):
|
97 |
+
return k in self.py_kernels
|
98 |
+
|
99 |
+
def has_kernel_for_any_dispatch_key(self, ks):
|
100 |
+
for k in self.py_kernels:
|
101 |
+
if not torch._C._dispatch_is_alias_key(k) and ks.has(k):
|
102 |
+
return True
|
103 |
+
return False
|
104 |
+
|
105 |
+
def py_impl(self, k: Any) -> Callable[[_F], _F]:
|
106 |
+
def inner(fn: _F) -> _F:
|
107 |
+
if inspect.isclass(k) and (
|
108 |
+
issubclass(k, TorchDispatchMode) or issubclass(k, torch.Tensor)
|
109 |
+
):
|
110 |
+
assert k not in self.python_key_table
|
111 |
+
# TODO(voz): Should we replace setting DispatchKey.Python entirely with setting mode keys?
|
112 |
+
self.python_key_table[k] = fn
|
113 |
+
self._dispatch_cache.clear()
|
114 |
+
return fn
|
115 |
+
|
116 |
+
if isinstance(k, torch._C._functorch.TransformType):
|
117 |
+
assert k not in self.functorch_table
|
118 |
+
self.functorch_table[k] = fn
|
119 |
+
return fn
|
120 |
+
|
121 |
+
assert isinstance(k, DispatchKey)
|
122 |
+
assert (
|
123 |
+
k != DispatchKey.Python
|
124 |
+
), "Please register a mode for the torch._C.DispatchKey.Python key instead."
|
125 |
+
|
126 |
+
if k in self.py_kernels:
|
127 |
+
raise RuntimeError(
|
128 |
+
f"Trying to override a python impl for {k} on operator {self.name()}"
|
129 |
+
)
|
130 |
+
self.py_kernels[k] = fn
|
131 |
+
self._dispatch_cache.clear()
|
132 |
+
return fn
|
133 |
+
|
134 |
+
return inner
|
135 |
+
|
136 |
+
# Registers an implementation to all **3** variants of functionalization that we have:
|
137 |
+
# - DispatchKey.Functionalize
|
138 |
+
# - functorch.TransformType.Functionalize
|
139 |
+
# - FunctionalTensorMode
|
140 |
+
# Example:
|
141 |
+
# @py_functionalize_impl
|
142 |
+
# def functionalize_rule(ctx, inner_f, *args):
|
143 |
+
# args_unwrapped = ctx.unwrap_tensors(args)
|
144 |
+
# with ctx.redispatch_to_next():
|
145 |
+
# out = ctx.functionalize(inner_f)(*args_unwrapped)
|
146 |
+
# return ctx.wrap_tensors(out)
|
147 |
+
def py_functionalize_impl(self, fn: _F) -> _F:
|
148 |
+
from torch._subclasses.functional_tensor import (
|
149 |
+
CppFunctionalizeAPI as _CppFunctionalizeAPI,
|
150 |
+
FunctorchFunctionalizeAPI as _FunctorchFunctionalizeAPI,
|
151 |
+
PythonFunctionalizeAPI as _PythonFunctionalizeAPI,
|
152 |
+
)
|
153 |
+
|
154 |
+
# Construct our three flavors of functionalization,
|
155 |
+
# each of which have slightly different wrap/unwrap/redispatch policies
|
156 |
+
def functionalize_dk_fn(*args, **kwargs):
|
157 |
+
return fn(_CppFunctionalizeAPI(), *args, **kwargs)
|
158 |
+
|
159 |
+
def functionalize_dispatch_mode_fn(mode, *args, **kwargs):
|
160 |
+
return fn(_PythonFunctionalizeAPI(mode), *args, **kwargs)
|
161 |
+
|
162 |
+
def functionalize_functorch_fn(interpreter, *args, **kwargs):
|
163 |
+
return fn(_FunctorchFunctionalizeAPI(interpreter), *args, **kwargs)
|
164 |
+
|
165 |
+
self.py_impl(DispatchKey.Functionalize)(functionalize_dk_fn)
|
166 |
+
self.py_impl(torch._subclasses.functional_tensor.FunctionalTensorMode)(
|
167 |
+
functionalize_dispatch_mode_fn
|
168 |
+
)
|
169 |
+
self.py_impl(torch._C._functorch.TransformType.Functionalize)(
|
170 |
+
functionalize_functorch_fn
|
171 |
+
)
|
172 |
+
|
173 |
+
return fn
|
174 |
+
|
175 |
+
def name(self):
|
176 |
+
raise NotImplementedError
|
177 |
+
|
178 |
+
|
179 |
+
# Equivalent to computeDispatchTableEntryWithDebug
|
180 |
+
def resolve_key(op: OperatorBase, k: DispatchKey): # type: ignore[valid-type]
|
181 |
+
# 1. (Direct) operator registration
|
182 |
+
if op.has_kernel_for_dispatch_key(k):
|
183 |
+
return k
|
184 |
+
# 2.1 Use CompositeExplicitAutogradNonFunctional kernel if available
|
185 |
+
cand = DispatchKey.CompositeExplicitAutogradNonFunctional
|
186 |
+
if (
|
187 |
+
k == DispatchKey.Undefined or is_included_in_alias(k, cand)
|
188 |
+
) and op.has_kernel_for_dispatch_key(cand):
|
189 |
+
return cand
|
190 |
+
# 2.2 Use CompositeExplicitAutograd kernel if available
|
191 |
+
cand = DispatchKey.CompositeExplicitAutograd
|
192 |
+
if (
|
193 |
+
k == DispatchKey.Undefined or is_included_in_alias(k, cand)
|
194 |
+
) and op.has_kernel_for_dispatch_key(cand):
|
195 |
+
return cand
|
196 |
+
has_backend_kernel = op.has_kernel_for_any_dispatch_key(
|
197 |
+
torch._C._dispatch_get_backend_keyset_from_autograd(k)
|
198 |
+
) or op.has_kernel_for_dispatch_key(DispatchKey.CompositeExplicitAutograd)
|
199 |
+
# 2.3. Use CompositeImplicitAutograd kernel if available
|
200 |
+
cand = DispatchKey.CompositeImplicitAutogradNestedTensor
|
201 |
+
if (
|
202 |
+
(k != DispatchKey.Undefined and is_included_in_alias(k, cand))
|
203 |
+
and op.has_kernel_for_dispatch_key(cand)
|
204 |
+
and not has_backend_kernel
|
205 |
+
):
|
206 |
+
return cand
|
207 |
+
cand = DispatchKey.CompositeImplicitAutograd
|
208 |
+
if (
|
209 |
+
k == DispatchKey.Undefined or is_included_in_alias(k, cand)
|
210 |
+
) and op.has_kernel_for_dispatch_key(cand):
|
211 |
+
if k == DispatchKey.AutogradOther and op.has_kernel_for_any_dispatch_key(
|
212 |
+
torch._C._dispatch_autogradother_backends
|
213 |
+
):
|
214 |
+
raise RuntimeError("ambiguous autogradother kernel")
|
215 |
+
elif not has_backend_kernel:
|
216 |
+
return cand
|
217 |
+
# 2.4. For autograd backend keys, use kernel from DispatchKey::Autograd if available
|
218 |
+
cand = DispatchKey.Autograd
|
219 |
+
if is_included_in_alias(k, cand) and op.has_kernel_for_dispatch_key(cand):
|
220 |
+
return cand
|
221 |
+
# 2.5 Use kernel from DispatchKey::FuncTorchBatchedDecomposition if available
|
222 |
+
cand = DispatchKey.FuncTorchBatchedDecomposition
|
223 |
+
if is_included_in_alias(k, cand) and op.has_kernel_for_dispatch_key(cand):
|
224 |
+
return cand
|
225 |
+
# Backend fallback
|
226 |
+
if torch._C._dispatch_has_backend_fallback(k):
|
227 |
+
# The dispatch key itself will implicitly route to backend fallback.
|
228 |
+
# This is probably not great for the pure Python implementation.
|
229 |
+
return k
|
230 |
+
raise NotImplementedError(f"could not find kernel for {op} at dispatch key {k}")
|
231 |
+
|
232 |
+
|
233 |
+
_higher_order_ops: Dict[str, "HigherOrderOperator"] = {}
|
234 |
+
|
235 |
+
_HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS = [
|
236 |
+
DispatchKey.PythonDispatcher, # type: ignore[attr-defined]
|
237 |
+
DispatchKey.PythonTLSSnapshot, # type: ignore[attr-defined]
|
238 |
+
DispatchKey.ADInplaceOrView,
|
239 |
+
DispatchKey.BackendSelect,
|
240 |
+
DispatchKey.AutocastCPU, # type: ignore[attr-defined]
|
241 |
+
DispatchKey.AutocastCUDA, # type: ignore[attr-defined]
|
242 |
+
]
|
243 |
+
|
244 |
+
|
245 |
+
class HigherOrderOperator(OperatorBase, abc.ABC):
|
246 |
+
# The HigherOrderOperator will appear as torch.ops.higher_order.{name}
|
247 |
+
#
|
248 |
+
# If you're creating a new HigherOrderOperator, please do not change the
|
249 |
+
# default. Adding operators to the global torch.ops namespace is a bad
|
250 |
+
# practice due to name collisions.
|
251 |
+
def __init__(self, name, *, cacheable=False):
|
252 |
+
super().__init__()
|
253 |
+
if type(self) is HigherOrderOperator:
|
254 |
+
raise RuntimeError(
|
255 |
+
"Direct instantiation of HigherOrderOperator is not allowed. Please subclass it."
|
256 |
+
)
|
257 |
+
self._name = name
|
258 |
+
|
259 |
+
# Make _OPNamespace not scream, this whole name based association needs a good hard look
|
260 |
+
self.__name__ = name
|
261 |
+
_higher_order_ops[name] = self
|
262 |
+
self._ns = "higher_order"
|
263 |
+
self.__module__ = "torch.ops.higher_order"
|
264 |
+
self._cacheable = cacheable
|
265 |
+
|
266 |
+
self.non_fallthrough_keys = torch._C._dispatch_keyset_full()
|
267 |
+
|
268 |
+
for dispatch_key in _HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS:
|
269 |
+
self.fallthrough(dispatch_key)
|
270 |
+
|
271 |
+
# [NOTE] We have to register pre-dispatch key implementation
|
272 |
+
# because sometimes HOP use aot-dispatch tracing to detect certaion
|
273 |
+
# mutations. This is problematic when we are functionalizing HOP
|
274 |
+
# during pre-dispatch because when the inner tracer starts, it will see
|
275 |
+
# that PreDispatch key is still active. In that case, we just redispatch
|
276 |
+
# it to next key. This is only safe to do when PreDispatch key stack has no
|
277 |
+
# active modes.
|
278 |
+
|
279 |
+
def py_impl(self, k: Any) -> Callable[[_F], _F]:
|
280 |
+
if isinstance(k, DispatchKey) and not self.non_fallthrough_keys.has(k):
|
281 |
+
self.non_fallthrough_keys = self.non_fallthrough_keys.add(k)
|
282 |
+
return super().py_impl(k)
|
283 |
+
|
284 |
+
@property
|
285 |
+
def namespace(self):
|
286 |
+
return self._ns
|
287 |
+
|
288 |
+
def cacheable(self):
|
289 |
+
return self._cacheable
|
290 |
+
|
291 |
+
def fallthrough(self, dispatch_key):
|
292 |
+
self.non_fallthrough_keys = self.non_fallthrough_keys.remove(dispatch_key)
|
293 |
+
|
294 |
+
# Use positional-only argument to avoid naming collide with custom ops arguments
|
295 |
+
# that are named "self".
|
296 |
+
def dispatch(self, /, dispatch_key, *args, **kwargs):
|
297 |
+
from torch.utils._python_dispatch import _get_current_dispatch_mode
|
298 |
+
|
299 |
+
if dispatch_key in self._dispatch_cache:
|
300 |
+
kernel = self._dispatch_cache[dispatch_key]
|
301 |
+
assert not isinstance(kernel, DispatchKey)
|
302 |
+
return kernel(*args, **kwargs)
|
303 |
+
|
304 |
+
if dispatch_key == DispatchKey.FuncTorchDynamicLayerFrontMode:
|
305 |
+
return dispatch_functorch(self, args, kwargs)
|
306 |
+
|
307 |
+
if dispatch_key == DispatchKey.Python:
|
308 |
+
# Keep the following 1:1 with handle_torch_function_no_python_arg_parser
|
309 |
+
# in torch/csrc/utils/python_arg_parser.cpp
|
310 |
+
|
311 |
+
overloaded_args_list = []
|
312 |
+
|
313 |
+
def has_python_key(tensor):
|
314 |
+
return torch._C._dispatch_keys(tensor).has("Python")
|
315 |
+
|
316 |
+
def check_overloaded(arg):
|
317 |
+
if isinstance(arg, torch.Tensor) and has_python_key(arg):
|
318 |
+
overloaded_args_list.append(arg)
|
319 |
+
|
320 |
+
for arg in (*args, *kwargs.values()):
|
321 |
+
check_overloaded(arg)
|
322 |
+
if isinstance(arg, (list, tuple)):
|
323 |
+
for a in arg:
|
324 |
+
check_overloaded(a)
|
325 |
+
|
326 |
+
overloaded_args = tuple(overloaded_args_list)
|
327 |
+
overloaded_types = tuple(type(arg) for arg in overloaded_args)
|
328 |
+
|
329 |
+
# Step 1: dispatch on any user TorchDispatchModes
|
330 |
+
from torch.utils._python_dispatch import _pop_mode_temporarily
|
331 |
+
|
332 |
+
curr_mode = _get_current_dispatch_mode()
|
333 |
+
if curr_mode is not None:
|
334 |
+
if type(curr_mode) in self.python_key_table:
|
335 |
+
handler = self.python_key_table[type(curr_mode)]
|
336 |
+
with _pop_mode_temporarily() as mode:
|
337 |
+
# "natural" calling convention: (mode, *args, **kwargs)
|
338 |
+
# TODO(rzou): we should support torch_dispatch calling convention too.
|
339 |
+
result = handler(mode, *args, **kwargs)
|
340 |
+
else:
|
341 |
+
raise NotImplementedError(
|
342 |
+
f"There was no rule registered for HOP {self._name} and mode {curr_mode}. "
|
343 |
+
f"We recommend filing an issue."
|
344 |
+
)
|
345 |
+
if result is not NotImplemented:
|
346 |
+
return result
|
347 |
+
|
348 |
+
# Step 2: dispatch on any subclasses
|
349 |
+
for arg in overloaded_args:
|
350 |
+
subclass_type = type(arg)
|
351 |
+
if (
|
352 |
+
subclass_type.__torch_dispatch__
|
353 |
+
== torch._C._disabled_torch_dispatch_impl
|
354 |
+
):
|
355 |
+
continue
|
356 |
+
if subclass_type in self.python_key_table:
|
357 |
+
handler = self.python_key_table[subclass_type]
|
358 |
+
# "natural" calling convention: (*args, **kwargs)
|
359 |
+
# TODO(rzou): we should support torch_dispatch calling convention too.
|
360 |
+
result = handler(*args, **kwargs)
|
361 |
+
else:
|
362 |
+
raise NotImplementedError(
|
363 |
+
f"There was no rule registered for HOP {self._name} and subclass {subclass_type}. "
|
364 |
+
f"We recommend filing an issue."
|
365 |
+
)
|
366 |
+
if result is not NotImplemented:
|
367 |
+
return result
|
368 |
+
|
369 |
+
# All handlers returned NotImplemented
|
370 |
+
raise TypeError(
|
371 |
+
f"Multiple dispatch failed for {self._name}. There was no registered that "
|
372 |
+
f"did not return NotImplemented. Use HOP.py_impl to register some. "
|
373 |
+
f"Tried mode: {curr_mode}) and subclasses: "
|
374 |
+
f"{[type(a) for a in overloaded_args]}"
|
375 |
+
)
|
376 |
+
|
377 |
+
functionality_key = torch._C._to_functionality_key(dispatch_key) # type: ignore[attr-defined]
|
378 |
+
if functionality_key == DispatchKey.PreDispatch:
|
379 |
+
from torch.utils._python_dispatch import _pop_mode_temporarily
|
380 |
+
|
381 |
+
# The check for Python in the exclude set is so we properly respect `with no_dispatch()`
|
382 |
+
# calls inside of a mode.
|
383 |
+
if (
|
384 |
+
_len_torch_dispatch_stack_pre_dispatch() > 0
|
385 |
+
) and not torch._C._dispatch_tls_is_dispatch_key_excluded(
|
386 |
+
DispatchKey.Python
|
387 |
+
):
|
388 |
+
curr_mode = _get_current_dispatch_mode_pre_dispatch()
|
389 |
+
assert (
|
390 |
+
curr_mode is not None
|
391 |
+
), "Illegal invocation of dispatch on torch._C.DispatchKey.PreDispatch without a mode."
|
392 |
+
assert (
|
393 |
+
type(curr_mode) in self.python_key_table
|
394 |
+
), f"Current active mode {curr_mode} not registered"
|
395 |
+
handler = self.python_key_table[type(curr_mode)]
|
396 |
+
with _pop_mode_temporarily(functionality_key) as mode:
|
397 |
+
return handler(mode, *args, **kwargs)
|
398 |
+
|
399 |
+
final_key = resolve_key(self, dispatch_key)
|
400 |
+
|
401 |
+
# This can current fail due to backend fallbacks. You just have to
|
402 |
+
# register them by hand for HigherOrderOperator.
|
403 |
+
if final_key not in self.py_kernels:
|
404 |
+
raise NotImplementedError(
|
405 |
+
f"could not find kernel for HigherOrderOperator {self._name} "
|
406 |
+
f"at dispatch key {final_key} (resolved from {dispatch_key})"
|
407 |
+
)
|
408 |
+
|
409 |
+
# [NOTE] We shouldn't cache PreDispatch kernel here because depending
|
410 |
+
# on what modes are active, predispatch behaviour is different.
|
411 |
+
# Also we do same thing for normal ops:
|
412 |
+
# See Note [Not Caching Per-Dispatch-Key Mode Handlers]
|
413 |
+
if dispatch_key != DispatchKey.PreDispatch:
|
414 |
+
self._dispatch_cache[dispatch_key] = self.py_kernels[final_key]
|
415 |
+
kernel = self.py_kernels[final_key]
|
416 |
+
# It's illegal to register DispatchKey to py_kernels, since there's no
|
417 |
+
# C++ kernel to call into
|
418 |
+
assert not isinstance(kernel, DispatchKey)
|
419 |
+
return kernel(*args, **kwargs)
|
420 |
+
|
421 |
+
@abc.abstractmethod
|
422 |
+
def __call__(self, /, *args, **kwargs):
|
423 |
+
# Dynamo already traces the body of HigherOrderOp beforehand when it
|
424 |
+
# so no need to trace into it.
|
425 |
+
from torch._dynamo import disable
|
426 |
+
|
427 |
+
@disable
|
428 |
+
def wrapper():
|
429 |
+
flat_args = _to_flat_tuple(args, kwargs)
|
430 |
+
if torch.overrides.has_torch_function(flat_args):
|
431 |
+
return torch.overrides.handle_torch_function(
|
432 |
+
self, flat_args, *args, **kwargs
|
433 |
+
)
|
434 |
+
|
435 |
+
dispatch_key_set = _compute_keyset(args, kwargs, self.non_fallthrough_keys)
|
436 |
+
return self.dispatch(
|
437 |
+
dispatch_key_set.highestPriorityTypeId(), *args, **kwargs
|
438 |
+
)
|
439 |
+
|
440 |
+
return wrapper()
|
441 |
+
|
442 |
+
def __str__(self):
|
443 |
+
return f"{self.name()}"
|
444 |
+
|
445 |
+
def name(self):
|
446 |
+
return self._name
|
447 |
+
|
448 |
+
|
449 |
+
def _to_flat_tuple(args, kwargs):
|
450 |
+
return pytree.arg_tree_leaves(*args, **kwargs)
|
451 |
+
|
452 |
+
|
453 |
+
def _compute_keyset(args, kwargs, non_fallthrough_keys):
|
454 |
+
tensors = _get_tensors(args, kwargs)
|
455 |
+
return key_extractor(tensors, non_fallthrough_keys)
|
456 |
+
|
457 |
+
|
458 |
+
def _get_tensors(args, kwargs):
|
459 |
+
flat_all = _to_flat_tuple(args, kwargs)
|
460 |
+
tensor_args = [t for t in flat_all if isinstance(t, torch.Tensor)]
|
461 |
+
return tuple(tensor_args)
|
462 |
+
|
463 |
+
|
464 |
+
# Note - this should maintain identical impl to the C++ dispatcher key extraction logic
|
465 |
+
# at ATen/core/dispatch/DispatchKeyExtractor.h
|
466 |
+
def key_extractor(tensors, key_mask):
|
467 |
+
key_set = torch._C._dispatch_tls_local_include_set()
|
468 |
+
for tensor in tensors:
|
469 |
+
key_set = key_set | torch._C._dispatch_keys(tensor)
|
470 |
+
key_set = key_set - torch._C._dispatch_tls_local_exclude_set()
|
471 |
+
key_set = key_set & key_mask
|
472 |
+
return key_set
|
473 |
+
|
474 |
+
|
475 |
+
# Mode stack for PreDispatchKey
|
476 |
+
# it should always have three keys with
|
477 |
+
# priority given to FunctionalTensorMode and
|
478 |
+
# then ProxyTorchDispatchMode. It means that
|
479 |
+
# slot 0 belongs to ProxyTorchDispatchMode and
|
480 |
+
# slot 1 belongs to FunctionalTensorMode.
|
481 |
+
#
|
482 |
+
# SchemaCheckMode is separate from the other 2,
# and is only valid when the stack is empty.
# SchemaCheckMode is for testing purposes, and
# is meant to run in eager mode on concrete inputs,
# checking for incorrect schemas with regard to
# aliasing or mutating ops.
class _ModeStackStateForPreDispatch:
    def __init__(self):
        self.__infra_modes = [None, None]
        self._schema_check_mode = None

    def set(self, index, mode):
        assert index < len(self.__infra_modes)
        self.__infra_modes[index] = mode

    def get(self, index):
        assert index < len(self.__infra_modes)
        return self.__infra_modes[index]

    def count(self):
        return len([i for i in self.__infra_modes if i is not None]) + int(
            self._schema_check_mode is not None
        )


_mode_stack_state_for_pre_dispatch = _ModeStackStateForPreDispatch()


def unset_mode_pre_dispatch(mode_key, schema_check=False):
    current_mode_stack_pre_dispatch = mode_stack_state_for_pre_dispatch()
    assert mode_key is None or mode_key in (
        torch._C._TorchDispatchModeKey.PROXY,
        torch._C._TorchDispatchModeKey.FUNCTIONAL,
    )
    if schema_check:
        assert mode_key is None

    def _unset_mode():
        if mode_key == torch._C._TorchDispatchModeKey.PROXY:
            current_mode = current_mode_stack_pre_dispatch.get(0)
            mode_stack_state_for_pre_dispatch().set(0, None)
            return current_mode
        elif mode_key == torch._C._TorchDispatchModeKey.FUNCTIONAL:
            current_mode = current_mode_stack_pre_dispatch.get(1)
            mode_stack_state_for_pre_dispatch().set(1, None)
            return current_mode
        else:
            current_mode = mode_stack_state_for_pre_dispatch()._schema_check_mode
            mode_stack_state_for_pre_dispatch()._schema_check_mode = None
            return current_mode

    current_mode = _unset_mode()

    new_pre_dispatch_len = _len_torch_dispatch_stack_pre_dispatch()
    # When we are unsetting a mode, we need to check if there is any
    # active mode left on the PreDispatch key. If there is nothing
    # active, we need to remove the PreDispatch key from the local
    # dispatch include set.
    if new_pre_dispatch_len == 0:
        torch._C._dispatch_tls_set_dispatch_key_included(DispatchKey.PreDispatch, False)

    return current_mode


def _set_mode_pre_dispatch(mode):
    from torch._subclasses.functional_tensor import FunctionalTensorMode
    from torch._subclasses.schema_check_mode import SchemaCheckMode
    from torch.fx.experimental.proxy_tensor import ProxyTorchDispatchMode

    assert isinstance(
        mode,
        (
            FunctionalTensorMode,
            ProxyTorchDispatchMode,
            SchemaCheckMode,
        ),
    )

    previous_mode_stack_len = _len_torch_dispatch_stack_pre_dispatch()
    if isinstance(mode, SchemaCheckMode):
        current_mode = mode_stack_state_for_pre_dispatch()._schema_check_mode
        if previous_mode_stack_len > 0:
            raise AssertionError(
                "SchemaCheckMode for pre-dispatch must be used exclusively, found other modes on the stack"
            )
        mode_stack_state_for_pre_dispatch()._schema_check_mode = mode
    elif isinstance(mode, FunctionalTensorMode):
        current_mode = mode_stack_state_for_pre_dispatch().get(1)
        assert current_mode is None
        mode_stack_state_for_pre_dispatch().set(1, mode)
    else:
        current_mode = mode_stack_state_for_pre_dispatch().get(0)
        assert current_mode is None
        mode_stack_state_for_pre_dispatch().set(0, mode)

    # When we are setting a mode, we need to check if there is any
    # active mode left on the PreDispatch key. If there was nothing
    # active before setting this mode, it means that the PreDispatch key
    # was turned off, so we need to turn it on again.
    if previous_mode_stack_len == 0:
        torch._C._dispatch_tls_set_dispatch_key_included(DispatchKey.PreDispatch, True)


def _pop_mode_from_pre_dispatch():
    mode_stack = mode_stack_state_for_pre_dispatch()
    pre_dispatch_len = _len_torch_dispatch_stack_pre_dispatch()

    if pre_dispatch_len == 0:
        raise AssertionError("Trying to pop empty mode stack")

    if mode_stack._schema_check_mode is not None:
        return unset_mode_pre_dispatch(None, schema_check=True)
    if mode_stack.get(1) is not None:
        return unset_mode_pre_dispatch(torch._C._TorchDispatchModeKey.FUNCTIONAL)
    if mode_stack.get(0) is not None:
        return unset_mode_pre_dispatch(torch._C._TorchDispatchModeKey.PROXY)


def _len_torch_dispatch_stack_pre_dispatch():
    return mode_stack_state_for_pre_dispatch().count()


def _get_dispatch_mode_pre_dispatch(mode_key):
    assert mode_key in (
        torch._C._TorchDispatchModeKey.PROXY,
        torch._C._TorchDispatchModeKey.FUNCTIONAL,
    )
    if mode_key == torch._C._TorchDispatchModeKey.PROXY:
        return mode_stack_state_for_pre_dispatch().get(0)
    else:
        return mode_stack_state_for_pre_dispatch().get(1)


def _get_current_dispatch_mode_pre_dispatch():
    if mode_stack_state_for_pre_dispatch()._schema_check_mode is not None:
        return mode_stack_state_for_pre_dispatch()._schema_check_mode
    else:
        stack_len = mode_stack_state_for_pre_dispatch().count()
        if stack_len == 2:
            return mode_stack_state_for_pre_dispatch().get(1)
        if stack_len == 1:
            return (
                mode_stack_state_for_pre_dispatch().get(1)
                if mode_stack_state_for_pre_dispatch().get(1) is not None
                else mode_stack_state_for_pre_dispatch().get(0)
            )
        return None


def mode_stack_state_for_pre_dispatch():
    global _mode_stack_state_for_pre_dispatch
    return _mode_stack_state_for_pre_dispatch
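The slot discipline above is easiest to see in isolation: slot 0 holds at most one proxy mode, slot 1 at most one functional mode, plus one optional SchemaCheckMode, and count() reflects the occupied slots. A minimal sketch (not part of the upstream file), using plain objects as stand-ins for real mode instances:

    state = _ModeStackStateForPreDispatch()
    assert state.count() == 0
    state.set(0, object())  # stand-in for a ProxyTorchDispatchMode (slot 0)
    state.set(1, object())  # stand-in for a FunctionalTensorMode (slot 1)
    assert state.count() == 2 and state.get(1) is not None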
cached_ops: Set["OpOverload"] = set()


def add_cached_op(op_overload):
    global cached_ops
    cached_ops.add(op_overload)


def reset_cached_ops():
    global cached_ops
    cached_ops.clear()


def get_cached_ops():
    global cached_ops
    return cached_ops


# Each OpOverload object contains a pointer to a specific operator overload
# and a pointer to the parent `OpOverloadPacket` object.
# You can obtain an OpOverload object through attribute query on an OpOverloadPacket.
class OpOverload(OperatorBase):
    def __init__(self, overloadpacket, op, op_dk, schema, tags):
        super().__init__()
        self._op = op
        self._op_dk = op_dk
        self._schema = schema
        self._overloadpacket = overloadpacket
        self._tags = tags
        self._overloadname = (
            "default" if schema.overload_name == "" else schema.overload_name
        )
        self._name = self._schema.name
        if schema.overload_name:
            self._name += "." + schema.overload_name
        self.__name__ = f"{self._schema.name.split('::')[1]}.{self._overloadname}"
        self.__module__ = overloadpacket.__module__
        op.__module__ = overloadpacket.__module__
        self.__qualname__ = self._name
        self.__annotations__ = {}
        # Only compute the OperatorHandle when we need it. Not all OpOverloads have
        # OperatorHandles (the TorchScript ones don't...)
        self._lazy_handle = None

        # If the OpOverload was constructed from a Library.def in Python.
        self._defined_in_python = self.__qualname__ in torch.library._defs

        # Logic replicated from aten/src/ATen/native/MathBitsFallback.h
        is_write = None
        for a in self._schema.arguments:
            if a.alias_info is None:
                continue
            if is_write is None:
                is_write = a.alias_info.is_write
            else:
                # We will conservatively call mixed mutable/non-mutable
                # aliased inputs as NOT a view
                is_write = a.alias_info.is_write or is_write
        self.is_view = is_write is not None and not is_write

    @property
    def _namespace(self):
        return self._schema.name.split("::")[0]

    @property
    def _opname(self):
        return self._schema.name.split("::")[1]

    @property
    def _handle(self):
        if self._lazy_handle is None:
            self._lazy_handle = torch._C._dispatch_find_schema_or_throw(
                self._schema.name, self._schema.overload_name
            )
        return self._lazy_handle

    # It's a no-op since the OpOverload object is immutable and must be unique for a given op overload.
    def __deepcopy__(self, memo=None):
        return self

    def __repr__(self):
        return "<OpOverload(op='{}.{}', overload='{}')>".format(
            *self._schema.name.split("::"), self._overloadname
        )

    # Use positional-only argument to avoid naming collision with aten ops arguments
    # that are named "self". This way, all the aten ops can be called by kwargs.
    def __call__(self, /, *args, **kwargs):
        return self._op(*args, **kwargs)

    # Use positional-only argument to avoid naming collision with aten ops arguments
    # that are named "self". This way, all the aten ops can be called by kwargs.
    def redispatch(self, /, keyset, *args, **kwargs):
        return self._handle.redispatch_boxed(keyset, *args, **kwargs)

    def __hash__(self):
        return hash(self._op)

    # `my_namespace.my_op_name.overload_name`
    def __str__(self):
        return "{}.{}.{}".format(*self._schema.name.split("::"), self._overloadname)

    def has_kernel_for_dispatch_key(self, k):
        return super().has_kernel_for_dispatch_key(
            k
        ) or torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), k)

    def has_kernel_for_any_dispatch_key(self, ks):
        return torch._C._dispatch_has_kernel_for_any_dispatch_key(
            self.name(), ks
        ) or super().has_kernel_for_any_dispatch_key(ks)

    @property
    def namespace(self):
        return self._schema.name.split("::")[0]

    def _can_decompose(self):
        dk = DispatchKey.CompositeImplicitAutograd
        return dk in self.py_kernels or torch._C._dispatch_has_kernel_for_dispatch_key(
            self.name(), dk
        )

    def decompose(self, *args, **kwargs):
        dk = DispatchKey.CompositeImplicitAutograd
        if dk in self.py_kernels:
            # NB: This branch is not too necessary anymore, because we can
            # apply Python CompositeImplicitAutograd *before* tracing
            # using the Python dispatcher (also taking advantage of the autograd
            # formula). But it's included for completeness.
            return self.py_kernels[dk](*args, **kwargs)
        elif torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), dk):
            return self._op_dk(dk, *args, **kwargs)
        else:
            return NotImplemented

    # Remove a dispatch key from the dispatch cache. This will force it to get
    # recomputed the next time. Does nothing if the key is not cached.
    # WARNING: if you register a dispatch key to py_kernels of an OpOverload,
    # calling _del_dispatch on that key is NOT sufficient to apply your change,
    # because a single registration may affect MULTIPLE dispatch keys (e.g.,
    # registering Autograd affects AutogradCPU). del_dispatch is to be used
    # only if you are specifically modifying how get_dispatch handles a
    # particular input 'key'.
    def _uncache_dispatch(self, key):
        self._dispatch_cache.pop(key, None)

    # This implements the pre-computation logic for the Python dispatcher.
    def _get_dispatch(self, key):
        # This is only called upon a cache miss
        assert key not in self._dispatch_cache, f"{self} {key}"

        if key == DispatchKey.Python:
            if not isinstance(self, TorchBindOpOverload) and not self.python_key_table:
                self._dispatch_cache[key] = key
                add_cached_op(self)
                return key

            def handler(*args, **kwargs):
                from torch.utils._python_dispatch import _get_current_dispatch_mode

                # TODO: We also need to handle tensor subclasses here
                # TODO(voz): We should walk all the nodes here / turn it into a list, topmode is ok for now.
                curr_mode = _get_current_dispatch_mode()
                assert (
                    curr_mode is not None
                ), "Illegal invocation of dispatch on torch._C.DispatchKey.Python without a mode."
                curr_mode = type(curr_mode)

                if curr_mode not in self.python_key_table:
                    if isinstance(self, TorchBindOpOverload):
                        with torch.utils._python_dispatch._pop_mode_temporarily() as mode:
                            return torch._library.utils.handle_dispatch_mode(
                                mode, self, *args, **kwargs
                            )
                    else:
                        return self._op_dk(key, *args, **kwargs)

                with torch.utils._python_dispatch._pop_mode_temporarily() as mode:
                    return self.python_key_table[curr_mode](mode, *args, **kwargs)

            self._dispatch_cache[key] = handler
            add_cached_op(self)
            return handler

        functionality_key = torch._C._to_functionality_key(key)  # type: ignore[attr-defined]
        if functionality_key == DispatchKey.PreDispatch:
            curr_stack_len = _len_torch_dispatch_stack_pre_dispatch()
            # The check for Python in the exclude set is so we properly respect `with no_dispatch()`
            # calls inside of a mode.
            if (
                curr_stack_len > 0
                and not torch._C._dispatch_tls_is_dispatch_key_excluded(
                    DispatchKey.Python
                )
            ):

                def handler(*args, **kwargs):
                    @contextlib.contextmanager
                    def _temporarily_pop_modes_from_pre_dispatch():
                        top_mode = _pop_mode_from_pre_dispatch()
                        try:
                            yield top_mode
                        finally:
                            _set_mode_pre_dispatch(top_mode)

                    with _temporarily_pop_modes_from_pre_dispatch() as curr_mode:
                        return torch._library.utils.handle_dispatch_mode(
                            curr_mode, self, *args, **kwargs
                        )

                # Note [Not Caching Per-Dispatch-Key Mode Handlers]
                # Note that we're not caching this handler. There isn't really a point, since the slow bit
                # is the handler itself (in python).
                # Also, not caching means that we don't have to reset the cache when any existing
                # modes go out of scope (which in and of itself takes time to loop through all operators).
                return handler

        final_key = resolve_key(self, key)

        # See Note [Not Caching Per-Dispatch-Key Mode Handlers]
        cache_result = key != DispatchKey.PreDispatch

        # TODO: We could potentially have lots of debugging wrappers against
        # dispatch keys; design some general registration mechanism instead of
        # having an if statement for each of them
        if key == DispatchKey.Functionalize:
            import torch._dispatch.python as pydispatch

            if pydispatch.CROSSREF_FUNCTIONALIZE:
                handler = pydispatch.make_crossref_functionalize(self, final_key)
                if cache_result:
                    self._dispatch_cache[key] = handler
                    add_cached_op(self)
                return handler

        r = self.py_kernels.get(final_key, final_key)
        if cache_result:
            self._dispatch_cache[key] = r
            add_cached_op(self)
        return r

    def name(self):
        return self._name

    @property
    def overloadpacket(self):
        return self._overloadpacket

    @property
    def op(self):
        return self._op

    @property
    def tags(self):
        return self._tags

    # TODO: add more methods to expose information about input and output arguments
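As the class comment above notes, OpOverload objects are reached by attribute query on an OpOverloadPacket. A brief illustrative sketch (aten.add.Tensor is a real op; printed output is indicative):

    import torch

    overload = torch.ops.aten.add.Tensor           # an OpOverload
    print(overload.name())                         # "aten::add.Tensor"
    print(overload.overloadpacket)                 # <OpOverloadPacket(op='aten.add')>
    print(overload(torch.ones(2), torch.ones(2)))  # tensor([2., 2.])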
# TorchBindOpOverload are those custom ops which have at least one overload
# whose schema contains a torch.ScriptObject (i.e. custom class) input.
# TorchBindOpOverload will skip the C++ dispatcher and be dispatched purely in Python
# when its inputs contain a FakeScriptObject, in a similar way as higher order ops.
class TorchBindOpOverload(OpOverload):
    def _fallthrough_keys(self) -> List[DispatchKey]:
        # TODO: we should be calling the fallback for these, but a fallthrough is almost close
        # enough to the fallback in most cases that we care about.
        _DEFAULT_FALLTHROUGH_KEYS = [
            DispatchKey.Autograd,
            DispatchKey.AutogradCPU,
            DispatchKey.AutogradCUDA,
            DispatchKey.ADInplaceOrView,
            DispatchKey.BackendSelect,
            DispatchKey.PythonTLSSnapshot,
            DispatchKey.PythonDispatcher,
        ]

        def _may_use_fallthrough_instead_of_fallback(key: DispatchKey):
            if torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), key):
                return torch._C._dispatch_kernel_for_dispatch_key_is_fallthrough(
                    self.name(), key
                )

            return (
                key not in self.py_kernels
                or self.py_kernels[key] is torch.library.fallthrough_kernel
            )

        return [
            key
            for key in _DEFAULT_FALLTHROUGH_KEYS
            if _may_use_fallthrough_instead_of_fallback(key)
        ]

    @contextlib.contextmanager
    def _register_as_effectful_op_temporarily(self):
        from torch._higher_order_ops.effects import (
            _EffectType,
            _register_effectful_op,
            SIDE_EFFECTS,
        )

        try:
            if self not in SIDE_EFFECTS:
                _register_effectful_op(self, _EffectType.ORDERED)
            yield
        finally:
            if self in SIDE_EFFECTS:
                del SIDE_EFFECTS[self]

    # Use positional-only argument to avoid naming collision with aten ops arguments
    # that are named "self". This way, all the aten ops can be called by kwargs.
    def __call__(self, /, *args, **kwargs):
        if _must_dispatch_in_python(args, kwargs):
            # When any inputs are FakeScriptObject, we need to
            # skip the C++ dispatcher and dispatch in Python through _get_dispatch of the python_dispatcher,
            # because the C++ dispatcher will check the schema and cannot recognize FakeScriptObject.
            #
            # Note:
            # 1. We only register the torchbind op temporarily as an effectful op because we only want
            #    the effect token functionalization logic to be applied during tracing. Otherwise, the behavior
            #    of eagerly executing the op might change after tracing.
            # 2. We don't want to register the op as effectful for all torchbind ops in the ctor because this might
            #    cause unexpected behavior for some autograd.profiler ops e.g. profiler._record_function_exit._RecordFunction.
            with self._register_as_effectful_op_temporarily():
                return self._dispatch_in_python(args, kwargs, self._fallthrough_keys())
        return self._op(*args, **kwargs)

    def _dispatch_in_python(self, args, kwargs, fallthrough_keys):
        non_fallthrough_keys = torch._C._dispatch_keyset_full()
        for key in fallthrough_keys:
            non_fallthrough_keys = non_fallthrough_keys.remove(key)

        dispatch_key_set = _compute_keyset(args, kwargs, non_fallthrough_keys)
        dispatch_key = dispatch_key_set.highestPriorityTypeId()

        handler = (
            self._get_dispatch(dispatch_key)
            if dispatch_key not in self._dispatch_cache
            else self._dispatch_cache[dispatch_key]
        )

        if isinstance(handler, DispatchKey):
            # fallthrough keys can be registered at runtime via torch.library.impl,
            # so we need to add them to fallthrough_keys and re-dispatch.
            if torch._C._dispatch_kernel_for_dispatch_key_is_fallthrough(
                self.name(), dispatch_key
            ):
                return self._dispatch_in_python(
                    args, kwargs, fallthrough_keys + [dispatch_key]
                )

            raise RuntimeError(
                f"Torchbind op {self} received a FakeScriptObject input when dispatching {handler},"
                f" but no Python implementation was found."
                f" Please file an issue when you encounter this error."
                f" This error can happen when you export or compile the model."
                f" It can still happen even if a C++ implementation for {dispatch_key}"
                f" has been registered. That's because FakeScriptObject purely lives in Python and cannot work"
                f" with a C++ implementation."
            )

        assert isinstance(handler, Callable)  # type: ignore[arg-type]
        return handler(*args, **kwargs)


def _must_dispatch_in_python(args, kwargs):
    return pytree.tree_any(
        lambda obj: isinstance(
            obj, torch._library.fake_class_registry.FakeScriptObject
        ),
        (args, kwargs),
    )


def _has_script_object_arg(schema: torch.FunctionSchema) -> bool:
    return any(isinstance(arg.type, torch.ClassType) for arg in schema.arguments)
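_must_dispatch_in_python relies on pytree.tree_any to walk arbitrarily nested (args, kwargs) structures, so a FakeScriptObject is detected at any depth. A small standalone sketch of that traversal behavior, using str as a stand-in for the predicate target:

    import torch.utils._pytree as pytree

    args = (1, [2, {"k": "x"}])
    # tree_any applies the predicate to every leaf of the nested structure
    assert pytree.tree_any(lambda obj: isinstance(obj, str), (args, {}))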
# The OpOverloadPacket class contains a pointer to a base unresolved operator that doesn't correspond to a specific operator.
# You can obtain an OpOverload object through attribute query.
class OpOverloadPacket:
    def __init__(self, qualified_op_name, op_name, op, overload_names):
        # These attributes are accessible on the object through the properties
        # defined below but are immutable
        self._qualified_op_name = qualified_op_name
        self.__name__ = op_name
        self._op = op
        self._overload_names = overload_names
        self._dir = []
        self._has_torchbind_op_overload = any(
            _has_script_object_arg(schema) for schema in self._schemas.values()
        )

    # It's a no-op since the OpOverloadPacket object is immutable and must be unique for a given op.
    def __deepcopy__(self, memo=None):
        return self

    def __repr__(self):
        return "<OpOverloadPacket(op='{}.{}')>".format(
            *self._qualified_op_name.split("::")
        )

    def __hash__(self):
        return hash(self._op)

    def __str__(self):
        return "{}.{}".format(*self._qualified_op_name.split("::"))

    @property
    def op(self):
        return self._op

    @property
    def _schemas(self):
        return {
            overload_name: torch._C._get_schema(self._qualified_op_name, overload_name)
            for overload_name in self._overload_names
        }

    def __getattr__(self, key):
        # It is not a valid op_name when __file__ is passed in
        if key == "__file__":
            return "torch.ops"

        # Ensure that queries for dunder attributes that do not exist on the
        # OpOverloadPacket but instead exist on the self._op object do not
        # unnecessarily call `_get_operation_overload` (which is an expensive operation).
        # This is done to prevent any potential slowdown. This list can be extended
        # if there exist other attributes like `__name__` that only exist on self._op
        # and not on the OpOverloadPacket.
        # This is ok since we are guaranteed that an overload name for an aten op can't start with '__'.
        try:
            if key.startswith("__"):
                return getattr(self._op, key)
        except AttributeError:
            # for consistency, because it seems weird to
            # throw an attribute error with a message containing
            # an object name different from the one the attribute
            # query was performed on.
            raise AttributeError(
                f"'{str(self)}' can't have an overload name beginning with '__' and the "
                f"underlying op {str(self._op)} has no attribute {key} either."
            ) from None

        try:
            # This is ok since we are guaranteed that an overload name for an aten op can't be 'default'
            use_key = "" if key == "default" else key
            # TODO: disallow access to overloads registered by JIT
            op_dk_tags = torch._C._get_operation_overload(
                self._qualified_op_name, use_key
            )
            if op_dk_tags is None:
                raise AttributeError(
                    f"The underlying op of '{str(self)}' has no overload name '{key}'"
                )

            op_, op_dk_, tags = op_dk_tags
            schema = torch._C._get_schema(self._qualified_op_name, use_key)
            overload = (
                OpOverload(self, op_, op_dk_, schema, tags)
                if not _has_script_object_arg(schema)
                else TorchBindOpOverload(self, op_, op_dk_, schema, tags)
            )
            # cache the overload object
            setattr(self, key, overload)
            self._dir.append(key)
            return overload
        except RuntimeError:
            raise AttributeError(
                f"The underlying op of '{str(self)}' has no overload name '{key}'"
            ) from None

    def __iter__(self):
        return iter(self._dir)

    # Use positional-only argument to avoid naming collision with aten ops arguments
    # that are named "self". This way, all the aten ops can be called by kwargs.
    def __call__(self, /, *args, **kwargs):
        # overloading __call__ to ensure torch.ops.foo.bar()
        # is still callable from JIT
        # We save the function ptr as the `op` attribute on
        # OpOverloadPacket to access it here.

        # Directly calling the OverloadPacket goes into C++, which will check
        # the schema and cause an error for a torchbind op when inputs contain
        # FakeScriptObject, so we intercept it here and call TorchBindOpOverload instead.
        if self._has_torchbind_op_overload and _must_dispatch_in_python(args, kwargs):
            return _call_overload_packet_from_python(self, args, kwargs)
        return self._op(*args, **(kwargs or {}))

    # TODO: use this to make a __dir__
    def overloads(self):
        return [n if n else "default" for n in self._overload_names]


# Note - this mirrors the logic of the cpp_function defined in jit/python/init.cpp
# _jit_get_operations, which calls _get_operation_for_overload_or_packet.
def _call_overload_packet_from_python(op: OpOverloadPacket, args, kwargs):
    # Re-use the torch function handling logic in cpp
    torch_function_called, ret = torch._C._maybe_call_torch_function_for_op_packet(
        op, *args, **kwargs
    )

    if torch_function_called:
        return ret

    # The following mirrors getOpWithStack.
    # In cpp, we do schema matching for the arguments, and call ToIValue
    # to check whether the arguments are valid. We need to do similar things here
    # and check in the schema whether the FakeScriptObject is the corresponding fake class
    # of the actual class used in the schema.
    exceptions = {}
    found_op = None
    for overload_name in op.overloads():
        op_overload = getattr(op, overload_name)
        try:
            _ = torch._C._check_schema_allow_fake_script_object(
                op_overload._schema, *args, **kwargs
            )
            found_op = op_overload
            break
        except RuntimeError as e:
            exceptions[overload_name] = e

    if found_op:
        return found_op(*args, **kwargs)

    err_msg = (
        f"Failed to match any TorchBindOverload of {op} with the following exceptions:\n"
    )
    for key, msg in exceptions.items():
        err_msg += f"Overload name {key}:\n {msg}\n"
    raise RuntimeError(err_msg)


# Resolution of torch.fn is different from torch.ops.aten.fn.
# torch.fn uses the Python argparser, matches with the
# appropriate schema, and calls into the unboxed version of the method.
# torch.ops.aten.fn resolution is done via the mechanism defined in JIT.
# JIT creates a stack of all the overloads and then tries to match the
# correct one at runtime and always calls into the boxed version of the method.
# Autograd codegen creates VariableType, TracerType,
# inplace or view type and python bindings.
# Aten codegen generates tensor methods for the tensor class.

# _OpNamespace is a subclass of ModuleType because TorchScript
# allows attribute lookups on modules only. Since we want torch.ops.foo.bar()
# to work from script, we need to ensure ops and foo are modules


class _OpNamespace(types.ModuleType):
    """
    An op namespace to dynamically bind Operators into Python.

    Say a user has created a custom Operator called "my_namespace::my_op". To
    call this op, the user will write torch.ops.my_namespace.my_op(...).
    At startup, this operation will not yet be bound into Python. Instead, the
    following sequence of magic tricks will occur:
    1. `torch.ops.my_namespace` will invoke the `__getattr__` magic method
       on the `torch.ops` object, which will create a new `_OpNamespace`
       object called `my_namespace` and set it as an attribute on the `ops`
       object.
    2. `torch.ops.my_namespace.my_op` will then invoke `__getattr__` on
       the `my_namespace` object, which will retrieve the operation via
       `torch.get_operation`, a function bound from C++, and then in a similar
       fashion bind this new object onto the `my_namespace` object.
    3. `torch.ops.my_namespace.my_op(...)` then calls this new operation
       and subsequent accesses will incur no further lookup (the namespace and
       operation will already exist).
    """

    def __init__(self, name):
        super().__init__("torch.ops." + name)
        self.name = name
        self._dir = []

    def __iter__(self):
        return iter(self._dir)

    def __getattr__(self, op_name):
        # It is not a valid op_name when __file__ is passed in
        if op_name == "__file__":
            return "torch.ops"
        elif op_name in ["__origin__", "__self__"]:
            raise AttributeError(
                f"Invalid attribute '{op_name}' for '_OpNamespace' '{self.name}'"
            )

        # Get the op `my_namespace::my_op` if available. This will also check
        # for overloads and raise an exception if there are more than one.
        namespace_name = self.name
        qualified_op_name = f"{namespace_name}::{op_name}"
        module_name = self.__module__ + "." + namespace_name

        try:
            op, overload_names = _get_packet(qualified_op_name, module_name)
            if op is None:
                raise AttributeError(
                    f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
                )
        except RuntimeError as e:
            # Turn this into AttributeError so getattr(obj, key, default)
            # works (this is called by TorchScript with __origin__)
            raise AttributeError(
                f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
            ) from e

        op.__module__ = module_name
        opoverloadpacket = OpOverloadPacket(
            qualified_op_name, op_name, op, overload_names
        )
        opoverloadpacket.__module__ = self.__module__ + "." + namespace_name
        # cache the opoverloadpacket to ensure that each op corresponds to
        # a unique OpOverloadPacket object
        setattr(self, op_name, opoverloadpacket)
        self._dir.append(op_name)
        return opoverloadpacket


def _get_packet(qualname, op_module):
    op, overload_names = torch._C._jit_get_operation(qualname)
    if op is not None:
        # let the script frontend know that op is identical to the builtin op
        # with qualified_op_name
        torch.jit._builtins._register_builtin(op, qualname)
        op.__module__ = op_module
    return op, overload_names


def _refresh_packet(packet):
    op, overload_names = _get_packet(packet._qualified_op_name, packet._op.__module__)
    assert op is not None
    packet._op = op
    packet._overload_names = overload_names


class _PyOpNamespace(_OpNamespace):
    def __init__(self, name, ops):
        super().__init__(name)
        self._ops = ops

    def __getattr__(self, name):
        # Following _OpNamespace.__getattr__, we cache the op on the _PyOpNamespace object.
        op = self._ops.get(name, None)
        if op is None:
            raise AttributeError(
                f"'_PyOpNamespace' '{self.name}' object has no attribute '{name}'"
            )
        setattr(self, name, op)
        return op


class _Ops(types.ModuleType):
    __file__ = "_ops.py"

    def __init__(self):
        super().__init__("torch.ops")
        self.loaded_libraries = set()
        self._higher_order_op_namespace = _PyOpNamespace(
            "torch.ops.higher_order", _higher_order_ops
        )
        self._dir = []

    def __getattr__(self, name):
        # Check if the name is a HigherOrderOperator
        if name == "higher_order":
            return self._higher_order_op_namespace

        # Here we are creating `torch.ops.my_namespace`
        namespace = _OpNamespace(name)
        setattr(self, name, namespace)
        self._dir.append(name)
        return namespace

    def __iter__(self):
        return iter(self._dir)

    def import_module(self, module):
        """
        Imports a Python module that has torch.library registrations.

        Generally, to extend PyTorch with custom operators, a user will
        create a Python module whose import triggers registration of
        the custom operators via a torch.ops.load_library call or a call
        to one or more torch.library.* APIs.

        It is unexpected for Python modules to have side effects, so some
        linters and formatters will complain. Use this API to import Python
        modules that contain these torch.library side effects.

        Args:
            module (str): The name of the Python module to import

        """
        importlib.import_module(module)

    def load_library(self, path):
        """
        Loads a shared library from the given path into the current process.

        The library being loaded may run global initialization code to register
        custom operators with the PyTorch JIT runtime. This allows dynamically
        loading custom operators. For this, you should compile your operator
        and the static registration code into a shared library object, and then
        call ``torch.ops.load_library('path/to/libcustom.so')`` to load the
        shared object.

        After the library is loaded, it is added to the
        ``torch.ops.loaded_libraries`` attribute, a set that may be inspected
        for the paths of all libraries loaded using this function.

        Args:
            path (str): A path to a shared library to load.
        """
        if torch._running_with_deploy():
            return

        path = _utils_internal.resolve_library_path(path)
        with dl_open_guard():
            # Import the shared library into the process, thus running its
            # static (global) initialization code in order to register custom
            # operators with the JIT.
            ctypes.CDLL(path)
        self.loaded_libraries.add(path)


# The ops "namespace"
ops: _Ops = _Ops()
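End-to-end, the lazy binding described in _OpNamespace means a custom op becomes reachable under torch.ops on first attribute access. A hedged sketch (the "my_namespace"/"my_op" names are hypothetical, mirroring the docstring above):

    import torch

    lib = torch.library.Library("my_namespace", "DEF")
    lib.define("my_op(Tensor x) -> Tensor")
    lib.impl("my_op", lambda x: x + 1, "CompositeExplicitAutograd")

    # First access triggers _Ops.__getattr__ -> _OpNamespace.__getattr__,
    # which builds and caches an OpOverloadPacket.
    print(torch.ops.my_namespace.my_op(torch.zeros(3)))  # tensor([1., 1., 1.])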
lib/python3.10/site-packages/torch/_python_dispatcher.py
ADDED
@@ -0,0 +1,182 @@
# mypy: allow-untyped-defs
import re

import torch._C as C


"""
PythonDispatcher class is a thin python-binding to the C++ dispatcher and it
is designed to show how dispatcher precompute works. In particular,
it shows, for a certain op `foo`, what the computed dispatch table looks
like after users register their kernels to certain dispatch keys.

In the real C++ dispatcher we support many dispatch keys for different
functionalities. For simplicity PythonDispatcher only supports dispatch
keys for a single example of each use case. These use cases are listed below:

- CPU/AutogradCPU: represents in-tree backends for which we usually have dedicated inference &
  autograd kernels in the pytorch core library.
    E.g. CPU, CUDA
- FPGA/AutogradOther: represents in-tree backends for which we usually have backend-specific
  inference kernels, but they share the same autograd kernel specified in AutogradOther.
    E.g. FPGA, SparseCsrCPU
- XLA/AutogradXLA: represents out-of-tree backends for which we have neither inference nor autograd
  kernels defined in the pytorch core library. The backend owner is responsible for registering both
  inference & autograd kernels in their extensions (e.g. torch-xla) for the operators they support.
    E.g. XLA, XPU, MPS
- CompositeExplicitAutograd: alias key mapped to inference kernels of all backends like CPU, CUDA, XLA etc.
  Kernels registered to this key MUST work for inference for all backends.
- Autograd: alias key mapped to autograd of all backends like AutogradCPU, AutogradXLA, AutogradOther.
  Kernels registered to this key MUST work for autograd for all backends.
- CompositeImplicitAutograd: alias key CompositeImplicitAutograd = CompositeExplicitAutograd + Autograd
  Kernels registered to this key MUST work for both inference + autograd for all backends.

Note we only allow registrations to alias keys inside the pytorch core library. E.g.
you shouldn't register a CompositeImplicitAutograd or CompositeExplicitAutograd
kernel from a torch-xla extension; instead you should upstream the kernel into
the pytorch/pytorch repo so that it's available for all backends and continuously
tested even without the extension.

Usage:
  dispatcher = PythonDispatcher()
  dispatcher.register(["CPU", "XLA", "CompositeImplicitAutograd"])
  print(dispatcher.dispatchTable()) # This tells you exactly which kernel is used for a certain backend.
  # For more debugging information
  # print(dispatcher.keys())
  # print(dispatcher.registrations())
  # print(dispatcher.rawRegistrations())
  # print(dispatcher.rawDispatchTable())
PythonDispatcher calls the C++ dispatcher under the hood to precompute the dispatch table.
This file only provides the simplified API for developers; relevant test code is located in
test/test_dispatch.py.
"""


class PythonDispatcher:
    namespace = "__test__"
    name = "foo"
    # fmt: off
    runtime_keys = [
        "CPU", "AutogradCPU",
        "FPGA", "AutogradOther",
        "XLA", "AutogradXLA",
        "Lazy", "AutogradLazy",
    ]
    # fmt: on
    alias_keys = [
        "CompositeExplicitAutograd",
        "Autograd",
        "CompositeImplicitAutograd",
    ]
    supported_keys = runtime_keys + alias_keys

    def __init__(self) -> None:
        C._dispatch_check_invariants(self.name)  # type: ignore[attr-defined]
        self.ref = C._dispatch_library("FRAGMENT", self.namespace, "")
        self.ref.def_("foo(Tensor x) -> Tensor")

    """
    Returns a list of dispatch keys supported by PythonDispatcher.
    You can register kernels to these keys.
    """

    def keys(self):
        return self.supported_keys

    """
    Register kernels to the target dispatchKeys.
    dispatchKeys(list[str]): a list of dispatch keys that you want to register
      your own kernel to. Note that you don't need to write the kernel yourself in
      this PythonDispatcher. E.g. for the CPU key, a kernel (e.g. fn_CPU for CPU) is
      automatically generated and registered.
    """

    def register(self, dispatchKeys):
        # Overriding is not supported and triggers a warning in the C++ dispatcher.
        if len(set(dispatchKeys)) != len(dispatchKeys):
            raise RuntimeError(
                f"Overriding is not allowed, but duplicates were found in {dispatchKeys}."
            )
        # We currently forbid this in codegen instead of the C++ dispatcher.
        if (
            "CompositeImplicitAutograd" in dispatchKeys
            and "CompositeExplicitAutograd" in dispatchKeys
        ):
            raise RuntimeError(
                "Registration to both CompositeImplicitAutograd and CompositeExplicitAutograd is not allowed."
            )
        for key in dispatchKeys:
            if key not in self.supported_keys:
                raise RuntimeError(
                    f"{key} is not supported, please select a dispatch key in {self.supported_keys}."
                )
            self.ref.impl_t_t("foo", dispatch=key, debug="fn_" + key)

    """
    Helper function to format (key, kernel).
    """

    def _format_line(self, key, kernel):
        return f"{key:<15} {kernel}\n"

    """
    Helper function to print a table header.
    """

    def _format_header(self, header):
        s = f"""
{header}
"""
        s += self._format_line("key", "kernel")
        s += "---------------------------\n"
        return s

    """
    Returns the raw output of all registration info for debugging only.
    Use registrations() for a simplified version.
    """

    def rawRegistrations(self):
        return C._dispatch_dump(f"{self.namespace}::{self.name}")  # type: ignore[attr-defined]

    """
    Returns the raw output of the computed dispatch table for debugging only.
    Use dispatchTable() for a simplified version.
    """

    def rawDispatchTable(self):
        return C._dispatch_dump_table(f"{self.namespace}::{self.name}")  # type: ignore[attr-defined]

    """
    Returns a table (str) including all the registrations from users.
    Note this includes registrations to both runtime keys and alias keys.
    """

    def registrations(self):
        output = self._format_header("Registered Kernels")
        state = self.rawRegistrations()
        state_entries = state.split("\n")
        for line in state_entries:
            first = line.split(":")[0]
            if any(first.startswith(k) for k in self.supported_keys):
                kernel = line.split("::")[0].split(" ")[1]
                output += self._format_line(first, kernel)
        return output

    """
    Returns the computed dispatch table (str). Note this only includes
    runtime keys; registrations to alias keys have been decoded to their
    mapped runtime keys.
    """

    def dispatchTable(self):
        output = self._format_header("Computed Dispatch Table")
        table = self.rawDispatchTable()
        table_entries = table.split("\n")
        regex = re.compile(r"registered at .*FallbackKernel\.cpp.*(\[)")
        for line in table_entries:
            k = line.split(":")[0]
            if k in self.runtime_keys:
                entry = regex.sub("[", line)
                output += self._format_line(k, entry.split(": ")[1])
        return output
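Following the Usage block in the docstring above, a typical debugging session looks like this (the fn_* kernel names are the auto-generated ones described in register()):

    dispatcher = PythonDispatcher()
    dispatcher.register(["CPU", "XLA", "CompositeImplicitAutograd"])
    # CPU dispatches to fn_CPU; keys without a direct registration, such as
    # AutogradCPU, pick up the decoded CompositeImplicitAutograd kernel.
    print(dispatcher.dispatchTable())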
lib/python3.10/site-packages/torch/_size_docs.py
ADDED
@@ -0,0 +1,39 @@
# mypy: allow-untyped-defs
"""Adds docstrings to torch.Size functions"""

import torch._C
from torch._C import _add_docstr as add_docstr


def add_docstr_all(method, docstr):
    add_docstr(getattr(torch._C.Size, method), docstr)


add_docstr_all(
    "numel",
    """
numel() -> int

Returns the number of elements a :class:`torch.Tensor` with the given size would contain.

More formally, for a tensor ``x = torch.ones(10, 10)`` with size ``s = torch.Size([10, 10])``,
``x.numel() == x.size().numel() == s.numel() == 100`` holds true.

Example::
    >>> x=torch.ones(10, 10)
    >>> s=x.size()
    >>> s
    torch.Size([10, 10])
    >>> s.numel()
    100
    >>> x.numel() == s.numel()
    True


.. warning::

    This function does not return the number of dimensions described by :class:`torch.Size`, but instead the number
    of elements a :class:`torch.Tensor` with that size would contain.

""",
)
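Once this module is imported, the docstring is attached to the C-level method, so it is visible through the normal introspection tools. A quick sanity-check sketch:

    import torch

    assert "number of elements" in torch.Size.numel.__doc__
    print(torch.Size([10, 10]).numel())  # 100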
lib/python3.10/site-packages/torch/_streambase.py
ADDED
@@ -0,0 +1,20 @@
from typing_extensions import deprecated

import torch


# Preserved only for BC reasons
@deprecated(
    "`torch._streambase._StreamBase` is deprecated. Please use `torch.Stream` instead.",
    category=FutureWarning,
)
class _StreamBase(torch.Stream):
    pass


@deprecated(
    "`torch._streambase._EventBase` is deprecated. Please use `torch.Event` instead.",
    category=FutureWarning,
)
class _EventBase(torch.Event):
    pass
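Instantiating either BC shim emits the FutureWarning declared by the decorator. A sketch (assuming torch.Event is constructible with no arguments on the current build):

    import warnings
    from torch._streambase import _EventBase

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        _EventBase()
        # the deprecation message above surfaces as a FutureWarning
        assert any(issubclass(w.category, FutureWarning) for w in caught)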
lib/python3.10/site-packages/torch/_tensor.py
ADDED
@@ -0,0 +1,1775 @@
1 |
+
# mypy: allow-untyped-defs
|
2 |
+
import copyreg
|
3 |
+
import enum
|
4 |
+
import functools
|
5 |
+
import warnings
|
6 |
+
from collections import OrderedDict
|
7 |
+
from copy import deepcopy
|
8 |
+
from numbers import Number
|
9 |
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
10 |
+
|
11 |
+
import torch
|
12 |
+
import torch._C as _C
|
13 |
+
from torch._namedtensor_internals import (
|
14 |
+
check_serializing_named_tensor,
|
15 |
+
is_ellipsis,
|
16 |
+
resolve_ellipsis,
|
17 |
+
single_ellipsis_index,
|
18 |
+
unzip_namedshape,
|
19 |
+
update_names,
|
20 |
+
)
|
21 |
+
from torch.overrides import (
|
22 |
+
get_default_nowrap_functions,
|
23 |
+
handle_torch_function,
|
24 |
+
has_torch_function,
|
25 |
+
has_torch_function_unary,
|
26 |
+
has_torch_function_variadic,
|
27 |
+
)
|
28 |
+
|
29 |
+
|
30 |
+
def _handle_torch_function_and_wrap_type_error_to_not_implemented(f):
|
31 |
+
assigned = functools.WRAPPER_ASSIGNMENTS
|
32 |
+
|
33 |
+
@functools.wraps(f, assigned=assigned)
|
34 |
+
def wrapped(*args, **kwargs):
|
35 |
+
try:
|
36 |
+
# See https://github.com/pytorch/pytorch/issues/75462
|
37 |
+
if has_torch_function(args):
|
38 |
+
return handle_torch_function(wrapped, args, *args, **kwargs)
|
39 |
+
return f(*args, **kwargs)
|
40 |
+
except TypeError:
|
41 |
+
return NotImplemented
|
42 |
+
|
43 |
+
return wrapped
|


# Should not be used, this is kept only for BC of loading old serialized Tensor subclasses
def _rebuild_from_type(func, type, args, dict):
    if type is Tensor:
        return func(*args)

    ret = func(*args).as_subclass(type)
    ret.__dict__ = dict
    return ret


def _rebuild_from_type_v2(func, new_type, args, state):
    ret = func(*args)
    if type(ret) is not new_type:
        ret = ret.as_subclass(new_type)
    # Tensor does define __setstate__ even though it doesn't define
    # __getstate__. So only use __setstate__ if it is NOT the one defined
    # on Tensor
    if (
        getattr(ret.__class__, "__setstate__", Tensor.__setstate__)
        is not Tensor.__setstate__
    ):
        ret.__setstate__(state)
    else:
        ret = torch._utils._set_obj_state(ret, state)
    return ret


# NB: If you subclass Tensor, and want to share the subclassed class
# across processes, you must also update torch/multiprocessing/reductions.py
# to define a ForkingPickler serialization mode for the class.
#
# NB: If you add a new method to Tensor, you must update
# torch/_C/__init__.pyi.in to add a type annotation for your method;
# otherwise, it will not show up in autocomplete.
class Tensor(torch._C.TensorBase):
    _is_param: bool

    def _clear_non_serializable_cached_data(self):
        r"""Clears any data cached in the tensor's ``__dict__`` that would prevent the tensor
        from being serialized.

        For example, subclasses with custom dispatched sizes / strides cache this info in
        non-serializable PyCapsules within the ``__dict__``, and this must be cleared out for
        serialization to function.

        Any subclass that overrides this MUST call ``super()._clear_non_serializable_cached_data()``.
        Additional data cleared within the override must be able to be re-cached transparently
        to avoid breaking subclass functionality.
        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor._clear_non_serializable_cached_data, (self,), self
            )
        # NB: Wrapper subclasses that implement custom-dispatched sizes / strides cache
        # this info via non-serializable PyCapsules.
        CACHED_SIZES_STRIDES_KEYS = [
            "_sym_sizes_capsule",
            "_sym_sizes_capsule_len",
            "_sym_strides_capsule",
            "_sym_strides_capsule_len",
        ]
        for key in CACHED_SIZES_STRIDES_KEYS:
            self.__dict__.pop(key, None)

    def __deepcopy__(self, memo):
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__deepcopy__, (self,), self, memo)
        if not self.is_leaf:
            raise RuntimeError(
                "Only Tensors created explicitly by the user "
                "(graph leaves) support the deepcopy protocol at the moment. "
                "If you were attempting to deepcopy a module, this may be because "
                "of a torch.nn.utils.weight_norm usage, "
                "see https://github.com/pytorch/pytorch/pull/103001"
            )
        if id(self) in memo:
            return memo[id(self)]
        with torch.no_grad():
            # TODO: skipping storage copy is wrong for meta, as meta
            # does accurate alias tracking; however, the code below
            # doesn't work because of
            # https://github.com/pytorch/pytorch/issues/47442
            # Update the test in test_serialization if you remove 'meta' from here
            if (
                self.is_sparse
                or self.device.type
                in ["lazy", "xla", "mtia", "mps", "maia", "meta", "ipu"]
                or (
                    not torch._C._has_storage(self)
                    and self.device.type == torch._C._get_privateuse1_backend_name()
                )
                or (type(self) is not Tensor and self.data_ptr() == 0)
            ):
                new_tensor = self.clone()
                if type(new_tensor) is not type(self):
                    raise RuntimeError(
                        "The default implementation of __deepcopy__() for wrapper subclasses "
                        "only works for subclass types that implement clone() and for which "
                        "cloning returns another instance of the same subclass. You should either "
                        "properly implement clone() for your subclass or override __deepcopy__() "
                        "if it is intended behavior for clone() to return an instance of a "
                        "different type."
                    )
            else:
                new_storage = self._typed_storage()._deepcopy(memo)
                if self.is_quantized:
                    # quantizer_params can be different type based on torch attribute
                    quantizer_params: Union[
                        Tuple[torch.qscheme, float, int],
                        Tuple[torch.qscheme, Tensor, Tensor, int],
                    ]
                    if self.qscheme() == torch.per_tensor_affine:
                        quantizer_params = (
                            self.qscheme(),
                            self.q_scale(),
                            self.q_zero_point(),
                        )
                    elif self.qscheme() in (
                        torch.per_channel_affine,
                        torch.per_channel_affine_float_qparams,
                    ):
                        quantizer_params = (
                            self.qscheme(),
                            self.q_per_channel_scales(),
                            self.q_per_channel_zero_points(),
                            self.q_per_channel_axis(),
                        )
                    else:
                        raise RuntimeError(
                            f"Unsupported qscheme {self.qscheme()} in deepcopy"
                        )
                    # TODO: Once we decide to break serialization FC, no longer
                    # need to wrap with TypedStorage
                    new_tensor = torch._utils._rebuild_qtensor(
                        torch.storage.TypedStorage(
                            wrap_storage=new_storage._untyped_storage,
                            dtype=self.dtype,
                            _internal=True,
                        ),
                        self.storage_offset(),
                        self.size(),
                        self.stride(),
                        quantizer_params,
                        self.requires_grad,
                        self._backward_hooks,
                    )
                    if type(new_tensor) is not type(self):
                        raise RuntimeError(
                            "The default implementation of __deepcopy__() for quantized tensors "
                            "expects the tensor returned by torch._utils._rebuild_qtensor() to "
                            "match the type of the instance being copied. If you encounter this, "
                            "please open an issue on PyTorch's GitHub."
                        )
                else:
                    new_tensor = self.new_empty([])
                    if type(new_tensor) is not type(self):
                        raise RuntimeError(
                            "The default implementation of __deepcopy__() for non-wrapper subclasses "
                            "only works for subclass types that implement new_empty() and for which "
                            "that function returns another instance of the same subclass. You should "
                            "either properly implement new_empty() for your subclass or override "
                            "__deepcopy__() if it is intended behavior for new_empty() to return "
                            "an instance of a different type."
                        )
                    new_tensor.set_(
                        new_storage, self.storage_offset(), self.size(), self.stride()
                    )
                    if self.is_conj():
                        new_tensor = new_tensor.conj_physical()
                    if self.is_neg():
                        new_tensor = new_tensor.neg()
            if self.requires_grad:
                new_tensor.requires_grad_()
            if self.grad is not None:
                new_tensor.grad = self.grad.__deepcopy__(memo)

            if type(self) is not Tensor:
                if type(new_tensor) is not type(self):
                    raise RuntimeError(
                        "Type of deepcopy result does not match the type of the source tensor. "
                        "If you encounter this, please open an issue on PyTorch's GitHub."
                    )

                # Plain Tensors don't have slots
                slots_to_save = copyreg._slotnames(self.__class__)  # type: ignore[attr-defined]
                for slot in slots_to_save:
                    if hasattr(self, slot):
                        setattr(new_tensor, slot, deepcopy(getattr(self, slot), memo))

            # don't try to deepcopy non-serializable cached data
            self._clear_non_serializable_cached_data()
            new_tensor.__dict__ = deepcopy(self.__dict__, memo)

            memo[id(self)] = new_tensor
            return new_tensor
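
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file): deepcopy is supported for leaf tensors and preserves autograd
    # metadata such as requires_grad.
    #   >>> import copy
    #   >>> t = torch.ones(2, requires_grad=True)
    #   >>> t2 = copy.deepcopy(t)
    #   >>> t2 is t, t2.requires_grad
    #   (False, True)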

    def __reduce_ex__(self, proto):
        materialize_fake_tensors = (
            torch.serialization._serialization_tls.materialize_fake_tensors
        )
        state = torch._utils._get_obj_state(self)
        # Ignore all state when using FakeTensor with skip_data(materialize_fake_tensors) because FakeTensor has
        # some state that cannot be pickled
        if (
            # TODO: remove hasattr, it's a hack to support versions of torch that
            # don't have _subclasses
            hasattr(torch, "_subclasses")
            and type(self) is torch._subclasses.fake_tensor.FakeTensor
            and materialize_fake_tensors
        ) or (type(self) is Tensor and not state):
            # Fast path for regular tensor without Python state.
            return self._reduce_ex_internal(proto)
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__reduce_ex__, (self,), self, proto)
        func, args = self._reduce_ex_internal(proto)
        # sizes / strides cache needs to be cleared here because it'll just be re-cached
        # if cleared earlier. Note that state references the -actual- tensor dict.
        self._clear_non_serializable_cached_data()
        return (_rebuild_from_type_v2, (func, type(self), args, state))
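
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file): __reduce_ex__ is what makes tensors picklable, e.g. via
    # torch.save / torch.load.
    #   >>> import io
    #   >>> buf = io.BytesIO()
    #   >>> torch.save(torch.arange(3), buf)
    #   >>> _ = buf.seek(0)
    #   >>> torch.load(buf)
    #   tensor([0, 1, 2])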

    def storage(self):
        r"""
        storage() -> torch.TypedStorage

        Returns the underlying :class:`TypedStorage`.

        .. warning::

            :class:`TypedStorage` is deprecated. It will be removed in the future, and
            :class:`UntypedStorage` will be the only storage class. To access the
            :class:`UntypedStorage` directly, use :attr:`Tensor.untyped_storage()`.
        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.storage, (self,), self)

        torch.storage._warn_typed_storage_removal(stacklevel=2)
        return self._typed_storage()
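
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file): prefer untyped_storage() over the deprecated storage() accessor.
    #   >>> t = torch.arange(4, dtype=torch.uint8)
    #   >>> t.untyped_storage().nbytes()
    #   4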

    # For internal use only, to avoid raising deprecation warning
    def _typed_storage(self):
        untyped_storage = self.untyped_storage()
        return torch.TypedStorage(
            wrap_storage=untyped_storage, dtype=self.dtype, _internal=True
        )

    def _reduce_ex_internal(self, proto):
        check_serializing_named_tensor(self)

        from torch.utils.hooks import warn_if_has_hooks

        # See Note [Don't serialize hooks]
        warn_if_has_hooks(self)
        backward_hooks: Dict[Any, Any] = OrderedDict()

        skip_data = torch.serialization._serialization_tls.skip_data
        materialize_fake_tensors = (
            torch.serialization._serialization_tls.materialize_fake_tensors
        )

        if self.device.type in ["xla", "maia"] or (
            not torch._C._has_storage(self)
            and self.device.type == torch._C._get_privateuse1_backend_name()
        ):
            if skip_data:
                raise RuntimeError(
                    "Cannot serialize tensors on backends with no storage under skip_data context manager"
                )
            cpu_tensor = self.cpu()
            return (
                torch._utils._rebuild_device_tensor_from_cpu_tensor,
                (cpu_tensor, self.dtype, str(self.device), self.requires_grad),
            )
        # Legacy comment that does not hold anymore.
        # Note: Numpy array is chosen to be the rebuild component for XLA, MTIA, MAIA Tensors.
        # We considered a few options:
        # 1. CPU tensor can't be used here.
        #    Otherwise in torch.load CPU storage is reconstructed with randomly
        #    initialized data, moved onto backend device, and then storage is updated
        #    to the serialized content. This works perfectly for CPU/CUDA but not these backends;
        #    their tensors are disconnected from storage, so they don't get the update.
        # 2. Python list is not a good fit due to performance reasons.
        #    `tolist()` converts every single element in the tensor into python objects
        #    and serializes them one by one.
        if self.device.type in ["mtia"]:
            # Convert BFloat16 tensors to Float32 before converting to numpy, since numpy
            # does not support BFloat16. The rebuild function receives the original
            # self.dtype, so it reconstructs the BFloat16 tensor from the numpy array.
            if skip_data:
                raise RuntimeError(
                    "Cannot serialize tensors on backends with no storage under skip_data context manager"
                )
            numpy_tensor = (
                self.cpu().numpy()
                if self.dtype != torch.bfloat16
                else self.cpu().to(torch.float32).numpy()
            )
            return (
                torch._utils._rebuild_device_tensor_from_numpy,
                (numpy_tensor, self.dtype, str(self.device), self.requires_grad),
            )
        if self.device.type == "meta":
            # NB: This implementation BREAKS storage sharing. Current
            # hypothesis is that no one cares for meta tensors.
            if skip_data:
                warnings.warn(
                    "Serializing tensors on the meta device under skip_data context manager is a no-op"
                )
            arg_meta = (
                self.dtype,
                tuple(self.size()),
                self.stride(),
                self.requires_grad,
            )
            return (torch._utils._rebuild_meta_tensor_no_storage, arg_meta)
        if self.is_quantized:
            if skip_data:
                raise RuntimeError(
                    "Cannot serialize qtensor under skip_data context manager, file an issue if you need this feature"
                )
            # quantizer_params can be different type based on torch attribute
            quantizer_params: Union[
                Tuple[torch.qscheme, float, int], Tuple[Any, Tensor, Tensor, int]
            ]
            if self.qscheme() == torch.per_tensor_affine:
                quantizer_params = (
                    torch.per_tensor_affine,
                    self.q_scale(),
                    self.q_zero_point(),
                )
            elif self.qscheme() in (
                torch.per_channel_affine,
                torch.per_channel_affine_float_qparams,
            ):
                # convert scales and zero points to tuple to avoid recursive calls
                # when/if we get multi-axis quantized tensors in the future, the shape
                # is recoverable from the main tensor shape
                quantizer_params = (
                    torch.per_channel_affine,
                    self.q_per_channel_scales(),
                    self.q_per_channel_zero_points(),
                    self.q_per_channel_axis(),
                )
            else:
                raise RuntimeError(
                    f"Serialization is not supported for tensors of type {self.qscheme()}"
                )
            # TODO: Once we decide to break serialization FC, no longer
            # need to wrap with TypedStorage
            args_qtensor = (
                torch.storage.TypedStorage(
                    wrap_storage=self._typed_storage()._untyped_storage,
                    dtype=self.dtype,
                    _internal=True,
                ),
                self.storage_offset(),
                tuple(self.size()),
                self.stride(),
                quantizer_params,
                self.requires_grad,
                backward_hooks,
            )
            return (torch._utils._rebuild_qtensor, args_qtensor)
        elif self.is_sparse:
            if self.layout == torch.sparse_coo:
                args_sparse = (
                    self.layout,
                    (self._indices(), self._values(), self.size(), self.is_coalesced()),
                )
            else:
                raise NotImplementedError(
                    f"sparse tensor __reduce_ex__ for layout `{self.layout}`"
                )
            return (torch._utils._rebuild_sparse_tensor, args_sparse)
        elif self.layout in {
            torch.sparse_csr,
            torch.sparse_csc,
            torch.sparse_bsr,
            torch.sparse_bsc,
        }:
            if self.layout in {torch.sparse_csr, torch.sparse_bsr}:
                compressed_indices, plain_indices = (
                    self.crow_indices(),
                    self.col_indices(),
                )
            else:
                compressed_indices, plain_indices = (
                    self.ccol_indices(),
                    self.row_indices(),
                )
            args_sparse_compressed = (
                self.layout,
                (
                    compressed_indices,
                    plain_indices,
                    self.values(),
                    self.size(),
                ),
            )
            return (torch._utils._rebuild_sparse_tensor, args_sparse_compressed)
        elif self.is_nested:
            if skip_data:
                raise RuntimeError(
                    "Cannot serialize nested tensor under skip_data context manager, file an issue if you need this feature"
                )
            args_nested = (
                # NB: values() currently returns the storage as a buffer in an unsafe way.
                # Ideally, we'd use a private API for this instead. TODO: Switch to this if
                # we ever get around to adding it.
                self.values(),
                self._nested_tensor_size(),
                self._nested_tensor_strides(),
                self._nested_tensor_storage_offsets(),
            )
            return (torch._utils._rebuild_nested_tensor, args_nested)
        elif (
            type(self) is not torch.Tensor
            and type(self).__torch_dispatch__ is not torch.Tensor.__torch_dispatch__
            and (
                isinstance(self, torch._subclasses.functional_tensor.FunctionalTensor)
                or (
                    not isinstance(self, torch._subclasses.fake_tensor.FakeTensor)
                    and self.data_ptr() == 0
                )
            )
        ):
            arg_wrapper_subclass = (
                type(self),
                self.dtype,
                tuple(self.size()),
                self.stride(),
                self.storage_offset(),
                self.layout,
                self.device,
                self.requires_grad,
            )
            return (torch._utils._rebuild_wrapper_subclass, arg_wrapper_subclass)
        elif (
            type(self) is not torch.Tensor
            and type(self).__torch_dispatch__ is not torch.Tensor.__torch_dispatch__
            and (
                isinstance(self, torch._subclasses.fake_tensor.FakeTensor)
                and not (skip_data and materialize_fake_tensors)
            )
        ):
            arg_wrapper_subclass = (
                type(self),
                self.dtype,
                tuple(self.size()),
                self.stride(),
                self.storage_offset(),
                self.layout,
                self.device,
                self.requires_grad,
            )
            return (torch._utils._rebuild_wrapper_subclass, arg_wrapper_subclass)
        else:
            v3_dtypes = torch.storage._new_dtypes()
            if self.dtype in v3_dtypes:
                rebuild_func = torch._utils._rebuild_tensor_v3
                storage = self.untyped_storage()
            else:
                # TODO: Once we decide to break serialization FC, no longer
                # need to wrap with TypedStorage
                rebuild_func = torch._utils._rebuild_tensor_v2  # type: ignore[assignment]
                storage = torch.storage.TypedStorage(
                    wrap_storage=self._typed_storage()._untyped_storage,
                    dtype=self.dtype,
                    _internal=True,
                )  # type: ignore[assignment]

            # TODO: remove hasattr, it's a hack to support versions of torch that
            # don't have _subclasses
            if (
                hasattr(torch, "_subclasses")
                and isinstance(self, torch._subclasses.fake_tensor.FakeTensor)
                and skip_data
            ):
                storage._fake_device = self.device

            args = (
                storage,
                self.storage_offset(),
                tuple(self.size()),
                self.stride(),
                self.requires_grad,
                backward_hooks,
            )  # previously was self._backward_hooks

            if isinstance(storage, torch.storage.UntypedStorage):
                args = args + (self.dtype,)  # type: ignore[assignment]

            metadata = torch._utils.get_tensor_metadata(self)
            if metadata:
                args = args + (metadata,)  # type: ignore[assignment]

            return (rebuild_func, args)

    def __setstate__(self, state):
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__setstate__, (self,), self, state)
        # Warning: this method is NOT called when you torch.load() a tensor;
        # that is managed by _rebuild_tensor_v2
        if not self.is_leaf:
            raise RuntimeError("__setstate__ can be only called on leaf Tensors")
        if len(state) == 4:
            # legacy serialization of Tensor
            self.set_(*state)
            return
        elif len(state) == 5:
            # legacy serialization of Variable
            self.data = state[0]
            state = (state[3], state[4], state[2])
        # The setting of _backward_hooks is expected to be a no-op.
        # See Note [Don't serialize hooks]
        self.requires_grad, _, self._backward_hooks = state

    def __repr__(self, *, tensor_contents=None):
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.__repr__, (self,), self, tensor_contents=tensor_contents
            )
        # All strings are unicode in Python 3.
        return torch._tensor_str._str(self, tensor_contents=tensor_contents)

    def backward(
        self, gradient=None, retain_graph=None, create_graph=False, inputs=None
    ):
        r"""Computes the gradient of current tensor wrt graph leaves.

        The graph is differentiated using the chain rule. If the tensor is
        non-scalar (i.e. its data has more than one element) and requires
        gradient, the function additionally requires specifying a ``gradient``.
        It should be a tensor of matching type and shape that represents
        the gradient of the differentiated function w.r.t. ``self``.

        This function accumulates gradients in the leaves - you might need to zero
        ``.grad`` attributes or set them to ``None`` before calling it.
        See :ref:`Default gradient layouts<default-grad-layouts>`
        for details on the memory layout of accumulated gradients.

        .. note::

            If you run any forward ops, create ``gradient``, and/or call ``backward``
            in a user-specified CUDA stream context, see
            :ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.

        .. note::

            When ``inputs`` are provided and a given input is not a leaf,
            the current implementation will call its grad_fn (even though it is not strictly needed to get these gradients).
            It is an implementation detail on which the user should not rely.
            See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.

        Args:
            gradient (Tensor, optional): The gradient of the function
                being differentiated w.r.t. ``self``.
                This argument can be omitted if ``self`` is a scalar.
            retain_graph (bool, optional): If ``False``, the graph used to compute
                the grads will be freed. Note that in nearly all cases setting
                this option to True is not needed and often can be worked around
                in a much more efficient way. Defaults to the value of
                ``create_graph``.
            create_graph (bool, optional): If ``True``, graph of the derivative will
                be constructed, allowing higher order derivative products to be
                computed. Defaults to ``False``.
            inputs (sequence of Tensor, optional): Inputs w.r.t. which the gradient will be
                accumulated into ``.grad``. All other tensors will be ignored. If not
                provided, the gradient is accumulated into all the leaf Tensors that were
                used to compute the :attr:`tensors`.
        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.backward,
                (self,),
                self,
                gradient=gradient,
                retain_graph=retain_graph,
                create_graph=create_graph,
                inputs=inputs,
            )
        torch.autograd.backward(
            self, gradient, retain_graph, create_graph, inputs=inputs
        )
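
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file): a scalar loss needs no explicit ``gradient`` argument.
    #   >>> x = torch.tensor([1.0, 2.0], requires_grad=True)
    #   >>> (x * x).sum().backward()
    #   >>> x.grad
    #   tensor([2., 4.])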

    def register_hook(self, hook):
        r"""Registers a backward hook.

        The hook will be called every time a gradient with respect to the
        Tensor is computed. The hook should have the following signature::

            hook(grad) -> Tensor or None


        The hook should not modify its argument, but it can optionally return
        a new gradient which will be used in place of :attr:`grad`.

        This function returns a handle with a method ``handle.remove()``
        that removes the hook from the module.

        .. note::
            See :ref:`backward-hooks-execution` for more information on how and when this hook
            is executed, and how its execution is ordered relative to other hooks.

        Example::

            >>> v = torch.tensor([0., 0., 0.], requires_grad=True)
            >>> h = v.register_hook(lambda grad: grad * 2)  # double the gradient
            >>> v.backward(torch.tensor([1., 2., 3.]))
            >>> v.grad
            tensor([2., 4., 6.])

            >>> h.remove()  # removes the hook
        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.register_hook, (self,), self, hook)
        if not self.requires_grad:
            raise RuntimeError(
                "cannot register a hook on a tensor that doesn't require gradient"
            )
        if self._backward_hooks is None:
            self._backward_hooks = OrderedDict()
            if self.grad_fn is not None:
                self.grad_fn._register_hook_dict(self)

        from torch.utils.hooks import RemovableHandle

        handle = RemovableHandle(self._backward_hooks)
        self._backward_hooks[handle.id] = hook
        return handle

    def register_post_accumulate_grad_hook(self, hook):
        r"""Registers a backward hook that runs after grad accumulation.

        The hook will be called after all gradients for a tensor have been accumulated,
        meaning that the .grad field has been updated on that tensor. The post
        accumulate grad hook is ONLY applicable for leaf tensors (tensors without a
        .grad_fn field). Registering this hook on a non-leaf tensor will error!

        The hook should have the following signature::

            hook(param: Tensor) -> None

        Note that, unlike other autograd hooks, this hook operates on the tensor
        that requires grad and not the grad itself. The hook can in-place modify
        and access its Tensor argument, including its .grad field.

        This function returns a handle with a method ``handle.remove()``
        that removes the hook from the module.

        .. note::
            See :ref:`backward-hooks-execution` for more information on how and when this hook
            is executed, and how its execution is ordered relative to other hooks. Since
            this hook runs during the backward pass, it will run in no_grad mode (unless
            create_graph is True). You can use torch.enable_grad() to re-enable autograd
            within the hook if you need it.

        Example::

            >>> v = torch.tensor([0., 0., 0.], requires_grad=True)
            >>> lr = 0.01
            >>> # simulate a simple SGD update
            >>> h = v.register_post_accumulate_grad_hook(lambda p: p.add_(p.grad, alpha=-lr))
            >>> v.backward(torch.tensor([1., 2., 3.]))
            >>> v
            tensor([-0.0100, -0.0200, -0.0300], requires_grad=True)

            >>> h.remove()  # removes the hook
        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.register_post_accumulate_grad_hook, (self,), self, hook
            )
        if not self.requires_grad:
            raise RuntimeError(
                "cannot register a hook on a tensor that doesn't require gradient"
            )
        if self.grad_fn is not None:
            raise RuntimeError(
                "post accumulate grad hooks cannot be registered on non-leaf tensors"
            )
        if self._post_accumulate_grad_hooks is None:
            self._post_accumulate_grad_hooks: Dict[Any, Any] = OrderedDict()

        from torch.utils.hooks import RemovableHandle

        handle = RemovableHandle(self._post_accumulate_grad_hooks)
        self._post_accumulate_grad_hooks[handle.id] = hook
        return handle

    def reinforce(self, reward):
        def trim(str):
            return "\n".join([line.strip() for line in str.split("\n")])

        raise RuntimeError(
            trim(
                r"""reinforce() was removed.
            Use torch.distributions instead.
            See https://pytorch.org/docs/main/distributions.html

            Instead of:

            probs = policy_network(state)
            action = probs.multinomial()
            next_state, reward = env.step(action)
            action.reinforce(reward)
            action.backward()

            Use:

            probs = policy_network(state)
            # NOTE: categorical is equivalent to what used to be called multinomial
            m = torch.distributions.Categorical(probs)
            action = m.sample()
            next_state, reward = env.step(action)
            loss = -m.log_prob(action) * reward
            loss.backward()
        """
            )
        )

    detach = _C._add_docstr(
        _C.TensorBase.detach,
        r"""
    Returns a new Tensor, detached from the current graph.

    The result will never require gradient.

    This method also affects forward mode AD gradients and the result will never
    have forward mode AD gradients.

    .. note::

      Returned Tensor shares the same storage with the original one.
      In-place modifications on either of them will be seen, and may trigger
      errors in correctness checks.
    """,
    )

    detach_ = _C._add_docstr(
        _C.TensorBase.detach_,
        r"""
    Detaches the Tensor from the graph that created it, making it a leaf.
    Views cannot be detached in-place.

    This method also affects forward mode AD gradients and the result will never
    have forward mode AD gradients.
    """,
    )

    def is_shared(self):
        r"""Checks if tensor is in shared memory.

        This is always ``True`` for CUDA tensors.
        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.is_shared, (self,), self)
        return self._typed_storage()._is_shared()

    def share_memory_(self):
        r"""Moves the underlying storage to shared memory.

        This is a no-op if the underlying storage is already in shared memory
        and for CUDA tensors. Tensors in shared memory cannot be resized.

        See :meth:`torch.UntypedStorage.share_memory_` for more details.
        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.share_memory_, (self,), self)
        self._typed_storage()._share_memory_()
        return self

    def module_load(self, other, assign=False):
        r"""Defines how to transform ``other`` when loading it into ``self`` in :meth:`~nn.Module.load_state_dict`.

        Used when :func:`~torch.__future__.get_swap_module_params_on_conversion` is ``True``.

        It is expected that ``self`` is a parameter or buffer in an ``nn.Module`` and ``other`` is the
        value in the state dictionary with the corresponding key. This method defines
        how ``other`` is remapped before being swapped with ``self`` via
        :func:`~torch.utils.swap_tensors` in :meth:`~nn.Module.load_state_dict`.

        .. note::
            This method should always return a new object that is not ``self`` or ``other``.
            For example, the default implementation returns ``self.copy_(other).detach()``
            if ``assign`` is ``False`` or ``other.detach()`` if ``assign`` is ``True``.

        Args:
            other (Tensor): value in state dict with key corresponding to ``self``
            assign (bool): the assign argument passed to :meth:`nn.Module.load_state_dict`

        """
        if has_torch_function_variadic(self, other):
            return handle_torch_function(
                Tensor.module_load, (self, other), self, other, assign=assign
            )

        if assign:
            return other.detach()
        else:
            return self.copy_(other).detach()
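
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file): module_load is consulted by load_state_dict when parameter
    # swapping is enabled via torch.__future__.
    #   >>> from torch.__future__ import set_swap_module_params_on_conversion
    #   >>> set_swap_module_params_on_conversion(True)
    #   >>> m = torch.nn.Linear(2, 2)
    #   >>> m.load_state_dict(m.state_dict())
    #   <All keys matched successfully>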

    def __reversed__(self):
        r"""Reverses the tensor along dimension 0."""
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__reversed__, (self,), self)
        if self.dim() == 0:
            return self
        else:
            return self.flip(0)

    def norm(
        self,
        p: Optional[Union[float, str]] = "fro",
        dim=None,
        keepdim=False,
        dtype=None,
    ):
        r"""See :func:`torch.norm`"""
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.norm, (self,), self, p=p, dim=dim, keepdim=keepdim, dtype=dtype
            )
        return torch.norm(self, p, dim, keepdim, dtype=dtype)
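
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file): the default is the Frobenius norm; p and dim select other norms.
    #   >>> torch.tensor([3.0, 4.0]).norm()
    #   tensor(5.)
    #   >>> torch.eye(2).norm(p=1, dim=0)
    #   tensor([1., 1.])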

    def solve(self, other):
        from torch._linalg_utils import solve

        return solve(self, other)

    def lstsq(self, other):
        from torch._linalg_utils import lstsq

        return lstsq(self, other)

    def eig(self, eigenvectors=False):
        from torch._linalg_utils import eig

        return eig(self, eigenvectors=eigenvectors)

    def symeig(self, eigenvectors=False):
        from torch._linalg_utils import _symeig

        return _symeig(self, eigenvectors=eigenvectors)

    def lu(self, pivot=True, get_infos=False):
        r"""See :func:`torch.lu`"""
        # If get_infos is True, then we don't need to check for errors and vice versa
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.lu, (self,), self, pivot=pivot, get_infos=get_infos
            )

        LU, pivots, infos = torch._lu_with_info(
            self, pivot=pivot, check_errors=(not get_infos)
        )
        if get_infos:
            return LU, pivots, infos
        else:
            return LU, pivots
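
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file): without get_infos, lu() returns the packed factorization and pivots.
    #   >>> A = torch.tensor([[4.0, 3.0], [6.0, 3.0]])
    #   >>> LU, pivots = A.lu()
    #   >>> LU.shape, pivots.shape
    #   (torch.Size([2, 2]), torch.Size([2]))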

    def stft(
        self,
        n_fft: int,
        hop_length: Optional[int] = None,
        win_length: Optional[int] = None,
        window: "Optional[Tensor]" = None,
        center: bool = True,
        pad_mode: str = "reflect",
        normalized: bool = False,
        onesided: Optional[bool] = None,
        return_complex: Optional[bool] = None,
    ):
        r"""See :func:`torch.stft`

        .. warning::
            This function changed its signature in version 0.4.1. Calling it with
            the previous signature may raise an error or return an incorrect result.
        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.stft,
                (self,),
                self,
                n_fft,
                hop_length=hop_length,
                win_length=win_length,
                window=window,
                center=center,
                pad_mode=pad_mode,
                normalized=normalized,
                onesided=onesided,
                return_complex=return_complex,
            )
        return torch.stft(
            self,
            n_fft,
            hop_length,
            win_length,
            window,
            center,
            pad_mode,
            normalized,
            onesided,
            return_complex=return_complex,
        )

    def istft(
        self,
        n_fft: int,
        hop_length: Optional[int] = None,
        win_length: Optional[int] = None,
        window: "Optional[Tensor]" = None,
        center: bool = True,
        normalized: bool = False,
        onesided: Optional[bool] = None,
        length: Optional[int] = None,
        return_complex: bool = False,
    ):
        r"""See :func:`torch.istft`"""
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.istft,
                (self,),
                self,
                n_fft,
                hop_length=hop_length,
                win_length=win_length,
                window=window,
                center=center,
                normalized=normalized,
                onesided=onesided,
                length=length,
                return_complex=return_complex,
            )
        return torch.istft(
            self,
            n_fft,
            hop_length,
            win_length,
            window,
            center,
            normalized,
            onesided,
            length,
            return_complex=return_complex,
        )

    def resize(self, *sizes):
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.resize, (self,), self, *sizes)
        warnings.warn("non-inplace resize is deprecated")
        from torch.autograd._functions import Resize

        return Resize.apply(self, sizes)

    def resize_as(self, tensor):
        if has_torch_function_variadic(self, tensor):
            return handle_torch_function(Tensor.resize_as, (self, tensor), self, tensor)
        warnings.warn("non-inplace resize_as is deprecated")
        from torch.autograd._functions import Resize

        return Resize.apply(self, tensor.size())

    def split(self, split_size, dim=0):
        r"""See :func:`torch.split`"""
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.split, (self,), self, split_size, dim=dim
            )
        if isinstance(split_size, Tensor):
            try:
                split_size = int(split_size)
            except ValueError:
                pass

        if isinstance(split_size, (int, torch.SymInt)):
            return torch._VF.split(self, split_size, dim)  # type: ignore[attr-defined]
        else:
            return torch._VF.split_with_sizes(self, split_size, dim)
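
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file): an int gives equal chunks (the last may be smaller); a list gives
    # exact chunk sizes.
    #   >>> torch.arange(5).split(2)
    #   (tensor([0, 1]), tensor([2, 3]), tensor([4]))
    #   >>> torch.arange(5).split([1, 4])
    #   (tensor([0]), tensor([1, 2, 3, 4]))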

    def unique(self, sorted=True, return_inverse=False, return_counts=False, dim=None):
        r"""Returns the unique elements of the input tensor.

        See :func:`torch.unique`
        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.unique,
                (self,),
                self,
                sorted=sorted,
                return_inverse=return_inverse,
                return_counts=return_counts,
                dim=dim,
            )
        return torch.unique(
            self,
            sorted=sorted,
            return_inverse=return_inverse,
            return_counts=return_counts,
            dim=dim,
        )

    def unique_consecutive(self, return_inverse=False, return_counts=False, dim=None):
        r"""Eliminates all but the first element from every consecutive group of equivalent elements.

        See :func:`torch.unique_consecutive`
        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.unique_consecutive,
                (self,),
                self,
                return_inverse=return_inverse,
                return_counts=return_counts,
                dim=dim,
            )
        return torch.unique_consecutive(
            self, return_inverse=return_inverse, return_counts=return_counts, dim=dim
        )
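
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file): unique() deduplicates globally (sorted by default), while
    # unique_consecutive() only collapses adjacent repeats.
    #   >>> torch.tensor([1, 3, 2, 3]).unique()
    #   tensor([1, 2, 3])
    #   >>> torch.tensor([1, 1, 2, 2, 1]).unique_consecutive()
    #   tensor([1, 2, 1])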

    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rsub__(self, other):
        return _C._VariableFunctions.rsub(self, other)

    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rdiv__(self, other):
        return self.reciprocal() * other

    __rtruediv__ = __rdiv__
    __itruediv__ = _C.TensorBase.__idiv__

    __pow__ = _handle_torch_function_and_wrap_type_error_to_not_implemented(
        _C.TensorBase.pow
    )
    __ipow__ = _handle_torch_function_and_wrap_type_error_to_not_implemented(
        _C.TensorBase.pow_
    )

    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rmod__(self, other):
        return torch.remainder(other, self)

    def __format__(self, format_spec):
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__format__, (self,), self, format_spec)
        if self.dim() == 0 and not self.is_meta and type(self) is Tensor:
            return self.item().__format__(format_spec)
        return object.__format__(self, format_spec)

    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rpow__(self, other):
        return torch.pow(other, self)

    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __floordiv__(self, other):
        return torch.floor_divide(self, other)

    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rfloordiv__(self, other):
        return torch.floor_divide(other, self)

    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rlshift__(self, other):
        return torch.bitwise_left_shift(other, self)

    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rrshift__(self, other):
        return torch.bitwise_right_shift(other, self)

    @_handle_torch_function_and_wrap_type_error_to_not_implemented
    def __rmatmul__(self, other):
        return torch.matmul(other, self)

    __pos__ = _C.TensorBase.positive
    __neg__ = _C.TensorBase.neg
    __abs__ = _C.TensorBase.abs

    def __len__(self):
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__len__, (self,), self)
        if self.dim() == 0:
            raise TypeError("len() of a 0-d tensor")
        if torch._C._get_tracing_state():
            warnings.warn(
                "Using len to get tensor shape might cause the trace to be incorrect. "
                "Recommended usage would be tensor.shape[0]. "
                "Passing a tensor of different shape might lead to errors or silently give "
                "incorrect results.",
                category=torch.jit.TracerWarning,
                stacklevel=2,
            )
        return self.shape[0]

    def __iter__(self):
        # NB: we use 'imap' and not 'map' here, so that in Python 2 we get a
        # generator and don't eagerly perform all the indexes. This could
        # save us work, and also helps keep trace ordering deterministic
        # (e.g., if you zip(*hiddens), the eager map will force all the
        # indexes of hiddens[0] before hiddens[1], while the generator
        # map will interleave them.)
        # NB: We have intentionally skipped __torch_function__ dispatch here.
        # See gh-54457
        if self.dim() == 0:
            raise TypeError("iteration over a 0-d tensor")
        if torch._C._get_tracing_state():
            warnings.warn(
                "Iterating over a tensor might cause the trace to be incorrect. "
                "Passing a tensor of different shape won't change the number of "
                "iterations executed (and might lead to errors or silently give "
                "incorrect results).",
                category=torch.jit.TracerWarning,
                stacklevel=2,
            )
        return iter(self.unbind(0))

    def __hash__(self):
        # Do NOT handle __torch_function__ here as a user's default
        # implementation that handles most functions will most likely do it wrong.
        # It can be easily overridden by defining this method on the user
        # subclass if needed.
        return id(self)

    def __dir__(self):
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__dir__, (self,), self)
        tensor_methods = dir(self.__class__)
        tensor_methods.remove("volatile")  # deprecated
        attrs = list(self.__dict__.keys())
        keys = tensor_methods + attrs

        # property only available for dense, CUDA tensors
        if (not self.is_cuda) or self.is_sparse:
            keys.remove("__cuda_array_interface__")

        return sorted(keys)

    # Numpy array interface, to support `numpy.asarray(tensor) -> ndarray`
    __array_priority__ = 1000  # prefer Tensor ops over numpy ones

    def __array__(self, dtype=None):
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__array__, (self,), self, dtype=dtype)
        if dtype is None:
            return self.numpy()
        else:
            return self.numpy().astype(dtype, copy=False)

    # Wrap Numpy array again in a suitable tensor when done, to support e.g.
    # `numpy.sin(tensor) -> tensor` or `numpy.greater(tensor, 0) -> ByteTensor`
    def __array_wrap__(self, array):
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.__array_wrap__, (self,), self, array=array
            )
        if array.dtype == bool:
            # Workaround, torch has no built-in bool tensor
            array = array.astype("uint8")
        return torch.from_numpy(array)

    def __contains__(self, element: Any, /) -> bool:
        r"""Check if `element` is present in tensor

        Args:
            element (Tensor or scalar): element to be checked
                for presence in the current tensor
        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.__contains__, (self,), self, element)
        if isinstance(
            element, (torch.Tensor, Number, torch.SymInt, torch.SymFloat, torch.SymBool)
        ):
            # type hint doesn't understand the __contains__ result array
            return bool((element == self).any().item())  # type: ignore[union-attr]

        raise RuntimeError(
            f"Tensor.__contains__ only supports Tensor or scalar, but you passed in a {type(element)}."
        )
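
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file): membership tests compare element-wise and reduce with any().
    #   >>> 2 in torch.tensor([1, 2, 3])
    #   True
    #   >>> 5 in torch.tensor([1, 2, 3])
    #   False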

    @property
    def __cuda_array_interface__(self):
        """Array view description for cuda tensors.

        See:
        https://numba.pydata.org/numba-doc/latest/cuda/cuda_array_interface.html
        """
        if has_torch_function_unary(self):
            # TODO mypy doesn't support @property, see: https://github.com/python/mypy/issues/6185
            return handle_torch_function(
                Tensor.__cuda_array_interface__.__get__,  # type: ignore[attr-defined]
                (self,),
                self,
            )

        # raise AttributeError for unsupported tensors, so that
        # hasattr(cpu_tensor, "__cuda_array_interface__") is False.
        if not self.is_cuda:
            raise AttributeError(
                f"Can't get __cuda_array_interface__ on non-CUDA tensor type: {self.type()} "
                "If CUDA data is required use tensor.cuda() to copy tensor to device memory."
            )

        if self.is_sparse:
            raise AttributeError(
                f"Can't get __cuda_array_interface__ on sparse type: {self.type()} "
                "Use Tensor.to_dense() to convert to a dense tensor first."
            )

        # RuntimeError, matching tensor.__array__() behavior.
        if self.requires_grad:
            raise RuntimeError(
                "Can't get __cuda_array_interface__ on Variable that requires grad. "
                "If gradients aren't required, use var.detach() to get Variable that doesn't require grad."
            )

        # CUDA devices are little-endian and tensors are stored in native byte
        # order. 1-byte entries are endian-agnostic.
        typestr = {
            torch.complex64: "<c8",
            torch.complex128: "<c16",
            torch.bfloat16: "<f2",
            torch.float16: "<f2",
            torch.float32: "<f4",
            torch.float64: "<f8",
            torch.uint8: "|u1",
            torch.int8: "|i1",
            torch.uint16: "<u2",
            torch.int16: "<i2",
            torch.uint32: "<u4",
            torch.int32: "<i4",
            torch.uint64: "<u8",
            torch.int64: "<i8",
            torch.bool: "|b1",
        }[self.dtype]

        itemsize = self.element_size()

        shape = tuple(self.shape)
        if self.is_contiguous():
            # __cuda_array_interface__ v2 requires the strides to be omitted
            # (either not set or set to None) for C-contiguous arrays.
            strides = None
        else:
            strides = tuple(s * itemsize for s in self.stride())
        data_ptr = self.data_ptr() if self.numel() > 0 else 0
        data = (data_ptr, False)  # read-only is false

        return dict(typestr=typestr, shape=shape, strides=strides, data=data, version=2)

    def storage_type(self):
        r"""storage_type() -> type

        Returns the type of the underlying storage.

        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.storage_type, (self,), self)

        torch.storage._warn_typed_storage_removal()

        return self._typed_storage()._get_legacy_storage_class()

    def refine_names(self, *names):
        r"""Refines the dimension names of :attr:`self` according to :attr:`names`.

        Refining is a special case of renaming that "lifts" unnamed dimensions.
        A ``None`` dim can be refined to have any name; a named dim can only be
        refined to have the same name.

        Because named tensors can coexist with unnamed tensors, refining names
        gives a nice way to write named-tensor-aware code that works with both
        named and unnamed tensors.

        :attr:`names` may contain up to one Ellipsis (``...``).
        The Ellipsis is expanded greedily; it is expanded in-place to fill
        :attr:`names` to the same length as ``self.dim()`` using names from the
        corresponding indices of ``self.names``.

        Python 2 does not support Ellipsis but one may use a string literal
        instead (``'...'``).

        Args:
            names (iterable of str): The desired names of the output tensor. May
                contain up to one Ellipsis.

        Examples::

            >>> imgs = torch.randn(32, 3, 128, 128)
            >>> named_imgs = imgs.refine_names('N', 'C', 'H', 'W')
            >>> named_imgs.names
            ('N', 'C', 'H', 'W')

            >>> tensor = torch.randn(2, 3, 5, 7, 11)
            >>> tensor = tensor.refine_names('A', ..., 'B', 'C')
            >>> tensor.names
            ('A', None, None, 'B', 'C')

        .. warning::
            The named tensor API is experimental and subject to change.

        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.refine_names, (self,), self, *names)
        names = resolve_ellipsis(names, self.names, "refine_names")
        return super().refine_names(names)

    def align_to(self, *names):
        r"""Permutes the dimensions of the :attr:`self` tensor to match the order
        specified in :attr:`names`, adding size-one dims for any new names.

        All of the dims of :attr:`self` must be named in order to use this method.
        The resulting tensor is a view on the original tensor.

        All dimension names of :attr:`self` must be present in :attr:`names`.
        :attr:`names` may contain additional names that are not in ``self.names``;
        the output tensor has a size-one dimension for each of those new names.

        :attr:`names` may contain up to one Ellipsis (``...``).
        The Ellipsis is expanded to be equal to all dimension names of :attr:`self`
        that are not mentioned in :attr:`names`, in the order that they appear
        in :attr:`self`.

        Python 2 does not support Ellipsis but one may use a string literal
        instead (``'...'``).

        Args:
            names (iterable of str): The desired dimension ordering of the
                output tensor. May contain up to one Ellipsis that is expanded
                to all unmentioned dim names of :attr:`self`.

        Examples::

            >>> tensor = torch.randn(2, 2, 2, 2, 2, 2)
            >>> named_tensor = tensor.refine_names('A', 'B', 'C', 'D', 'E', 'F')

            # Move the F and E dims to the front while keeping the rest in order
            >>> named_tensor.align_to('F', 'E', ...)

        .. warning::
            The named tensor API is experimental and subject to change.

        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.align_to, (self,), self, *names)
        ellipsis_idx = single_ellipsis_index(names, "align_to")
        if ellipsis_idx is None:
            return super().align_to(names)
        return super().align_to(
            [name for name in names if not is_ellipsis(name)], ellipsis_idx
        )

    def unflatten(self, dim, sizes):  # type: ignore[override]
        r"""
        unflatten(dim, sizes) -> Tensor

        See :func:`torch.unflatten`.

        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.unflatten, (self,), self, dim, sizes)

        if not sizes:
            raise RuntimeError("unflatten: sizes must be non-empty")

        names = None
        if isinstance(sizes, OrderedDict) or (
            isinstance(sizes, (tuple, list)) and isinstance(sizes[0], (tuple, list))
        ):
            names, sizes = unzip_namedshape(sizes)
            return super().unflatten(dim, sizes, names)
        else:
            return super().unflatten(dim, sizes)
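
    # Illustrative usage sketch (editor's addition, not part of the upstream
    # file): unflatten expands one dimension into several.
    #   >>> torch.arange(6).unflatten(0, (2, 3))
    #   tensor([[0, 1, 2],
    #           [3, 4, 5]])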

    def rename_(self, *names, **rename_map):
        """In-place version of :meth:`~Tensor.rename`."""

        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.rename_, (self,), self, *names, **rename_map
            )

        # Note [rename_ / rename API]
        # The Python API for these is different from the C++ API. In Python:
        # 1) tensor.rename(*names) takes a vararglist of names
        # 2) tensor.rename(**rename_map) takes a map of names to rename.
        # C++ is static, making it difficult to implement similar behavior.
        return update_names(self, names, rename_map, inplace=True)

    def rename(self, *names, **rename_map):
        """Renames dimension names of :attr:`self`.

        There are two main usages:

        ``self.rename(**rename_map)`` returns a view on tensor that has dims
        renamed as specified in the mapping :attr:`rename_map`.

        ``self.rename(*names)`` returns a view on tensor, renaming all
        dimensions positionally using :attr:`names`.
        Use ``self.rename(None)`` to drop names on a tensor.

        One cannot specify both positional args :attr:`names` and keyword args
        :attr:`rename_map`.

        Examples::

            >>> imgs = torch.rand(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
            >>> renamed_imgs = imgs.rename(N='batch', C='channels')
            >>> renamed_imgs.names
            ('batch', 'channels', 'H', 'W')

            >>> renamed_imgs = imgs.rename(None)
            >>> renamed_imgs.names
            (None, None, None, None)

            >>> renamed_imgs = imgs.rename('batch', 'channel', 'height', 'width')
            >>> renamed_imgs.names
            ('batch', 'channel', 'height', 'width')

        .. warning::
            The named tensor API is experimental and subject to change.

        """
        if has_torch_function_unary(self):
            return handle_torch_function(
                Tensor.rename, (self,), self, *names, **rename_map
            )

        # See Note [rename_ / rename API]
        return update_names(self, names, rename_map, inplace=False)

    def to_sparse_coo(self):
        """Convert a tensor to :ref:`coordinate format <sparse-coo-docs>`.

        Examples::

            >>> dense = torch.randn(5, 5)
            >>> sparse = dense.to_sparse_coo()
            >>> sparse._nnz()
            25

        """
        return self.to_sparse()

    def dim_order(
        self, *, ambiguity_check: Union[bool, List[torch.memory_format]] = False
    ):
        """
        dim_order(ambiguity_check=False) -> tuple

        Returns the uniquely determined tuple of int describing the dim order or
        physical layout of :attr:`self`.

        The dim order represents how dimensions are laid out in memory,
        starting from the outermost to the innermost dimension.

        Note that the dim order may not always be uniquely determined.
        If `ambiguity_check` is True, this function raises a RuntimeError when the dim order cannot be uniquely determined;
        if `ambiguity_check` is a list of memory formats, this function raises a RuntimeError when the tensor cannot be
        interpreted as exactly one of the given memory formats, or when the dim order cannot be uniquely determined;
        if `ambiguity_check` is False, it returns one of the legal dim orders without checking uniqueness.
        Otherwise, it raises a TypeError.

        Args:
            ambiguity_check (bool or List[torch.memory_format]): The check method for ambiguity of dim order.

        Examples::

            >>> torch.empty((2, 3, 5, 7)).dim_order()
            (0, 1, 2, 3)
            >>> torch.empty((2, 3, 5, 7)).transpose(1, 2).dim_order()
            (0, 2, 1, 3)
            >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).dim_order()
            (0, 2, 3, 1)
            >>> torch.empty((1, 2, 3, 4)).dim_order()
            (0, 1, 2, 3)
            >>> try:
            ...     torch.empty((1, 2, 3, 4)).dim_order(ambiguity_check=True)
            ... except RuntimeError as e:
            ...     print(e)
            The tensor does not have unique dim order, or cannot map to exact one of the given memory formats.
            >>> torch.empty((1, 2, 3, 4)).dim_order(
            ...     ambiguity_check=[torch.contiguous_format, torch.channels_last]
            ... )  # It can be mapped to contiguous format
            (0, 1, 2, 3)
            >>> try:
            ...     torch.empty((1, 2, 3, 4)).dim_order(ambiguity_check="ILLEGAL")
            ... except TypeError as e:
            ...     print(e)
            The ambiguity_check argument must be a bool or a list of memory formats.

        .. warning::
            The dim_order tensor API is experimental and subject to change.

        """
        if has_torch_function_unary(self):
            return handle_torch_function(Tensor.dim_order, (self,), self)

        # Sanity check ambiguity_check data types
        if not isinstance(ambiguity_check, bool):
            if not isinstance(ambiguity_check, list):
                raise TypeError(
                    "The ambiguity_check argument must be a bool or a list of memory formats."
                )
            for memory_format in ambiguity_check:
                if not isinstance(memory_format, torch.memory_format):
                    raise TypeError(
                        "The ambiguity_check argument must be a bool or a list of memory formats."
                    )

        def invalid_unique_memory_format(tensor, valid_memory_formats):
            """
            Returns True if the tensor cannot be uniquely mapped to any of the given memory formats, False otherwise.
            """

            n_legality = 0

            for memory_format in valid_memory_formats:
                if tensor.is_contiguous(memory_format=memory_format):
                    n_legality += 1

            return n_legality != 1

        def has_multiple_dim_order(tensor):
            """
            Returns True if there are multiple legal dim orders for the given tensor, False otherwise.

            The tensor is considered to have multiple legal dim orders if either of the following conditions is met:

1574 |
+
* Singleton Dimensions: There's at least one singleteon dimension in the tensor.
|
1575 |
+
Since their size is 1, they don't affect the memory offset (stride * index
|
1576 |
+
is zero because index is always zero). Therefore, they can be placed anywhere
|
1577 |
+
in the dimension order without changing how data is accessed.
|
1578 |
+
* Same strides: Strides reflect how the tensor is stored in memory.
|
1579 |
+
If any two dimensions have the same stride, swapping these dimensions won't
|
1580 |
+
change how data is accessed, leading to multiple correct dimension orders.
|
1581 |
+
"""
|
1582 |
+
|
1583 |
+
sizes = tensor.size()
|
1584 |
+
strides = tensor.stride()
|
1585 |
+
|
1586 |
+
# Check if there are any duplicate strides
|
1587 |
+
has_duplicate_strides = any(
|
1588 |
+
earlier == later for earlier, later in zip(strides, strides[1:])
|
1589 |
+
)
|
1590 |
+
|
1591 |
+
# Check if there are any singleton dimensions
|
1592 |
+
has_singleton_dims = any(size == 1 for size in sizes)
|
1593 |
+
|
1594 |
+
return has_duplicate_strides or has_singleton_dims
|
1595 |
+
|
1596 |
+
valid_memory_formats = (
|
1597 |
+
ambiguity_check if isinstance(ambiguity_check, list) else []
|
1598 |
+
)
|
1599 |
+
check_multiple_dim_order = (
|
1600 |
+
ambiguity_check if isinstance(ambiguity_check, bool) else True
|
1601 |
+
)
|
1602 |
+
|
1603 |
+
if (
|
1604 |
+
check_multiple_dim_order and has_multiple_dim_order(self)
|
1605 |
+
) and invalid_unique_memory_format(self, valid_memory_formats):
|
1606 |
+
raise RuntimeError(
|
1607 |
+
"The tensor does not have unique dim order, or cannot map to exact one of the given memory formats."
|
1608 |
+
)
|
1609 |
+
|
1610 |
+
import torch._prims_common as utils
|
1611 |
+
|
1612 |
+
return tuple(utils.compute_elementwise_output_logical_to_physical_perm(self))
|
1613 |
+
|
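A minimal sketch of why singleton dimensions make the dim order ambiguous, per the reasoning in has_multiple_dim_order above (illustrative; printed values are representative, not guaranteed):

import torch

# A size-1 dimension never contributes to the memory offset, so several
# permutations describe the same physical layout; dim_order() then simply
# returns one legal answer unless an ambiguity_check is requested.
t = torch.empty(1, 2, 3)
print(t.stride())        # e.g. (6, 3, 1); dim 0 could sit anywhere
print(t.dim_order())     # one valid order, e.g. (0, 1, 2)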
def _update_names(self, names, inplace):
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor._update_names, (self,), self, names, inplace
        )

    # See Note [rename_ / rename API]
    if inplace:
        return super().rename_(names)
    else:
        return super().rename(names)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
    """
    This __torch_function__ implementation wraps subclasses such that
    methods called on subclasses return a subclass instance instead of
    a ``torch.Tensor`` instance.

    One corollary to this is that you need coverage for torch.Tensor
    methods if implementing __torch_function__ for subclasses.

    We recommend always calling ``super().__torch_function__`` as the base
    case when doing the above.

    While not mandatory, we recommend making `__torch_function__` a classmethod.
    """
    if kwargs is None:
        kwargs = {}

    if not all(issubclass(cls, t) for t in types):
        return NotImplemented

    with _C.DisableTorchFunctionSubclass():
        ret = func(*args, **kwargs)
        if func in get_default_nowrap_functions():
            return ret
        else:
            return _convert(ret, cls)

__torch_dispatch__ = _C._disabled_torch_dispatch_impl
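A small sketch of the behavior this default __torch_function__ buys (a trivial subclass using only the public subclassing API):

import torch

class MyTensor(torch.Tensor):
    pass

t = torch.randn(3).as_subclass(MyTensor)
# The default __torch_function__ re-wraps results via _convert, so ordinary
# ops keep returning the subclass rather than plain torch.Tensor.
out = t * 2 + 1
assert type(out) is MyTensor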
def __dlpack__(self, stream=None):
    """
    Creates a DLPack `capsule <https://data-apis.org/array-api/latest/design_topics/data_interchange.html#data-interchange>`_
    of the current tensor to be exported to other libraries.

    This function will be called from the `from_dlpack` method
    of the library that will consume the capsule. `from_dlpack` passes the current
    stream to this method as part of the specification.

    Args:
        stream (integer or None): An optional Python integer representing a
            pointer to a CUDA stream. The current stream is synchronized with
            this stream before the capsule is created, and since the capsule
            shares its storage with the tensor this makes it safe to access from
            both streams. If None or -1 is passed then no synchronization is performed.
            If 1 (on CUDA) or 0 (on ROCM) then the default stream is used for
            synchronization.
    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.__dlpack__, (self,), self, stream)

    # DLPack capsules can't capture all of PyTorch's semantics,
    # so we prohibit exporting tensors that would lose their properties like
    # requires_grad and having the conjugate bit set.
    if self.requires_grad:
        raise RuntimeError(
            "Can't export tensors that require gradient, use tensor.detach()"
        )
    if self.is_conj():
        raise RuntimeError("Can't export tensors with the conjugate bit set")
    if self.layout != torch.strided:
        raise RuntimeError(
            "Can't export tensors with layout other than torch.strided"
        )

    if stream is not None and type(stream) is not int:
        # Stream pointers in CUDA/ROCm are uniquely numbered and can
        # be retrieved from their integer value.
        raise TypeError("stream must be ``int`` or ``None``")
    elif stream is not None and stream != -1:
        if self.device.type == "cuda":
            # NB: This logic handles the special case values for default
            # streams and must be kept in sync with from_dlpack in
            # torch/utils/dlpack.py
            if stream == 1 and torch.version.hip is None:
                stream = torch.cuda.default_stream()
            elif stream == 0 and torch.version.hip is not None:
                stream = torch.cuda.default_stream()
            else:
                stream = torch.cuda.ExternalStream(stream)
            # Only synchronize on different streams
            sync_stream = torch.cuda.current_stream()
            if stream != sync_stream:
                event = torch.cuda.Event()
                event.record(sync_stream)
                stream.wait_event(event)
    if self.device.type == "xla":
        import torch_xla
        import torch_xla.utils.dlpack as xla_dlpack

        if (
            len(torch_xla.real_devices()) <= 0
            or "cuda" not in torch_xla.real_devices()[0].lower()
        ):
            raise RuntimeError(
                "Can't export to dlpack an XLA tensor that is not on CUDA."
            )
        return xla_dlpack.to_dlpack(self)
    return torch.to_dlpack(self)
def __dlpack_device__(self) -> Tuple[enum.IntEnum, int]:
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.__dlpack_device__, (self,), self)

    from torch.utils.dlpack import DLDeviceType

    device = self.device
    idx = device.index if device.index is not None else 0
    torch_device_type = device.type
    if torch_device_type == "cuda" and torch.version.hip is not None:
        device_type = DLDeviceType.kDLROCM
    elif torch_device_type == "cpu" and self.is_pinned():
        device_type = DLDeviceType.kDLCPUPinned
    elif torch_device_type == "cuda":
        device_type = DLDeviceType.kDLGPU
    elif torch_device_type == "cpu":
        device_type = DLDeviceType.kDLCPU
    elif torch_device_type == "xpu":
        device_type = DLDeviceType.kDLOneAPI
    elif self.device.type == "privateuse1":
        device_type = DLDeviceType.kDLExtDev
    elif torch_device_type == "xla":
        import torch_xla

        if (
            len(torch_xla.real_devices()) <= 0
            or "cuda" not in torch_xla.real_devices()[0].lower()
        ):
            raise ValueError(f"Unknown device type {torch_device_type} for Dlpack")

        device_type = DLDeviceType.kDLGPU
    else:
        raise ValueError(f"Unknown device type {torch_device_type} for Dlpack")
    return (device_type, idx)
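A quick round-trip sketch of the protocol these two methods implement, on CPU (illustrative; torch.from_dlpack consumes any object exposing __dlpack__/__dlpack_device__):

import torch

src = torch.arange(4.0)
dst = torch.from_dlpack(src)   # goes through __dlpack__ above
dst[0] = 42.0
assert src[0].item() == 42.0   # same storage is shared, no copy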
__module__ = "torch"


def _convert(ret, cls):
    if cls is Tensor:
        return ret

    if isinstance(ret, Tensor) and not isinstance(ret, cls):
        ret = ret.as_subclass(cls)

    if isinstance(ret, (tuple, list)):
        # Also handles things like namedtuples
        ret = type(ret)(_convert(r, cls) for r in ret)

    return ret
lib/python3.10/site-packages/torch/_tensor_docs.py
ADDED
The diff for this file is too large to render.
See raw diff
lib/python3.10/site-packages/torch/_tensor_str.py
ADDED
@@ -0,0 +1,704 @@
# mypy: allow-untyped-defs
import contextlib
import dataclasses
import math
import textwrap
from typing import Any, Dict, Optional

import torch
from torch import inf


@dataclasses.dataclass
class __PrinterOptions:
    precision: int = 4
    threshold: float = 1000
    edgeitems: int = 3
    linewidth: int = 80
    sci_mode: Optional[bool] = None


PRINT_OPTS = __PrinterOptions()


# We could use **kwargs, but this will give better docs
def set_printoptions(
    precision=None,
    threshold=None,
    edgeitems=None,
    linewidth=None,
    profile=None,
    sci_mode=None,
):
    r"""Set options for printing. Items shamelessly taken from NumPy

    Args:
        precision: Number of digits of precision for floating point output
            (default = 4).
        threshold: Total number of array elements which trigger summarization
            rather than full `repr` (default = 1000).
        edgeitems: Number of array items in summary at beginning and end of
            each dimension (default = 3).
        linewidth: The number of characters per line for the purpose of
            inserting line breaks (default = 80). Thresholded matrices will
            ignore this parameter.
        profile: Sane defaults for pretty printing. Can override with any of
            the above options. (any one of `default`, `short`, `full`)
        sci_mode: Enable (True) or disable (False) scientific notation. If
            None (default) is specified, the value is defined by
            `torch._tensor_str._Formatter`. This value is automatically chosen
            by the framework.

    Example::

        >>> # Limit the precision of elements
        >>> torch.set_printoptions(precision=2)
        >>> torch.tensor([1.12345])
        tensor([1.12])
        >>> # Limit the number of elements shown
        >>> torch.set_printoptions(threshold=5)
        >>> torch.arange(10)
        tensor([0, 1, 2, ..., 7, 8, 9])
        >>> # Restore defaults
        >>> torch.set_printoptions(profile='default')
        >>> torch.tensor([1.12345])
        tensor([1.1235])
        >>> torch.arange(10)
        tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    if profile is not None:
        if profile == "default":
            PRINT_OPTS.precision = 4
            PRINT_OPTS.threshold = 1000
            PRINT_OPTS.edgeitems = 3
            PRINT_OPTS.linewidth = 80
        elif profile == "short":
            PRINT_OPTS.precision = 2
            PRINT_OPTS.threshold = 1000
            PRINT_OPTS.edgeitems = 2
            PRINT_OPTS.linewidth = 80
        elif profile == "full":
            PRINT_OPTS.precision = 4
            PRINT_OPTS.threshold = inf
            PRINT_OPTS.edgeitems = 3
            PRINT_OPTS.linewidth = 80

    if precision is not None:
        PRINT_OPTS.precision = precision
    if threshold is not None:
        PRINT_OPTS.threshold = threshold
    if edgeitems is not None:
        PRINT_OPTS.edgeitems = edgeitems
    if linewidth is not None:
        PRINT_OPTS.linewidth = linewidth
    PRINT_OPTS.sci_mode = sci_mode
def get_printoptions() -> Dict[str, Any]:
    r"""Gets the current options for printing, as a dictionary that
    can be passed as ``**kwargs`` to set_printoptions().
    """
    return dataclasses.asdict(PRINT_OPTS)


@contextlib.contextmanager
def printoptions(**kwargs):
    r"""Context manager that temporarily changes the print options. Accepted
    arguments are same as :func:`set_printoptions`."""
    old_kwargs = get_printoptions()
    set_printoptions(**kwargs)
    try:
        yield
    finally:
        set_printoptions(**old_kwargs)
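A small usage sketch for the context manager (importing it from this module, since its top-level export may vary by version):

import torch
from torch._tensor_str import printoptions

# Options are restored on exit even if the body raises.
with printoptions(precision=2, sci_mode=False):
    print(torch.tensor([3.14159]))   # tensor([3.14])
print(torch.tensor([3.14159]))       # back to the default 4 digits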
def tensor_totype(t):
    dtype = (
        torch.float
        if (
            t.is_mps
            or (t.is_xpu and not torch.xpu.get_device_properties(t.device).has_fp64)
        )
        else torch.double
    )
    return t.to(dtype=dtype)


class _Formatter:
    def __init__(self, tensor):
        self.floating_dtype = tensor.dtype.is_floating_point
        self.int_mode = True
        self.sci_mode = False
        self.max_width = 1

        with torch.no_grad():
            tensor_view = tensor.reshape(-1)

        if not self.floating_dtype:
            for value in tensor_view:
                value_str = f"{value}"
                self.max_width = max(self.max_width, len(value_str))

        else:
            nonzero_finite_vals = torch.masked_select(
                tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0)
            )

            if nonzero_finite_vals.numel() == 0:
                # no valid number, do nothing
                return

            # Convert to double for easy calculation. HalfTensor overflows with 1e8, and there's no div() on CPU.
            nonzero_finite_abs = tensor_totype(nonzero_finite_vals.abs())
            nonzero_finite_min = tensor_totype(nonzero_finite_abs.min())
            nonzero_finite_max = tensor_totype(nonzero_finite_abs.max())

            for value in nonzero_finite_vals:
                if value != torch.ceil(value):
                    self.int_mode = False
                    break

            if self.int_mode:
                # in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites
                # to indicate that the tensor is of floating type. add 1 to the len to account for this.
                if (
                    nonzero_finite_max / nonzero_finite_min > 1000.0
                    or nonzero_finite_max > 1.0e8
                ):
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = f"{value:.0f}"
                        self.max_width = max(self.max_width, len(value_str) + 1)
            else:
                # Check if scientific representation should be used.
                if (
                    nonzero_finite_max / nonzero_finite_min > 1000.0
                    or nonzero_finite_max > 1.0e8
                    or nonzero_finite_min < 1.0e-4
                ):
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))

        if PRINT_OPTS.sci_mode is not None:
            self.sci_mode = PRINT_OPTS.sci_mode

    def width(self):
        return self.max_width

    def format(self, value):
        if self.floating_dtype:
            if self.sci_mode:
                ret = f"{{:{self.max_width}.{PRINT_OPTS.precision}e}}".format(value)
            elif self.int_mode:
                ret = f"{value:.0f}"
                if not (math.isinf(value) or math.isnan(value)):
                    ret += "."
            else:
                ret = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
        else:
            ret = f"{value}"
        return (self.max_width - len(ret)) * " " + ret
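A sketch of the dynamic-range heuristic above in action: once max/min of the non-zero finite values exceeds 1000 (or max > 1e8, or min < 1e-4), printing flips to scientific notation for the whole tensor.

import torch

print(torch.tensor([0.5, 100.0]))      # tensor([  0.5000, 100.0000])
print(torch.tensor([0.0001, 100.0]))   # tensor([1.0000e-04, 1.0000e+02])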
def _scalar_str(self, formatter1, formatter2=None):
    if formatter2 is not None:
        real_str = _scalar_str(self.real, formatter1)
        imag_str = (_scalar_str(self.imag, formatter2) + "j").lstrip()
        # handles negative numbers, +0.0, -0.0
        if imag_str[0] == "+" or imag_str[0] == "-":
            return real_str + imag_str
        else:
            return real_str + "+" + imag_str
    else:
        return formatter1.format(self.item())


def _vector_str(self, indent, summarize, formatter1, formatter2=None):
    # length includes spaces and comma between elements
    element_length = formatter1.width() + 2
    if formatter2 is not None:
        # width for imag_formatter + an extra j for complex
        element_length += formatter2.width() + 1

    elements_per_line = max(
        1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length)))
    )

    def _val_formatter(val, formatter1=formatter1, formatter2=formatter2):
        if formatter2 is not None:
            real_str = formatter1.format(val.real)
            imag_str = (formatter2.format(val.imag) + "j").lstrip()
            # handles negative numbers, +0.0, -0.0
            if imag_str[0] == "+" or imag_str[0] == "-":
                return real_str + imag_str
            else:
                return real_str + "+" + imag_str
        else:
            return formatter1.format(val)

    if summarize and not PRINT_OPTS.edgeitems:
        # Deal with edge case that negative zero is zero
        data = ["..."]
    elif summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        data = (
            [_val_formatter(val) for val in self[: PRINT_OPTS.edgeitems].tolist()]
            + [" ..."]
            + [_val_formatter(val) for val in self[-PRINT_OPTS.edgeitems :].tolist()]
        )
    else:
        data = [_val_formatter(val) for val in self.tolist()]

    data_lines = [
        data[i : i + elements_per_line] for i in range(0, len(data), elements_per_line)
    ]
    lines = [", ".join(line) for line in data_lines]
    return "[" + ("," + "\n" + " " * (indent + 1)).join(lines) + "]"
# formatter2 is only used for printing complex tensors.
# For complex tensors, formatter1 and formatter2 are the formatters for tensor.real
# and tensor.imag respectively
def _tensor_str_with_formatter(self, indent, summarize, formatter1, formatter2=None):
    dim = self.dim()

    if dim == 0:
        return _scalar_str(self, formatter1, formatter2)

    if dim == 1:
        return _vector_str(self, indent, summarize, formatter1, formatter2)

    if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        slices = (
            [
                _tensor_str_with_formatter(
                    self[i], indent + 1, summarize, formatter1, formatter2
                )
                for i in range(0, PRINT_OPTS.edgeitems)
            ]
            + ["..."]
            + [
                _tensor_str_with_formatter(
                    self[i], indent + 1, summarize, formatter1, formatter2
                )
                for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))
            ]
        )
    else:
        slices = [
            _tensor_str_with_formatter(
                self[i], indent + 1, summarize, formatter1, formatter2
            )
            for i in range(0, self.size(0))
        ]

    tensor_str = ("," + "\n" * (dim - 1) + " " * (indent + 1)).join(slices)
    return "[" + tensor_str + "]"
def _tensor_str(self, indent):
    if self.numel() == 0:
        return "[]"

    if self.has_names():
        # There are two main codepaths (possibly more) that tensor printing goes through:
        # - tensor data can fit comfortably on screen
        # - tensor data needs to be summarized
        # Some of the codepaths don't fully support named tensors, so we send in
        # an unnamed tensor to the formatting code as a workaround.
        self = self.rename(None)

    summarize = self.numel() > PRINT_OPTS.threshold

    if self._is_zerotensor():
        self = self.clone()

    # handle the negative bit
    if self.is_neg():
        self = self.resolve_neg()

    # TODO: Remove me when `masked_select` is implemented for FP8
    if self.dtype in [
        torch.float8_e5m2,
        torch.float8_e5m2fnuz,
        torch.float8_e4m3fn,
        torch.float8_e4m3fnuz,
    ]:
        self = self.half()

    if self.dtype.is_complex:
        # handle the conjugate bit
        self = self.resolve_conj()
        real_formatter = _Formatter(
            get_summarized_data(self.real) if summarize else self.real
        )
        imag_formatter = _Formatter(
            get_summarized_data(self.imag) if summarize else self.imag
        )
        return _tensor_str_with_formatter(
            self, indent, summarize, real_formatter, imag_formatter
        )
    else:
        formatter = _Formatter(get_summarized_data(self) if summarize else self)
        return _tensor_str_with_formatter(self, indent, summarize, formatter)
def _add_suffixes(tensor_str, suffixes, indent, force_newline):
    tensor_strs = [tensor_str]
    last_line_len = len(tensor_str) - tensor_str.rfind("\n") + 1
    for suffix in suffixes:
        suffix_len = len(suffix)
        if force_newline or last_line_len + suffix_len + 2 > PRINT_OPTS.linewidth:
            tensor_strs.append(",\n" + " " * indent + suffix)
            last_line_len = indent + suffix_len
            force_newline = False
        else:
            tensor_strs.append(", " + suffix)
            last_line_len += suffix_len + 2
    tensor_strs.append(")")
    return "".join(tensor_strs)
def get_summarized_data(self):
    dim = self.dim()
    if dim == 0:
        return self
    if dim == 1:
        if self.size(0) > 2 * PRINT_OPTS.edgeitems:
            return torch.cat(
                (self[: PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems :])
            )
        else:
            return self
    if not PRINT_OPTS.edgeitems:
        return self.new_empty([0] * self.dim())
    elif self.size(0) > 2 * PRINT_OPTS.edgeitems:
        start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
        end = [self[i] for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))]
        return torch.stack([get_summarized_data(x) for x in (start + end)])
    else:
        return torch.stack([get_summarized_data(x) for x in self])
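A quick sketch of the summarization above, called directly (with the default edgeitems=3, only the three leading and trailing entries along each dimension survive):

import torch
from torch._tensor_str import get_summarized_data

t = torch.arange(10)
print(get_summarized_data(t))   # tensor([0, 1, 2, 7, 8, 9])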
def _str_intern(inp, *, tensor_contents=None):
    if torch._C._functorch.is_functorch_wrapped_tensor(inp):
        return _functorch_wrapper_str_intern(inp, tensor_contents=tensor_contents)
    is_plain_tensor = type(inp) is torch.Tensor or type(inp) is torch.nn.Parameter
    if inp.is_nested:
        prefix = "nested_tensor("
    elif is_plain_tensor:
        prefix = "tensor("
    else:
        prefix = f"{type(inp).__name__}("
    indent = len(prefix)
    suffixes = []
    custom_contents_provided = tensor_contents is not None
    if custom_contents_provided:
        tensor_str = tensor_contents

    # This is used to extract the primal value and thus disable the forward AD
    # within this function.
    # TODO(albanD) This needs to be updated when more than one level is supported
    self, tangent = torch.autograd.forward_ad.unpack_dual(inp)

    # Note [Print tensor device]:
    # A general logic here is we only print device when it doesn't match
    # the device specified in default tensor type.
    # Currently torch.set_default_tensor_type() only supports CPU/CUDA, thus
    # torch._C._get_default_device() only returns either cpu or cuda.
    # In other cases, we don't have a way to set them as default yet,
    # and we should always print out device for them.
    if (
        self.device.type != torch._C._get_default_device()
        or (
            self.device.type == "cuda"
            and torch.cuda.current_device() != self.device.index
        )
        or (self.device.type == "mps")
    ):
        suffixes.append("device='" + str(self.device) + "'")

    # Tensor printing performs tensor operations like slice, indexing, etc to make it in a
    # representable format. These operations on ipu/xla/lazy/mtia tensors result in compilations. Hence,
    # to avoid compilations, copy the tensor to cpu before printing.
    if self.device.type in ["xla", "lazy", "ipu", "mtia"]:
        self = self.to("cpu")

    # TODO: add an API to map real -> complex dtypes
    _default_complex_dtype = (
        torch.cdouble if torch.get_default_dtype() == torch.double else torch.cfloat
    )
    has_default_dtype = self.dtype in (
        torch.get_default_dtype(),
        _default_complex_dtype,
        torch.int64,
        torch.bool,
    )
    if self.is_sparse:
        suffixes.append("size=" + str(tuple(self.shape)))
        from torch._subclasses.fake_tensor import FakeTensor

        is_meta = self.is_meta or isinstance(self, FakeTensor)
        if not is_meta:
            suffixes.append("nnz=" + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        if not custom_contents_provided:
            indices_prefix = "indices=tensor("
            indices = self._indices().detach()
            if is_meta:
                indices_str = "..."
            else:
                indices_str = _tensor_str(indices, indent + len(indices_prefix))
            if is_meta or indices.numel() == 0:
                indices_str += ", size=" + str(tuple(indices.shape))
            values_prefix = "values=tensor("
            values = self._values().detach()
            if is_meta:
                values_str = "..."
            else:
                values_str = _tensor_str(values, indent + len(values_prefix))
            if is_meta or values.numel() == 0:
                values_str += ", size=" + str(tuple(values.shape))
            tensor_str = (
                indices_prefix
                + indices_str
                + "),\n"
                + " " * indent
                + values_prefix
                + values_str
                + ")"
            )
    elif self.layout in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }:
        from torch._subclasses.fake_tensor import FakeTensor

        suffixes.append("size=" + str(tuple(self.shape)))
        is_meta = self.is_meta or isinstance(self, FakeTensor)
        if not is_meta:
            suffixes.append("nnz=" + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        if not custom_contents_provided:
            compressed_indices_method, plain_indices_method = {
                torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
                torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
                torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
                torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
            }[self.layout]
            if self.layout in {torch.sparse_csr, torch.sparse_bsr}:
                cdimname, pdimname = "row", "column"
            else:
                cdimname, pdimname = "column", "row"
            compressed_indices_prefix = f"c{cdimname[:3]}_indices=tensor("
            compressed_indices = compressed_indices_method(self).detach()
            if is_meta:
                compressed_indices_str = "..."
            else:
                compressed_indices_str = _tensor_str(
                    compressed_indices, indent + len(compressed_indices_prefix)
                )
            if compressed_indices.numel() == 0 or is_meta:
                compressed_indices_str += ", size=" + str(
                    tuple(compressed_indices.shape)
                )
            plain_indices_prefix = f"{pdimname[:3]}_indices=tensor("
            plain_indices = plain_indices_method(self).detach()
            if is_meta:
                plain_indices_str = "..."
            else:
                plain_indices_str = _tensor_str(
                    plain_indices, indent + len(plain_indices_prefix)
                )
            if plain_indices.numel() == 0 or is_meta:
                plain_indices_str += ", size=" + str(tuple(plain_indices.shape))
            values_prefix = "values=tensor("
            values = self.values().detach()
            if is_meta:
                values_str = "..."
            else:
                values_str = _tensor_str(values, indent + len(values_prefix))
            if values.numel() == 0 or is_meta:
                values_str += ", size=" + str(tuple(values.shape))
            tensor_str = (
                compressed_indices_prefix
                + compressed_indices_str
                + "),\n"
                + " " * indent
                + plain_indices_prefix
                + plain_indices_str
                + "),\n"
                + " " * indent
                + values_prefix
                + values_str
                + ")"
            )
    elif self.is_quantized:
        suffixes.append("size=" + str(tuple(self.shape)))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        suffixes.append("quantization_scheme=" + str(self.qscheme()))
        if (
            self.qscheme() == torch.per_tensor_affine
            or self.qscheme() == torch.per_tensor_symmetric
        ):
            suffixes.append("scale=" + str(self.q_scale()))
            suffixes.append("zero_point=" + str(self.q_zero_point()))
        elif (
            self.qscheme() == torch.per_channel_affine
            or self.qscheme() == torch.per_channel_symmetric
            or self.qscheme() == torch.per_channel_affine_float_qparams
        ):
            suffixes.append("scale=" + str(self.q_per_channel_scales()))
            suffixes.append("zero_point=" + str(self.q_per_channel_zero_points()))
            suffixes.append("axis=" + str(self.q_per_channel_axis()))
        if not custom_contents_provided:
            tensor_str = _tensor_str(self.dequantize(), indent)
    elif self.is_nested:
        if not custom_contents_provided:

            def indented_str(s, indent):
                return "\n".join(f"  {line}" for line in s.split("\n"))

            strs = ",\n".join(
                indented_str(str(t), indent + 1)
                for t in torch.ops.aten.unbind.int(self, 0)
            )
            tensor_str = f"[\n{strs}\n]"
    elif torch._is_functional_tensor(self):
        prefix = "_to_functional_tensor("
        tensor_str = repr(torch._from_functional_tensor(self))
    else:
        # Circular import problem, so we import it here
        from torch._subclasses.fake_tensor import FakeTensor

        if self.is_meta or isinstance(self, FakeTensor):
            suffixes.append("size=" + str(tuple(self.shape)))
            if self.dtype != torch.get_default_dtype():
                suffixes.append("dtype=" + str(self.dtype))
            # TODO: This implies that ellipses is valid syntax for allocating
            # a meta tensor or FakeTensor, which it could be, but it isn't right now
            if not custom_contents_provided:
                tensor_str = "..."
        else:
            if self.numel() == 0 and not self.is_sparse:
                # Explicitly print the shape if it is not (0,), to match NumPy behavior
                if self.dim() != 1:
                    suffixes.append("size=" + str(tuple(self.shape)))

                # In an empty tensor, there are no elements to infer if the dtype
                # should be int64, so it must be shown explicitly.
                if self.dtype != torch.get_default_dtype():
                    suffixes.append("dtype=" + str(self.dtype))
                if not custom_contents_provided:
                    tensor_str = "[]"
            else:
                if not PRINT_OPTS.edgeitems:
                    suffixes.append("size=" + str(tuple(self.shape)))

                if not has_default_dtype:
                    suffixes.append("dtype=" + str(self.dtype))

                if not custom_contents_provided:
                    if self.layout != torch.strided:
                        tensor_str = _tensor_str(self.to_dense(), indent)
                    else:
                        tensor_str = _tensor_str(self, indent)

    if self.layout != torch.strided:
        suffixes.append("layout=" + str(self.layout))

    # Use inp here to get the original grad_fn and not the one generated by the forward grad
    # unpacking.
    grad_fn_name = None
    try:
        grad_fn = inp.grad_fn
    except RuntimeError:
        # Accessing the grad_fn calls rebasing logic which would cause an error
        # if that tensor is a view created in no-grad mode modified in-place in
        # no-grad mode. See: https://github.com/pytorch/pytorch/issues/99968
        grad_fn_name = "Invalid"

    if grad_fn_name is None and grad_fn is not None:  # type: ignore[possibly-undefined]
        grad_fn_name = type(grad_fn).__name__
        if grad_fn_name == "CppFunction":
            grad_fn_name = grad_fn.name().rsplit("::", 1)[-1]

    if grad_fn_name is not None:
        suffixes.append(f"grad_fn=<{grad_fn_name}>")
    elif inp.requires_grad:
        suffixes.append("requires_grad=True")

    if self.has_names():
        suffixes.append(f"names={self.names}")

    if tangent is not None:
        suffixes.append(f"tangent={tangent}")

    string_repr = _add_suffixes(
        prefix + tensor_str,  # type: ignore[possibly-undefined]
        suffixes,
        indent,
        force_newline=self.is_sparse,
    )

    # Check if this instance is flagged as a parameter and change the repr accordingly.
    # Unfortunately, this function has to be aware of this detail.
    # NB: This is currently skipped for plain tensor parameters to maintain BC. In the future,
    # this should be done for those as well to produce a valid repr.
    if isinstance(self, torch.nn.Parameter) and not is_plain_tensor:
        string_repr = f"Parameter({string_repr})"

    return string_repr
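A short sketch of the suffix machinery above: flags are appended only when they carry information beyond the defaults.

import torch

t = torch.zeros(2, requires_grad=True)
print(repr(t))          # tensor([0., 0.], requires_grad=True)
print(repr(t * 2))      # tensor([0., 0.], grad_fn=<MulBackward0>)
print(repr(torch.zeros(2, dtype=torch.int32)))  # tensor([0, 0], dtype=torch.int32)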
def _functorch_wrapper_str_intern(tensor, *, tensor_contents=None):
    level = torch._C._functorch.maybe_get_level(tensor)
    assert level != -1

    if torch._C._functorch.is_functionaltensor(tensor):
        # Since we're unwrapping the FunctionalTensorWrapper, we need to make sure
        # that it's up to date first
        torch._sync(tensor)

    value = torch._C._functorch.get_unwrapped(tensor)
    value_repr = repr(value)

    indented_value_repr = textwrap.indent(value_repr, " " * 4)
    if torch._C._functorch.is_batchedtensor(tensor):
        bdim = torch._C._functorch.maybe_get_bdim(tensor)
        assert bdim != -1
        return (
            f"BatchedTensor(lvl={level}, bdim={bdim}, value=\n"
            f"{indented_value_repr}\n"
            f")"
        )
    if torch._C._functorch.is_gradtrackingtensor(tensor):
        return (
            f"GradTrackingTensor(lvl={level}, value=\n" f"{indented_value_repr}\n" f")"
        )
    if torch._C._functorch.is_functionaltensor(tensor):
        return f"FunctionalTensor(lvl={level}, value=\\\n{value_repr})"

    raise ValueError("We don't know how to print this, please file us an issue")


def _str(self, *, tensor_contents=None):
    with torch.no_grad(), torch.utils._python_dispatch._disable_current_modes():
        guard = torch._C._DisableFuncTorch()
        return _str_intern(self, tensor_contents=tensor_contents)
lib/python3.10/site-packages/torch/_torch_docs.py
ADDED
The diff for this file is too large to render.
See raw diff
lib/python3.10/site-packages/torch/_utils.py
ADDED
@@ -0,0 +1,1056 @@
# mypy: allow-untyped-defs
import copyreg
import functools
import logging
import sys
import traceback
import warnings
from collections import defaultdict
from typing import Any, Callable, DefaultDict, Generic, List, Optional, TYPE_CHECKING
from typing_extensions import deprecated, ParamSpec

import torch


def _type(self, dtype=None, non_blocking=False, **kwargs):
    """Returns the type if `dtype` is not provided, else casts this object to
    the specified type.

    If this is already of the correct type, no copy is performed and the
    original object is returned.

    Args:
        dtype (type or string): The desired type
        non_blocking (bool): If ``True``, and the source is in pinned memory
            and destination is on the GPU or vice versa, the copy is performed
            asynchronously with respect to the host. Otherwise, the argument
            has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument. The ``async`` arg is deprecated.
    """
    non_blocking = _get_async_or_non_blocking("type", non_blocking, kwargs)
    if dtype is None:
        return self.__module__ + "." + self.__class__.__name__

    if isinstance(dtype, str):
        dtype = _import_dotted_name(dtype)
    if dtype == type(self):
        return self
    if self.is_sparse:
        if not dtype.is_sparse:
            raise RuntimeError("Cannot cast sparse tensor to dense tensor")
        new_module_name = dtype.__module__.replace(".sparse", "")
        new_values_type_name = new_module_name + "." + dtype.__name__
        new_values = torch.Tensor._values(self).type(new_values_type_name, non_blocking)
        new_indices_type_name = new_module_name + ".LongTensor"
        new_indices = torch.Tensor._indices(self).type(
            new_indices_type_name, non_blocking
        )
        return dtype(new_indices, new_values, self.size())
    if dtype.is_sparse:
        raise RuntimeError("Cannot cast dense tensor to sparse tensor")
    return dtype(self.size()).copy_(self, non_blocking)
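A minimal sketch of the two calling modes this helper backs through Tensor.type() (illustrative; uses only the legacy public type-name API):

import torch

t = torch.zeros(2)
print(t.type())                       # query form: 'torch.FloatTensor'
d = t.type('torch.DoubleTensor')      # cast form: copies to float64
assert d.dtype == torch.float64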
def _to(self, device, non_blocking=False):
    """Returns a copy of this object in device memory.

    If this object is already on the correct device, then no copy is performed
    and the original object is returned.

    Args:
        device (int): The destination device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
    """
    if self.device == device:
        return self

    if device.type == "cpu":
        pin_memory = non_blocking and self.device.type in (
            "cuda",
            torch._C._get_privateuse1_backend_name(),
        )
        untyped_storage = torch.empty(
            self.nbytes(), dtype=torch.uint8, device=device, pin_memory=pin_memory
        ).untyped_storage()
        untyped_storage.copy_(self, non_blocking)
        return untyped_storage

    device_module = getattr(torch, device.type, None)
    assert (
        device_module is not None
    ), f"{device.type.upper()} device module is not loaded"
    with device_module.device(device):
        if self.is_sparse and hasattr(device_module, "sparse"):
            new_type = getattr(device_module.sparse, self.__class__.__name__)
            indices = getattr(torch.Tensor._indices(self), device.type)(
                device, non_blocking
            )
            values = getattr(torch.Tensor._values(self), device.type)(
                device, non_blocking
            )
            return new_type(indices, values, self.size())
        else:
            assert (
                not self.is_sparse
            ), f"sparse storage is not supported for {device.type.upper()} tensors"
            untyped_storage = torch.UntypedStorage(self.size(), device=device)
            untyped_storage.copy_(self, non_blocking)
            return untyped_storage
def _get_async_or_non_blocking(function_name, non_blocking, kwargs):
    """Return the non-blocking flag given the function name and kwargs.

    Args:
        function_name (str): the name of the function being used.
        non_blocking (bool): the default value.
        **kwargs (dict): the kwargs passed to the function.
    """
    if not kwargs:
        return non_blocking
    if len(kwargs) != 1 or "async" not in kwargs:
        message = "{}() got an unexpected keyword argument '{}'"
        argument = list(kwargs.keys()).pop()
        raise TypeError(message.format(function_name, argument))
    warnings.warn("'async' is deprecated; use 'non_blocking'")
    return kwargs["async"]
def _get_restore_location(device):
    """Return the map_location location.

    Used for rebuild functions where the tensor device is distinct from the storage
    """

    map_location = torch.serialization._serialization_tls.map_location
    if map_location is None:
        return device
    else:
        if isinstance(map_location, dict):
            return map_location.get(device, device)
        elif isinstance(map_location, (str, torch.device)):
            return map_location
        else:
            assert callable(map_location)
            raise RuntimeError(
                "Callable map_location not supported with _rebuild_wrapper_subclass "
                "or _rebuild_device_tensor_from_numpy"
            )
# Note [Don't serialize hooks]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Since time immemorial, we have serialized the backward hooks associated with
# variables. This kind of half-worked--Python can pickle global functions
# (but not closures!)--but there were problems.
#
# - It's fragile. If you serialize a backward hook into a saved
#   model, and then you rename the function associated with the hook,
#   now your saved model is broken and you can't load it anymore.
#
# - It's not actually used. The standard recommendation is to
#   serialize the *state_dict* of a model, not the model itself
#   (since this is more stable to code changes affecting the model
#   serialization), and the state dict saves "data" only, thus
#   stripping the backward hooks. In some cases, hooks are
#   essential to the well-functioning of a model (e.g., DDP),
#   but DDP already manages re-adding the hooks!
#
# - We didn't serialize them in many cases. Prior to #10220, we
#   were dropping backward hooks in ForkingPickler. We "fixed" this
#   to be convenient with other serialization sites, but lack of
#   serializing backward hooks wasn't actually the root cause of
#   the bug.
#
# With these cases in mind, we have decided that a better strategy
# is to just NOT serialize hooks at all.
#
# Since this is a BC-breaking change, we should warn when we previously
# serialized a hook, but no longer do so. This will be done by adding a special
# sentinel property to hooks that will be used to suppress this warning. If a hook
# has the property _torch_serialize_ignore, we will not emit a warning if we
# attempt to serialize a Tensor with this hook attached to it.
#
# By the way, when _backward_hooks is skipped, we must give an EMPTY
# OrderedDict(); if you pass a None you'll run afoul of #12219.


# TODO: Once we decide to break serialization FC, `storage` no longer needs to
# be a TypedStorage
def _rebuild_tensor(storage, storage_offset, size, stride):
    # first construct a tensor with the correct dtype/device
    t = torch.empty((0,), dtype=storage.dtype, device=storage._untyped_storage.device)
    return t.set_(storage._untyped_storage, storage_offset, size, stride)
def get_tensor_metadata(tensor):
    # Tensor's Metadata for serializing.
    # Currently, this only returns a dict[string, bool] specifying whether
    # `conj` or `neg` bit is set.
    assert isinstance(tensor, torch.Tensor)
    return torch._C._get_tensor_metadata(tensor)  # type: ignore[attr-defined]


def set_tensor_metadata(tensor, metadata):
    # See `get_tensor_metadata` above
    assert isinstance(metadata, dict)
    assert isinstance(tensor, torch.Tensor)
    torch._C._set_tensor_metadata(tensor, metadata)  # type: ignore[attr-defined]


def _rebuild_tensor_v2(
    storage,
    storage_offset,
    size,
    stride,
    requires_grad,
    backward_hooks,
    metadata=None,
):
    tensor = _rebuild_tensor(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    if metadata:
        set_tensor_metadata(tensor, metadata)

    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks
    return tensor


def _rebuild_tensor_v3(
    storage,
    storage_offset,
    size,
    stride,
    requires_grad,
    backward_hooks,
    dtype,
    metadata=None,
):
    t = torch.empty(
        (0,),
        dtype=dtype,
        device=storage._untyped_storage.device,
        requires_grad=requires_grad,
    )
    t.set_(storage._untyped_storage, storage_offset, size, stride)
    if metadata:
        set_tensor_metadata(t, metadata)
    t._backward_hooks = backward_hooks
    return t
247 |
+
|
248 |
+
_sparse_tensors_to_validate: List["torch.Tensor"] = []
|
249 |
+
|
250 |
+
|
251 |
+
# In _legacy_load() in serialization.py we unpickle storages after the sparse
|
252 |
+
# tensors have been already unpickled. Those storages contain data necessary for
|
253 |
+
# validating sparse tensors: indices and values. That's why sparse tensors are
|
254 |
+
# first unpickled without any validation, and then this function is called just
|
255 |
+
# before _legacy_load() returns, so that all the sparse tensors can be validated
|
256 |
+
# in bulk.
|
257 |
+
#
|
258 |
+
# The same procedure must be followed by _load() in serialization.py because due
|
259 |
+
# to Pickler semantics, we have to use the same (non-validating) function for
|
260 |
+
# unpickling sparse tensors, regardless of the caller.
|
261 |
+
def _validate_loaded_sparse_tensors():
|
262 |
+
try:
|
263 |
+
for t in _sparse_tensors_to_validate:
|
264 |
+
if t.layout is torch.sparse_coo:
|
265 |
+
torch._validate_sparse_coo_tensor_args(
|
266 |
+
t._indices(), t._values(), t.size(), t.is_coalesced()
|
267 |
+
)
|
268 |
+
elif t.layout in {
|
269 |
+
torch.sparse_csr,
|
270 |
+
torch.sparse_csc,
|
271 |
+
torch.sparse_bsr,
|
272 |
+
torch.sparse_bsc,
|
273 |
+
}:
|
274 |
+
# TODO: Validation currently involves an expensive traversal
|
275 |
+
# on CPU, which may include a device transfer.
|
276 |
+
if t.layout in {torch.sparse_csr, torch.sparse_bsr}:
|
277 |
+
compressed_indices, plain_indices = (
|
278 |
+
t.crow_indices(),
|
279 |
+
t.col_indices(),
|
280 |
+
)
|
281 |
+
else:
|
282 |
+
compressed_indices, plain_indices = (
|
283 |
+
t.ccol_indices(),
|
284 |
+
t.row_indices(),
|
285 |
+
)
|
286 |
+
torch._validate_sparse_compressed_tensor_args(
|
287 |
+
compressed_indices, plain_indices, t.values(), t.size(), t.layout
|
288 |
+
)
|
289 |
+
else:
|
290 |
+
raise NotImplementedError(
|
291 |
+
f"_validate_loaded_sparse_tensors for layout `{t.layout}`"
|
292 |
+
)
|
293 |
+
|
294 |
+
finally:
|
295 |
+
_sparse_tensors_to_validate.clear()
|
296 |
+
|
297 |
+
|
298 |
+
def _rebuild_sparse_tensor(layout, data):
|
299 |
+
"""
|
300 |
+
Rebuilds a sparse tensor from its sparse storage representation.
|
301 |
+
|
302 |
+
Args:
|
303 |
+
layout (str): The sparse storage layout of the tensor.
|
304 |
+
data (tuple): The tensor's sparse storage representation.
|
305 |
+
"""
|
306 |
+
if layout == torch.sparse_coo:
|
307 |
+
if len(data) == 3:
|
308 |
+
# For BC:
|
309 |
+
indices, values, size = data
|
310 |
+
is_coalesced = None
|
311 |
+
else:
|
312 |
+
indices, values, size, is_coalesced = data
|
313 |
+
result = torch.sparse_coo_tensor(
|
314 |
+
indices, values, size, check_invariants=False, is_coalesced=is_coalesced
|
315 |
+
)
|
316 |
+
_sparse_tensors_to_validate.append(result)
|
317 |
+
return result
|
318 |
+
|
319 |
+
elif layout in {
|
320 |
+
torch.sparse_csr,
|
321 |
+
torch.sparse_csc,
|
322 |
+
torch.sparse_bsr,
|
323 |
+
torch.sparse_bsc,
|
324 |
+
}:
|
325 |
+
compressed_indices, plain_indices, values, size = data
|
326 |
+
result = torch.sparse_compressed_tensor(
|
327 |
+
compressed_indices,
|
328 |
+
plain_indices,
|
329 |
+
values,
|
330 |
+
size,
|
331 |
+
layout=layout,
|
332 |
+
check_invariants=False,
|
333 |
+
)
|
334 |
+
_sparse_tensors_to_validate.append(result)
|
335 |
+
return result
|
336 |
+
|
337 |
+
raise NotImplementedError(f"rebuilding sparse tensor for layout {layout}")


def _rebuild_nested_tensor(buffer, sizes, strides, storage_offsets):
    return torch._nested_view_from_buffer(buffer, sizes, strides, storage_offsets)


def _rebuild_device_tensor_from_cpu_tensor(data, dtype, device, requires_grad):
    device = _get_restore_location(device)
    tensor = data.to(dtype=dtype, device=device)
    tensor.requires_grad = requires_grad
    return tensor


def _rebuild_device_tensor_from_numpy(data, dtype, device, requires_grad):
    device = _get_restore_location(device)
    tensor = torch.from_numpy(data).to(dtype=dtype, device=device)
    tensor.requires_grad = requires_grad
    return tensor


# Should not be used, only here to be able to load Tensors serialized with older versions of pytorch
_rebuild_xla_tensor = _rebuild_device_tensor_from_numpy


def _rebuild_meta_tensor_no_storage(dtype, size, stride, requires_grad):
    return torch.empty_strided(
        size, stride, dtype=dtype, device="meta", requires_grad=requires_grad
    )


def _rebuild_wrapper_subclass(
    cls,
    dtype,
    size,
    stride,
    storage_offset,
    layout,
    device,
    requires_grad,
):
    device = _get_restore_location(device)
    return torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
        cls,
        size,
        strides=stride,
        dtype=dtype,
        storage_offset=storage_offset,
        layout=layout,
        device=device,
        requires_grad=requires_grad,
    )


# TODO: Once we decide to break serialization FC, `storage` no longer needs to
# be a TypedStorage
def _rebuild_qtensor(
    storage,
    storage_offset,
    size,
    stride,
    quantizer_params,
    requires_grad,
    backward_hooks,
):
    qscheme = quantizer_params[0]
    if qscheme == torch.per_tensor_affine:
        _, scale, zero_point = quantizer_params
        tensor = torch._empty_affine_quantized(
            size,
            scale=scale,
            zero_point=zero_point,
            dtype=storage.dtype,
            device=storage.device,
        )
    elif qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
        _, scales, zero_points, axis = quantizer_params
        if type(scales) is list and type(zero_points) is list:
            if qscheme == torch.per_channel_affine:
                scales = torch.tensor(scales, dtype=torch.double, device=storage.device)
                zero_points = torch.tensor(
                    zero_points, dtype=torch.long, device=storage.device
                )
            else:
                scales = torch.tensor(scales, dtype=torch.float, device=storage.device)
                zero_points = torch.tensor(
                    zero_points, dtype=torch.float, device=storage.device
                )
        tensor = torch._empty_per_channel_affine_quantized(
            size,
            scales=scales,
            zero_points=zero_points,
            axis=axis,
            dtype=storage.dtype,
            device=storage.device,
        )
    else:
        raise RuntimeError(f"Can't deserialize quantized tensor with qscheme {qscheme}")
    tensor.set_(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks
    return tensor


def _rebuild_parameter(data, requires_grad, backward_hooks):
    param = torch.nn.Parameter(data, requires_grad)
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    param._backward_hooks = backward_hooks

    return param


def _rebuild_parameter_with_state(data, requires_grad, backward_hooks, state):
    param = torch.nn.Parameter(data, requires_grad)
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    param._backward_hooks = backward_hooks

    # Restore state on Parameter like python attr.
    param = _set_obj_state(param, state)
    return param


def _get_obj_state(obj):
    # Get the state of the python subclass
    # This loosely mimics the function on the object class but since Tensors do not
    # inherit from it, we cannot call that function directly
    # https://github.com/python/cpython/blob/c83919bd635f4433f1c6ae8504996a9fe3c215e5/Objects/typeobject.c#L4891
    # Note that starting with Python 3.11, this `__getstate__` is always defined and thus
    # the else branch will never be taken.
    getstate_fn = getattr(obj, "__getstate__", None)
    if getstate_fn:
        state = getstate_fn()
    else:
        slots_to_save = copyreg._slotnames(obj.__class__)  # type: ignore[attr-defined]
        if slots_to_save:
            state = (
                obj.__dict__,
                {
                    name: getattr(obj, name)
                    for name in slots_to_save
                    if hasattr(obj, name)
                },
            )
        else:
            state = obj.__dict__

    return state


def _set_obj_state(obj, state):
    if isinstance(state, tuple):
        if not len(state) == 2:
            raise RuntimeError(f"Invalid serialized state: {state}")
        dict_state = state[0]
        slots_state = state[1]
    else:
        dict_state = state
        slots_state = None

    # Starting with Python 3.11, the __dict__ attribute is lazily created
    # and is serialized as None when not needed.
    if dict_state:
        for k, v in dict_state.items():
            setattr(obj, k, v)

    if slots_state:
        for k, v in slots_state.items():
            setattr(obj, k, v)
    return obj
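
# A minimal sketch of the get/set state round trip, assuming a plain object
# with only __dict__ state; `_Box` is a hypothetical class for illustration.
def _example_obj_state_roundtrip():
    class _Box:
        pass

    src, dst = _Box(), _Box()
    src.payload = 123
    _set_obj_state(dst, _get_obj_state(src))
    assert dst.payload == 123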


def _import_dotted_name(name):
    components = name.split(".")
    obj = __import__(components[0])
    for component in components[1:]:
        obj = getattr(obj, component)
    return obj


def _flatten_dense_tensors(tensors):
    """Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
    same dense type.

    Since inputs are dense, the resulting tensor will be a concatenated 1D
    buffer. Element-wise operation on this buffer will be equivalent to
    operating individually.

    Args:
        tensors (Iterable[Tensor]): dense tensors to flatten.

    Returns:
        A contiguous 1D buffer containing input tensors.
    """
    return torch._C._nn.flatten_dense_tensors(tensors)


def _flatten_sparse_tensors(tensors):
    """Flatten sparse tensors into two contiguous 1D buffers, one of indices and
    one of values. Assume tensors are of same sparse type.

    Args:
        tensors (Iterable[Tensor]): sparse tensors to flatten.

    Returns:
        A tuple of two contiguous 1D buffers, one containing input tensors'
        indices and the other containing the values.
    """
    flat_indices = torch._C._nn.flatten_dense_tensors(
        [torch.Tensor._indices(t) for t in tensors]
    )
    flat_values = torch._C._nn.flatten_dense_tensors(
        [torch.Tensor._values(t) for t in tensors]
    )
    return flat_indices, flat_values


def _unflatten_dense_tensors(flat, tensors):
    """View a flat buffer using the sizes of tensors. Assume that tensors are of
    same dense type, and that flat is given by _flatten_dense_tensors.

    Args:
        flat (Tensor): flattened dense tensors to unflatten.
        tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
            unflatten flat.

    Returns:
        Unflattened dense tensors with sizes same as tensors and values from
        flat.
    """
    return torch._C._nn.unflatten_dense_tensors(flat, tensors)
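
# A minimal round-trip sketch for the two helpers above, using made-up
# same-dtype tensors.
def _example_flatten_unflatten_roundtrip():
    a, b = torch.ones(2, 2), torch.arange(3.0)
    flat = _flatten_dense_tensors([a, b])  # one 1D buffer of 4 + 3 elements
    assert flat.shape == (7,)
    a2, b2 = _unflatten_dense_tensors(flat, [a, b])
    assert torch.equal(a2, a) and torch.equal(b2, b)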


def _unflatten_sparse_tensors(flat, tensors):
    """View flat buffer (containing indices and values) using the sizes of
    tensors. Assume that tensors are of same sparse type, and that flat is given
    by _flatten_sparse_tensors.

    Args:
        flat (tuple(Tensor, Tensor)): flattened indices and values of sparse
            tensors to unflatten.
        tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to
            unflatten flat.

    Returns:
        Unflattened sparse tensors with sizes same as tensors and values from
        flat.
    """
    flat_indices, flat_values = flat
    indices = torch._C._nn.unflatten_dense_tensors(
        flat_indices, [torch.Tensor._indices(t) for t in tensors]
    )
    values = torch._C._nn.unflatten_dense_tensors(
        flat_values, [torch.Tensor._values(t) for t in tensors]
    )
    outputs = []
    for t, i, v in zip(tensors, indices, values):
        outputs.append(t.new(i, v, t.size()))
    return tuple(outputs)


def _reorder_tensors_as(tensors, ordered_tensors):
    """Assume that tensors are of same order as ordered_tensors within their
    types, e.g., from _take_tensors. Reorder them to be of same order as
    ordered_tensors.

    Args:
        tensors (Iterable[Tensor]): tensors to be reordered. They should be of
            the same order as ordered_tensors within their own types.
        ordered_tensors (Iterable[Tensor]): tensors whose order will be the
            reference.

    Returns:
        Ordered tuple of tensors with contents from tensors and order of
        ordered_tensors.
    """
    type_dict = defaultdict(list)
    for tensor in tensors:
        type_dict[tensor.type()].append(tensor)
    type_dict_ = {t: iter(coll) for t, coll in type_dict.items()}
    return tuple(next(type_dict_[tensor.type()]) for tensor in ordered_tensors)


def _take_tensors(tensors, size_limit):
    """Group tensors into chunks. This generator yields a chunk at each time,
    each containing tensors of same type up to certain byte limit in total size.

    Args:
        tensors (Sequence): A sequence of tensors to be separated into chunks.
        size_limit (int): The limit of each chunk in bytes.

    Yields:
        Blocks of tensors of same type and within size_limit. The yielded
        tensors are only ordered as the original sequence within its types.
    """
    buf_dict: DefaultDict[str, List] = defaultdict(lambda: [[], 0])
    for tensor in tensors:
        t = tensor.type()
        if tensor.is_sparse:
            indices = torch.Tensor._indices(tensor)
            values = torch.Tensor._values(tensor)
            size = (
                indices.numel() * indices.element_size()
                + values.numel() * values.element_size()
            )
        else:
            size = tensor.numel() * tensor.element_size()
        buf_and_size = buf_dict[t]
        if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:
            yield buf_and_size[0]
            buf_and_size = buf_dict[t] = [[], 0]
        buf_and_size[0].append(tensor)
        buf_and_size[1] += size
    for buf, _ in buf_dict.values():
        if len(buf) > 0:
            yield buf
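
# A minimal sketch: with a 16-byte limit, three 8-byte float32 tensors are
# grouped as a chunk of two followed by a chunk of one.
def _example_take_tensors():
    tensors = [torch.ones(2), torch.ones(2), torch.ones(2)]  # 8 bytes each
    chunks = list(_take_tensors(tensors, size_limit=16))
    assert [len(chunk) for chunk in chunks] == [2, 1]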


# annotation decorator to get annotations in a way that is compatible
# with both Python 2 and 3
def annotate(ret, **kwargs):
    def dec(fun):
        fun.__annotations__ = dict(kwargs)
        fun.__annotations__["return"] = ret
        return fun

    return dec


def render_call(fn, args, kwargs):
    str_fn = torch.overrides.resolve_name(fn)
    if str_fn is None:
        str_fn = str(fn)

    str_args: List[str] = []
    with torch._tensor_str.printoptions(threshold=0, edgeitems=0):
        str_args.extend(repr(a) for a in args)
        str_args.extend(f"{k}={repr(v)}" for k, v in kwargs.items())
        r = f"{str_fn}({', '.join(str_args)})"
    return r


# NOTE [ Python Traceback Reference Cycle Problem ]
#
# When using sys.exc_info(), it is important to **not** store the exc_info[2],
# which is the traceback, because otherwise you will run into the traceback
# reference cycle problem, i.e., the traceback holding a reference to the frame,
# and the frame (which holds references to all the objects in its temporary
# scope) holding a reference to the traceback.


class KeyErrorMessage(str):
    r"""str subclass that returns itself in repr"""

    def __repr__(self):
        return self


class ExceptionWrapper:
    r"""Wraps an exception plus traceback to communicate across threads"""

    def __init__(self, exc_info=None, where="in background"):
        # It is important that we don't store exc_info, see
        # NOTE [ Python Traceback Reference Cycle Problem ]
        if exc_info is None:
            exc_info = sys.exc_info()
        self.exc_type = exc_info[0]
        self.exc_msg = "".join(traceback.format_exception(*exc_info))
        self.where = where

    def reraise(self):
        r"""Reraises the wrapped exception in the current thread"""
        # Format a message such as: "Caught ValueError in DataLoader worker
        # process 2. Original Traceback:", followed by the traceback.
        msg = f"Caught {self.exc_type.__name__} {self.where}.\nOriginal {self.exc_msg}"
        if self.exc_type == KeyError:
            # KeyError calls repr() on its argument (usually a dict key). This
            # makes stack traces unreadable. It will not be changed in Python
            # (https://bugs.python.org/issue2651), so we work around it.
            msg = KeyErrorMessage(msg)
        elif getattr(self.exc_type, "message", None):
            # Some exceptions have a non-str first argument but explicitly
            # have a message field
            raise self.exc_type(message=msg)
        try:
            exception = self.exc_type(msg)
        except TypeError:
            # If the exception takes multiple arguments, don't try to
            # instantiate since we don't know how to
            raise RuntimeError(msg) from None
        raise exception
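
# A minimal sketch of the intended usage: capture an exception in a worker
# thread and re-raise it, with the original traceback text, in the caller.
def _example_exception_wrapper():
    import threading

    holder = {}

    def worker():
        try:
            raise ValueError("boom")
        except Exception:
            holder["exc"] = ExceptionWrapper(where="in example worker thread")

    thread = threading.Thread(target=worker)
    thread.start()
    thread.join()
    try:
        holder["exc"].reraise()
    except ValueError as e:
        assert "boom" in str(e)  # message embeds the worker's traceback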


def _get_available_device_type():
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available():
        return "mps"
    if hasattr(torch, "xpu") and torch.xpu.is_available():  # type: ignore[attr-defined]
        return "xpu"
    if hasattr(torch, "mtia") and torch.mtia.is_available():
        return "mtia"
    custom_backend_name = torch._C._get_privateuse1_backend_name()
    custom_device_mod = getattr(torch, custom_backend_name, None)
    if custom_device_mod and custom_device_mod.is_available():
        return custom_backend_name
    # add more available device types here
    return None


def _get_device_attr(get_member):
    device_type = _get_available_device_type()
    if device_type and device_type.lower() == "cuda":
        return get_member(torch.cuda)
    if device_type and device_type.lower() == "mps":
        return get_member(torch.mps)
    if device_type and device_type.lower() == "xpu":
        return get_member(torch.xpu)  # type: ignore[attr-defined]
    if device_type and device_type.lower() == "mtia":
        return get_member(torch.mtia)
    if device_type == torch._C._get_privateuse1_backend_name():
        return get_member(getattr(torch, device_type))
    # add more available device types here
    return None


def _get_current_device_index():
    # current device index
    return _get_device_attr(lambda m: m.current_device())


def _get_all_device_indices():
    # all device indices
    return _get_device_attr(lambda m: list(range(m.device_count())))


def _get_devices_properties(device_ids):
    # all device properties
    return [_get_device_attr(lambda m: m.get_device_properties(i)) for i in device_ids]


def get_current_device_index() -> int:
    r"""Checks if there are CUDA devices available and
    returns the device index of the current default CUDA device.
    Returns -1 in case there are no CUDA devices available.
    Arguments: ``None``
    """
    if torch.cuda.device_count() > 0:
        return torch.cuda.current_device()
    return -1


def _get_device_index(
    device: Any,
    optional: bool = False,
    allow_cpu: bool = False,
) -> int:
    r"""Gets the device index from :attr:`device`, which can be a torch.device
    object, a Python integer, or ``None``.

    If :attr:`device` is a torch.device object, returns the device index if it
    has an index. Note that for a device without a specified index,
    i.e., ``torch.device('xxx')``, this will return the current default
    device of that type if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
    CPU devices will be accepted and ``-1`` will be returned in this case.

    If :attr:`device` is a Python integer, it is returned as is.

    If :attr:`device` is ``None``, this will return the current default
    device of the supported runtime platform if :attr:`optional` is ``True``,
    i.e., the current default CUDA device will be returned if the CUDA runtime is supported.
    """
    if isinstance(device, str):
        device = torch.device(device)
    device_idx: Optional[int] = None
    if isinstance(device, torch.device):
        if not allow_cpu and device.type == "cpu":
            raise ValueError(f"Expected a non cpu device, but got: {device}")
        device_idx = -1 if device.type == "cpu" else device.index
    if isinstance(device, int):
        device_idx = device
    if device_idx is None:
        if optional:
            # The eager API _get_current_device_index uses `lambda` functions which are
            # not supported in JIT and hence not scriptable. The JIT equivalent API to get
            # the current device index is `get_current_device_index()` which can
            # be scripted. We use is_scripting to check the mode we are in and call the
            # appropriate API.
            if torch.jit.is_scripting():
                device_idx = get_current_device_index()
            else:
                device_idx = _get_current_device_index()
        else:
            raise ValueError(
                f"Expected a torch.device with a specified index or an integer, but got: {device}"
            )
    return device_idx
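
# A minimal sketch of the lookup rules documented above; none of these calls
# require an actual accelerator to be present.
def _example_get_device_index():
    assert _get_device_index(3) == 3  # plain ints pass through
    assert _get_device_index(torch.device("cuda", 1)) == 1  # explicit index
    assert _get_device_index(torch.device("cpu"), allow_cpu=True) == -1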


def _handle_complex(tensor):
    """
    Returns a real view of a tensor if it has a complex dtype, else just the tensor.
    We need to check for UninitializedParameter because otherwise checking
    is_complex is an error for a LazyModule.
    """
    return (
        torch.view_as_real(tensor)
        if not isinstance(tensor, torch.nn.UninitializedParameter)
        and tensor.is_complex()
        else tensor
    )


def _element_size(dtype):
    """
    Returns the element size for a dtype, in bytes
    """
    if not isinstance(dtype, torch.dtype):
        raise RuntimeError(f"expected torch.dtype, but got {type(dtype)}")

    if dtype.is_complex:
        return torch.finfo(dtype).bits >> 2
    elif dtype.is_floating_point:
        return torch.finfo(dtype).bits >> 3
    elif dtype == torch.bool:
        # NOTE: torch.bool is not supported in torch.iinfo()
        return 1
    else:
        return torch.iinfo(dtype).bits >> 3
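
# A minimal sanity-check sketch: the computed sizes agree with what
# Tensor.element_size() reports for the same dtypes.
def _example_element_size():
    assert _element_size(torch.float32) == 4
    assert _element_size(torch.complex64) == 8  # finfo reports component bits
    assert _element_size(torch.bool) == 1
    assert _element_size(torch.int64) == torch.tensor([], dtype=torch.int64).element_size()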


class _ClassPropertyDescriptor:
    def __init__(self, fget, fset=None):
        self.fget = fget

    def __get__(self, instance, owner=None):
        if owner is None:
            owner = type(instance)
        return self.fget.__get__(instance, owner)()


def classproperty(func):
    if not isinstance(func, (classmethod, staticmethod)):
        func = classmethod(func)
    return _ClassPropertyDescriptor(func)
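
# A minimal sketch of classproperty in use; `_ExampleConfig` is a hypothetical
# class. The property is computed on the class itself, no instance required:
# _ExampleConfig.qualified_name == "demo.example".
class _ExampleConfig:
    _name = "example"

    @classproperty
    def qualified_name(cls):
        return f"demo.{cls._name}"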


if TYPE_CHECKING:
    # TorchScript does not support `@deprecated`
    # This is a workaround to avoid breaking TorchScript
    @deprecated(
        "`torch._utils.is_compiling` is deprecated. Use `torch.compiler.is_compiling` instead.",
        category=FutureWarning,
    )
    def is_compiling() -> bool:
        return torch.compiler.is_compiling()

else:

    def is_compiling() -> bool:
        """
        Indicates whether we are tracing/compiling with torch.compile() or torch.export().
        """
        warnings.warn(  # use `warnings.warn` instead of `@deprecated`
            "`torch._utils.is_compiling` is deprecated. Use `torch.compiler.is_compiling` instead.",
            # FutureWarning, # TorchScript does not support Warning type
            stacklevel=2,
        )
        return torch.compiler.is_compiling()


def _functionalize_sync(t):
    # This code lives in python instead of C++ since conditioning on a certain python subclass
    # is much more of a pain in C++.
    from torch._subclasses.functional_tensor import FunctionalTensor

    if isinstance(t, FunctionalTensor):
        # If a FunctionalTensorMode is active while syncing, we don't want it to intercept any ops that get called
        # when we sync our inner tensor.
        # Why?
        # (1) If there are input mutations in the graph, then they will be re-applied during
        #     AOTAutograd when we call _sync() from inside of our functionalization kernels.
        # (2) _sync() causes us to regenerate the updated tensor from the updated base,
        #     which dispatches to a bunch of view ops
        # (3) The input to these view ops is our inner FunctionalTensorWrapper
        #     (since the sync was called from C++), not the python FunctionalTensor
        # (4) if a python FunctionalTensorMode is active, it will complain when it intercepts
        #     the view op, since it will see an input that is a C++ FunctionalTensorWrapper
        #     (aka a normal torch.Tensor) instead of a python `FunctionalTensor`.
        maybe_functional_mode = torch._C._unset_dispatch_mode(
            torch._C._TorchDispatchModeKey.FUNCTIONAL
        )
        try:
            torch._functionalize_sync(t.elem)  # type: ignore[attr-defined]
        finally:
            if maybe_functional_mode is not None:
                torch._C._set_dispatch_mode(maybe_functional_mode)
    else:
        torch._functionalize_sync(t)  # type: ignore[attr-defined]


@functools.lru_cache(2)
def _get_device_module(device_type: str):
    device_module = getattr(torch, device_type, None)
    if device_module is None:
        raise RuntimeError(
            f"Device '{device_type}' does not have a corresponding module registered as 'torch.{device_type}'."
        )
    return device_module


def _dummy_type(name: str) -> type:
    def get_err_fn(is_init: bool):
        def err_fn(obj, *args, **kwargs):
            if is_init:
                class_name = obj.__class__.__name__
            else:
                class_name = obj.__name__
            raise RuntimeError(f"Tried to instantiate dummy base class {class_name}")

        return err_fn

    return type(
        name, (object,), {"__init__": get_err_fn(True), "__new__": get_err_fn(False)}
    )


class _LazySeedTracker:
    # Since seeding is memory-less, only track the latest seed.
    # Note: `manual_seed_all` followed by `manual_seed` overwrites
    # the seed on current device. We track the order of the **latest**
    # calls between these two APIs.
    def __init__(self):
        self.manual_seed_all_cb = None
        self.manual_seed_cb = None
        self.call_order = []

    def queue_seed_all(self, cb, traceback):
        self.manual_seed_all_cb = (cb, traceback)
        # update seed_all to be latest
        self.call_order = [self.manual_seed_cb, self.manual_seed_all_cb]

    def queue_seed(self, cb, traceback):
        self.manual_seed_cb = (cb, traceback)
        # update seed to be latest
        self.call_order = [self.manual_seed_all_cb, self.manual_seed_cb]

    def get_calls(self) -> List:
        return self.call_order


logger = logging.getLogger(__name__)
P = ParamSpec("P")


class CallbackRegistry(Generic[P]):
    def __init__(self, name: str):
        self.name = name
        self.callback_list: List[Callable[P, None]] = []

    def add_callback(self, cb: Callable[P, None]) -> None:
        self.callback_list.append(cb)

    def fire_callbacks(self, *args: P.args, **kwargs: P.kwargs) -> None:
        for cb in self.callback_list:
            try:
                cb(*args, **kwargs)
            except Exception:
                logger.exception(
                    "Exception in callback for %s registered with gpu trace", self.name
                )
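
# A minimal usage sketch with hypothetical callbacks: callbacks fire in
# registration order, and a raising callback is logged rather than propagated.
def _example_callback_registry():
    fired = []
    registry = CallbackRegistry("example events")
    registry.add_callback(fired.append)
    registry.add_callback(lambda n: fired.append(n * 10))
    registry.fire_callbacks(7)
    assert fired == [7, 70]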


# IMPORT_MAPPING and NAME_MAPPING are adapted from https://github.com/python/cpython/blob/main/Lib/_compat_pickle.py
# for use in the weights_only Unpickler.

IMPORT_MAPPING = {
    "__builtin__": "builtins",
    "copy_reg": "copyreg",
    "Queue": "queue",
    "repr": "reprlib",
    "_abcoll": "collections.abc",
    # Non-mutual mappings.
    "UserDict": "collections",
    "UserList": "collections",
    "UserString": "collections",
    "whichdb": "dbm",
    "StringIO": "io",
    "cStringIO": "io",
}


# This contains rename rules that are easy to handle. We ignore the more
# complex stuff (e.g. mapping the names in the urllib and types modules).
# These rules should be run before import names are fixed.
NAME_MAPPING = {
    ("__builtin__", "xrange"): ("builtins", "range"),
    ("__builtin__", "reduce"): ("functools", "reduce"),
    ("__builtin__", "intern"): ("sys", "intern"),
    ("__builtin__", "unichr"): ("builtins", "chr"),
    ("__builtin__", "unicode"): ("builtins", "str"),
    ("__builtin__", "long"): ("builtins", "int"),
    ("itertools", "izip"): ("builtins", "zip"),
    ("itertools", "imap"): ("builtins", "map"),
    ("itertools", "ifilter"): ("builtins", "filter"),
    ("itertools", "ifilterfalse"): ("itertools", "filterfalse"),
    ("itertools", "izip_longest"): ("itertools", "zip_longest"),
    ("UserDict", "IterableUserDict"): ("collections", "UserDict"),
    ("UserList", "UserList"): ("collections", "UserList"),
    ("UserString", "UserString"): ("collections", "UserString"),
    # Non-mutual mappings.
    ("__builtin__", "basestring"): ("builtins", "str"),
    ("exceptions", "StandardError"): ("builtins", "Exception"),
    ("UserDict", "UserDict"): ("collections", "UserDict"),
}
lib/python3.10/site-packages/torch/_utils_internal.py
ADDED
@@ -0,0 +1,274 @@
# mypy: allow-untyped-defs
import functools
import logging
import os
import sys
import tempfile
from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar
from typing_extensions import ParamSpec

import torch
from torch._strobelight.compile_time_profiler import StrobelightCompileTimeProfiler


_T = TypeVar("_T")
_P = ParamSpec("_P")

log = logging.getLogger(__name__)

if os.environ.get("TORCH_COMPILE_STROBELIGHT", False):
    import shutil

    if not shutil.which("strobeclient"):
        log.info(
            "TORCH_COMPILE_STROBELIGHT is true, but it seems like you are not on an FB machine."
        )
    else:
        log.info("Strobelight profiler is enabled via environment variable")
        StrobelightCompileTimeProfiler.enable()

# this arbitrary-looking assortment of functionality is provided here
# to have a central place for overridable behavior. The motivating
# use is the FB build environment, where this source file is replaced
# by an equivalent.

if torch._running_with_deploy():
    # __file__ is meaningless in the context of frozen torch used in torch deploy.
    # Setting an empty torch_parent should allow the functions below to operate without
    # crashing, but it's unclear if there is a valid use case for them in the context of deploy.
    torch_parent = ""
else:
    if os.path.basename(os.path.dirname(__file__)) == "shared":
        torch_parent = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    else:
        torch_parent = os.path.dirname(os.path.dirname(__file__))


def get_file_path(*path_components: str) -> str:
    return os.path.join(torch_parent, *path_components)


def get_file_path_2(*path_components: str) -> str:
    return os.path.join(*path_components)


def get_writable_path(path: str) -> str:
    if os.access(path, os.W_OK):
        return path
    return tempfile.mkdtemp(suffix=os.path.basename(path))


def prepare_multiprocessing_environment(path: str) -> None:
    pass


def resolve_library_path(path: str) -> str:
    return os.path.realpath(path)


def throw_abstract_impl_not_imported_error(opname, module, context):
    if module in sys.modules:
        raise NotImplementedError(
            f"{opname}: We could not find the fake impl for this operator. "
        )
    else:
        raise NotImplementedError(
            f"{opname}: We could not find the fake impl for this operator. "
            f"The operator specified that you may need to import the '{module}' "
            f"Python module to load the fake impl. {context}"
        )


# NB! This treats the "skip" kwarg specially!!
def compile_time_strobelight_meta(
    phase_name: str,
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
    def compile_time_strobelight_meta_inner(
        function: Callable[_P, _T],
    ) -> Callable[_P, _T]:
        @functools.wraps(function)
        def wrapper_function(*args: _P.args, **kwargs: _P.kwargs) -> _T:
            if "skip" in kwargs and isinstance(skip := kwargs["skip"], int):
                kwargs["skip"] = skip + 1

            if not StrobelightCompileTimeProfiler.enabled:
                return function(*args, **kwargs)

            return StrobelightCompileTimeProfiler.profile_compile_time(
                function, phase_name, *args, **kwargs
            )

        return wrapper_function

    return compile_time_strobelight_meta_inner
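
# A minimal sketch of the decorator in use; `_example_phase_fn` is a
# hypothetical function. When the profiler is disabled (the OSS default), the
# wrapper is a pass-through.
@compile_time_strobelight_meta(phase_name="example_phase")
def _example_phase_fn(x: int) -> int:
    return x * 2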


# Meta only, see
# https://www.internalfb.com/intern/wiki/ML_Workflow_Observability/User_Guides/Adding_instrumentation_to_your_code/
#
# This will cause an event to get logged to Scuba via the signposts API. You
# can view samples on the API at https://fburl.com/scuba/workflow_signpost/zh9wmpqs
# We log to the subsystem "torch", and the category and name you provide here.
# Each of the arguments translates into a Scuba column. We're still figuring
# out local conventions in PyTorch, but category should be something like
# "dynamo" or "inductor", and name should be a specific string describing what
# kind of event happened.
#
# Killswitch is at
# https://www.internalfb.com/intern/justknobs/?name=pytorch%2Fsignpost#event
def signpost_event(category: str, name: str, parameters: Dict[str, Any]):
    log.info("%s %s: %r", category, name, parameters)


def log_compilation_event(metrics):
    log.info("%s", metrics)


def upload_graph(graph):
    pass


def set_pytorch_distributed_envs_from_justknobs():
    pass


def log_export_usage(**kwargs):
    pass


def log_trace_structured_event(*args, **kwargs) -> None:
    pass


def log_cache_bypass(*args, **kwargs) -> None:
    pass


def log_torchscript_usage(api: str, **kwargs):
    _ = api
    return


def check_if_torch_exportable():
    return False


def export_training_ir_rollout_check() -> bool:
    return True


def log_torch_jit_trace_exportability(
    api: str,
    type_of_export: str,
    export_outcome: str,
    result: str,
):
    _, _, _, _ = api, type_of_export, export_outcome, result
    return


def capture_pre_autograd_graph_using_training_ir() -> bool:
    return False


def justknobs_check(name: str, default: bool = True) -> bool:
    """
    This function can be used to killswitch functionality in FB prod,
    where you can toggle this value to False in JK without having to
    do a code push. In OSS, we always have everything turned on all
    the time, because downstream users can simply choose to not update
    PyTorch. (If more fine-grained enable/disable is needed, we could
    potentially have a map we look the name up in to toggle behavior. But
    the point is that it's all tied to source code in OSS, since there's
    no live server to query.)

    This is the bare minimum functionality needed to do some killswitches.
    We have a more detailed plan at
    https://docs.google.com/document/d/1Ukerh9_42SeGh89J-tGtecpHBPwGlkQ043pddkKb3PU/edit
    In particular, in some circumstances it may be necessary to read in
    a knob once at process start, and then use it consistently for the
    rest of the process. Future functionality will codify these patterns
    into a better high level API.

    WARNING: Do NOT call this function at module import time, JK is not
    fork safe and you will break anyone who forks the process and then
    hits JK again.
    """
    return default
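
# A minimal sketch with a hypothetical knob name: in OSS this always takes the
# `default` branch, so the feature stays enabled unless the code changes.
def _example_feature_gate() -> str:
    if justknobs_check("pytorch/example:some_feature"):
        return "feature on"
    return "feature off"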


def justknobs_getval_int(name: str) -> int:
    """
    Read the warning on justknobs_check.
    """
    return 0


def is_fb_unit_test() -> bool:
    return False


@functools.lru_cache(None)
def max_clock_rate():
    if not torch.version.hip:
        from triton.testing import nvsmi

        return nvsmi(["clocks.max.sm"])[0]
    else:
        # Manually set max-clock speeds on ROCm until equivalent nvsmi
        # functionality lands in triton.testing or via pyamdsmi enablement. Required
        # for test_snode_runtime unit tests.
        gcn_arch = str(torch.cuda.get_device_properties(0).gcnArchName.split(":", 1)[0])
        if "gfx94" in gcn_arch:
            return 1700
        elif "gfx90a" in gcn_arch:
            return 1700
        elif "gfx908" in gcn_arch:
            return 1502
        elif "gfx11" in gcn_arch:
            return 1700
        elif "gfx103" in gcn_arch:
            return 1967
        elif "gfx101" in gcn_arch:
            return 1144
        else:
            return 1100


def get_mast_job_name_version() -> Optional[Tuple[str, int]]:
    return None


TEST_MASTER_ADDR = "127.0.0.1"
TEST_MASTER_PORT = 29500
# USE_GLOBAL_DEPS controls whether __init__.py tries to load
# libtorch_global_deps, see Note [Global dependencies]
USE_GLOBAL_DEPS = True
# USE_RTLD_GLOBAL_WITH_LIBTORCH controls whether __init__.py tries to load
# _C.so with RTLD_GLOBAL during the call to dlopen.
USE_RTLD_GLOBAL_WITH_LIBTORCH = False
# If an op was defined in C++ and extended from Python using
# torch.library.register_fake, this controls whether we require that there be a
# m.set_python_module("mylib.ops") call from C++ that associates
# the C++ op with a python module.
REQUIRES_SET_PYTHON_MODULE = False


def maybe_upload_prof_stats_to_manifold(profile_path: str) -> Optional[str]:
    print("Uploading profile stats (fb-only otherwise no-op)")
    return None


def log_chromium_event_internal(
    event: Dict[str, Any],
    stack: List[str],
    logger_uuid: str,
    start_time_ns: int,
):
    return None


def record_chromium_event_internal(
    event: Dict[str, Any],
):
    return None
lib/python3.10/site-packages/torch/_vmap_internals.py
ADDED
@@ -0,0 +1,245 @@
# mypy: allow-untyped-defs
import functools
from typing import Any, Callable, List, Optional, Tuple, Union
from typing_extensions import deprecated

import torch
from torch import Tensor
from torch.utils._pytree import _broadcast_to_and_flatten, tree_flatten, tree_unflatten


in_dims_t = Union[int, Tuple]
out_dims_t = Union[int, Tuple[int, ...]]


# Checks that all args-to-be-batched have the same batch dim size
def _validate_and_get_batch_size(
    flat_in_dims: List[Optional[int]],
    flat_args: List,
) -> int:
    batch_sizes = [
        arg.size(in_dim)
        for in_dim, arg in zip(flat_in_dims, flat_args)
        if in_dim is not None
    ]
    if batch_sizes and any(size != batch_sizes[0] for size in batch_sizes):
        raise ValueError(
            f"vmap: Expected all tensors to have the same size in the mapped "
            f"dimension, got sizes {batch_sizes} for the mapped dimension"
        )
    return batch_sizes[0]


def _num_outputs(batched_outputs: Union[Tensor, Tuple[Tensor, ...]]) -> int:
    if isinstance(batched_outputs, tuple):
        return len(batched_outputs)
    return 1


# If value is a tuple, check it has length `num_elements`.
# If value is not a tuple, make a tuple with `value` repeated `num_elements` times
def _as_tuple(
    value: Any,
    num_elements: int,
    error_message_lambda: Callable[[], str],
) -> Tuple:
    if not isinstance(value, tuple):
        return (value,) * num_elements
    if len(value) != num_elements:
        raise ValueError(error_message_lambda())
    return value


# Creates BatchedTensors for every Tensor in args that should be batched.
# Returns the (potentially) batched arguments and the batch_size.
def _create_batched_inputs(
    in_dims: in_dims_t,
    args: Tuple,
    vmap_level: int,
    func: Callable,
) -> Tuple[Tuple, int]:
    if not isinstance(in_dims, int) and not isinstance(in_dims, tuple):
        raise ValueError(
            f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
            f"expected `in_dims` to be int or a (potentially nested) tuple "
            f"matching the structure of inputs, got: {type(in_dims)}."
        )
    if len(args) == 0:
        raise ValueError(
            f"vmap({_get_name(func)})(<inputs>): got no inputs. Maybe you forgot to add "
            f"inputs, or you are trying to vmap over a function with no inputs. "
            f"The latter is unsupported."
        )

    flat_args, args_spec = tree_flatten(args)
    flat_in_dims = _broadcast_to_and_flatten(in_dims, args_spec)
    if flat_in_dims is None:
        raise ValueError(
            f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
            f"in_dims is not compatible with the structure of `inputs`. "
            f"in_dims has structure {tree_flatten(in_dims)[1]} but inputs "
            f"has structure {args_spec}."
        )

    for arg, in_dim in zip(flat_args, flat_in_dims):
        if not isinstance(in_dim, int) and in_dim is not None:
            raise ValueError(
                f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
                f"Got in_dim={in_dim} for an input but in_dim must be either "
                f"an integer dimension or None."
            )
        if isinstance(in_dim, int) and not isinstance(arg, Tensor):
            raise ValueError(
                f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
                f"Got in_dim={in_dim} for an input but the input is of type "
                f"{type(arg)}. We cannot vmap over non-Tensor arguments, "
                f"please use None as the respective in_dim"
            )
        if in_dim is not None and (in_dim < 0 or in_dim >= arg.dim()):
            raise ValueError(
                f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
                f"Got in_dim={in_dim} for some input, but that input is a Tensor "
                f"of dimensionality {arg.dim()} so expected in_dim to satisfy "
                f"0 <= in_dim < {arg.dim()}."
            )

    batch_size = _validate_and_get_batch_size(flat_in_dims, flat_args)
    # See NOTE [Ignored _remove_batch_dim, _add_batch_dim]
    batched_inputs = [
        arg if in_dim is None else torch._add_batch_dim(arg, in_dim, vmap_level)
        for in_dim, arg in zip(flat_in_dims, flat_args)
    ]
    return tree_unflatten(batched_inputs, args_spec), batch_size


# Undoes the batching (and any batch dimensions) associated with the `vmap_level`.
def _unwrap_batched(
    batched_outputs: Union[Tensor, Tuple[Tensor, ...]],
    out_dims: out_dims_t,
    vmap_level: int,
    batch_size: int,
    func: Callable,
    allow_none_pass_through: bool = False,
) -> Tuple:
    num_outputs = _num_outputs(batched_outputs)
    out_dims_as_tuple = _as_tuple(
        out_dims,
        num_outputs,
        lambda: f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must "
        f"have one dim per output (got {num_outputs} outputs) of {_get_name(func)}.",
    )

    # NOTE [Ignored _remove_batch_dim, _add_batch_dim]
    # There is something wrong with our type bindings for functions that begin
    # with '_', see #40397.
    if isinstance(batched_outputs, Tensor):
        out_dim = out_dims_as_tuple[0]
        return torch._remove_batch_dim(batched_outputs, vmap_level, batch_size, out_dim)  # type: ignore[return-value]
    if allow_none_pass_through:
        return tuple(
            (
                torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)
                if out is not None
                else None
            )
            for out, out_dim in zip(batched_outputs, out_dims_as_tuple)
        )
    else:
        return tuple(
            torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)
            for out, out_dim in zip(batched_outputs, out_dims_as_tuple)
        )


# Checks that `fn` returned one or more Tensors and nothing else.
# NB: A python function that returns multiple values returns a single tuple,
# so we are effectively checking that `outputs` is a single Tensor or a tuple of
# Tensors.
def _validate_outputs(outputs: Any, func: Callable) -> None:
    if isinstance(outputs, Tensor):
        return
    if not isinstance(outputs, tuple):
        raise ValueError(
            f"vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return "
            f"Tensors, got type {type(outputs)} as the return."
        )
    for idx, output in enumerate(outputs):
        if isinstance(output, Tensor):
            continue
        raise ValueError(
            f"vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return "
            f"Tensors, got type {type(output)} for return {idx}."
        )


def _check_out_dims_is_int_or_int_tuple(out_dims: out_dims_t, func: Callable) -> None:
    if isinstance(out_dims, int):
        return
    if not isinstance(out_dims, tuple) or not all(
        isinstance(out_dim, int) for out_dim in out_dims
    ):
        raise ValueError(
            f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must be "
            f"an int or a tuple of int representing where in the outputs the "
            f"vmapped dimension should appear."
        )


def _get_name(func: Callable):
    if hasattr(func, "__name__"):
        return func.__name__

    # Not all callables have __name__; in fact, only static functions/methods do.
    # A callable created via functools.partial or an nn.Module, to name some
    # examples, doesn't have a __name__.
    return repr(func)


# vmap(func)(inputs) wraps all Tensor inputs to be batched in BatchedTensors,
# sends those into func, and then unwraps the output BatchedTensors. Operations
# on BatchedTensors perform the batched operations that the user is asking for.
@deprecated(
    "Please use `torch.vmap` instead of `torch._vmap_internals.vmap`.",
    category=FutureWarning,
)
def vmap(func: Callable, in_dims: in_dims_t = 0, out_dims: out_dims_t = 0) -> Callable:
    """
    Please use torch.vmap instead of this API.
    """
    return _vmap(func, in_dims, out_dims)


# A version of vmap but without the initial "experimental prototype" warning
def _vmap(
    func: Callable,
    in_dims: in_dims_t = 0,
    out_dims: out_dims_t = 0,
    allow_none_pass_through: bool = False,
) -> Callable:
    # The `allow_none_pass_through` argument is a temporary workaround and may be
    # removed. Currently it enables us to wrap a call to the autograd engine in
    # `autograd.grad`, which may return None if any of the inputs are unused.
    # See the issue discussing this:
    # https://github.com/facebookresearch/functorch/issues/159.
    @functools.wraps(func)
    def wrapped(*args):
        _check_out_dims_is_int_or_int_tuple(out_dims, func)
        vmap_level = torch._C._vmapmode_increment_nesting()
        try:
            batched_inputs, batch_size = _create_batched_inputs(
                in_dims, args, vmap_level, func
            )
            batched_outputs = func(*batched_inputs)
            if not allow_none_pass_through:
                _validate_outputs(batched_outputs, func)
            return _unwrap_batched(
                batched_outputs,
                out_dims,
                vmap_level,
                batch_size,
                func,
                allow_none_pass_through=allow_none_pass_through,
            )
        finally:
            torch._C._vmapmode_decrement_nesting()

    return wrapped
lib/python3.10/site-packages/torch/_weights_only_unpickler.py
ADDED
@@ -0,0 +1,553 @@
# mypy: allow-untyped-defs
# Unpickler restricted to loading only state dicts
# Restrict constructing types to a list defined in _get_allowed_globals()
# Restrict BUILD operation to `Tensor`, `Parameter` and `OrderedDict` types only
# Restrict APPEND/APPENDS to `list`
# In the `GLOBAL` operation, do not do class lookup by name; rather, rely on the dictionary
# defined by the `_get_allowed_globals()` method, which contains:
# - torch types (Storage, dtypes, Tensor, `torch.Size`)
# - `torch._utils._rebuild` functions
# - `torch.nn.Parameter`
# - `collections.Counter`
# - `collections.OrderedDict`
# Additionally, users can allowlist classes they have deemed safe using
# `_add_safe_globals()` (`torch.serialization.add_safe_globals`)
# `_clear_safe_globals()` (`torch.serialization.clear_safe_globals`)
# `_get_safe_globals()` (`torch.serialization.get_safe_globals`)

# Based off https://github.com/python/cpython/blob/main/Lib/pickle.py
# Expected to be useful for loading PyTorch model weights
# For example:
#   data = urllib.request.urlopen('https://download.pytorch.org/models/resnet50-0676ba61.pth').read()
#   buf = io.BytesIO(data)
#   weights = torch.load(buf, weights_only=True)

import functools as _functools
import warnings

from _codecs import encode
from collections import Counter, OrderedDict
from pickle import (
    APPEND,
    APPENDS,
    BINFLOAT,
    BINGET,
    BININT,
    BININT1,
    BININT2,
    BINPERSID,
    BINPUT,
    BINUNICODE,
    BUILD,
    bytes_types,
    decode_long,
    EMPTY_DICT,
    EMPTY_LIST,
    EMPTY_SET,
    EMPTY_TUPLE,
    GLOBAL,
    LONG1,
    LONG_BINGET,
    LONG_BINPUT,
    MARK,
    NEWFALSE,
    NEWOBJ,
    NEWTRUE,
    NONE,
    PROTO,
    REDUCE,
    SETITEM,
    SETITEMS,
    SHORT_BINSTRING,
    STOP,
    TUPLE,
    TUPLE1,
    TUPLE2,
    TUPLE3,
    UnpicklingError,
)
from struct import unpack
from sys import maxsize
from typing import Any, Callable, Dict, List, Set, Tuple, Union

import torch
from torch._utils import IMPORT_MAPPING, NAME_MAPPING

# modules in this list are never allowed, even if the user attempts to allowlist
# functions/classes from them
_blocklisted_modules = [
    "sys",
    "os",
    "posix",
    "nt",
]

_marked_safe_globals_set: Set[Union[Callable, Tuple[Callable, str]]] = set()


def _add_safe_globals(safe_globals: List[Union[Callable, Tuple[Callable, str]]]):
    global _marked_safe_globals_set
    _marked_safe_globals_set = _marked_safe_globals_set.union(set(safe_globals))


def _get_safe_globals() -> List[Union[Callable, Tuple[Callable, str]]]:
    global _marked_safe_globals_set
    return list(_marked_safe_globals_set)


def _clear_safe_globals():
    global _marked_safe_globals_set
    _marked_safe_globals_set = set()


def _remove_safe_globals(
    globals_to_remove: List[Union[Callable, Tuple[Callable, str]]],
):
    global _marked_safe_globals_set
    _marked_safe_globals_set = _marked_safe_globals_set - set(globals_to_remove)


class _safe_globals:
    def __init__(self, safe_globals: List[Union[Callable, Tuple[Callable, str]]]):
        self.safe_globals = safe_globals

    def __enter__(self):
        _add_safe_globals(self.safe_globals)

    def __exit__(self, type, value, tb):
        _remove_safe_globals(self.safe_globals)

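# Illustrative sketch of how this allowlist surfaces through the public
# torch.serialization API (`MyConfig` and the checkpoint path are hypothetical):
#
#   torch.serialization.add_safe_globals([MyConfig])     # process-wide
#   with torch.serialization.safe_globals([MyConfig]):   # or scoped
#       obj = torch.load("ckpt_with_config.pt", weights_only=True)
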
# Separate from _get_allowed_globals because of the lru_cache on _get_allowed_globals
# For example, if a user had a script like
#   torch.load(file_a)
#   torch.serialization._add_safe_globals([torch.foo])
#   torch.load(file_b)
# the dynamic additions to safe_globals would not be picked up by
# _get_allowed_globals due to the lru_cache
def _get_user_allowed_globals():
    rc: Dict[str, Any] = {}
    for f in _marked_safe_globals_set:
        if isinstance(f, tuple):
            if len(f) != 2:
                raise ValueError(
                    f"Expected tuple of length 2 (global, str of callable full path), but got tuple of length: {len(f)}"
                )
            if type(f[1]) is not str:
                raise TypeError(
                    f"Expected second item in tuple to be str of callable full path, but got: {type(f[1])}"
                )
            f, name = f
            rc[name] = f
        else:
            module, name = f.__module__, f.__name__
            rc[f"{module}.{name}"] = f
    return rc


def _tensor_rebuild_functions():
    return {
        torch._utils._rebuild_parameter,
        torch._utils._rebuild_parameter_with_state,
        torch._utils._rebuild_qtensor,
        torch._utils._rebuild_tensor,
        torch._utils._rebuild_tensor_v2,
        torch._utils._rebuild_tensor_v3,
        torch._utils._rebuild_sparse_tensor,
        torch._utils._rebuild_meta_tensor_no_storage,
        torch._utils._rebuild_nested_tensor,
        torch._utils._rebuild_wrapper_subclass,
        # Allowlisting this, but not allowlisting the numpy functions by default.
        # Reasoning is that we don't have control over the numpy functions, but
        # this utility is provided by pytorch.
        torch._utils._rebuild_device_tensor_from_numpy,
        # In 2.6, we should no longer have a dependency on numpy and the above
        # _rebuild_device_tensor_from_numpy function.
        torch._utils._rebuild_device_tensor_from_cpu_tensor,
    }


# Unpickling machinery
@_functools.lru_cache(maxsize=1)
def _get_allowed_globals():
    rc: Dict[str, Any] = {
        "collections.OrderedDict": OrderedDict,
        "collections.Counter": Counter,
        "torch.nn.parameter.Parameter": torch.nn.Parameter,
        "torch.serialization._get_layout": torch.serialization._get_layout,
        "torch.Size": torch.Size,
        "torch.Tensor": torch.Tensor,
        "torch.device": torch.device,
        "_codecs.encode": encode,  # for bytes
        "builtins.bytearray": bytearray,  # for bytearray
        "builtins.set": set,  # for set
        "builtins.complex": complex,  # for complex
    }

    # dtype
    for t in torch.storage._dtype_to_storage_type_map().keys():
        rc[str(t)] = t
    for t in torch.storage._new_dtypes():
        rc[str(t)] = t
    # Tensor classes
    for tt in torch._tensor_classes:
        rc[f"{tt.__module__}.{tt.__name__}"] = tt
    # Storage classes
    for ts in torch._storage_classes:
        if ts not in (torch.storage.TypedStorage, torch.storage.UntypedStorage):
            # Wrap legacy storage types in a dummy class
            rc[f"{ts.__module__}.{ts.__name__}"] = torch.serialization.StorageType(
                ts.__name__
            )
        else:
            rc[f"{ts.__module__}.{ts.__name__}"] = ts
    # Quantization specific
    for qt in [
        torch.per_tensor_affine,
        torch.per_tensor_symmetric,
        torch.per_channel_affine,
        torch.per_channel_symmetric,
        torch.per_channel_affine_float_qparams,
    ]:
        rc[str(qt)] = qt
    # Rebuild functions
    for f in _tensor_rebuild_functions():
        rc[f"torch._utils.{f.__name__}"] = f

    # Handles Tensor subclasses and Tensors with attributes.
    # NOTE: It calls into the above rebuild functions for regular Tensor types.
    rc["torch._tensor._rebuild_from_type_v2"] = torch._tensor._rebuild_from_type_v2
    return rc


def _read_global_instruction(readline: Callable) -> Tuple[str, str]:
    module = readline()[:-1].decode("utf-8")
    name = readline()[:-1].decode("utf-8")
    # Patch since torch.save's default protocol is 2;
    # users will be running this code in Python > 3.
    if (module, name) in NAME_MAPPING:
        module, name = NAME_MAPPING[(module, name)]
    elif module in IMPORT_MAPPING:
        module = IMPORT_MAPPING[module]
    return module, name


def get_globals_in_pkl(file) -> Set[str]:
    globals_in_checkpoint = set()
    protocol = None
    read = file.read
    readline = file.readline
    op_to_bytes_to_read = {
        NEWOBJ[0]: 0,
        REDUCE[0]: 0,
        BUILD[0]: 0,
        APPEND[0]: 0,
        APPENDS[0]: 0,
        SETITEM[0]: 0,
        SETITEMS[0]: 0,
        MARK[0]: 0,
        TUPLE[0]: 0,
        TUPLE1[0]: 0,
        TUPLE2[0]: 0,
        TUPLE3[0]: 0,
        NONE[0]: 0,
        NEWFALSE[0]: 0,
        NEWTRUE[0]: 0,
        EMPTY_TUPLE[0]: 0,
        EMPTY_LIST[0]: 0,
        EMPTY_DICT[0]: 0,
        EMPTY_SET[0]: 0,
        BINPERSID[0]: 0,
        BININT[0]: 4,
        BININT1[0]: 1,
        BININT2[0]: 2,
        BINFLOAT[0]: 8,
        BINGET[0]: 1,
        LONG_BINGET[0]: 4,
        BINPUT[0]: 1,
        LONG_BINPUT[0]: 4,
    }
    while True:
        key = read(1)
        if not key:
            raise EOFError
        assert isinstance(key, bytes_types)
        if key[0] == GLOBAL[0]:
            module, name = _read_global_instruction(readline)
            globals_in_checkpoint.add(f"{module}.{name}")
        elif key[0] in op_to_bytes_to_read:
            bytes_to_read = op_to_bytes_to_read[key[0]]
            if bytes_to_read:
                read(bytes_to_read)
        # ops where the number of bytes to read depends on the data
        elif key[0] == BINUNICODE[0]:
            strlen = unpack("<I", read(4))[0]
            if strlen > maxsize:
                raise UnpicklingError("String is too long")
            read(strlen)
        elif key[0] in {SHORT_BINSTRING[0], LONG1[0]}:
            strlen = read(1)[0]
            read(strlen)
        # first and last op
        elif key[0] == PROTO[0]:
            protocol = read(1)[0]
        elif key[0] == STOP[0]:
            return globals_in_checkpoint
        else:
            raise UnpicklingError(f"Unsupported operand {key[0]}")

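# Illustrative sketch of scanning a checkpoint for its GLOBAL references
# without loading it (the zip member name varies per archive and is
# hypothetical here):
#
#   import zipfile
#   with zipfile.ZipFile("model.pt") as zf, zf.open("archive/data.pkl") as f:
#       print(get_globals_in_pkl(f))
#   # e.g. {"collections.OrderedDict", "torch._utils._rebuild_tensor_v2"}
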
class Unpickler:
    def __init__(self, file, *, encoding: str = "bytes"):
        self.encoding = encoding
        self.readline = file.readline
        self.read = file.read
        self.memo: Dict[int, Any] = {}
        self.proto: int = -1

    def load(self):
        """Read a pickled object representation from the open file.

        Return the reconstituted object hierarchy specified in the file.
        """
        self.metastack = []
        self.stack: List[Any] = []
        self.append = self.stack.append
        read = self.read
        readline = self.readline
        while True:
            key = read(1)
            if not key:
                raise EOFError
            assert isinstance(key, bytes_types)
            # Risky operators
            if key[0] == GLOBAL[0]:
                module, name = _read_global_instruction(self.readline)
                full_path = f"{module}.{name}"
                if module in _blocklisted_modules:
                    raise UnpicklingError(
                        f"Trying to load unsupported GLOBAL {full_path} whose module {module} is blocked."
                    )
                if full_path in _get_allowed_globals():
                    self.append(_get_allowed_globals()[full_path])
                elif full_path in _get_user_allowed_globals():
                    self.append(_get_user_allowed_globals()[full_path])
                elif full_path in [
                    "torch.nested._internal.nested_tensor.NestedTensor",
                    "torch.nested._internal.nested_tensor._rebuild_njt",
                    "torch._dynamo.decorators._DimRange",
                ]:
                    raise UnpicklingError(
                        "``torch.nested`` and ``torch._dynamo`` must be imported to load nested jagged tensors (NJTs)"
                    )
                elif full_path in [
                    "torch.distributed.device_mesh.DeviceMesh",
                    "torch.distributed.tensor._dtensor_spec.DTensorSpec",
                    "torch.distributed.tensor._dtensor_spec.TensorMeta",
                    "torch.distributed.tensor.DTensor",
                    "torch.distributed.tensor.placement_types.Partial",
                    "torch.distributed.tensor.placement_types.Replicate",
                    "torch.distributed.tensor.placement_types.Shard",
                ]:
                    raise UnpicklingError(
                        "``torch.distributed.tensor`` must be imported to load DTensors"
                    )
                else:
                    raise UnpicklingError(
                        f"Unsupported global: GLOBAL {full_path} was not an allowed global by default. "
                        f"Please use `torch.serialization.add_safe_globals([{name}])` or the "
                        f"`torch.serialization.safe_globals([{name}])` context manager to allowlist this global "
                        "if you trust this class/function."
                    )
            elif key[0] == NEWOBJ[0]:
                args = self.stack.pop()
                cls = self.stack.pop()
                if cls is torch.nn.Parameter:
                    self.append(torch.nn.Parameter(*args))
                elif (
                    cls in _get_user_allowed_globals().values()
                    or cls in _get_allowed_globals().values()
                ):
                    self.append(cls.__new__(cls, *args))
                else:
                    raise UnpicklingError(
                        "Can only create new object for nn.Parameter or classes allowlisted "
                        f"via `add_safe_globals` but got {cls}"
                    )
            elif key[0] == REDUCE[0]:
                args = self.stack.pop()
                func = self.stack[-1]
                if (
                    func not in _get_allowed_globals().values()
                    and func not in _get_user_allowed_globals().values()
                ):
                    raise UnpicklingError(
                        f"Trying to call reduce for unrecognized function {func}"
                    )
                self.stack[-1] = func(*args)
            elif key[0] == BUILD[0]:
                state = self.stack.pop()
                inst = self.stack[-1]
                if type(inst) is torch.Tensor:
                    # Legacy unpickling
                    inst.set_(*state)
                elif type(inst) is torch.nn.Parameter:
                    inst.__setstate__(state)
                elif type(inst) is OrderedDict:
                    inst.__dict__.update(state)
                elif (
                    type(inst) in _get_user_allowed_globals().values()
                    or type(inst) in _get_allowed_globals().values()
                ):
                    if hasattr(inst, "__setstate__"):
                        inst.__setstate__(state)
                    else:
                        # mimics load_build in pickle
                        # https://github.com/python/cpython/blob/f0c6fccd08904787a39269367f09f263d496114c/Lib/pickle.py#L1854-L1867
                        slotstate = None
                        if isinstance(state, tuple) and len(state) == 2:
                            state, slotstate = state
                        if state:
                            inst.__dict__.update(state)
                        if slotstate:
                            for k, v in slotstate.items():
                                setattr(inst, k, v)
                else:
                    raise UnpicklingError(
                        "Can only build Tensor, Parameter, OrderedDict or types allowlisted "
                        f"via `add_safe_globals`, but got {type(inst)}"
                    )
            # Stack manipulation
            elif key[0] == APPEND[0]:
                item = self.stack.pop()
                list_obj = self.stack[-1]
                if type(list_obj) is not list:
                    raise UnpicklingError(
                        f"Can only append to lists, but got {type(list_obj)}"
                    )
                list_obj.append(item)
            elif key[0] == APPENDS[0]:
                items = self.pop_mark()
                list_obj = self.stack[-1]
                if type(list_obj) is not list:
                    raise UnpicklingError(
                        f"Can only extend lists, but got {type(list_obj)}"
                    )
                list_obj.extend(items)
            elif key[0] == SETITEM[0]:
                (v, k) = (self.stack.pop(), self.stack.pop())
                self.stack[-1][k] = v
            elif key[0] == SETITEMS[0]:
                items = self.pop_mark()
                for i in range(0, len(items), 2):
                    self.stack[-1][items[i]] = items[i + 1]
            elif key[0] == MARK[0]:
                self.metastack.append(self.stack)
                self.stack = []
                self.append = self.stack.append
            elif key[0] == TUPLE[0]:
                items = self.pop_mark()
                self.append(tuple(items))
            elif key[0] == TUPLE1[0]:
                self.stack[-1] = (self.stack[-1],)
            elif key[0] == TUPLE2[0]:
                self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
            elif key[0] == TUPLE3[0]:
                self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
            # Basic types construction
            elif key[0] == NONE[0]:
                self.append(None)
            elif key[0] == NEWFALSE[0]:
                self.append(False)
            elif key[0] == NEWTRUE[0]:
                self.append(True)
            elif key[0] == EMPTY_TUPLE[0]:
                self.append(())
            elif key[0] == EMPTY_LIST[0]:
                self.append([])
            elif key[0] == EMPTY_DICT[0]:
                self.append({})
            elif key[0] == EMPTY_SET[0]:
                self.append(set())
            elif key[0] == BININT[0]:
                self.append(unpack("<i", read(4))[0])
            elif key[0] == BININT1[0]:
                self.append(self.read(1)[0])
            elif key[0] == BININT2[0]:
                self.append(unpack("<H", read(2))[0])
            elif key[0] == BINFLOAT[0]:
                self.append(unpack(">d", self.read(8))[0])
            elif key[0] == BINUNICODE[0]:
                strlen = unpack("<I", read(4))[0]
                if strlen > maxsize:
                    raise UnpicklingError("String is too long")
                strval = str(read(strlen), "utf-8", "surrogatepass")
                self.append(strval)
            elif key[0] == SHORT_BINSTRING[0]:
                strlen = read(1)[0]
                strdata = read(strlen)
                if self.encoding != "bytes":
                    strdata = strdata.decode(self.encoding, "strict")
                self.append(strdata)
            elif key[0] == BINPERSID[0]:
                pid = self.stack.pop()
                # Only allow persistent load of storage
                if type(pid) is not tuple and type(pid) is not int:
                    raise UnpicklingError(
                        f"persistent_load id must be tuple or int, but got {type(pid)}"
                    )
                if (
                    type(pid) is tuple
                    and len(pid) > 0
                    and torch.serialization._maybe_decode_ascii(pid[0]) != "storage"
                ):
                    raise UnpicklingError(
                        f"Only persistent_load of storage is allowed, but got {pid[0]}"
                    )
                self.append(self.persistent_load(pid))
            elif key[0] in [BINGET[0], LONG_BINGET[0]]:
                idx = (read(1) if key[0] == BINGET[0] else unpack("<I", read(4)))[0]
                self.append(self.memo[idx])
            elif key[0] in [BINPUT[0], LONG_BINPUT[0]]:
                i = (read(1) if key[0] == BINPUT[0] else unpack("<I", read(4)))[0]
                if i < 0:
                    raise ValueError("negative argument")
                self.memo[i] = self.stack[-1]
            elif key[0] == LONG1[0]:
                n = read(1)[0]
                data = read(n)
                self.append(decode_long(data))
            # First and last deserializer ops
            elif key[0] == PROTO[0]:
                self.proto = read(1)[0]
                if self.proto != 2:
                    warnings.warn(
                        f"Detected pickle protocol {self.proto} in the checkpoint, which was "
                        "not the default pickle protocol used by `torch.load` (2). The weights_only "
                        "Unpickler might not support all instructions implemented by this protocol, "
                        "please file an issue for adding support if you encounter this."
                    )
            elif key[0] == STOP[0]:
                rc = self.stack.pop()
                return rc
            else:
                raise UnpicklingError(f"Unsupported operand {key[0]}")

    # Return a list of items pushed onto the stack after the last MARK instruction.
    def pop_mark(self):
        items = self.stack
        self.stack = self.metastack.pop()
        self.append = self.stack.append
        return items

    def persistent_load(self, pid):
        raise UnpicklingError("unsupported persistent id encountered")

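# BINPERSID above delegates storage lookup to `persistent_load`; the real
# override lives in torch.serialization. An illustrative sketch of a consumer
# (the subclass name and the pid layout shown are assumptions for the sketch):
#
#   class _StorageUnpickler(Unpickler):
#       def __init__(self, file, storages, **kwargs):
#           super().__init__(file, **kwargs)
#           self._storages = storages  # e.g. {key: torch.UntypedStorage}
#
#       def persistent_load(self, pid):
#           # pid ~ ("storage", storage_type, key, location, numel)
#           return self._storages[pid[2]]
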
def load(file, *, encoding: str = "ASCII"):
    return Unpickler(file, encoding=encoding).load()
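
The restricted unpickler above is exercised whenever `torch.load` is called with `weights_only=True`; a minimal round trip (a sketch, assuming a recent torch build):

    import io
    import torch

    buf = io.BytesIO()
    torch.save({"w": torch.randn(2, 3)}, buf)
    buf.seek(0)

    # Routed through the restricted Unpickler in _weights_only_unpickler.py.
    state = torch.load(buf, weights_only=True)
    assert state["w"].shape == (2, 3)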