nnilayy committed (verified)
Commit 89cbce5
1 Parent(s): 5d150a8

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete list.
Files changed (50)
  1. .gitattributes +1 -0
  2. lib/python3.10/site-packages/babel/locale-data/am.dat +3 -0
  3. lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h +321 -0
  4. lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h +13 -0
  5. lib/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h +479 -0
  6. lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h +153 -0
  7. lib/python3.10/site-packages/torch/include/ATen/native/FusedAdam.h +27 -0
  8. lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h +298 -0
  9. lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h +71 -0
  10. lib/python3.10/site-packages/torch/include/ATen/native/ReduceAllOps.h +16 -0
  11. lib/python3.10/site-packages/torch/include/ATen/native/ReductionType.h +40 -0
  12. lib/python3.10/site-packages/torch/include/ATen/native/Sorting.h +28 -0
  13. lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cpu_dispatch.h +25 -0
  14. lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized_ops.h +39 -0
  15. lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_cpu_dispatch.h +23 -0
  16. lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_cpu_dispatch.h +23 -0
  17. lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h +25 -0
  18. lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_values_ops.h +28 -0
  19. lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_cpu_dispatch.h +23 -0
  20. lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_spatial_convolution.h +91 -0
  21. lib/python3.10/site-packages/torch/include/ATen/ops/_pad_circular_native.h +21 -0
  22. lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h +23 -0
  23. lib/python3.10/site-packages/torch/include/ATen/ops/_print_ops.h +28 -0
  24. lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_ops.h +28 -0
  25. lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_ops.h +28 -0
  26. lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_ops.h +61 -0
  27. lib/python3.10/site-packages/torch/include/ATen/ops/_test_ambiguous_defaults_ops.h +39 -0
  28. lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_native.h +26 -0
  29. lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_meta_dispatch.h +23 -0
  30. lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_meta_dispatch.h +26 -0
  31. lib/python3.10/site-packages/torch/include/ATen/ops/all_native.h +34 -0
  32. lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward.h +30 -0
  33. lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h +39 -0
  34. lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc_backward.h +30 -0
  35. lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_compositeexplicitautograd_dispatch.h +24 -0
  36. lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_compositeexplicitautograd_dispatch.h +23 -0
  37. lib/python3.10/site-packages/torch/include/ATen/ops/digamma_native.h +23 -0
  38. lib/python3.10/site-packages/torch/include/ATen/ops/div_ops.h +149 -0
  39. lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_ops.h +28 -0
  40. lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cpu_dispatch.h +26 -0
  41. lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_native.h +22 -0
  42. lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2.h +91 -0
  43. lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_ops.h +39 -0
  44. lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_ops.h +39 -0
  45. lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced.h +26 -0
  46. lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h +23 -0
  47. lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_compositeimplicitautograd_dispatch.h +23 -0
  48. lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h +25 -0
  49. lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h +25 -0
  50. lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_native.h +21 -0
.gitattributes CHANGED
@@ -177,3 +177,4 @@ lib/python3.10/site-packages/babel/locale-data/blo.dat filter=lfs diff=lfs merge
  lib/python3.10/site-packages/babel/locale-data/ia.dat filter=lfs diff=lfs merge=lfs -text
  lib/python3.10/site-packages/babel/locale-data/lt.dat filter=lfs diff=lfs merge=lfs -text
  lib/python3.10/site-packages/babel/locale-data/lb.dat filter=lfs diff=lfs merge=lfs -text
+ lib/python3.10/site-packages/babel/locale-data/am.dat filter=lfs diff=lfs merge=lfs -text
lib/python3.10/site-packages/babel/locale-data/am.dat ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4dd75f79121216fce34c5a41faf1f782348ab36afc893dc261c92bae289b5d96
+ size 173260
lib/python3.10/site-packages/torch/include/ATen/native/BatchLinearAlgebra.h ADDED
@@ -0,0 +1,321 @@
1
+ #pragma once
2
+
3
+ #include <optional>
4
+ #include <c10/util/string_view.h>
5
+ #include <ATen/Config.h>
6
+ #include <ATen/native/DispatchStub.h>
7
+
8
+ // Forward declare TI
9
+ namespace at {
10
+ class Tensor;
11
+ struct TensorIterator;
12
+
13
+ namespace native {
14
+ enum class TransposeType;
15
+ }
16
+
17
+ }
18
+
19
+ namespace at::native {
20
+
21
+ enum class LapackLstsqDriverType : int64_t { Gels, Gelsd, Gelsy, Gelss};
22
+
23
+ #if AT_BUILD_WITH_LAPACK()
24
+ // Define per-batch functions to be used in the implementation of batched
25
+ // linear algebra operations
26
+
27
+ template <class scalar_t>
28
+ void lapackCholesky(char uplo, int n, scalar_t *a, int lda, int *info);
29
+
30
+ template <class scalar_t>
31
+ void lapackCholeskyInverse(char uplo, int n, scalar_t *a, int lda, int *info);
32
+
33
+ template <class scalar_t, class value_t=scalar_t>
34
+ void lapackEig(char jobvl, char jobvr, int n, scalar_t *a, int lda, scalar_t *w, scalar_t* vl, int ldvl, scalar_t *vr, int ldvr, scalar_t *work, int lwork, value_t *rwork, int *info);
35
+
36
+ template <class scalar_t>
37
+ void lapackGeqrf(int m, int n, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info);
38
+
39
+ template <class scalar_t>
40
+ void lapackOrgqr(int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *work, int lwork, int *info);
41
+
42
+ template <class scalar_t>
43
+ void lapackOrmqr(char side, char trans, int m, int n, int k, scalar_t *a, int lda, scalar_t *tau, scalar_t *c, int ldc, scalar_t *work, int lwork, int *info);
44
+
45
+ template <class scalar_t, class value_t = scalar_t>
46
+ void lapackSyevd(char jobz, char uplo, int n, scalar_t* a, int lda, value_t* w, scalar_t* work, int lwork, value_t* rwork, int lrwork, int* iwork, int liwork, int* info);
47
+
48
+ template <class scalar_t>
49
+ void lapackGels(char trans, int m, int n, int nrhs,
50
+ scalar_t *a, int lda, scalar_t *b, int ldb,
51
+ scalar_t *work, int lwork, int *info);
52
+
53
+ template <class scalar_t, class value_t = scalar_t>
54
+ void lapackGelsd(int m, int n, int nrhs,
55
+ scalar_t *a, int lda, scalar_t *b, int ldb,
56
+ value_t *s, value_t rcond, int *rank,
57
+ scalar_t* work, int lwork,
58
+ value_t *rwork, int* iwork, int *info);
59
+
60
+ template <class scalar_t, class value_t = scalar_t>
61
+ void lapackGelsy(int m, int n, int nrhs,
62
+ scalar_t *a, int lda, scalar_t *b, int ldb,
63
+ int *jpvt, value_t rcond, int *rank,
64
+ scalar_t *work, int lwork, value_t* rwork, int *info);
65
+
66
+ template <class scalar_t, class value_t = scalar_t>
67
+ void lapackGelss(int m, int n, int nrhs,
68
+ scalar_t *a, int lda, scalar_t *b, int ldb,
69
+ value_t *s, value_t rcond, int *rank,
70
+ scalar_t *work, int lwork,
71
+ value_t *rwork, int *info);
72
+
73
+ template <LapackLstsqDriverType, class scalar_t, class value_t = scalar_t>
74
+ struct lapackLstsq_impl;
75
+
76
+ template <class scalar_t, class value_t>
77
+ struct lapackLstsq_impl<LapackLstsqDriverType::Gels, scalar_t, value_t> {
78
+ static void call(
79
+ char trans, int m, int n, int nrhs,
80
+ scalar_t *a, int lda, scalar_t *b, int ldb,
81
+ scalar_t *work, int lwork, int *info, // Gels flavor
82
+ int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
83
+ value_t *s, // Gelss flavor
84
+ int *iwork // Gelsd flavor
85
+ ) {
86
+ lapackGels<scalar_t>(
87
+ trans, m, n, nrhs,
88
+ a, lda, b, ldb,
89
+ work, lwork, info);
90
+ }
91
+ };
92
+
93
+ template <class scalar_t, class value_t>
94
+ struct lapackLstsq_impl<LapackLstsqDriverType::Gelsy, scalar_t, value_t> {
95
+ static void call(
96
+ char trans, int m, int n, int nrhs,
97
+ scalar_t *a, int lda, scalar_t *b, int ldb,
98
+ scalar_t *work, int lwork, int *info, // Gels flavor
99
+ int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
100
+ value_t *s, // Gelss flavor
101
+ int *iwork // Gelsd flavor
102
+ ) {
103
+ lapackGelsy<scalar_t, value_t>(
104
+ m, n, nrhs,
105
+ a, lda, b, ldb,
106
+ jpvt, rcond, rank,
107
+ work, lwork, rwork, info);
108
+ }
109
+ };
110
+
111
+ template <class scalar_t, class value_t>
112
+ struct lapackLstsq_impl<LapackLstsqDriverType::Gelsd, scalar_t, value_t> {
113
+ static void call(
114
+ char trans, int m, int n, int nrhs,
115
+ scalar_t *a, int lda, scalar_t *b, int ldb,
116
+ scalar_t *work, int lwork, int *info, // Gels flavor
117
+ int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
118
+ value_t *s, // Gelss flavor
119
+ int *iwork // Gelsd flavor
120
+ ) {
121
+ lapackGelsd<scalar_t, value_t>(
122
+ m, n, nrhs,
123
+ a, lda, b, ldb,
124
+ s, rcond, rank,
125
+ work, lwork,
126
+ rwork, iwork, info);
127
+ }
128
+ };
129
+
130
+ template <class scalar_t, class value_t>
131
+ struct lapackLstsq_impl<LapackLstsqDriverType::Gelss, scalar_t, value_t> {
132
+ static void call(
133
+ char trans, int m, int n, int nrhs,
134
+ scalar_t *a, int lda, scalar_t *b, int ldb,
135
+ scalar_t *work, int lwork, int *info, // Gels flavor
136
+ int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
137
+ value_t *s, // Gelss flavor
138
+ int *iwork // Gelsd flavor
139
+ ) {
140
+ lapackGelss<scalar_t, value_t>(
141
+ m, n, nrhs,
142
+ a, lda, b, ldb,
143
+ s, rcond, rank,
144
+ work, lwork,
145
+ rwork, info);
146
+ }
147
+ };
148
+
149
+ template <LapackLstsqDriverType driver_type, class scalar_t, class value_t = scalar_t>
150
+ void lapackLstsq(
151
+ char trans, int m, int n, int nrhs,
152
+ scalar_t *a, int lda, scalar_t *b, int ldb,
153
+ scalar_t *work, int lwork, int *info, // Gels flavor
154
+ int *jpvt, value_t rcond, int *rank, value_t* rwork, // Gelsy flavor
155
+ value_t *s, // Gelss flavor
156
+ int *iwork // Gelsd flavor
157
+ ) {
158
+ lapackLstsq_impl<driver_type, scalar_t, value_t>::call(
159
+ trans, m, n, nrhs,
160
+ a, lda, b, ldb,
161
+ work, lwork, info,
162
+ jpvt, rcond, rank, rwork,
163
+ s,
164
+ iwork);
165
+ }
166
+
167
+ template <class scalar_t>
168
+ void lapackLuSolve(char trans, int n, int nrhs, scalar_t *a, int lda, int *ipiv, scalar_t *b, int ldb, int *info);
169
+
170
+ template <class scalar_t>
171
+ void lapackLu(int m, int n, scalar_t *a, int lda, int *ipiv, int *info);
172
+
173
+ template <class scalar_t>
174
+ void lapackLdlHermitian(
175
+ char uplo,
176
+ int n,
177
+ scalar_t* a,
178
+ int lda,
179
+ int* ipiv,
180
+ scalar_t* work,
181
+ int lwork,
182
+ int* info);
183
+
184
+ template <class scalar_t>
185
+ void lapackLdlSymmetric(
186
+ char uplo,
187
+ int n,
188
+ scalar_t* a,
189
+ int lda,
190
+ int* ipiv,
191
+ scalar_t* work,
192
+ int lwork,
193
+ int* info);
194
+
195
+ template <class scalar_t>
196
+ void lapackLdlSolveHermitian(
197
+ char uplo,
198
+ int n,
199
+ int nrhs,
200
+ scalar_t* a,
201
+ int lda,
202
+ int* ipiv,
203
+ scalar_t* b,
204
+ int ldb,
205
+ int* info);
206
+
207
+ template <class scalar_t>
208
+ void lapackLdlSolveSymmetric(
209
+ char uplo,
210
+ int n,
211
+ int nrhs,
212
+ scalar_t* a,
213
+ int lda,
214
+ int* ipiv,
215
+ scalar_t* b,
216
+ int ldb,
217
+ int* info);
218
+
219
+ template<class scalar_t, class value_t=scalar_t>
220
+ void lapackSvd(char jobz, int m, int n, scalar_t *a, int lda, value_t *s, scalar_t *u, int ldu, scalar_t *vt, int ldvt, scalar_t *work, int lwork, value_t *rwork, int *iwork, int *info);
221
+ #endif
222
+
223
+ #if AT_BUILD_WITH_BLAS()
224
+ template <class scalar_t>
225
+ void blasTriangularSolve(char side, char uplo, char trans, char diag, int n, int nrhs, scalar_t* a, int lda, scalar_t* b, int ldb);
226
+ #endif
227
+
228
+ using cholesky_fn = void (*)(const Tensor& /*input*/, const Tensor& /*info*/, bool /*upper*/);
229
+ DECLARE_DISPATCH(cholesky_fn, cholesky_stub)
230
+
231
+ using cholesky_inverse_fn = Tensor& (*)(Tensor& /*result*/, Tensor& /*infos*/, bool /*upper*/);
232
+
233
+ DECLARE_DISPATCH(cholesky_inverse_fn, cholesky_inverse_stub)
234
+
235
+ using linalg_eig_fn = void (*)(Tensor& /*eigenvalues*/, Tensor& /*eigenvectors*/, Tensor& /*infos*/, const Tensor& /*input*/, bool /*compute_eigenvectors*/);
236
+
237
+ DECLARE_DISPATCH(linalg_eig_fn, linalg_eig_stub)
238
+
239
+ using geqrf_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/);
240
+ DECLARE_DISPATCH(geqrf_fn, geqrf_stub)
241
+
242
+ using orgqr_fn = Tensor& (*)(Tensor& /*result*/, const Tensor& /*tau*/);
243
+ DECLARE_DISPATCH(orgqr_fn, orgqr_stub)
244
+
245
+ using ormqr_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/, const Tensor& /*other*/, bool /*left*/, bool /*transpose*/);
246
+ DECLARE_DISPATCH(ormqr_fn, ormqr_stub)
247
+
248
+ using linalg_eigh_fn = void (*)(
249
+ const Tensor& /*eigenvalues*/,
250
+ const Tensor& /*eigenvectors*/,
251
+ const Tensor& /*infos*/,
252
+ bool /*upper*/,
253
+ bool /*compute_eigenvectors*/);
254
+ DECLARE_DISPATCH(linalg_eigh_fn, linalg_eigh_stub)
255
+
256
+ using lstsq_fn = void (*)(
257
+ const Tensor& /*a*/,
258
+ Tensor& /*b*/,
259
+ Tensor& /*rank*/,
260
+ Tensor& /*singular_values*/,
261
+ Tensor& /*infos*/,
262
+ double /*rcond*/,
263
+ std::string /*driver_name*/);
264
+ DECLARE_DISPATCH(lstsq_fn, lstsq_stub)
265
+
266
+ using triangular_solve_fn = void (*)(
267
+ const Tensor& /*A*/,
268
+ const Tensor& /*B*/,
269
+ bool /*left*/,
270
+ bool /*upper*/,
271
+ TransposeType /*transpose*/,
272
+ bool /*unitriangular*/);
273
+ DECLARE_DISPATCH(triangular_solve_fn, triangular_solve_stub)
274
+
275
+ using lu_factor_fn = void (*)(
276
+ const Tensor& /*input*/,
277
+ const Tensor& /*pivots*/,
278
+ const Tensor& /*infos*/,
279
+ bool /*compute_pivots*/);
280
+ DECLARE_DISPATCH(lu_factor_fn, lu_factor_stub)
281
+
282
+ using unpack_pivots_fn = void(*)(
283
+ TensorIterator& iter,
284
+ const int64_t dim_size,
285
+ const int64_t max_pivot);
286
+ DECLARE_DISPATCH(unpack_pivots_fn, unpack_pivots_stub)
287
+
288
+ using lu_solve_fn = void (*)(
289
+ const Tensor& /*LU*/,
290
+ const Tensor& /*pivots*/,
291
+ const Tensor& /*B*/,
292
+ TransposeType /*trans*/);
293
+ DECLARE_DISPATCH(lu_solve_fn, lu_solve_stub)
294
+
295
+ using ldl_factor_fn = void (*)(
296
+ const Tensor& /*LD*/,
297
+ const Tensor& /*pivots*/,
298
+ const Tensor& /*info*/,
299
+ bool /*upper*/,
300
+ bool /*hermitian*/);
301
+ DECLARE_DISPATCH(ldl_factor_fn, ldl_factor_stub)
302
+
303
+ using svd_fn = void (*)(
304
+ const Tensor& /*A*/,
305
+ const bool /*full_matrices*/,
306
+ const bool /*compute_uv*/,
307
+ const std::optional<std::string_view>& /*driver*/,
308
+ const Tensor& /*U*/,
309
+ const Tensor& /*S*/,
310
+ const Tensor& /*Vh*/,
311
+ const Tensor& /*info*/);
312
+ DECLARE_DISPATCH(svd_fn, svd_stub)
313
+
314
+ using ldl_solve_fn = void (*)(
315
+ const Tensor& /*LD*/,
316
+ const Tensor& /*pivots*/,
317
+ const Tensor& /*result*/,
318
+ bool /*upper*/,
319
+ bool /*hermitian*/);
320
+ DECLARE_DISPATCH(ldl_solve_fn, ldl_solve_stub)
321
+ } // namespace at::native
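BatchLinearAlgebra.h routes the four LAPACK least-squares drivers (gels, gelsy, gelsd, gelss) through a single lapackLstsq entry point by specializing lapackLstsq_impl on LapackLstsqDriverType, so the driver is fixed at compile time while the call site stays uniform. A minimal sketch of that tag-dispatch pattern, using illustrative names (Driver, impl, solve are not ATen symbols):

#include <cstdio>

// Compile-time tag selects the implementation, mirroring lapackLstsq_impl.
enum class Driver { A, B };

template <Driver d>
struct impl;  // primary template: only the specializations below exist

template <>
struct impl<Driver::A> {
  static void call(int x) { std::printf("driver A solves %d\n", x); }
};

template <>
struct impl<Driver::B> {
  static void call(int x) { std::printf("driver B solves %d\n", x); }
};

// One generic front end, analogous to lapackLstsq: the tag picks the body.
template <Driver d>
void solve(int x) { impl<d>::call(x); }

int main() {
  solve<Driver::A>(1);
  solve<Driver::B>(2);
}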
lib/python3.10/site-packages/torch/include/ATen/native/CanUse32BitIndexMath.h ADDED
@@ -0,0 +1,13 @@
+ #pragma once
+ #include <c10/macros/Export.h>
+ #include <limits>
+
+ namespace at {
+ class TensorBase;
+ }
+
+ namespace at::native {
+
+ TORCH_API bool canUse32BitIndexMath(const at::TensorBase &t, int64_t max_elem=std::numeric_limits<int32_t>::max());
+
+ }
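canUse32BitIndexMath reports whether every linear offset into a tensor fits in a 32-bit integer, which lets kernels fall back to cheaper int32 index arithmetic. A self-contained sketch of how a caller typically branches on such a check; FakeTensor and the simplified numel-only test are illustrative (the real ATen check also inspects sizes and strides):

#include <cstdint>
#include <cstdio>
#include <limits>

struct FakeTensor { int64_t numel; };  // stand-in: only the element count matters here

// Simplified analogue of canUse32BitIndexMath (the real one walks sizes/strides).
bool can_use_32bit(const FakeTensor& t,
                   int64_t max_elem = std::numeric_limits<int32_t>::max()) {
  return t.numel <= max_elem;
}

template <typename index_t>
void kernel(const FakeTensor& t) {
  std::printf("using %zu-byte indices for %lld elements\n",
              sizeof(index_t), static_cast<long long>(t.numel));
}

int main() {
  FakeTensor small{1 << 20};
  if (can_use_32bit(small)) kernel<int32_t>(small);
  else                      kernel<int64_t>(small);
}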
lib/python3.10/site-packages/torch/include/ATen/native/DispatchStub.h ADDED
@@ -0,0 +1,479 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/DeviceType.h>
4
+ #include <c10/macros/Macros.h>
5
+ #include <c10/util/Array.h>
6
+
7
+ #include <atomic>
8
+ #include <utility>
9
+ #include <variant>
10
+
11
+ // Implements instruction set specific function dispatch.
12
+ //
13
+ // Kernels that may make use of specialized instruction sets (e.g. AVX2) are
14
+ // compiled multiple times with different compiler flags (e.g. -mavx2). A
15
+ // DispatchStub contains a table of function pointers for a kernel. At runtime,
16
+ // the fastest available kernel is chosen based on the features reported by
17
+ // cpuinfo.
18
+ //
19
+ // Example:
20
+ //
21
+ // In native/MyKernel.h:
22
+ // using fn_type = void(*)(const Tensor& x);
23
+ // DECLARE_DISPATCH(fn_type, stub)
24
+ //
25
+ // In native/MyKernel.cpp
26
+ // DEFINE_DISPATCH(stub);
27
+ //
28
+ // In native/cpu/MyKernel.cpp:
29
+ // namespace {
30
+ // // use anonymous namespace so that different cpu versions won't conflict
31
+ // void kernel(const Tensor& x) { ... }
32
+ // }
33
+ // REGISTER_DISPATCH(stub, &kernel);
34
+ //
35
+ // To call:
36
+ // stub(kCPU, tensor);
37
+ //
38
+ // TODO: CPU instruction set selection should be folded into whatever
39
+ // the main dispatch mechanism is.
40
+ //
41
+ // Supported device types for registration:
42
+ // - CPU: Central Processing Unit
43
+ // - CUDA: NVIDIA GPUs
44
+ // - HIP: AMD GPUs
45
+ // - MPS: Apple Silicon GPUs (Metal Performance Shaders)
46
+ // - MTIA: Meta Training and Inference Devices
47
+ // - XPU: Intel GPUs
48
+ // - PrivateUse1: Reserved for private/custom device types
49
+ //
50
+ // If you want to update the list of supported devices, add a new dispatch_ptr
51
+ // member in DispatchStubImpl.h and update the get_call_ptr switch.
52
+ // As well you will need to update the inlined list in 'is_device_supported`
53
+ //
54
+ //
55
+ // ignore warnings about DispatchStub::DEFAULT, AVX, AVX2 defined elsewhere
56
+ C10_CLANG_DIAGNOSTIC_PUSH()
57
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wundefined-var-template")
58
+
59
+ namespace at::native {
60
+
61
+ enum class CPUCapability {
62
+ DEFAULT = 0,
63
+ #if defined(HAVE_VSX_CPU_DEFINITION)
64
+ VSX = 1,
65
+ #elif defined(HAVE_ZVECTOR_CPU_DEFINITION)
66
+ ZVECTOR = 1,
67
+ #elif defined(HAVE_SVE_CPU_DEFINITION)
68
+ SVE256 = 1,
69
+ #else
70
+ AVX2 = 1,
71
+ AVX512 = 2,
72
+ #endif
73
+ NUM_OPTIONS
74
+ };
75
+
76
+ // Enum for error types
77
+ enum class ErrorType {
78
+ MissingDeviceKernel,
79
+ DeviceNotSupported
80
+ };
81
+
82
+ // Alias for the return type using std::variant
83
+ using DispatchResult = std::variant<void*, ErrorType>;
84
+
85
+ CPUCapability get_cpu_capability();
86
+
87
+ template <typename FnPtr, typename T>
88
+ struct DispatchStub;
89
+
90
+ /**
91
+ * The sole purpose of this class is to outline methods that don't need to be
92
+ * specialized or otherwise inlined and duplicated (by the compiler due to
93
+ * template expansion), since it causes size bloat if there are a significant
94
+ * number of specialization of the DispatchStub<> class.
95
+ */
96
+ struct TORCH_API DispatchStubImpl {
97
+
98
+ // The DispatchStubImpl::try_get_call_ptr() method is used to get the call
99
+ // pointer for a given device type. If the call pointer is not found,
100
+ // DispatchStubImpl::try_get_call_ptr() returns an ErrorType.
101
+ // The main difference between try_get_call_ptr() and get_call_ptr() is that
102
+ // try_get_call_ptr() will return the ErrorType and not raise an exception.
103
+ DispatchResult try_get_call_ptr(
104
+ c10::DeviceType device_type
105
+ , void *DEFAULT
106
+ #ifdef HAVE_AVX512_CPU_DEFINITION
107
+ , void *AVX512
108
+ #endif
109
+ #ifdef HAVE_AVX2_CPU_DEFINITION
110
+ , void *AVX2
111
+ #endif
112
+ #ifdef HAVE_VSX_CPU_DEFINITION
113
+ , void *VSX
114
+ #endif
115
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
116
+ , void *ZVECTOR
117
+ #endif
118
+ #ifdef HAVE_SVE256_CPU_DEFINITION
119
+ , void *SVE256
120
+ #endif
121
+ );
122
+
123
+ // Analogous to try_get_call_ptr(), but it will return the ErrorType and not
124
+ // raise an exception.
125
+ DispatchResult try_choose_cpu_impl(
126
+ void *DEFAULT
127
+ #ifdef HAVE_AVX512_CPU_DEFINITION
128
+ , void *AVX512
129
+ #endif
130
+ #ifdef HAVE_AVX2_CPU_DEFINITION
131
+ , void *AVX2
132
+ #endif
133
+ #ifdef HAVE_VSX_CPU_DEFINITION
134
+ , void *VSX
135
+ #endif
136
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
137
+ , void *ZVECTOR
138
+ #endif
139
+ #ifdef HAVE_SVE256_CPU_DEFINITION
140
+ , void *SVE256
141
+ #endif
142
+ );
143
+
144
+
145
+ void* get_call_ptr(
146
+ c10::DeviceType device_type
147
+ , void *DEFAULT
148
+ #ifdef HAVE_AVX512_CPU_DEFINITION
149
+ , void *AVX512
150
+ #endif
151
+ #ifdef HAVE_AVX2_CPU_DEFINITION
152
+ , void *AVX2
153
+ #endif
154
+ #ifdef HAVE_VSX_CPU_DEFINITION
155
+ , void *VSX
156
+ #endif
157
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
158
+ , void *ZVECTOR
159
+ #endif
160
+ #ifdef HAVE_SVE256_CPU_DEFINITION
161
+ , void *SVE256
162
+ #endif
163
+ );
164
+
165
+ /**
166
+ * The CPU Dispatch actual method is chosen in decreasing order of preference by
167
+ * DispatchStubImpl::choose_cpu_impl() in case none is found by
168
+ * DispatchStubImpl::get_call_ptr() in cpu_dispatch_ptr.
169
+ */
170
+ void* choose_cpu_impl(
171
+ void *DEFAULT
172
+ #ifdef HAVE_AVX512_CPU_DEFINITION
173
+ , void *AVX512
174
+ #endif
175
+ #ifdef HAVE_AVX2_CPU_DEFINITION
176
+ , void *AVX2
177
+ #endif
178
+ #ifdef HAVE_VSX_CPU_DEFINITION
179
+ , void *VSX
180
+ #endif
181
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
182
+ , void *ZVECTOR
183
+ #endif
184
+ #ifdef HAVE_SVE256_CPU_DEFINITION
185
+ , void *SVE256
186
+ #endif
187
+ );
188
+
189
+ // Fixing dispatch error in Windows debug builds.
190
+ // See https://github.com/pytorch/pytorch/issues/22681 for more details.
191
+ #if defined(_MSC_VER) && defined(_DEBUG)
192
+ std::atomic<void*> cpu_dispatch_ptr;
193
+ void* cuda_dispatch_ptr;
194
+ void* hip_dispatch_ptr;
195
+ void* mps_dispatch_ptr;
196
+ void* mtia_dispatch_ptr;
197
+ #if defined(USE_XPU)
198
+ void* xpu_dispatch_ptr;
199
+ #endif
200
+ void* privateuse1_dispatch_ptr;
201
+ #else
202
+ std::atomic<void*> cpu_dispatch_ptr{nullptr};
203
+ void* cuda_dispatch_ptr = nullptr;
204
+ void* hip_dispatch_ptr = nullptr;
205
+ void* mps_dispatch_ptr = nullptr;
206
+ void* mtia_dispatch_ptr = nullptr;
207
+ #if defined(USE_XPU)
208
+ void* xpu_dispatch_ptr = nullptr;
209
+ #endif
210
+ void* privateuse1_dispatch_ptr = nullptr;
211
+ #endif
212
+ };
213
+
214
+ template <typename rT, typename T, typename... Args>
215
+ struct DispatchStub<rT (*)(Args...), T> {
216
+ using FnPtr = rT (*) (Args...);
217
+
218
+ DispatchStub() = default;
219
+ DispatchStub(const DispatchStub&) = delete;
220
+ DispatchStub& operator=(const DispatchStub&) = delete;
221
+
222
+ private:
223
+ FnPtr get_call_ptr(const c10::DeviceType device_type) {
224
+ return reinterpret_cast<FnPtr>(
225
+ impl.get_call_ptr(device_type
226
+ , reinterpret_cast<void*>(DEFAULT)
227
+ #ifdef HAVE_AVX512_CPU_DEFINITION
228
+ , reinterpret_cast<void*>(AVX512)
229
+ #endif
230
+ #ifdef HAVE_AVX2_CPU_DEFINITION
231
+ , reinterpret_cast<void*>(AVX2)
232
+ #endif
233
+ #ifdef HAVE_VSX_CPU_DEFINITION
234
+ , reinterpret_cast<void*>(VSX)
235
+ #endif
236
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
237
+ , reinterpret_cast<void*>(ZVECTOR)
238
+ #endif
239
+ #ifdef HAVE_SVE256_CPU_DEFINITION
240
+ , reinterpret_cast<void*>(SVE256)
241
+ #endif
242
+ )
243
+ );
244
+ }
245
+
246
+ public:
247
+ template <typename... ArgTypes>
248
+ rT operator()(c10::DeviceType device_type, ArgTypes&&... args) {
249
+ FnPtr call_ptr = get_call_ptr(device_type);
250
+ return (*call_ptr)(std::forward<ArgTypes>(args)...);
251
+ }
252
+
253
+ void set_cuda_dispatch_ptr(FnPtr fn_ptr) {
254
+ impl.cuda_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
255
+ }
256
+
257
+ #if defined(USE_XPU)
258
+ void set_xpu_dispatch_ptr(FnPtr fn_ptr){
259
+ impl.xpu_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
260
+ }
261
+ #endif
262
+
263
+ void set_hip_dispatch_ptr(FnPtr fn_ptr) {
264
+ impl.hip_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
265
+ }
266
+
267
+ void set_mps_dispatch_ptr(FnPtr fn_ptr) {
268
+ impl.mps_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
269
+ }
270
+
271
+ void set_mtia_dispatch_ptr(FnPtr fn_ptr) {
272
+ impl.mtia_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
273
+ }
274
+
275
+ void set_privateuse1_dispatch_ptr(FnPtr fn_ptr) {
276
+ impl.privateuse1_dispatch_ptr = reinterpret_cast<void*>(fn_ptr);
277
+ }
278
+
279
+ // Returns true if the dispatcher has a kernel registered for this device
280
+ // type.
281
+ bool is_device_supported(const c10::DeviceType device_type) {
282
+ auto result = impl.try_get_call_ptr(device_type
283
+ , reinterpret_cast<void*>(DEFAULT)
284
+ #ifdef HAVE_AVX512_CPU_DEFINITION
285
+ , reinterpret_cast<void*>(AVX512)
286
+ #endif
287
+ #ifdef HAVE_AVX2_CPU_DEFINITION
288
+ , reinterpret_cast<void*>(AVX2)
289
+ #endif
290
+ #ifdef HAVE_VSX_CPU_DEFINITION
291
+ , reinterpret_cast<void*>(VSX)
292
+ #endif
293
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
294
+ , reinterpret_cast<void*>(ZVECTOR)
295
+ #endif
296
+ #ifdef HAVE_SVE256_CPU_DEFINITION
297
+ , reinterpret_cast<void*>(SVE256)
298
+ #endif
299
+ );
300
+ if (std::holds_alternative<ErrorType>(result)){
301
+ return false;
302
+ }
303
+ return true;
304
+ }
305
+
306
+ static TORCH_API FnPtr DEFAULT;
307
+ #ifdef HAVE_AVX512_CPU_DEFINITION
308
+ static TORCH_API FnPtr AVX512;
309
+ #endif
310
+ #ifdef HAVE_AVX2_CPU_DEFINITION
311
+ static TORCH_API FnPtr AVX2;
312
+ #endif
313
+ #ifdef HAVE_VSX_CPU_DEFINITION
314
+ static TORCH_API FnPtr VSX;
315
+ #endif
316
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
317
+ static TORCH_API FnPtr ZVECTOR;
318
+ #endif
319
+ #ifdef HAVE_SVE256_CPU_DEFINITION
320
+ static TORCH_API FnPtr SVE256;
321
+ #endif
322
+ private:
323
+ DispatchStubImpl impl;
324
+ };
325
+
326
+ namespace {
327
+ template <typename DispatchStub>
328
+ struct RegisterCUDADispatch {
329
+ RegisterCUDADispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
330
+ stub.set_cuda_dispatch_ptr(value);
331
+ }
332
+ };
333
+
334
+ template <typename DispatchStub>
335
+ struct RegisterXPUDispatch {
336
+ RegisterXPUDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value){
337
+ stub.set_xpu_dispatch_ptr(value);
338
+ }
339
+ };
340
+
341
+ template <typename DispatchStub>
342
+ struct RegisterMPSDispatch {
343
+ RegisterMPSDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
344
+ stub.set_mps_dispatch_ptr(value);
345
+ }
346
+ };
347
+
348
+ template <typename DispatchStub>
349
+ struct RegisterHIPDispatch {
350
+ RegisterHIPDispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
351
+ // TODO: make this point at hip_dispatch_ptr
352
+ stub.set_cuda_dispatch_ptr(value);
353
+ }
354
+ };
355
+
356
+ template <typename DispatchStub>
357
+ struct RegisterMTIADispatch {
358
+ RegisterMTIADispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
359
+ stub.set_mtia_dispatch_ptr(value);
360
+ }
361
+ };
362
+
363
+ template <typename DispatchStub>
364
+ struct RegisterPRIVATEUSE1Dispatch {
365
+ RegisterPRIVATEUSE1Dispatch(DispatchStub &stub, typename DispatchStub::FnPtr value) {
366
+ stub.set_privateuse1_dispatch_ptr(value);
367
+ }
368
+ };
369
+
370
+ } // anonymous namespace
371
+ // Compiler will complain if you put things like std::tuple<Tensor, Tensor> in
372
+ // the `fn` argument of DECLARE_DISPATCH. Some possible workarounds, e.g.,
373
+ // adding parentheses and using helper struct to get rid of the parentheses, do
374
+ // not work with MSVC. So do a `using`-declaration if you need to pass in such
375
+ // `fn`, e.g., grid_sampler_2d_backward_cpu_kernel in GridSampleKernel.h.
376
+ #define DECLARE_DISPATCH(fn, name) \
377
+ struct name##_DECLARE_DISPATCH_type : DispatchStub<fn, name##_DECLARE_DISPATCH_type> { \
378
+ name##_DECLARE_DISPATCH_type() = default; \
379
+ name##_DECLARE_DISPATCH_type(const name##_DECLARE_DISPATCH_type&) = delete; \
380
+ name##_DECLARE_DISPATCH_type& operator=(const name##_DECLARE_DISPATCH_type&) = delete; \
381
+ name##_DECLARE_DISPATCH_type(name##_DECLARE_DISPATCH_type&&) = delete; \
382
+ name##_DECLARE_DISPATCH_type& operator=(name##_DECLARE_DISPATCH_type&&) = delete; \
383
+ ~name##_DECLARE_DISPATCH_type() = default; \
384
+ }; \
385
+ extern TORCH_API struct name##_DECLARE_DISPATCH_type name;
386
+
387
+ #define DEFINE_DISPATCH(name) struct name##_DECLARE_DISPATCH_type name
388
+
389
+ #define REGISTER_ARCH_DISPATCH(name, arch, fn) \
390
+ template <> name##_DECLARE_DISPATCH_type::FnPtr TORCH_API DispatchStub<name##_DECLARE_DISPATCH_type::FnPtr, struct name##_DECLARE_DISPATCH_type>::arch = fn;
391
+
392
+ #ifdef HAVE_AVX512_CPU_DEFINITION
393
+ #define REGISTER_AVX512_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX512, fn)
394
+ #else
395
+ #define REGISTER_AVX512_DISPATCH(name, fn)
396
+ #endif
397
+
398
+ #ifdef HAVE_AVX2_CPU_DEFINITION
399
+ #define REGISTER_AVX2_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, AVX2, fn)
400
+ #else
401
+ #define REGISTER_AVX2_DISPATCH(name, fn)
402
+ #endif
403
+
404
+ #ifdef HAVE_VSX_CPU_DEFINITION
405
+ #define REGISTER_VSX_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, VSX, fn)
406
+ #else
407
+ #define REGISTER_VSX_DISPATCH(name, fn)
408
+ #endif
409
+
410
+ #ifdef HAVE_ZVECTOR_CPU_DEFINITION
411
+ #define REGISTER_ZVECTOR_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, ZVECTOR, fn)
412
+ #else
413
+ #define REGISTER_ZVECTOR_DISPATCH(name, fn)
414
+ #endif
415
+
416
+ #ifdef HAVE_SVE256_CPU_DEFINITION
417
+ #define REGISTER_SVE256_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, SVE256, fn)
418
+ #else
419
+ #define REGISTER_SVE256_DISPATCH(name, fn)
420
+ #endif
421
+
422
+ // Macro to register the same kernel for all CPU arch types. This is useful
423
+ // if a kernel does not benefit from being recompiled across different arch types.
424
+ #define REGISTER_ALL_CPU_DISPATCH(name, fn) \
425
+ REGISTER_ARCH_DISPATCH(name, DEFAULT, fn) \
426
+ REGISTER_AVX512_DISPATCH(name, fn) \
427
+ REGISTER_AVX2_DISPATCH(name, fn) \
428
+ REGISTER_VSX_DISPATCH(name, fn) \
429
+ REGISTER_ZVECTOR_DISPATCH(name, fn) \
430
+ REGISTER_SVE256_DISPATCH(name, fn)
431
+
432
+ #define REGISTER_NO_CPU_DISPATCH(name) \
433
+ REGISTER_ALL_CPU_DISPATCH(name, nullptr)
434
+
435
+ #define REGISTER_CUDA_DISPATCH(name, fn) \
436
+ static RegisterCUDADispatch<struct name##_DECLARE_DISPATCH_type> name ## __register(name, fn);
437
+
438
+ #define REGISTER_XPU_DISPATCH(name, fn) \
439
+ static RegisterXPUDispatch<struct name##_DECLARE_DISPATCH_type> name ## __register(name, fn);
440
+
441
+ #define REGISTER_HIP_DISPATCH(name, fn) \
442
+ static RegisterHIPDispatch<struct name##_DECLARE_DISPATCH_type> name ## __register(name, fn);
443
+
444
+ #define REGISTER_MPS_DISPATCH(name, fn) \
445
+ static RegisterMPSDispatch<struct name##_DECLARE_DISPATCH_type> name ## __register(name, fn);
446
+
447
+ #define REGISTER_MTIA_DISPATCH(name, fn) \
448
+ static RegisterMTIADispatch<struct name##_DECLARE_DISPATCH_type> name ## __register(name, fn);
449
+
450
+ #define REGISTER_PRIVATEUSE1_DISPATCH(name, fn) \
451
+ static RegisterPRIVATEUSE1Dispatch<struct name##_DECLARE_DISPATCH_type> name ## __register(name, fn);
452
+
453
+ // NB: This macro must be used in an actual 'cu' file; if you try using
454
+ // it from a 'cpp' file it will not work!
455
+ #if defined(__CUDACC__)
456
+ #define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn)
457
+ #elif defined(__HIPCC__)
458
+ // TODO: cut this over to HIP dispatch once we stop pretending that CUDA
459
+ // is HIP in the PyTorch HIPify build.
460
+ #define REGISTER_DISPATCH(name, fn) REGISTER_CUDA_DISPATCH(name, fn)
461
+ // #define REGISTER_DISPATCH(name, fn) REGISTER_HIP_DISPATCH(name, fn)
462
+ #elif defined(__OBJC__) && defined(USE_MPS)
463
+ // NB: this macro must be used from a 'mm' file in order to dispatch a MPS kernel
464
+ #define REGISTER_DISPATCH(name, fn) REGISTER_MPS_DISPATCH(name, fn)
465
+ #elif defined(CPU_CAPABILITY)
466
+ // REGISTER_DISPATCH now dispatches an AVX512 kernel to nullptr but registers other dispatches.
467
+ // ALSO_REGISTER_AVX512_DISPATCH should be used for ensuring AVX512 dispatch, among others.
468
+ // ALSO_REGISTER_SVE256_DISPATCH should be used for ensuring SVE256 dispatch, among others.
469
+ #ifdef CPU_CAPABILITY_AVX512
470
+ #define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, ((void*)(fn) ? nullptr : nullptr))
471
+ #else
472
+ #define REGISTER_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn)
473
+ #endif
474
+ #define ALSO_REGISTER_AVX512_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn)
475
+ #define ALSO_REGISTER_SVE256_DISPATCH(name, fn) REGISTER_ARCH_DISPATCH(name, CPU_CAPABILITY, fn)
476
+ #endif
477
+ } // namespace at::native
478
+
479
+ C10_CLANG_DIAGNOSTIC_POP()
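The comment block at the top of DispatchStub.h documents a three-file usage pattern; spelled out here with illustrative names (my_fn_type, my_kernel_stub, and my_kernel are placeholders, not existing ATen symbols):

// native/MyKernel.h — declare the stub once.
using my_fn_type = void (*)(const at::Tensor& x);
DECLARE_DISPATCH(my_fn_type, my_kernel_stub)

// native/MyKernel.cpp — define the stub object.
DEFINE_DISPATCH(my_kernel_stub);

// native/cpu/MyKernel.cpp — compiled once per CPU capability (AVX2, AVX512, ...).
namespace {
// anonymous namespace so the differently-compiled copies do not collide
void my_kernel(const at::Tensor& x) { /* capability-specific work */ }
} // namespace
REGISTER_DISPATCH(my_kernel_stub, &my_kernel);

// Call site: the stub picks the fastest registered kernel for the device.
// my_kernel_stub(at::kCPU, tensor);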
lib/python3.10/site-packages/torch/include/ATen/native/EmbeddingBag.h ADDED
@@ -0,0 +1,153 @@
1
+ #include <ATen/core/Tensor.h>
2
+ #include <ATen/Config.h>
3
+ #include <cstdint>
4
+
5
+ #ifdef USE_FBGEMM
6
+ #include <fbgemm/FbgemmEmbedding.h>
7
+ #endif
8
+
9
+ namespace at::native {
10
+
11
+ enum class EmbeddingBagMode {
12
+ SUM = 0,
13
+ MEAN = 1,
14
+ MAX = 2,
15
+ };
16
+
17
+ [[maybe_unused]] static bool operator==(int64_t op1, EmbeddingBagMode op2) {
18
+ return op1 == static_cast<int64_t>(op2);
19
+ }
20
+
21
+ [[maybe_unused]] static bool operator!=(int64_t op1, EmbeddingBagMode op2) {
22
+ return !(op1 == op2);
23
+ }
24
+
25
+ void check_arguments(
26
+ const Tensor& weight,
27
+ const Tensor& indices,
28
+ const Tensor& offsets,
29
+ const int64_t mode,
30
+ const std::optional<Tensor>& per_sample_weights,
31
+ bool include_last_offset);
32
+
33
+ void make_bag_size_out(
34
+ Tensor& bag_size_out,
35
+ const Tensor& offsets,
36
+ const Tensor& indices,
37
+ const int64_t mode,
38
+ const bool include_last_offset,
39
+ const bool requires_grad);
40
+
41
+ void make_max_indices_out(
42
+ Tensor& max_indices_out,
43
+ const Tensor& weight,
44
+ const Tensor& indices,
45
+ const Tensor& offsets,
46
+ const Tensor& bag_size,
47
+ const int64_t mode,
48
+ bool include_last_offset);
49
+
50
+ void make_offset2bag_out(
51
+ Tensor& offset2bag,
52
+ Tensor& output,
53
+ const Tensor& weight,
54
+ const Tensor& indices,
55
+ const Tensor& offsets,
56
+ const int64_t mode,
57
+ const std::optional<Tensor>& per_sample_weights,
58
+ const int64_t padding_idx = -1);
59
+
60
+ #ifdef USE_FBGEMM
61
+
62
+ template<bool has_weight, typename TIndex, typename TData>
63
+ struct _CallbackAndBlockSize {
64
+ using TCallback = typename fbgemm::EmbeddingSpMDMKernelSignature<TData, TIndex, TIndex, TData>::Type;
65
+
66
+ int64_t blockSize = -1;
67
+ TCallback callback = nullptr;
68
+
69
+ static TCallback generateCallback(int64_t block_size) {
70
+ return fbgemm::GenerateEmbeddingSpMDM<TData, TIndex, TIndex, TData>(
71
+ block_size,
72
+ has_weight,
73
+ /* normalize_by_lengths */false,
74
+ /* prefetch */16,
75
+ /* is_weight_positional */false,
76
+ /* use_offsets */true);
77
+ }
78
+
79
+ _CallbackAndBlockSize() = default;
80
+
81
+ explicit _CallbackAndBlockSize(std::optional<int64_t> maybe_block_size)
82
+ : blockSize(maybe_block_size.value_or(-1))
83
+ , callback(maybe_block_size.has_value() ? generateCallback(maybe_block_size.value()) : nullptr)
84
+ {}
85
+ };
86
+
87
+ template<typename... StorageMixins>
88
+ struct _EmbeddingBagKernelCacheImpl : private StorageMixins... {
89
+
90
+ _EmbeddingBagKernelCacheImpl() = default;
91
+ // use each of the mixins to store corresponding kernel and block size
92
+ explicit _EmbeddingBagKernelCacheImpl(std::optional<int64_t> maybe_block_size)
93
+ : StorageMixins(maybe_block_size)...
94
+ {}
95
+
96
+ // this method is thread safe (call sites may call from different threads)
97
+ template<bool has_weight, typename TIndex, typename TData>
98
+ typename _CallbackAndBlockSize<has_weight, TIndex, TData>::TCallback
99
+ getCallback(int64_t block_size) const {
100
+ // if the cache doesn't store the kernel for the incoming block size
101
+ // (so it is different from the one stored in corresponding mixin)
102
+ // regenerate the kernel (not writing it into the cache so we avoid locks)
103
+ if (block_size != _CallbackAndBlockSize<has_weight, TIndex, TData>::blockSize) {
104
+ return _CallbackAndBlockSize<has_weight, TIndex, TData>::generateCallback(block_size);
105
+ }
106
+ // else retrieve the cached kernel from the corresponding mixin
107
+ return _CallbackAndBlockSize<has_weight, TIndex, TData>::callback;
108
+ }
109
+ };
110
+
111
+ // instantiate the cache with the list of storage mixins
112
+ // for each of the 8 _EmbeddingBagKernelCache* usages in the EmbeddingBag.cpp impl file
113
+ using _EmbeddingBagKernelCache = _EmbeddingBagKernelCacheImpl<
114
+ _CallbackAndBlockSize<true, int32_t, float>,
115
+ _CallbackAndBlockSize<false, int32_t, float>,
116
+ _CallbackAndBlockSize<true, int64_t, float>,
117
+ _CallbackAndBlockSize<false, int64_t, float>,
118
+ _CallbackAndBlockSize<true, int32_t, unsigned short>,
119
+ _CallbackAndBlockSize<false, int32_t, unsigned short>,
120
+ _CallbackAndBlockSize<true, int64_t, unsigned short>,
121
+ _CallbackAndBlockSize<false, int64_t, unsigned short>>;
122
+ #else
123
+ struct _EmbeddingBagKernelCache {
124
+ explicit _EmbeddingBagKernelCache(std::optional<int64_t> /* maybe_block_size */) {}
125
+ };
126
+ #endif
127
+
128
+ void _embedding_bag_cpu_impl_out(Tensor& output, Tensor& offset2bag,
129
+ Tensor& bag_size, Tensor* max_indices,
130
+ const Tensor &weight, const Tensor &indices,
131
+ const Tensor &offsets, const int64_t mode = 0,
132
+ const std::optional<Tensor>& per_sample_weights = std::nullopt,
133
+ bool include_last_offset = false,
134
+ int64_t padding_idx = -1,
135
+ _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr);
136
+
137
+ void _embedding_bag_cpu_out(
138
+ at::Tensor& output,
139
+ at::Tensor& offset2bag,
140
+ at::Tensor& bag_size,
141
+ at::Tensor* p_max_indices,
142
+ const at::Tensor& weight,
143
+ const at::Tensor& indices,
144
+ const at::Tensor& offsets,
145
+ const bool scale_grad_by_freq,
146
+ const int64_t mode,
147
+ const bool sparse,
148
+ const std::optional<at::Tensor>& per_sample_weights,
149
+ const bool include_last_offset,
150
+ const std::optional<int64_t>& padding_idx,
151
+ _EmbeddingBagKernelCache* fbgemm_kernel_cache = nullptr);
152
+
153
+ } // namespace at::native
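_EmbeddingBagKernelCacheImpl inherits one _CallbackAndBlockSize mixin per (has_weight, index type, data type) combination; getCallback() returns the cached FBGEMM kernel when the block size matches and regenerates a temporary one otherwise, so no locking is needed. A stripped-down sketch of that mixin cache, with illustrative names (CachedKernel, Cache) and a printf standing in for fbgemm::GenerateEmbeddingSpMDM:

#include <cstdio>
#include <optional>

template <int Tag>
struct CachedKernel {
  using Fn = void (*)(int);
  long long blockSize = -1;
  Fn fn = nullptr;

  static Fn generate(long long /*block_size*/) {
    // stand-in for generating a specialized kernel
    return +[](int x) { std::printf("kernel tag=%d arg=%d\n", Tag, x); };
  }
  CachedKernel() = default;
  explicit CachedKernel(std::optional<long long> bs)
      : blockSize(bs.value_or(-1)), fn(bs ? generate(*bs) : nullptr) {}
};

template <typename... Mixins>
struct Cache : private Mixins... {
  explicit Cache(std::optional<long long> bs) : Mixins(bs)... {}

  template <int Tag>
  typename CachedKernel<Tag>::Fn get(long long bs) const {
    if (bs != CachedKernel<Tag>::blockSize)     // miss: rebuild on the fly,
      return CachedKernel<Tag>::generate(bs);   // never mutate the cache (thread safe)
    return CachedKernel<Tag>::fn;               // hit: reuse the stored kernel
  }
};

int main() {
  Cache<CachedKernel<0>, CachedKernel<1>> cache(64);
  cache.get<0>(64)(7);    // block size matches: cached kernel
  cache.get<1>(128)(9);   // block size differs: regenerated kernel
}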
lib/python3.10/site-packages/torch/include/ATen/native/FusedAdam.h ADDED
@@ -0,0 +1,27 @@
+ #include <ATen/core/Tensor.h>
+ #include <ATen/native/DispatchStub.h>
+
+ namespace at::native {
+
+ enum class ADAM_MODE : uint8_t { ORIGINAL = 0, ADAMW = 1 };
+
+ using fused_adam_fn = void (*)(
+ const at::Tensor& param,
+ const at::Tensor& grad,
+ const at::Tensor& exp_avg,
+ const at::Tensor& exp_avg_sq,
+ const at::Tensor& max_exp_avg_sq,
+ const at::Tensor& state_step,
+ const double lr,
+ const double beta1,
+ const double beta2,
+ const double weight_decay,
+ const double eps,
+ const bool amsgrad,
+ const bool maximize,
+ const float* grad_scale_ptr,
+ const ADAM_MODE);
+
+ DECLARE_DISPATCH(fused_adam_fn, fused_adam_stub)
+
+ } // namespace at::native
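fused_adam_fn is the contract a CPU fused-Adam kernel must satisfy: one call updates param, exp_avg, and exp_avg_sq in place for a whole tensor, with ADAM_MODE selecting L2-style or decoupled (AdamW) weight decay. A scalar sketch of the standard update such a kernel applies element-wise; amsgrad, maximize, and gradient scaling are omitted, and the names here are illustrative rather than ATen's:

#include <cmath>
#include <cstdio>

enum class AdamMode { Original, AdamW };  // mirrors ADAM_MODE

void adam_step(double& param, double grad, double& exp_avg, double& exp_avg_sq,
               long step, double lr, double beta1, double beta2,
               double weight_decay, double eps, AdamMode mode) {
  if (mode == AdamMode::Original) {
    grad += weight_decay * param;        // L2 penalty folded into the gradient
  } else {
    param -= lr * weight_decay * param;  // decoupled weight decay (AdamW)
  }
  exp_avg    = beta1 * exp_avg    + (1 - beta1) * grad;
  exp_avg_sq = beta2 * exp_avg_sq + (1 - beta2) * grad * grad;
  const double bc1 = 1 - std::pow(beta1, step);   // bias corrections
  const double bc2 = 1 - std::pow(beta2, step);
  param -= lr * (exp_avg / bc1) / (std::sqrt(exp_avg_sq / bc2) + eps);
}

int main() {
  double p = 1.0, m = 0.0, v = 0.0;
  adam_step(p, 0.5, m, v, /*step=*/1, 1e-3, 0.9, 0.999, 0.01, 1e-8, AdamMode::AdamW);
  std::printf("param after one AdamW step: %.6f\n", p);
}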
lib/python3.10/site-packages/torch/include/ATen/native/GridSampler.h ADDED
@@ -0,0 +1,298 @@
1
+ #pragma once
2
+
3
+ #include <algorithm>
4
+ #include <cmath>
5
+ #include <cstdint>
6
+ #include <utility>
7
+
8
+ #include <ATen/native/GridSamplerUtils.h>
9
+
10
+ namespace at::native {
11
+
12
+ using detail::GridSamplerInterpolation;
13
+ using detail::GridSamplerPadding;
14
+
15
+ // Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value,
16
+ // where we view each pixel as an area between (idx - 0.5) and (idx + 0.5).
17
+ // if align_corners: -1 and +1 get sent to the centers of the corner pixels
18
+ // -1 --> 0
19
+ // +1 --> (size - 1)
20
+ // scale_factor = (size - 1) / 2
21
+ // if not align_corners: -1 and +1 get sent to the image edges
22
+ // -1 --> -0.5
23
+ // +1 --> (size - 1) + 0.5 == size - 0.5
24
+ // scale_factor = size / 2
25
+ template <typename scalar_t>
26
+ static inline scalar_t grid_sampler_unnormalize(scalar_t coord, int64_t size,
27
+ bool align_corners) {
28
+ if (align_corners) {
29
+ // unnormalize coord from [-1, 1] to [0, size - 1]
30
+ return ((coord + 1) / 2) * (size - 1);
31
+ } else {
32
+ // unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
33
+ return ((coord + 1) * size - 1) / 2;
34
+ }
35
+ }
36
+
37
+ // grid_sampler_unnormalize_set_grad works the same as grid_sampler_unnormalize
38
+ // except that it also returns the `d output / d input` via pointer argument
39
+ // `grad_in`.
40
+ // This is useful in the backward pass of grid_sampler.
41
+ template <typename scalar_t>
42
+ static inline scalar_t grid_sampler_unnormalize_set_grad(scalar_t coord, int64_t size,
43
+ bool align_corners, scalar_t *grad_in) {
44
+ if (align_corners) {
45
+ // unnormalize coord from [-1, 1] to [0, size - 1]
46
+ *grad_in = static_cast<scalar_t>(size - 1) / 2;
47
+ return ((coord + 1) / 2) * (size - 1);
48
+ } else {
49
+ // unnormalize coord from [-1, 1] to [-0.5, size - 0.5]
50
+ *grad_in = static_cast<scalar_t>(size) / 2;
51
+ return ((coord + 1) * size - 1) / 2;
52
+ }
53
+ }
54
+
55
+ // Clips coordinates to between 0 and clip_limit - 1
56
+ template<typename scalar_t>
57
+ static inline scalar_t clip_coordinates(scalar_t in, int64_t clip_limit) {
58
+ return std::min(static_cast<scalar_t>(clip_limit - 1), std::max(in, static_cast<scalar_t>(0)));
59
+ }
60
+
61
+ // clip_coordinates_set_grad works similarly to clip_coordinates except that
62
+ // it also returns the `d output / d input` via pointer argument `grad_in`.
63
+ // This is useful in the backward pass of grid_sampler.
64
+ template<typename scalar_t>
65
+ static inline scalar_t clip_coordinates_set_grad(scalar_t in, int64_t clip_limit,
66
+ scalar_t *grad_in) {
67
+ // Note that it is important for the gradient calculation that borders
68
+ // are considered out of bounds.
69
+ if (in <= static_cast<scalar_t>(0)) {
70
+ *grad_in = static_cast<scalar_t>(0);
71
+ return static_cast<scalar_t>(0);
72
+ } else {
73
+ scalar_t max = static_cast<scalar_t>(clip_limit - 1);
74
+ if (in >= max) {
75
+ *grad_in = static_cast<scalar_t>(0);
76
+ return max;
77
+ } else {
78
+ *grad_in = static_cast<scalar_t>(1);
79
+ return in;
80
+ }
81
+ }
82
+ }
83
+
84
+ // Reflects coordinates until they fall between low and high (inclusive).
85
+ // The bounds are passed as twice their value so that half-integer values
86
+ // can be represented as ints.
87
+ template<typename scalar_t>
88
+ static inline scalar_t reflect_coordinates(scalar_t in, int64_t twice_low,
89
+ int64_t twice_high) {
90
+ if (twice_low == twice_high) {
91
+ return static_cast<scalar_t>(0);
92
+ }
93
+ scalar_t min = static_cast<scalar_t>(twice_low) / 2;
94
+ scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
95
+ in = std::fabs(in - min);
96
+ // `fmod` returns same sign as `in`, which is positive after the `fabs` above.
97
+ scalar_t extra = std::fmod(in, span);
98
+ int flips = static_cast<int>(std::floor(in / span));
99
+ if (flips % 2 == 0) {
100
+ return extra + min;
101
+ } else {
102
+ return span - extra + min;
103
+ }
104
+ }
105
+
106
+ // reflect_coordinates_set_grad works similarly to reflect_coordinates except
107
+ // that it also returns the `d output / d input` via pointer argument
108
+ // `grad_in`.
109
+ // This is useful in the backward pass of grid_sampler.
110
+ template<typename scalar_t>
111
+ static inline scalar_t reflect_coordinates_set_grad(scalar_t in, int64_t twice_low,
112
+ int64_t twice_high, scalar_t *grad_in) {
113
+ if (twice_low == twice_high) {
114
+ *grad_in = static_cast<scalar_t>(0);
115
+ return static_cast<scalar_t>(0);
116
+ }
117
+ int grad_in_mult_;
118
+ scalar_t min = static_cast<scalar_t>(twice_low) / 2;
119
+ scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2;
120
+ in = in - min;
121
+ if (in < static_cast<scalar_t>(0)) {
122
+ grad_in_mult_ = -1;
123
+ in = -in;
124
+ } else {
125
+ grad_in_mult_ = 1;
126
+ }
127
+ // `fmod` returns same sign as `in`, which is positive after the `if` above.
128
+ scalar_t extra = std::fmod(in, span);
129
+ int flips = static_cast<int>(std::floor(in / span));
130
+ if (flips % 2 == 0) {
131
+ *grad_in = static_cast<scalar_t>(grad_in_mult_);
132
+ return extra + min;
133
+ } else {
134
+ *grad_in = static_cast<scalar_t>(-grad_in_mult_);
135
+ return span - extra + min;
136
+ }
137
+ }
138
+
139
+ // Mapping the out-of-boundary points back into boundary
140
+ // This would only affect padding_mode=border or reflection
141
+ template<typename scalar_t>
142
+ static inline scalar_t compute_coordinates(scalar_t coord, int64_t size,
143
+ GridSamplerPadding padding_mode,
144
+ bool align_corners) {
145
+ if (padding_mode == GridSamplerPadding::Border) {
146
+ // clip coordinates to image borders
147
+ coord = clip_coordinates(coord, size);
148
+ } else if (padding_mode == GridSamplerPadding::Reflection) {
149
+ // reflect coordinates by image borders
150
+ if (align_corners) {
151
+ coord = reflect_coordinates(coord, 0, 2*(size - 1));
152
+ } else {
153
+ coord = reflect_coordinates(coord, -1, 2*size - 1);
154
+ }
155
+ // clip coordinates to image borders
156
+ coord = clip_coordinates(coord, size);
157
+ }
158
+ return coord;
159
+ }
160
+
161
+ // Computes the pixel source index value for a grid coordinate
162
+ template <typename scalar_t>
163
+ static inline scalar_t grid_sampler_compute_source_index(
164
+ scalar_t coord,
165
+ int64_t size,
166
+ GridSamplerPadding padding_mode,
167
+ bool align_corners) {
168
+ coord = grid_sampler_unnormalize(coord, size, align_corners);
169
+ coord = compute_coordinates(coord, size, padding_mode, align_corners);
170
+ return coord;
171
+ }
172
+
173
+ // grid_sampler_compute_source_index_set_grad works similarly to
174
+ // grid_sampler_compute_source_index except that it also returns the
175
+ // `d output / d input` via pointer argument `grad_in`.
176
+ // This is useful in the backward pass of grid_sampler.
177
+ template <typename scalar_t>
178
+ static inline scalar_t grid_sampler_compute_source_index_set_grad(
179
+ scalar_t coord,
180
+ int64_t size,
181
+ GridSamplerPadding padding_mode,
182
+ bool align_corners,
183
+ scalar_t *grad_in) {
184
+ scalar_t grad_clip, grad_refl;
185
+ coord = grid_sampler_unnormalize_set_grad(coord, size, align_corners, grad_in);
186
+ if (padding_mode == GridSamplerPadding::Border) {
187
+ // clip coordinates to image borders
188
+ coord = clip_coordinates_set_grad(coord, size, &grad_clip);
189
+ *grad_in = (*grad_in) * grad_clip;
190
+ } else if (padding_mode == GridSamplerPadding::Reflection) {
191
+ // reflect coordinates by image borders
192
+ if (align_corners) {
193
+ coord = reflect_coordinates_set_grad(coord, 0, 2*(size - 1), &grad_refl);
194
+ } else {
195
+ coord = reflect_coordinates_set_grad(coord, -1, 2*size - 1, &grad_refl);
196
+ }
197
+ // clip coordinates to image borders
198
+ coord = clip_coordinates_set_grad(coord, size, &grad_clip);
199
+ *grad_in = (*grad_in) * grad_refl * grad_clip;
200
+ }
201
+ return coord;
202
+ }
203
+
204
+ static inline bool within_bounds_2d(int64_t h, int64_t w, int64_t H, int64_t W) {
205
+ return h >= 0 && h < H && w >= 0 && w < W;
206
+ }
207
+
208
+ static inline bool within_bounds_3d(int64_t d, int64_t h, int64_t w, int64_t D, int64_t H, int64_t W) {
209
+ return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W;
210
+ }
211
+
212
+ template<typename scalar_t>
213
+ static inline scalar_t get_value_bounded(
214
+ const scalar_t* data,
215
+ scalar_t x,
216
+ scalar_t y,
217
+ int64_t W,
218
+ int64_t H,
219
+ int64_t sW,
220
+ int64_t sH,
221
+ GridSamplerPadding padding_mode,
222
+ bool align_corners) {
223
+
224
+ x = compute_coordinates(x, W, padding_mode, align_corners);
225
+ y = compute_coordinates(y, H, padding_mode, align_corners);
226
+
227
+ int64_t ix = static_cast<int64_t>(x);
228
+ int64_t iy = static_cast<int64_t>(y);
229
+
230
+ if (within_bounds_2d(iy, ix, H, W)) {
231
+ return data[iy * sH + ix * sW];
232
+ }
233
+ return static_cast<scalar_t>(0);
234
+ }
235
+
236
+ template<typename scalar_t>
237
+ static inline void safe_add_2d(scalar_t *data, int64_t h, int64_t w,
238
+ int64_t sH, int64_t sW, int64_t H, int64_t W,
239
+ scalar_t delta) {
240
+ if (within_bounds_2d(h, w, H, W)) {
241
+ data[h * sH + w * sW] += delta;
242
+ }
243
+ }
244
+
245
+ template<typename scalar_t>
246
+ static inline void safe_add_3d(scalar_t *data, int64_t d, int64_t h, int64_t w,
247
+ int64_t sD, int64_t sH, int64_t sW,
248
+ int64_t D, int64_t H, int64_t W,
249
+ scalar_t delta) {
250
+ if (within_bounds_3d(d, h, w, D, H, W)) {
251
+ data[d * sD + h * sH + w * sW] += delta;
252
+ }
253
+ }
254
+
255
+ template<typename scalar_t>
256
+ static inline void add_value_bounded(
257
+ scalar_t* data,
258
+ scalar_t x,
259
+ scalar_t y,
260
+ int64_t W,
261
+ int64_t H,
262
+ int64_t sW,
263
+ int64_t sH,
264
+ scalar_t delta,
265
+ GridSamplerPadding padding_mode,
266
+ bool align_corners) {
267
+
268
+ x = compute_coordinates(x, W, padding_mode, align_corners);
269
+ y = compute_coordinates(y, H, padding_mode, align_corners);
270
+
271
+ int64_t ix = static_cast<int64_t>(x);
272
+ int64_t iy = static_cast<int64_t>(y);
273
+
274
+ safe_add_2d(data, iy, ix, sH, sW, H, W, delta);
275
+ }
276
+
277
+ // Calculate the differential of the cubic convolution, i.e. `d coeff / d x`
278
+ template<typename scalar_t>
279
+ static inline void get_cubic_coefficients_grad(
280
+ scalar_t coeffs[4],
281
+ scalar_t t) {
282
+
283
+ // Must be the same as forward calculation in
284
+ // aten/src/ATen/native/UpSample.h:get_cubic_upsample_coefficients
285
+ scalar_t A = -0.75;
286
+
287
+ scalar_t x;
288
+ x = -1 - t; // 1 < x = |-1 - tx| < 2
289
+ coeffs[0] = (-3 * A * x - 10 * A ) * x - 8 * A;
290
+ x = -t; // x = |0 - tx| <= 1
291
+ coeffs[1] = (-3 * (A + 2) * x - 2 * (A + 3)) * x;
292
+ x = 1 - t; // x = |1 - tx| <= 1
293
+ coeffs[2] = (3 * (A + 2) * x - 2 * (A + 3)) * x;
294
+ x = 2 - t; // 1 < x = |2 - tx| < 2
295
+ coeffs[3] = (3 * A * x - 10 * A) * x + 8 * A;
296
+ }
297
+
298
+ } // namespace at::native
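The comment above grid_sampler_unnormalize describes how a normalized coordinate in [-1, 1] maps to a pixel index depending on align_corners. A small self-contained check of those two formulas for size = 4 (unnormalize below simply restates the header's expressions):

#include <cstdio>

double unnormalize(double coord, long size, bool align_corners) {
  return align_corners ? ((coord + 1) / 2) * (size - 1)   // -1 -> 0, +1 -> size-1
                       : ((coord + 1) * size - 1) / 2;    // -1 -> -0.5, +1 -> size-0.5
}

int main() {
  const long size = 4;
  for (double c : {-1.0, 0.0, 1.0}) {
    std::printf("coord % .1f -> align_corners=true: %.2f, align_corners=false: %.2f\n",
                c, unnormalize(c, size, true), unnormalize(c, size, false));
  }
  // align_corners=true : -1 -> 0.0,  0 -> 1.5, +1 -> 3.0 (corner pixel centers)
  // align_corners=false: -1 -> -0.5, 0 -> 1.5, +1 -> 3.5 (image edges)
}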
lib/python3.10/site-packages/torch/include/ATen/native/MathBitFallThroughLists.h ADDED
@@ -0,0 +1,71 @@
1
+ #pragma once
2
+
3
+ namespace at {
4
+ // views and their in-place version ops
5
+ #define TORCH_VIEW_FNS(m) \
6
+ m.impl("as_strided_", torch::CppFunction::makeFallthrough()); \
7
+ m.impl("detach", torch::CppFunction::makeFallthrough()); \
8
+ m.impl("detach_", torch::CppFunction::makeFallthrough()); \
9
+ m.impl("diagonal", torch::CppFunction::makeFallthrough()); \
10
+ m.impl("expand", torch::CppFunction::makeFallthrough()); \
11
+ m.impl("expand_as", torch::CppFunction::makeFallthrough()); \
12
+ m.impl("movedim.int", torch::CppFunction::makeFallthrough()); \
13
+ m.impl("movedim.intlist", torch::CppFunction::makeFallthrough()); \
14
+ m.impl("narrow", torch::CppFunction::makeFallthrough()); \
15
+ m.impl("permute", torch::CppFunction::makeFallthrough()); \
16
+ m.impl("select.Dimname", torch::CppFunction::makeFallthrough()); \
17
+ m.impl("select.int", torch::CppFunction::makeFallthrough()); \
18
+ m.impl("squeeze", torch::CppFunction::makeFallthrough()); \
19
+ m.impl("squeeze_", torch::CppFunction::makeFallthrough()); \
20
+ m.impl("transpose.int", torch::CppFunction::makeFallthrough()); \
21
+ m.impl("transpose.Dimname", torch::CppFunction::makeFallthrough()); \
22
+ m.impl("transpose_", torch::CppFunction::makeFallthrough()); \
23
+ m.impl("t", torch::CppFunction::makeFallthrough()); \
24
+ m.impl("t_", torch::CppFunction::makeFallthrough()); \
25
+ m.impl("real", torch::CppFunction::makeFallthrough()); \
26
+ m.impl("imag", torch::CppFunction::makeFallthrough()); \
27
+ m.impl("view_as_real", torch::CppFunction::makeFallthrough()); \
28
+ m.impl("unflatten.int", torch::CppFunction::makeFallthrough()); \
29
+ m.impl("unflatten.Dimname", torch::CppFunction::makeFallthrough()); \
30
+ m.impl("unfold", torch::CppFunction::makeFallthrough()); \
31
+ m.impl("unsqueeze", torch::CppFunction::makeFallthrough()); \
32
+ m.impl("unsqueeze_", torch::CppFunction::makeFallthrough()); \
33
+ m.impl("view_as", torch::CppFunction::makeFallthrough()); \
34
+ m.impl("unbind.int", torch::CppFunction::makeFallthrough()); \
35
+ m.impl("unbind.Dimname", torch::CppFunction::makeFallthrough()); \
36
+ m.impl("split.Tensor", torch::CppFunction::makeFallthrough()); \
37
+ m.impl("split_with_sizes", torch::CppFunction::makeFallthrough()); \
38
+ m.impl("swapaxes", torch::CppFunction::makeFallthrough()); \
39
+ m.impl("swapdims", torch::CppFunction::makeFallthrough()); \
40
+ m.impl("chunk", torch::CppFunction::makeFallthrough()); \
41
+ m.impl("reshape", torch::CppFunction::makeFallthrough()); \
42
+ m.impl("alias", torch::CppFunction::makeFallthrough()); \
43
+ m.impl("hsplit.int", torch::CppFunction::makeFallthrough()); \
44
+ m.impl("hsplit.array", torch::CppFunction::makeFallthrough()); \
45
+ m.impl("dsplit.int", torch::CppFunction::makeFallthrough()); \
46
+ m.impl("dsplit.array", torch::CppFunction::makeFallthrough()); \
47
+ m.impl("vsplit.int", torch::CppFunction::makeFallthrough()); \
48
+ m.impl("vsplit.array", torch::CppFunction::makeFallthrough()); \
49
+ m.impl("conj", torch::CppFunction::makeFallthrough()); \
50
+ m.impl("_conj", torch::CppFunction::makeFallthrough()); \
51
+ m.impl("_unsafe_view", torch::CppFunction::makeFallthrough()); \
52
+ m.impl("resize_", torch::CppFunction::makeFallthrough());
53
+
54
+ #define TENSOR_UTILITIES_AND_CONSTRUCTORS(m) \
55
+ m.impl("empty_like", torch::CppFunction::makeFallthrough()); \
56
+ m.impl("empty.memory_format", torch::CppFunction::makeFallthrough()); \
57
+ m.impl("empty.out", torch::CppFunction::makeFallthrough()); \
58
+ m.impl("empty_strided", torch::CppFunction::makeFallthrough()); \
59
+ m.impl("full_like", torch::CppFunction::makeFallthrough()); \
60
+ m.impl("stride.int", torch::CppFunction::makeFallthrough()); \
61
+ m.impl("stride.Dimname", torch::CppFunction::makeFallthrough()); \
62
+ m.impl("size.int", torch::CppFunction::makeFallthrough()); \
63
+ m.impl("size.Dimname", torch::CppFunction::makeFallthrough()); \
64
+ m.impl("is_complex", torch::CppFunction::makeFallthrough()); \
65
+ m.impl("is_floating_point", torch::CppFunction::makeFallthrough()); \
66
+ m.impl("requires_grad_", torch::CppFunction::makeFallthrough());
67
+ }
68
+
69
+ #define TORCH_VIEW_FNS_NATIVE_FN_REGISTRATION(m) \
70
+ m.impl("as_strided", torch::CppFunction::makeFallthrough()); \
71
+ m.impl("view", torch::CppFunction::makeFallthrough());
lib/python3.10/site-packages/torch/include/ATen/native/ReduceAllOps.h ADDED
@@ -0,0 +1,16 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+
5
+ namespace at {
6
+ class Tensor;
7
+ }
8
+
9
+ namespace at::native {
10
+
11
+ using reduce_all_fn = void (*)(Tensor & result, const Tensor & self);
12
+ using reduce_min_max_fn = void (*)(Tensor & max_result, Tensor & min_result, const Tensor & self);
13
+ DECLARE_DISPATCH(reduce_all_fn, min_all_stub)
14
+ DECLARE_DISPATCH(reduce_all_fn, max_all_stub)
15
+
16
+ } // namespace at::native
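DECLARE_DISPATCH only declares per-device function-pointer stubs; each backend registers a kernel and the operator implementation calls through the stub with a device type. A rough sketch of that pattern, with a placeholder kernel name and a deliberately naive body (not from this commit):

#include <ATen/ATen.h>
#include <ATen/native/ReduceAllOps.h>
#include <ATen/native/DispatchStub.h>

namespace at::native {

// Placeholder CPU kernel for min_all_stub; a real kernel vectorizes over the buffer.
static void min_all_kernel_sketch(Tensor& result, const Tensor& self) {
  Tensor flat = self.reshape({-1});
  Tensor m = flat[0];
  for (int64_t i = 1; i < flat.numel(); ++i) {
    m = at::minimum(m, flat[i]);
  }
  result.copy_(m);
}
REGISTER_DISPATCH(min_all_stub, &min_all_kernel_sketch)

// Call site inside the operator implementation (device-type dispatch):
//   min_all_stub(self.device().type(), result, self);

} // namespace at::native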
lib/python3.10/site-packages/torch/include/ATen/native/ReductionType.h ADDED
@@ -0,0 +1,40 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Scalar.h>
4
+
5
+ namespace at::native {
6
+
7
+ enum class ReductionType {MAX, MEAN, MIN, SUM, PROD};
8
+
9
+ inline ReductionType get_reduction_enum(const std::string_view& reduce) {
10
+ if (reduce == "max" || reduce == "amax") {
11
+ return ReductionType::MAX;
12
+ } else if (reduce == "mean") {
13
+ return ReductionType::MEAN;
14
+ } else if (reduce == "min" || reduce == "amin") {
15
+ return ReductionType::MIN;
16
+ } else if (reduce == "sum") {
17
+ return ReductionType::SUM;
18
+ } else if (reduce == "prod") {
19
+ return ReductionType::PROD;
20
+ } else {
21
+ TORCH_CHECK(false, "reduce argument must be either sum, prod, mean, amax or amin, got ", reduce);
22
+ }
23
+ }
24
+
25
+ // used for `scatter_reduce`, old options for BC.
26
+ inline ReductionType get_operator_enum(const std::string_view reduce, bool use_new_options) {
27
+ if (use_new_options) {
28
+ return get_reduction_enum(reduce);
29
+ } else {
30
+ if (reduce == "add") {
31
+ return ReductionType::SUM;
32
+ } else if (reduce == "multiply") {
33
+ return ReductionType::PROD;
34
+ } else {
35
+ TORCH_CHECK(false, "reduce argument must be either add or multiply.")
36
+ }
37
+ }
38
+ }
39
+
40
+ } // at::native
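A small usage sketch of the helper above (the wrapper function is illustrative):

#include <string_view>
#include <ATen/native/ReductionType.h>

// Map a user-facing `reduce=` string onto the enum and branch on it.
static bool reduction_needs_count(std::string_view reduce) {
  switch (at::native::get_reduction_enum(reduce)) {
    case at::native::ReductionType::MEAN:
      return true;   // mean needs the element count to divide by
    default:
      return false;  // max/amax, min/amin, sum, prod do not
  }
}
// reduction_needs_count("mean") == true, reduction_needs_count("amax") == false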
lib/python3.10/site-packages/torch/include/ATen/native/Sorting.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <cstdint>
5
+
6
+ namespace at {
7
+ class TensorBase;
8
+ }
9
+
10
+ namespace at::native {
11
+
12
+ enum class QUANTILE_INTERPOLATION_MODE : uint8_t {
13
+ LINEAR,
14
+ LOWER,
15
+ HIGHER,
16
+ MIDPOINT,
17
+ NEAREST
18
+ };
19
+
20
+ using sort_fn = void(*)(const TensorBase&, const TensorBase&, const TensorBase&, int64_t, bool, bool);
21
+ using topk_fn = void(*)(const TensorBase&, const TensorBase&, const TensorBase&, int64_t, int64_t, bool, bool);
22
+
23
+ DECLARE_DISPATCH(sort_fn, sort_stub)
24
+ DECLARE_DISPATCH(topk_fn, topk_stub)
25
+
26
+ void _fill_indices(const TensorBase &indices, int64_t dim);
27
+
28
+ } // namespace at::native
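These stubs back the public sorting entry points; user code normally reaches them through the ATen functions rather than calling the stubs directly. A short sketch of those call sites (standard ATen API, shown for context):

#include <ATen/ATen.h>

void sorting_entry_points_sketch() {
  at::Tensor t = at::randn({4, 8});
  // Routed to sort_stub for t's device type.
  auto [sorted, order] = at::sort(t, /*dim=*/-1, /*descending=*/false);
  // Routed to topk_stub.
  auto [values, indices] = at::topk(t, /*k=*/3);
}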
lib/python3.10/site-packages/torch/include/ATen/ops/_addmm_activation_cpu_dispatch.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor _addmm_activation(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false);
21
+ TORCH_API at::Tensor & _addmm_activation_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1, bool use_gelu=false);
22
+ TORCH_API at::Tensor & _addmm_activation_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu, at::Tensor & out);
23
+
24
+ } // namespace cpu
25
+ } // namespace at
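As I read it, _addmm_activation is the fused `beta * self + alpha * (mat1 @ mat2)` with an activation epilogue (ReLU by default, GELU when use_gelu is true). A usage sketch against the generic at:: wrapper (shapes arbitrary):

#include <ATen/ATen.h>

void addmm_activation_sketch() {
  at::Tensor bias = at::randn({4});
  at::Tensor a = at::randn({2, 3});
  at::Tensor b = at::randn({3, 4});
  at::Tensor y_relu = at::_addmm_activation(bias, a, b);  // ReLU epilogue
  at::Tensor y_gelu = at::_addmm_activation(bias, a, b, /*beta=*/1, /*alpha=*/1, /*use_gelu=*/true);
}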
lib/python3.10/site-packages/torch/include/ATen/ops/_empty_affine_quantized_ops.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _empty_affine_quantized {
18
+ using schema = at::Tensor (c10::SymIntArrayRef, ::std::optional<at::ScalarType>, ::std::optional<at::Layout>, ::std::optional<at::Device>, ::std::optional<bool>, double, int64_t, ::std::optional<at::MemoryFormat>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ static constexpr const char* name = "aten::_empty_affine_quantized";
22
+ static constexpr const char* overload_name = "";
23
+ static constexpr const char* schema_str = "_empty_affine_quantized(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format) -> Tensor";
24
+ static at::Tensor call(c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format);
26
+ };
27
+
28
+ struct TORCH_API _empty_affine_quantized_out {
29
+ using schema = at::Tensor & (c10::SymIntArrayRef, double, int64_t, ::std::optional<at::MemoryFormat>, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ static constexpr const char* name = "aten::_empty_affine_quantized";
33
+ static constexpr const char* overload_name = "out";
34
+ static constexpr const char* schema_str = "_empty_affine_quantized.out(SymInt[] size, *, float scale=1, int zero_point=0, MemoryFormat? memory_format=contiguous_format, Tensor(a!) out) -> Tensor(a!)";
35
+ static at::Tensor & call(c10::SymIntArrayRef size, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, c10::SymIntArrayRef size, double scale, int64_t zero_point, ::std::optional<at::MemoryFormat> memory_format, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
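The schema above is normally reached through the at::_empty_affine_quantized factory wrapper; a sketch with arbitrary quantization parameters (assumes the gathered-TensorOptions overload):

#include <ATen/ATen.h>

void empty_affine_quantized_sketch() {
  // Uninitialized per-tensor affine quantized storage: q = round(x / scale) + zero_point.
  at::Tensor q = at::_empty_affine_quantized(
      {2, 3},
      at::device(at::kCPU).dtype(at::kQUInt8),
      /*scale=*/0.1,
      /*zero_point=*/10);
}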
lib/python3.10/site-packages/torch/include/ATen/ops/_fused_sdp_choice_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API int64_t _fused_sdp_choice(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool is_causal=false, ::std::optional<double> scale=::std::nullopt, bool enable_gqa=false);
21
+
22
+ } // namespace cpu
23
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/_index_put_impl_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor & _index_put_impl_(at::Tensor & self, const c10::List<::std::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate=false, bool unsafe=false);
21
+
22
+ } // namespace cpu
23
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cpu_dispatch.h ADDED
@@ -0,0 +1,25 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
21
+ TORCH_API at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
22
+ TORCH_API at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out);
23
+
24
+ } // namespace cpu
25
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/_nested_get_values_ops.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _nested_get_values {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ static constexpr const char* name = "aten::_nested_get_values";
22
+ static constexpr const char* overload_name = "";
23
+ static constexpr const char* schema_str = "_nested_get_values(Tensor(a) self) -> Tensor(a)";
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ }} // namespace at::_ops
lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_buffer_cpu_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor _nested_view_from_buffer(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets);
21
+
22
+ } // namespace cpu
23
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/_nnpack_spatial_convolution.h ADDED
@@ -0,0 +1,91 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+
18
+
19
+
20
+ #include <ATen/ops/_nnpack_spatial_convolution_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor
26
+ inline at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
27
+ return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride));
28
+ }
29
+ namespace symint {
30
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
31
+ at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
32
+ return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride));
33
+ }
34
+ }
35
+
36
+ // aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1) -> Tensor
37
+ inline at::Tensor _nnpack_spatial_convolution_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) {
38
+ return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride);
39
+ }
40
+ namespace symint {
41
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
42
+ at::Tensor _nnpack_spatial_convolution(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) {
43
+ return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride);
44
+ }
45
+ }
46
+
47
+ // aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
48
+ inline at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
49
+ return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out);
50
+ }
51
+ namespace symint {
52
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
53
+ at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride=1) {
54
+ return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out);
55
+ }
56
+ }
57
+
58
+ // aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
59
+ inline at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
60
+ return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out);
61
+ }
62
+ namespace symint {
63
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
64
+ at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
65
+ return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(stride), out);
66
+ }
67
+ }
68
+
69
+ // aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
70
+ inline at::Tensor & _nnpack_spatial_convolution_symint_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) {
71
+ return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out);
72
+ }
73
+ namespace symint {
74
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
75
+ at::Tensor & _nnpack_spatial_convolution_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride=c10::SymInt(1)) {
76
+ return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out);
77
+ }
78
+ }
79
+
80
+ // aten::_nnpack_spatial_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[2] padding, SymInt[2] stride=1, *, Tensor(a!) out) -> Tensor(a!)
81
+ inline at::Tensor & _nnpack_spatial_convolution_symint_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, at::Tensor & out) {
82
+ return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out);
83
+ }
84
+ namespace symint {
85
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
86
+ at::Tensor & _nnpack_spatial_convolution_outf(const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, at::Tensor & out) {
87
+ return at::_ops::_nnpack_spatial_convolution_out::call(input, weight, bias, padding, stride, out);
88
+ }
89
+ }
90
+
91
+ }
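This header shows the usual torchgen pairing of IntArrayRef and SymIntArrayRef entry points, with the at::symint:: templates selecting between them by type parameter. A sketch of the two spellings (shapes arbitrary; running it assumes an NNPACK-enabled build):

#include <ATen/ATen.h>

void nnpack_conv_sketch() {
  at::Tensor input  = at::randn({1, 3, 8, 8});
  at::Tensor weight = at::randn({16, 3, 3, 3});
  at::Tensor bias   = at::randn({16});
  // Concrete-int spelling.
  at::Tensor out_a = at::_nnpack_spatial_convolution(input, weight, bias, /*padding=*/{1, 1});
  // Template-selected spelling; <int64_t> picks the IntArrayRef overloads above.
  at::Tensor out_b = at::symint::_nnpack_spatial_convolution<int64_t>(input, weight, bias, {1, 1}, {1, 1});
}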
lib/python3.10/site-packages/torch/include/ATen/ops/_pad_circular_native.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _pad_circular_symint(const at::Tensor & self, c10::SymIntArrayRef pad);
20
+ } // namespace native
21
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/_prelu_kernel_cuda_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API at::Tensor _prelu_kernel(const at::Tensor & self, const at::Tensor & weight);
21
+
22
+ } // namespace cuda
23
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/_print_ops.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _print {
18
+ using schema = void (c10::string_view);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ static constexpr const char* name = "aten::_print";
22
+ static constexpr const char* overload_name = "";
23
+ static constexpr const char* schema_str = "_print(str s) -> ()";
24
+ static void call(c10::string_view s);
25
+ static void redispatch(c10::DispatchKeySet dispatchKeySet, c10::string_view s);
26
+ };
27
+
28
+ }} // namespace at::_ops
lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_attention_math_ops.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _scaled_dot_product_attention_math {
18
+ using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, double, bool, const ::std::optional<at::Tensor> &, ::std::optional<double>, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ static constexpr const char* name = "aten::_scaled_dot_product_attention_math";
22
+ static constexpr const char* overload_name = "";
23
+ static constexpr const char* schema_str = "_scaled_dot_product_attention_math(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, Tensor? dropout_mask=None, *, float? scale=None, bool enable_gqa=False) -> (Tensor, Tensor)";
24
+ static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & dropout_mask, ::std::optional<double> scale, bool enable_gqa);
25
+ static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const ::std::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const ::std::optional<at::Tensor> & dropout_mask, ::std::optional<double> scale, bool enable_gqa);
26
+ };
27
+
28
+ }} // namespace at::_ops
lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_ops.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _scaled_dot_product_flash_attention {
18
+ using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, double, bool, bool, ::std::optional<double>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ static constexpr const char* name = "aten::_scaled_dot_product_flash_attention";
22
+ static constexpr const char* overload_name = "";
23
+ static constexpr const char* schema_str = "_scaled_dot_product_flash_attention(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, bool return_debug_mask=False, *, float? scale=None) -> (Tensor output, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, Tensor philox_seed, Tensor philox_offset, Tensor debug_attn_mask)";
24
+ static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale);
25
+ static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,c10::SymInt,c10::SymInt,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, ::std::optional<double> scale);
26
+ };
27
+
28
+ }} // namespace at::_ops
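Both the math and flash-attention schemas above sit behind the public at::scaled_dot_product_attention composite, which selects a backend at runtime. A usage sketch:

#include <ATen/ATen.h>

void sdpa_sketch() {
  // (batch, heads, seq_len, head_dim)
  at::Tensor q = at::randn({2, 8, 128, 64});
  at::Tensor k = at::randn({2, 8, 128, 64});
  at::Tensor v = at::randn({2, 8, 128, 64});
  at::Tensor out = at::scaled_dot_product_attention(q, k, v);
}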
lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_ops.h ADDED
@@ -0,0 +1,61 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _sparse_log_softmax_int {
18
+ using schema = at::Tensor (const at::Tensor &, int64_t, ::std::optional<at::ScalarType>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ static constexpr const char* name = "aten::_sparse_log_softmax";
22
+ static constexpr const char* overload_name = "int";
23
+ static constexpr const char* schema_str = "_sparse_log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor";
24
+ static at::Tensor call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype);
26
+ };
27
+
28
+ struct TORCH_API _sparse_log_softmax_Dimname {
29
+ using schema = at::Tensor (const at::Tensor &, at::Dimname, ::std::optional<at::ScalarType>);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ static constexpr const char* name = "aten::_sparse_log_softmax";
33
+ static constexpr const char* overload_name = "Dimname";
34
+ static constexpr const char* schema_str = "_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor";
35
+ static at::Tensor call(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype);
36
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype);
37
+ };
38
+
39
+ struct TORCH_API _sparse_log_softmax {
40
+ using schema = at::Tensor (const at::Tensor &, int64_t, bool);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ static constexpr const char* name = "aten::_sparse_log_softmax";
44
+ static constexpr const char* overload_name = "";
45
+ static constexpr const char* schema_str = "_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor";
46
+ static at::Tensor call(const at::Tensor & self, int64_t dim, bool half_to_float);
47
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float);
48
+ };
49
+
50
+ struct TORCH_API _sparse_log_softmax_out {
51
+ using schema = at::Tensor & (const at::Tensor &, int64_t, bool, at::Tensor &);
52
+ using ptr_schema = schema*;
53
+ // See Note [static constexpr char* members for windows NVCC]
54
+ static constexpr const char* name = "aten::_sparse_log_softmax";
55
+ static constexpr const char* overload_name = "out";
56
+ static constexpr const char* schema_str = "_sparse_log_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)";
57
+ static at::Tensor & call(const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
58
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, bool half_to_float, at::Tensor & out);
59
+ };
60
+
61
+ }} // namespace at::_ops
lib/python3.10/site-packages/torch/include/ATen/ops/_test_ambiguous_defaults_ops.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _test_ambiguous_defaults_a {
18
+ using schema = at::Tensor (const at::Tensor &, int64_t, int64_t);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ static constexpr const char* name = "aten::_test_ambiguous_defaults";
22
+ static constexpr const char* overload_name = "a";
23
+ static constexpr const char* schema_str = "_test_ambiguous_defaults.a(Tensor dummy, int a=1, int b=1) -> Tensor";
24
+ static at::Tensor call(const at::Tensor & dummy, int64_t a, int64_t b);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, int64_t a, int64_t b);
26
+ };
27
+
28
+ struct TORCH_API _test_ambiguous_defaults_b {
29
+ using schema = at::Tensor (const at::Tensor &, int64_t, c10::string_view);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ static constexpr const char* name = "aten::_test_ambiguous_defaults";
33
+ static constexpr const char* overload_name = "b";
34
+ static constexpr const char* schema_str = "_test_ambiguous_defaults.b(Tensor dummy, int a=2, str b=\"2\") -> Tensor";
35
+ static at::Tensor call(const at::Tensor & dummy, int64_t a, c10::string_view b);
36
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dummy, int64_t a, c10::string_view b);
37
+ };
38
+
39
+ }} // namespace at::_ops
lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward_native.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ #include <ATen/ops/_upsample_nearest_exact1d_backward_meta.h>
16
+
17
+ namespace at {
18
+ namespace native {
19
+ struct TORCH_API structured__upsample_nearest_exact1d_backward_out_cpu : public at::meta::structured__upsample_nearest_exact1d_backward {
20
+ void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, ::std::optional<double> scales, const at::Tensor & grad_input);
21
+ };
22
+ struct TORCH_API structured__upsample_nearest_exact1d_backward_out_cuda : public at::meta::structured__upsample_nearest_exact1d_backward {
23
+ void impl(const at::Tensor & grad_output, at::ArrayRef<int64_t> output_size, at::ArrayRef<int64_t> input_size, ::std::optional<double> scales, const at::Tensor & grad_input);
24
+ };
25
+ } // namespace native
26
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/addbmm_meta_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor & addbmm_(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1);
21
+
22
+ } // namespace meta
23
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_meta_dispatch.h ADDED
@@ -0,0 +1,26 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace meta {
19
+
20
+ TORCH_API at::Tensor addcmul(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
21
+ TORCH_API at::Tensor & addcmul_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
22
+ TORCH_API at::Tensor & addcmul_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out);
23
+ TORCH_API at::Tensor & addcmul_(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1);
24
+
25
+ } // namespace meta
26
+ } // namespace at
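For reference, addcmul computes `self + value * tensor1 * tensor2` elementwise; the meta entries above only perform shape/dtype inference. A quick example:

#include <ATen/ATen.h>

void addcmul_sketch() {
  at::Tensor self = at::ones({2, 2});
  at::Tensor t1 = at::full({2, 2}, 2.0);
  at::Tensor t2 = at::full({2, 2}, 3.0);
  at::Tensor out = at::addcmul(self, t1, t2, /*value=*/0.5);  // every element: 1 + 0.5 * 2 * 3 = 4
}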
lib/python3.10/site-packages/torch/include/ATen/ops/all_native.h ADDED
@@ -0,0 +1,34 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ #include <ATen/ops/all_meta.h>
16
+
17
+ namespace at {
18
+ namespace native {
19
+ struct TORCH_API structured_all_out : public at::meta::structured_all_dim {
20
+ void impl(const at::Tensor & self, int64_t dim, bool keepdim, const at::Tensor & out);
21
+ };
22
+ TORCH_API at::Tensor NestedTensor_all(const at::Tensor & self, int64_t dim, bool keepdim=false);
23
+ TORCH_API at::Tensor all_dims_default(const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false);
24
+ TORCH_API at::Tensor & all_dims_out_default(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, at::Tensor & out);
25
+ struct TORCH_API structured_all_dims_out : public at::meta::structured_all_dims {
26
+ void impl(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, const at::Tensor & out);
27
+ };
28
+ TORCH_API at::Tensor all(const at::Tensor & self, at::Dimname dim, bool keepdim=false);
29
+ TORCH_API at::Tensor & all_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out);
30
+ struct TORCH_API structured_all_all_out : public at::meta::structured_all {
31
+ void impl(const at::Tensor & self, const at::Tensor & out);
32
+ };
33
+ } // namespace native
34
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+
18
+
19
+
20
+ #include <ATen/ops/batch_norm_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::batch_norm_backward(Tensor grad_out, Tensor input, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, bool update, float eps, bool[3] output_mask, Tensor reserve) -> (Tensor, Tensor, Tensor)
26
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> batch_norm_backward(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & weight, const ::std::optional<at::Tensor> & running_mean, const ::std::optional<at::Tensor> & running_var, const ::std::optional<at::Tensor> & save_mean, const ::std::optional<at::Tensor> & save_var, bool update, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reserve) {
27
+ return at::_ops::batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_var, update, eps, output_mask, reserve);
28
+ }
29
+
30
+ }
lib/python3.10/site-packages/torch/include/ATen/ops/binary_cross_entropy_with_logits_ops.h ADDED
@@ -0,0 +1,39 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API binary_cross_entropy_with_logits {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, int64_t);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ static constexpr const char* name = "aten::binary_cross_entropy_with_logits";
22
+ static constexpr const char* overload_name = "";
23
+ static constexpr const char* schema_str = "binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor";
24
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction);
26
+ };
27
+
28
+ struct TORCH_API binary_cross_entropy_with_logits_out {
29
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const ::std::optional<at::Tensor> &, const ::std::optional<at::Tensor> &, int64_t, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ static constexpr const char* name = "aten::binary_cross_entropy_with_logits";
33
+ static constexpr const char* overload_name = "out";
34
+ static constexpr const char* schema_str = "binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)";
35
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & target, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & pos_weight, int64_t reduction, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
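A usage sketch of the corresponding ATen function (targets are probabilities in [0, 1]; reduction defaults to mean):

#include <ATen/ATen.h>

void bce_with_logits_sketch() {
  at::Tensor logits = at::randn({4, 3});
  at::Tensor target = at::rand({4, 3});  // values in [0, 1]
  at::Tensor loss = at::binary_cross_entropy_with_logits(logits, target);  // 0-dim tensor (mean reduction)
}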
lib/python3.10/site-packages/torch/include/ATen/ops/conv_tbc_backward.h ADDED
@@ -0,0 +1,30 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <optional>
17
+
18
+
19
+
20
+ #include <ATen/ops/conv_tbc_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor)
26
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
27
+ return at::_ops::conv_tbc_backward::call(self, input, weight, bias, pad);
28
+ }
29
+
30
+ }
lib/python3.10/site-packages/torch/include/ATen/ops/detach_copy_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & detach_copy_out(at::Tensor & out, const at::Tensor & self);
21
+ TORCH_API at::Tensor & detach_copy_outf(const at::Tensor & self, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor diagonal(const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1);
21
+
22
+ } // namespace compositeexplicitautograd
23
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/digamma_native.h ADDED
@@ -0,0 +1,23 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <optional>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+ #include <ATen/ops/digamma_meta.h>
16
+
17
+ namespace at {
18
+ namespace native {
19
+ struct TORCH_API structured_digamma_out : public at::meta::structured_digamma {
20
+ void impl(const at::Tensor & self, const at::Tensor & out);
21
+ };
22
+ } // namespace native
23
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/div_ops.h ADDED
@@ -0,0 +1,149 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API div_Tensor {
18
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ static constexpr const char* name = "aten::div";
22
+ static constexpr const char* overload_name = "Tensor";
23
+ static constexpr const char* schema_str = "div.Tensor(Tensor self, Tensor other) -> Tensor";
24
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
26
+ };
27
+
28
+ struct TORCH_API div__Tensor {
29
+ using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ static constexpr const char* name = "aten::div_";
33
+ static constexpr const char* overload_name = "Tensor";
34
+ static constexpr const char* schema_str = "div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)";
35
+ static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
37
+ };
38
+
39
+ struct TORCH_API div_out {
40
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ static constexpr const char* name = "aten::div";
44
+ static constexpr const char* overload_name = "out";
45
+ static constexpr const char* schema_str = "div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)";
46
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
48
+ };
49
+
50
+ struct TORCH_API div_Tensor_mode {
51
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional<c10::string_view>);
52
+ using ptr_schema = schema*;
53
+ // See Note [static constexpr char* members for windows NVCC]
54
+ static constexpr const char* name = "aten::div";
55
+ static constexpr const char* overload_name = "Tensor_mode";
+ static constexpr const char* schema_str = "div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor";
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode);
+ };
+
+ struct TORCH_API div__Tensor_mode {
+ using schema = at::Tensor & (at::Tensor &, const at::Tensor &, ::std::optional<c10::string_view>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::div_";
+ static constexpr const char* overload_name = "Tensor_mode";
+ static constexpr const char* schema_str = "div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)";
+ static at::Tensor & call(at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode);
+ };
+
+ struct TORCH_API div_out_mode {
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional<c10::string_view>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::div";
+ static constexpr const char* overload_name = "out_mode";
+ static constexpr const char* schema_str = "div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out);
+ };
+
+ struct TORCH_API div_Scalar {
+ using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::div";
+ static constexpr const char* overload_name = "Scalar";
+ static constexpr const char* schema_str = "div.Scalar(Tensor self, Scalar other) -> Tensor";
+ static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
+ };
+
+ struct TORCH_API div__Scalar {
+ using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::div_";
+ static constexpr const char* overload_name = "Scalar";
+ static constexpr const char* schema_str = "div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)";
+ static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
+ };
+
+ struct TORCH_API div_Scalar_mode {
+ using schema = at::Tensor (const at::Tensor &, const at::Scalar &, ::std::optional<c10::string_view>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::div";
+ static constexpr const char* overload_name = "Scalar_mode";
+ static constexpr const char* schema_str = "div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor";
+ static at::Tensor call(const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode);
+ };
+
+ struct TORCH_API div__Scalar_mode {
+ using schema = at::Tensor & (at::Tensor &, const at::Scalar &, ::std::optional<c10::string_view>);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::div_";
+ static constexpr const char* overload_name = "Scalar_mode";
+ static constexpr const char* schema_str = "div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)";
+ static at::Tensor & call(at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode);
+ };
+
+ struct TORCH_API div_Scalar_out {
+ using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::div";
+ static constexpr const char* overload_name = "Scalar_out";
+ static constexpr const char* schema_str = "div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+ };
+
+ struct TORCH_API div_Scalar_mode_out {
+ using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, ::std::optional<c10::string_view>, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::div";
+ static constexpr const char* overload_name = "Scalar_mode_out";
+ static constexpr const char* schema_str = "div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, ::std::optional<c10::string_view> rounding_mode, at::Tensor & out);
+ };
+
+ }} // namespace at::_ops
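(Illustrative note, not part of the diff: the unboxed `call` entry points declared in this generated div operator header can be invoked directly. A minimal sketch, assuming ATen is available through <ATen/ATen.h>; the function name `div_mode_sketch` is hypothetical.)

    #include <ATen/ATen.h>

    // Exercising the schemas declared above.
    void div_mode_sketch() {
      at::Tensor a = at::rand({4});
      at::Tensor b = at::rand({4}) + 0.5;
      // div.Tensor_mode: "floor" rounds toward negative infinity, "trunc" toward zero.
      at::Tensor q = at::_ops::div_Tensor_mode::call(a, b, c10::string_view("floor"));
      // div.Scalar: divide every element by a scalar.
      at::Tensor h = at::_ops::div_Scalar::call(a, 2);
      (void)q; (void)h;
    }

In ordinary user code these structs are reached indirectly through the public at::div wrappers; calling at::_ops directly mainly matters when bypassing or re-entering the dispatcher.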
lib/python3.10/site-packages/torch/include/ATen/ops/embedding_sparse_backward_ops.h ADDED
@@ -0,0 +1,28 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API embedding_sparse_backward {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t, int64_t, bool);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::embedding_sparse_backward";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor";
+ static at::Tensor call(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq);
+ };
+
+ }} // namespace at::_ops
lib/python3.10/site-packages/torch/include/ATen/ops/exp2_cpu_dispatch.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cpu {
+
+ TORCH_API at::Tensor exp2(const at::Tensor & self);
+ TORCH_API at::Tensor & exp2_out(at::Tensor & out, const at::Tensor & self);
+ TORCH_API at::Tensor & exp2_outf(const at::Tensor & self, at::Tensor & out);
+ TORCH_API at::Tensor & exp2_(at::Tensor & self);
+
+ } // namespace cpu
+ } // namespace at
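(Illustrative note, not part of the diff: these are the CPU dispatch-key entry points corresponding to at::exp2. A minimal usage sketch, assuming a CPU tensor and that this header is reachable alongside <ATen/ATen.h>; the function name `exp2_cpu_sketch` is hypothetical.)

    #include <ATen/ATen.h>
    #include <ATen/ops/exp2_cpu_dispatch.h>

    void exp2_cpu_sketch() {
      at::Tensor x = at::arange(4, at::kFloat);   // [0, 1, 2, 3]
      at::Tensor y = at::cpu::exp2(x);            // [1, 2, 4, 8]
      at::Tensor out = at::empty_like(x);
      at::cpu::exp2_out(out, x);                  // out-variant writes the same result into `out`
    }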
lib/python3.10/site-packages/torch/include/ATen/ops/fft_hfft_native.h ADDED
@@ -0,0 +1,22 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor fft_hfft_symint(const at::Tensor & self, ::std::optional<c10::SymInt> n=::std::nullopt, int64_t dim=-1, ::std::optional<c10::string_view> norm=::std::nullopt);
+ TORCH_API at::Tensor & fft_hfft_symint_out(const at::Tensor & self, ::std::optional<c10::SymInt> n, int64_t dim, ::std::optional<c10::string_view> norm, at::Tensor & out);
+ } // namespace native
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/fft_irfft2.h ADDED
@@ -0,0 +1,91 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+
+
+
+ #include <ATen/ops/fft_irfft2_ops.h>
+
+ namespace at {
+
+
+ // aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+ inline at::Tensor fft_irfft2(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+ return at::_ops::fft_irfft2::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor fft_irfft2(const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+ return at::_ops::fft_irfft2::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm);
+ }
+ }
+
+ // aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor
+ inline at::Tensor fft_irfft2_symint(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+ return at::_ops::fft_irfft2::call(self, s, dim, norm);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor fft_irfft2(const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+ return at::_ops::fft_irfft2::call(self, s, dim, norm);
+ }
+ }
+
+ // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_irfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+ return at::_ops::fft_irfft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & fft_irfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+ return at::_ops::fft_irfft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out);
+ }
+ }
+
+ // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_irfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
+ return at::_ops::fft_irfft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
+ at::Tensor & fft_irfft2_outf(const at::Tensor & self, at::OptionalIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
+ return at::_ops::fft_irfft2_out::call(self, s.has_value() ? ::std::make_optional(c10::fromIntArrayRefSlow(*s)) : ::std::nullopt, dim, norm, out);
+ }
+ }
+
+ // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_irfft2_symint_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+ return at::_ops::fft_irfft2_out::call(self, s, dim, norm, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & fft_irfft2_out(at::Tensor & out, const at::Tensor & self, at::OptionalSymIntArrayRef s=::std::nullopt, at::IntArrayRef dim={-2,-1}, ::std::optional<c10::string_view> norm=::std::nullopt) {
+ return at::_ops::fft_irfft2_out::call(self, s, dim, norm, out);
+ }
+ }
+
+ // aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)
+ inline at::Tensor & fft_irfft2_symint_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
+ return at::_ops::fft_irfft2_out::call(self, s, dim, norm, out);
+ }
+ namespace symint {
+ template <typename T, typename = std::enable_if_t<std::is_same_v<T, c10::SymInt>>>
+ at::Tensor & fft_irfft2_outf(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, ::std::optional<c10::string_view> norm, at::Tensor & out) {
+ return at::_ops::fft_irfft2_out::call(self, s, dim, norm, out);
+ }
+ }
+
+ }
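(Illustrative note, not part of the diff: a minimal sketch of calling the fft_irfft2 wrappers declared above with the default s, dim, and norm arguments; the shape and the function name `irfft2_sketch` are chosen for illustration only.)

    #include <ATen/ATen.h>

    void irfft2_sketch() {
      // One-sided spectrum over the last dim: 5 = 8/2 + 1 complex bins.
      at::Tensor freq = at::randn({8, 5}, at::kComplexFloat);
      // Functional form: inverse real 2-D FFT over the default dims {-2, -1}.
      at::Tensor spatial = at::fft_irfft2(freq);       // real tensor of shape {8, 8}
      // Out-variant declared above.
      at::Tensor out = at::empty({8, 8}, at::kFloat);
      at::fft_irfft2_out(out, freq);
    }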
lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_ops.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API gelu_backward_grad_input {
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::string_view, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::gelu_backward";
+ static constexpr const char* overload_name = "grad_input";
+ static constexpr const char* schema_str = "gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate, at::Tensor & grad_input);
+ };
+
+ struct TORCH_API gelu_backward {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::string_view);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::gelu_backward";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor";
+ static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate);
+ };
+
+ }} // namespace at::_ops
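(Illustrative note, not part of the diff: both variants declared above take the gradient flowing back, the forward input, and the `approximate` mode from the schema; a minimal sketch with a hypothetical helper name `gelu_backward_sketch`.)

    #include <ATen/ATen.h>

    void gelu_backward_sketch() {
      at::Tensor x = at::randn({3, 3});
      at::Tensor grad_out = at::ones_like(x);
      // gelu_backward: functional variant, exact ("none") approximation.
      at::Tensor grad_in = at::_ops::gelu_backward::call(grad_out, x, "none");
      // gelu_backward.grad_input: same computation written into a preallocated buffer,
      // here with the "tanh" approximation.
      at::Tensor buf = at::empty_like(x);
      at::_ops::gelu_backward_grad_input::call(grad_out, x, "tanh", buf);
      (void)grad_in;
    }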
lib/python3.10/site-packages/torch/include/ATen/ops/hardshrink_backward_ops.h ADDED
@@ -0,0 +1,39 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Operator.h
+
+ #include <tuple>
+ #include <vector>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+ namespace _ops {
+
+
+ struct TORCH_API hardshrink_backward_grad_input {
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::hardshrink_backward";
+ static constexpr const char* overload_name = "grad_input";
+ static constexpr const char* schema_str = "hardshrink_backward.grad_input(Tensor grad_out, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)";
+ static at::Tensor & call(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd, at::Tensor & grad_input);
+ };
+
+ struct TORCH_API hardshrink_backward {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ static constexpr const char* name = "aten::hardshrink_backward";
+ static constexpr const char* overload_name = "";
+ static constexpr const char* schema_str = "hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor";
+ static at::Tensor call(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd);
+ };
+
+ }} // namespace at::_ops
lib/python3.10/site-packages/torch/include/ATen/ops/is_coalesced.h ADDED
@@ -0,0 +1,26 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from Function.h
+
+ #include <ATen/Context.h>
+ #include <ATen/DeviceGuard.h>
+ #include <ATen/TensorUtils.h>
+ #include <ATen/TracerMode.h>
+ #include <ATen/core/Generator.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+
+
+
+ #include <ATen/ops/is_coalesced_ops.h>
+
+ namespace at {
+
+
+
+ }
lib/python3.10/site-packages/torch/include/ATen/ops/is_same_size_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeexplicitautograd {
+
+ TORCH_API bool is_same_size(const at::Tensor & self, const at::Tensor & other);
+
+ } // namespace compositeexplicitautograd
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/isfinite_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,23 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace compositeimplicitautograd {
+
+ TORCH_API at::Tensor isfinite(const at::Tensor & self);
+
+ } // namespace compositeimplicitautograd
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace cuda {
+
+ TORCH_API at::Tensor isneginf(const at::Tensor & self);
+ TORCH_API at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self);
+ TORCH_API at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out);
+
+ } // namespace cuda
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/isposinf_meta_dispatch.h ADDED
@@ -0,0 +1,25 @@
+ #pragma once
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+ // The only #includes we need are for custom classes that have defaults in the C++ API
+ #include <c10/core/MemoryFormat.h>
+ #include <c10/core/Scalar.h>
+ #include <ATen/core/Reduction.h>
+
+ // Forward declarations of any types needed in the operator signatures.
+ // We can't directly include these classes because it will cause circular include dependencies.
+ // This file is included by TensorBody.h, which defines the Tensor class.
+ #include <ATen/core/ATen_fwd.h>
+
+ namespace at {
+
+ namespace meta {
+
+ TORCH_API at::Tensor isposinf(const at::Tensor & self);
+ TORCH_API at::Tensor & isposinf_out(at::Tensor & out, const at::Tensor & self);
+ TORCH_API at::Tensor & isposinf_outf(const at::Tensor & self, at::Tensor & out);
+
+ } // namespace meta
+ } // namespace at
lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm_native.h ADDED
@@ -0,0 +1,21 @@
+ #pragma once
+
+ // @generated by torchgen/gen.py from NativeFunction.h
+
+ #include <c10/core/Scalar.h>
+ #include <c10/core/Storage.h>
+ #include <c10/core/TensorOptions.h>
+ #include <c10/util/Deprecated.h>
+ #include <optional>
+ #include <c10/core/QScheme.h>
+ #include <ATen/core/Reduction.h>
+ #include <ATen/core/Tensor.h>
+ #include <tuple>
+ #include <vector>
+
+
+ namespace at {
+ namespace native {
+ TORCH_API at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const ::std::optional<at::Tensor> & weight={}, const ::std::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true);
+ } // namespace native
+ } // namespace at
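(Illustrative note, not part of the diff: a hedged sketch, assuming the usual torchgen wiring in which the layer_norm_symint signature declared above backs the public at::layer_norm wrapper; the shapes and the helper name `layer_norm_sketch` are illustrative.)

    #include <ATen/ATen.h>

    void layer_norm_sketch() {
      at::Tensor x = at::randn({2, 5, 16});   // (batch, seq, features)
      at::Tensor gamma = at::ones({16});
      at::Tensor beta = at::zeros({16});
      // Normalize over the trailing `normalized_shape` dims; eps matches the default above.
      at::Tensor y = at::layer_norm(x, /*normalized_shape=*/{16}, gamma, beta, /*eps=*/1e-05);
      (void)y;
    }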